hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
414c695418d552a7ff048b7160f0ca3db8c50e3f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
int index, int inputSize, int outputSize, int poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int>((index + sample) * alpha) -
static_cast<int>(sample * alpha);
}
}
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
} // anonymous namespace
TORCH_IMPL_FUNC(fractional_max_pool2d_out_cuda) (
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
hipLaunchKernelGGL(( fractional_max_pool2d_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
namespace {
void fractional_max_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices)
{
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
TORCH_CHECK(outputH == gradOutput.size(dimh),
"fractional_max_pool2d(): gradOutput height unexpected");
TORCH_CHECK(outputW == gradOutput.size(dimw),
"fractional_max_pool2d(): gradOutput width unexpected");
/* resize */
gradInput.resize_as_(input);
if (gradInput.numel() == 0) {
return;
}
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
auto devIndices = indices.packed_accessor<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
hipLaunchKernelGGL(( fractional_max_pool2d_backward_out_cuda_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
devGradInput, devGradOutput, devIndices);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
}// namespace
Tensor& fractional_max_pool2d_backward_out_cuda(const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices,
at::Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
Tensor fractional_max_pool2d_backward_cuda(
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
}// at::native
}// at
| 414c695418d552a7ff048b7160f0ca3db8c50e3f.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCNumerics.cuh>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
template <typename scalar_t, typename accscalar_t>
__device__ inline int get_interval(accscalar_t sample,
int index, int inputSize, int outputSize, int poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int>((index + sample) * alpha) -
static_cast<int>(sample * alpha);
}
}
template <typename scalar_t>
__global__ void fractional_max_pool2d_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
PackedTensorAccessor<scalar_t, 4> input,
PackedTensorAccessor<scalar_t, 3> samples,
int poolSizeH, int poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3)) {
int outputW = ourOutputPoint % output.size(3);
int outputH = ourOutputPoint / output.size(3);
int poolW = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputW, input.size(3), output.size(3), poolSizeW);
int poolH = get_interval<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(2), output.size(2), poolSizeH);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int maxIndex = poolH * input.size(3) + poolW;
for (int h = poolH; h < poolH + poolSizeH; ++h) {
if (poolSizeW < 2 || poolSizeW > 7) {
for (int w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
} else {
for (int i = 0; i < poolSizeW; ++i) {
int w = i + poolW;
scalar_t val = input[batch][plane][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) {
maxIndex = h * input.size(3) + w;
maxVal = val;
}
}
}
}
indices[batch][plane][outputH][outputW] = maxIndex;
output[batch][plane][outputH][outputW] = maxVal;
}
}
template <typename scalar_t>
__global__ void fractional_max_pool2d_backward_out_cuda_frame(
PackedTensorAccessor<scalar_t, 4> gradInput,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices) {
// Output (h, w) point that this thread is responsible for
int ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3)) {
int outputW = ourOutputPoint % gradOutput.size(3);
int outputH = ourOutputPoint / gradOutput.size(3);
int index = indices[batch][plane][outputH][outputW];
assert(index >= 0);
int inputW = index % gradInput.size(3);
int inputH = index / gradInput.size(3);
assert(inputH < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputH][inputW],
gradOutput[batch][plane][outputH][outputW]
);
}
}
} // anonymous namespace
TORCH_IMPL_FUNC(fractional_max_pool2d_out_cuda) (
const Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int planeDim = 0;
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
planeDim++;
dimh++;
dimw++;
}
/* sizes */
int numPlanes = input.size(planeDim);
int outputH = output_size[0];
int outputW = output_size[1];
int poolSizeH = pool_size[0];
int poolSizeW = pool_size[1];
auto output_ = output;
auto input_ = input;
auto indices_ = indices;
if(ndims == 3) {
output_ = output_.reshape({1, numPlanes, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputH, outputW});
input_ = input_.reshape({1, input.size(0), input.size(1), input.size(2)});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = output_.size(2) *
output_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"fractional_max_pool2d_out_cuda_frame",
[&] {
auto devInput = input_.packed_accessor<scalar_t, 4>();
auto devOutput = output_.packed_accessor<scalar_t, 4>();
auto devIndices = indices_.packed_accessor<int64_t, 4>();
auto devSamples = randomSamples.packed_accessor<scalar_t, 3>();
fractional_max_pool2d_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devOutput, devIndices, devInput, devSamples,
poolSizeH, poolSizeW);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
namespace {
void fractional_max_pool2d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef pool_size /* unused */,
IntArrayRef output_size,
const Tensor& indices)
{
int dimh = 1;
int dimw = 2;
int ndims = input.ndimension();
if (ndims == 4) {
dimh++;
dimw++;
}
/* sizes */
int inputH = input.size(dimh);
int inputW = input.size(dimw);
int outputH = output_size[0];
int outputW = output_size[1];
TORCH_CHECK(outputH == gradOutput.size(dimh),
"fractional_max_pool2d(): gradOutput height unexpected");
TORCH_CHECK(outputW == gradOutput.size(dimw),
"fractional_max_pool2d(): gradOutput width unexpected");
/* resize */
gradInput.resize_as_(input);
if (gradInput.numel() == 0) {
return;
}
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 3) {
gradInput_ = gradInput_.reshape({1, input.size(0), inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputH, outputW});
indices_ = indices_.reshape({1, indices_.size(0), outputH, outputW});
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3);
dim3 grid((outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
auto devIndices = indices.packed_accessor<int64_t, 4>();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gradOutput.scalar_type(),
"fractional_max_pool2d_backward_out_cuda_frame",
[&] {
auto devGradInput = gradInput_.packed_accessor<scalar_t, 4>();
auto devGradOutput = gradOutput_.packed_accessor<scalar_t, 4>();
fractional_max_pool2d_backward_out_cuda_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
devGradInput, devGradOutput, devIndices);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
}// namespace
Tensor& fractional_max_pool2d_backward_out_cuda(const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices,
at::Tensor& gradInput)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_out_cuda");
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
Tensor fractional_max_pool2d_backward_cuda(
const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices)
{
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool2d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool2d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
pool_size,
output_size,
indices);
return gradInput;
}
}// at::native
}// at
|
54a261f45ecb6bba83ed4fc653fc2508f9811f1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Function, typename Iterator3>
__global__
void transform_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Function f, Iterator3 result2)
{
*result2 = thrust::transform(thrust::seq, first, last, result1, f);
}
void TestTransformUnaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector output(3);
Vector result(3);
input[0] = 1; input[1] = -2; input[2] = 3;
result[0] = -1; result[1] = 2; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
hipLaunchKernelGGL(( transform_kernel), dim3(1),dim3(1), 0, 0, input.begin(), input.end(), output.begin(), thrust::negate<T>(), iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformUnaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Function, typename Predicate, typename Iterator3>
__global__
void transform_if_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Function f, Predicate pred, Iterator3 result2)
{
*result2 = thrust::transform_if(thrust::seq, first, last, result1, f, pred);
}
void TestTransformIfUnaryNoStencilDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector output(3);
Vector result(3);
input[0] = 0; input[1] = -2; input[2] = 0;
output[0] = -1; output[1] = -2; output[2] = -3;
result[0] = -1; result[1] = 2; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
hipLaunchKernelGGL(( transform_if_kernel), dim3(1),dim3(1), 0, 0, input.begin(), input.end(),
output.begin(),
thrust::negate<T>(),
thrust::identity<T>(),
iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfUnaryNoStencilDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Function, typename Predicate, typename Iterator4>
__global__
void transform_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result1, Function f, Predicate pred, Iterator4 result2)
{
*result2 = thrust::transform_if(thrust::seq, first, last, stencil_first, result1, f, pred);
}
void TestTransformIfUnaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector stencil(3);
Vector output(3);
Vector result(3);
input[0] = 1; input[1] = -2; input[2] = 3;
output[0] = 1; output[1] = 2; output[2] = 3;
stencil[0] = 1; stencil[1] = 0; stencil[2] = 1;
result[0] = -1; result[1] = 2; result[2] = -3;
iter = thrust::transform_if(input.begin(), input.end(),
stencil.begin(),
output.begin(),
thrust::negate<T>(),
thrust::identity<T>());
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfUnaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Function, typename Iterator4>
__global__
void transform_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 result1, Function f, Iterator4 result2)
{
*result2 = thrust::transform(thrust::seq, first1, last1, first2, result1, f);
}
void TestTransformBinaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input1(3);
Vector input2(3);
Vector output(3);
Vector result(3);
input1[0] = 1; input1[1] = -2; input1[2] = 3;
input2[0] = -4; input2[1] = 5; input2[2] = 6;
result[0] = 5; result[1] = -7; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
hipLaunchKernelGGL(( transform_kernel), dim3(1),dim3(1), 0, 0, input1.begin(), input1.end(), input2.begin(), output.begin(), thrust::minus<T>(), iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input1.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformBinaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function, typename Predicate, typename Iterator5>
__global__
void transform_if_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 stencil_first, Iterator4 result1, Function f, Predicate pred, Iterator5 result2)
{
*result2 = thrust::transform_if(thrust::seq, first1, last1, first2, stencil_first, result1, f, pred);
}
void TestTransformIfBinaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input1(3);
Vector input2(3);
Vector stencil(3);
Vector output(3);
Vector result(3);
input1[0] = 1; input1[1] = -2; input1[2] = 3;
input2[0] = -4; input2[1] = 5; input2[2] = 6;
stencil[0] = 0; stencil[1] = 1; stencil[2] = 0;
output[0] = 1; output[1] = 2; output[2] = 3;
result[0] = 5; result[1] = 2; result[2] = -3;
thrust::identity<T> identity;
iter = thrust::transform_if(input1.begin(), input1.end(),
input2.begin(),
stencil.begin(),
output.begin(),
thrust::minus<T>(),
thrust::not1(identity));
ASSERT_EQUAL(iter - output.begin(), input1.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfBinaryDeviceSeq);
| 54a261f45ecb6bba83ed4fc653fc2508f9811f1d.cu | #include <unittest/unittest.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
template<typename Iterator1, typename Iterator2, typename Function, typename Iterator3>
__global__
void transform_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Function f, Iterator3 result2)
{
*result2 = thrust::transform(thrust::seq, first, last, result1, f);
}
void TestTransformUnaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector output(3);
Vector result(3);
input[0] = 1; input[1] = -2; input[2] = 3;
result[0] = -1; result[1] = 2; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
transform_kernel<<<1,1>>>(input.begin(), input.end(), output.begin(), thrust::negate<T>(), iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformUnaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Function, typename Predicate, typename Iterator3>
__global__
void transform_if_kernel(Iterator1 first, Iterator1 last, Iterator2 result1, Function f, Predicate pred, Iterator3 result2)
{
*result2 = thrust::transform_if(thrust::seq, first, last, result1, f, pred);
}
void TestTransformIfUnaryNoStencilDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector output(3);
Vector result(3);
input[0] = 0; input[1] = -2; input[2] = 0;
output[0] = -1; output[1] = -2; output[2] = -3;
result[0] = -1; result[1] = 2; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
transform_if_kernel<<<1,1>>>(input.begin(), input.end(),
output.begin(),
thrust::negate<T>(),
thrust::identity<T>(),
iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfUnaryNoStencilDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Function, typename Predicate, typename Iterator4>
__global__
void transform_if_kernel(Iterator1 first, Iterator1 last, Iterator2 stencil_first, Iterator3 result1, Function f, Predicate pred, Iterator4 result2)
{
*result2 = thrust::transform_if(thrust::seq, first, last, stencil_first, result1, f, pred);
}
void TestTransformIfUnaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input(3);
Vector stencil(3);
Vector output(3);
Vector result(3);
input[0] = 1; input[1] = -2; input[2] = 3;
output[0] = 1; output[1] = 2; output[2] = 3;
stencil[0] = 1; stencil[1] = 0; stencil[2] = 1;
result[0] = -1; result[1] = 2; result[2] = -3;
iter = thrust::transform_if(input.begin(), input.end(),
stencil.begin(),
output.begin(),
thrust::negate<T>(),
thrust::identity<T>());
ASSERT_EQUAL(iter - output.begin(), input.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfUnaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Function, typename Iterator4>
__global__
void transform_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 result1, Function f, Iterator4 result2)
{
*result2 = thrust::transform(thrust::seq, first1, last1, first2, result1, f);
}
void TestTransformBinaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input1(3);
Vector input2(3);
Vector output(3);
Vector result(3);
input1[0] = 1; input1[1] = -2; input1[2] = 3;
input2[0] = -4; input2[1] = 5; input2[2] = 6;
result[0] = 5; result[1] = -7; result[2] = -3;
thrust::device_vector<typename Vector::iterator> iter_vec(1);
transform_kernel<<<1,1>>>(input1.begin(), input1.end(), input2.begin(), output.begin(), thrust::minus<T>(), iter_vec.begin());
iter = iter_vec[0];
ASSERT_EQUAL(iter - output.begin(), input1.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformBinaryDeviceSeq);
template<typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Function, typename Predicate, typename Iterator5>
__global__
void transform_if_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 stencil_first, Iterator4 result1, Function f, Predicate pred, Iterator5 result2)
{
*result2 = thrust::transform_if(thrust::seq, first1, last1, first2, stencil_first, result1, f, pred);
}
void TestTransformIfBinaryDeviceSeq()
{
typedef thrust::device_vector<int> Vector;
typedef typename Vector::value_type T;
typename Vector::iterator iter;
Vector input1(3);
Vector input2(3);
Vector stencil(3);
Vector output(3);
Vector result(3);
input1[0] = 1; input1[1] = -2; input1[2] = 3;
input2[0] = -4; input2[1] = 5; input2[2] = 6;
stencil[0] = 0; stencil[1] = 1; stencil[2] = 0;
output[0] = 1; output[1] = 2; output[2] = 3;
result[0] = 5; result[1] = 2; result[2] = -3;
thrust::identity<T> identity;
iter = thrust::transform_if(input1.begin(), input1.end(),
input2.begin(),
stencil.begin(),
output.begin(),
thrust::minus<T>(),
thrust::not1(identity));
ASSERT_EQUAL(iter - output.begin(), input1.size());
ASSERT_EQUAL(output, result);
}
DECLARE_UNITTEST(TestTransformIfBinaryDeviceSeq);
|
49b0003fa68506f42cd8b1e04009952f57443c8f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <rocblas.h>
#include <util.cuh>
#include <basicOps.cuh>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <util.cuh>
#include <clusterNet.h>
#include <time.h>
#include <batchAllocator.h>
#include <DeepNeuralNetwork.h>
#include <WikiMaxoutNet.h>
#include <WikiMaxoutNet_PCIe.h>
#include <WikiMaxoutNet_PCIe2.h>
#include <WikiNetDist.h>
#include <Layer.h>
using std::cout;
using std::endl;
void run_neural_network()
{
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpu = ClusterNet(12345);
cout << X->rows << endl;
int hidden_size = 1024;
Matrix *w1 = gpu.sparseInitWeight(784,hidden_size);
Matrix *w2 = gpu.sparseInitWeight(hidden_size,10);
Matrix *m1 = zeros(784,hidden_size);
Matrix *m2 = zeros(hidden_size,10);
Matrix *ms1 = zeros(784,hidden_size);
Matrix *ms2 = zeros(hidden_size,10);
Matrix *grad_w1_ms = zeros(784,hidden_size);
Matrix *grad_w2_ms = zeros(hidden_size,10);
Matrix *grad_w2 = empty(hidden_size,10);
Matrix *grad_w1 = empty(784,hidden_size);
float cv_error = 0;
float cv_size = 0.1428571f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, cv_size, 128, 512);
clock_t t1,t2;
t1=clock();
//code goes here
int epochs = 100;
gpu.tick();
float learning_rate = 0.003;
//size_t free = 0;
//size_t total = 0;
float momentum = 0.5;
for(int EPOCH = 0; EPOCH < epochs; EPOCH++)
{
std::cout << "EPOCH: " << EPOCH + 1 << std::endl;
//hipMemGetInfo(&free, &total);
//std::cout << free << std::endl;
momentum += 0.01;
if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.allocate_next_batch_async();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
add(w1,m1,w1);
add(w2,m2,w2);
Matrix *d0 = gpu.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpu.dot(d0, w1);
logistic(z1, z1);
Matrix *d1 = gpu.dropout(z1,0.5);
Matrix *a2 = gpu.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2 = gpu.dotT(e1, w2);
gpu.Tdot(z1,e1,grad_w2);
logisticGrad(z1,z1);
mul(e2,z1,e2);
gpu.Tdot(b.CURRENT_BATCH,e2,grad_w1);
b.allocate_next_batch_async();
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
hipFree(e1->data);
hipFree(e2->data);
hipFree(z1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(t->data);
hipFree(d0->data);
hipFree(d1->data);
b.replace_current_batch_with_next();
}
//Matrix *sum_value = sum(w1);
//std::cout << "weight 1 Sum: " << to_host(sum_value)->data[0] << std::endl;
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *a1 = gpu.dot(b.CURRENT_BATCH,w1);
logistic(a1, a1);
//Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpu.dot(a1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
hipFree(a1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
//hipFree(d0->data);
//hipFree(d1->data);
b.replace_current_batch_with_next();
}
std::cout << "Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *a1 = gpu.dot(d0,w1);
logistic(a1, a1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpu.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_cv_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
hipFree(a1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
hipFree(d0->data);
hipFree(d1->data);
b.replace_current_cv_batch_with_next();
}
std::cout << "Cross validation error: " << cv_error << std::endl;
}
hipDeviceSynchronize();
t2=clock();
float diff ((float)t2-(float)t1);
float mseconds = (diff / CLOCKS_PER_SEC)/1000;
std::cout<<mseconds<<std::endl;
gpu.tock();
b.finish_batch_allocator();
//gpu.tock("batch replace");
//gpu.tock("async batch allocate");
//gpu.tock("feedforward");
printf("Finished!\n");
}
void run_maxout_network()
{
hipSetDevice(0);
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpus = ClusterNet(12345);
int hiddenunits = 512;
int maxout_Size = 8;
int batch_size = 128;
Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits);
Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10);
Matrix *b1 = zeros(1,hiddenunits);
Matrix *b2 = zeros(1,10);
Matrix *m1 = zeros(784,hiddenunits);
Matrix *m2 = zeros(hiddenunits/maxout_Size,10);
Matrix *mb1 = zeros(1,hiddenunits);
Matrix *mb2 = zeros(1,10);
Matrix *ms1 = zeros(784,hiddenunits);
Matrix *ms2 = zeros(hiddenunits/maxout_Size,10);
Matrix *msb1 = zeros(1,hiddenunits);
Matrix *msb2 = zeros(1,10);
Matrix *grad_w1 = zeros(784,hiddenunits);
Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10);
Matrix *grad_b1 = zeros(1,hiddenunits);
Matrix *grad_b2 = zeros(1,10);
float cv_error = 0.0f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, 0.2, batch_size, 512);
int epochs = 1000;
float learning_rate = 0.001;
float momentum = 0.5;
for(int EPOCH = 1; EPOCH < epochs; EPOCH++)
{
cout << "EPOCH: " << EPOCH << endl;
//momentum += 0.01;
//if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
//feedforward
Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpus.dot(d0, w1);
addMatrixVector(z1,b1,z1);
Matrix **a_paired = maxout(z1,maxout_Size);
Matrix *a1 = a_paired[0];
Matrix *a1_idx = a_paired[1];
Matrix *d1 = gpus.dropout(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
addMatrixVector(a2,b2,a2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
b.allocate_next_batch_async();
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2_partial = gpus.dotT(e1, w2);
Matrix *e2 = empty(b.CURRENT_BATCH->rows,e2_partial->cols*maxout_Size);
Matrix *aB = ones(1,b.CURRENT_BATCH->rows);
gpus.Tdot(a1,e1,grad_w2);
gpus.dot(aB,e1,grad_b2);
expand_to_maxout_grad(e2_partial, a1_idx,e2);
gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1);
gpus.dot(aB,e2,grad_b1);
//weight updates
//RMSProp
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
/*
scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1);
scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2);
scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1);
scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2);
//classical momentum
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
sub(m1,grad_w1,m1);
sub(m2,grad_w2,m2);
sub(mb1,grad_b1,mb1);
sub(mb2,grad_b2,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
*/
/*
sub(w1,grad_w1,w1);
sub(w2,grad_w2,w2);
sub(b1,grad_b1,b1);
sub(b2,grad_b2,b2);
*/
hipFree(e1->data);
hipFree(e2->data);
hipFree(e2_partial->data);
hipFree(z1->data);
hipFree(a1->data);
hipFree(a1_idx->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(t->data);
hipFree(d0->data);
hipFree(d1->data);
hipFree(aB->data);
free(a_paired);
b.replace_current_batch_with_next();
}
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix **a1_pair = maxout(z1,maxout_Size);
Matrix *a1 = a1_pair[0];
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
hipFree(z1->data);
hipFree(a1->data);
hipFree(a1_pair[1]->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
hipFree(d0->data);
hipFree(d1->data);
free(a1_pair);
b.replace_current_batch_with_next();
}
std::cout << "MAXOUT Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix **a1_pair = maxout(z1,maxout_Size);
Matrix *a1 = a1_pair[0];
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
hipFree(z1->data);
hipFree(a1->data);
hipFree(a1_pair[1]->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
hipFree(d0->data);
hipFree(d1->data);
free(a1_pair);
b.replace_current_cv_batch_with_next();
}
std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl;
}
}
void run_normal_net()
{
hipSetDevice(2);
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpus = ClusterNet(12345);
int hiddenunits = 1024;
int maxout_Size = 1;
int batch_size = 128;
Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits);
Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10);
Matrix *b1 = zeros(1,hiddenunits);
Matrix *b2 = zeros(1,10);
Matrix *m1 = zeros(784,hiddenunits);
Matrix *m2 = zeros(hiddenunits/maxout_Size,10);
Matrix *mb1 = zeros(1,hiddenunits);
Matrix *mb2 = zeros(1,10);
Matrix *ms1 = zeros(784,hiddenunits);
Matrix *ms2 = zeros(hiddenunits/maxout_Size,10);
Matrix *msb1 = zeros(1,hiddenunits);
Matrix *msb2 = zeros(1,10);
Matrix *grad_w1 = zeros(784,hiddenunits);
Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10);
Matrix *grad_b1 = zeros(1,hiddenunits);
Matrix *grad_b2 = zeros(1,10);
float cv_error = 0.0f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, 0.4, batch_size, 512);
int epochs = 500;
float learning_rate = 0.000001;
float momentum = 0.5;
for(int EPOCH = 1; EPOCH < epochs; EPOCH++)
{
cout << "EPOCH: " << EPOCH << endl;
momentum += 0.01;
if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
//feedforward
Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpus.dot(d0, w1);
addMatrixVector(z1,b1,z1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = gpus.dropout(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
addMatrixVector(a2,b2,a2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
b.allocate_next_batch_async();
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2 = gpus.dotT(e1, w2);
Matrix *aB = ones(1,b.CURRENT_BATCH->rows);
gpus.Tdot(a1,e1,grad_w2);
gpus.dot(aB,e1,grad_b2);
//rectified_linear_derivative(a1,a1);
logisticGrad(a1,a1);
mul(e2,a1,e2);
gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1);
gpus.dot(aB,e2,grad_b1);
/*
//about equal to momentum update + nesterov update -> momentum applyied to gradient+momentum better?
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
/*
//slow and generally worse error, but sometimes better results in the end
RMSprop_with_momentum_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
RMSprop_with_nesterov_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
/*
//slower but equally good to nesterov momentum
RMSprop_with_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
/*
scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1);
scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2);
scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1);
scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2);
//classical momentum
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
sub(m1,grad_w1,m1);
sub(m2,grad_w2,m2);
sub(mb1,grad_b1,mb1);
sub(mb2,grad_b2,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
*/
/*
sub(w1,grad_w1,w1);
sub(w2,grad_w2,w2);
sub(b1,grad_b1,b1);
sub(b2,grad_b2,b2);
*/
hipFree(e1->data);
hipFree(e2->data);
hipFree(z1->data);
hipFree(a1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(t->data);
hipFree(d0->data);
hipFree(d1->data);
hipFree(aB->data);
b.replace_current_batch_with_next();
}
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
hipFree(z1->data);
hipFree(a1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
hipFree(d0->data);
hipFree(d1->data);
b.replace_current_batch_with_next();
}
std::cout << "MAXOUT Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
hipFree(z1->data);
hipFree(a1->data);
hipFree(a2->data);
hipFree(out->data);
hipFree(result->data);
hipFree(eq->data);
hipFree(d0->data);
hipFree(d1->data);
b.replace_current_cv_batch_with_next();
}
std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl;
}
}
void MPI_benchmark_P2P(int argc, char *argv[])
{
char name[100];
int myrank, length, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Get_processor_name(name, &length);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Status status;
int local_rank = myrank % 4;
int gpus;
hipGetDeviceCount(&gpus);
int mygpu_id;
int your_gpu_id;
if(myrank == 0)
{
mygpu_id = 0;
if(gpus > 1)
your_gpu_id = 1;
else
your_gpu_id = 0;
MPI_Send(&your_gpu_id,1, MPI_INT,1,0,MPI_COMM_WORLD);
}
else
{
MPI_Recv(&mygpu_id,1,MPI_INT,myrank-1,0,MPI_COMM_WORLD,&status);
if(gpus > mygpu_id+1)
your_gpu_id = mygpu_id + 1;
else
your_gpu_id = 0;
if(myrank < size-1)
MPI_Send(&your_gpu_id,1, MPI_INT,myrank+1,0,MPI_COMM_WORLD);
}
hipSetDevice(mygpu_id);
int batch_size = 128;
int inner_dim = 10000;
int outer_dim = 15000;
ClusterNet gpu = ClusterNet();
Matrix *A = gpu.rand(batch_size,inner_dim);
Matrix *B = gpu.rand(inner_dim,outer_dim);
Matrix *out = empty(batch_size,outer_dim);
Matrix *rec = empty(batch_size,outer_dim);
Matrix *A1 = gpu.rand(batch_size/2,inner_dim);
Matrix *B1 = gpu.rand(inner_dim,outer_dim);
Matrix *rec1 = empty(batch_size/2,outer_dim);
Matrix *out1 = empty(batch_size/2,outer_dim);
Matrix *A2 = gpu.rand(batch_size,inner_dim);
Matrix *B2 = gpu.rand(inner_dim,outer_dim/2);
Matrix *rec2 = empty(batch_size,outer_dim/2);
Matrix *out2 = empty(batch_size,outer_dim/2);
gpu.tick("Direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B, out);
//add(A, B, out);
}
gpu.tock("Direct compute");
gpu.tick("partial batch direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A1,B1, out1);
//add(A, B, out);
}
gpu.tock("partial batch direct compute");
gpu.tick("partial units direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A2,B2, out2);
//add(A, B, out);
}
gpu.tock("partial units direct compute");
gpu.tick("PCIe transfer");
for(int i = 0; i< 100; i++)
{
if(local_rank == 0 && gpus > 1)
{
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else if(local_rank == 1 && gpus > 1)
{
//add(A2,B, out);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
}
}
gpu.tock("PCIe transfer");
gpu.tick("PCIe dot");
for(int i = 0; i< 100; i++)
{
if(local_rank == 0 && gpus > 1)
{
gpu.dot(A2,B2,out2);
MPI_Send(out1->data, out1->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else if(local_rank == 1 && gpus > 1)
{
gpu.dot(A2,B2,out2);
MPI_Recv(rec1->data, rec1->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out2,rec2,rec);
}
}
gpu.tock("PCIe dot");
gpu.tick("RDMA transfer");
for(int i = 0; i< 100; i++)
{
if(myrank == 0)
{
MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD);
}
else if(myrank == 3)
{
//add(A2,B, out);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
}
}
gpu.tock("RDMA transfer");
gpu.tick("RDMA dot");
for(int i = 0; i< 100; i++)
{
if(myrank == 0)
{
gpu.dot(A2,B2,out2);
MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD);
}
else if(myrank == 3)
{
//add(A2,B, out);
gpu.dot(A2,B2,out2);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out2,rec2,rec);
}
}
gpu.tock("RDMA dot");
MPI_Finalize();
}
void MPI_benchmark(int argc, char *argv[])
{
int myrank;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
ClusterNet gpu = ClusterNet();
int batch_rows = 128;
int w_in = 10000;
int w_out = 8000;
//dot
Matrix *B = gpu.rand(w_in,w_out);
Matrix *A = gpu.rand(batch_rows,w_in);
assert(test_matrix(A,batch_rows,w_in));
assert(test_matrix(B,w_in,w_out));
Matrix *out = empty(batch_rows, w_out);
Matrix *B1 = gpu.rand(w_in,w_out/2);
Matrix *B2 = gpu.rand(w_in,w_out/2);
Matrix *D = empty(batch_rows,w_out/2);
Matrix *A1 = gpu.rand(batch_rows/2,w_in);
Matrix *big_out = gpu.rand(batch_rows/2,w_out);
Matrix *grand_out = empty(batch_rows, w_out);
Matrix *C = gpu.rand(batch_rows/2,w_in);
Matrix *C_out = empty(batch_rows/2,w_out);
Matrix *E = gpu.rand(batch_rows/4,w_in);
Matrix *E_out = empty(batch_rows/4,w_out);
Matrix *E_merge = empty(batch_rows/2,w_out);
Matrix *E_merge2 = empty(batch_rows/2,w_out);
//add
/*
B = gpu.rand(w_in,w_out);
A = gpu.rand(w_in,w_out);
out = empty(w_in, w_out);
A1 = gpu.rand(w_in/2,w_out);
Matrix *A2 = gpu.rand(w_in/2,w_out);
D = empty(w_in/2,w_out);
*/
hipEvent_t* startstop = tick();
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B, out);
//add(A, B, out);
}
printf("Direct compute:\n");
tock(startstop);
out = empty(batch_rows,w_out/2);
Matrix *out2 = empty(batch_rows,w_out/2);
startstop = tick();
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B1, out);
gpu.dot(A,B2, out2);
vStack(out,out2,grand_out);
}
printf("Direct compute x2:\n");
tock(startstop);
Matrix *mergemat = empty(batch_rows, w_out);
out = empty(batch_rows,w_out/2);
startstop = tick();
//out = empty(w_in/2,w_out);
for(int i = 0; i < 100; i++)
{
if(myrank == 0)
{
gpu.dot(A,B1, out);
//add(A1, B,out);
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else
{
gpu.dot(A,B2, out);
//add(A2,B, out);
MPI_Recv(D->data, D->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out,D, mergemat);
}
}
if(myrank == 1)
{
printf("GPUDirect RDMA:\n");
tock(startstop);
}
out = empty(batch_rows/2,w_out);
startstop = tick();
gpu.tick("aa");
//out = empty(w_in/2,w_out);
for(int i = 0; i < 100; i++)
{
gpu.tick("dot");
gpu.dot(C,B, out);
gpu.tick("dot");
if(myrank == 0)
{
//add(A1, B,out);
gpu.tick("send");
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
gpu.tick("send");
}
else
{
//add(A2,B, out);
gpu.tick("receive");
MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out,C_out, grand_out);
gpu.tick("receive");
}
if(myrank == 1)
{
//add(A1, B,out);
gpu.tick("send");
MPI_Send(out->data, out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD);
gpu.tick("send");
}
else
{
//add(A2,B, out);
gpu.tick("receive");
MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD, &status);
vStack(out,C_out, grand_out);
gpu.tick("receive");
}
}
gpu.tock("dot");
if(myrank == 1)
{
printf("GPUDirect RDMA batch:\n");
tock(startstop);
gpu.tock("receive");
gpu.tock("aa");
}
else
{
gpu.tock("send");
}
MPI_Finalize();
}
void dotMPI_test(int argc, char *argv[])
{
/*
ClusterNet gpu = ClusterNet(argc, argv, 123465);
int inner = 2000;
int outer = 1200;
int batch_size = 128;
int reduced_left = 128;
int reduced_right = 400;
Matrix *A = gpu.rand(batch_size,inner);
Matrix *B = gpu.rand(inner,outer);
Matrix *A1 = gpu.rand(reduced_left,inner);
Matrix *B1 = gpu.rand(inner,reduced_right);
Matrix *out = empty(batch_size,outer);
Matrix *out1 = empty(reduced_left,reduced_right);
Matrix *recv1 = empty(reduced_left,reduced_right);
Matrix *recv2 = empty(reduced_left,reduced_right);
Matrix *recv3 = empty(reduced_left,reduced_right);
MPI_Status status;
gpu.tick("dot mpi batch");
for(int i = 0; i < 100; i++)
{
gpu.dotMPI_batchSlice(A,B);
}
gpu.tock("dot mpi batch");
gpu.tick("dot mpi unit");
for(int i = 0; i < 100; i++)
{
gpu.dotMPI_unitSlice(A,B);
}
gpu.tock("dot mpi unit");
printf("My rank: %i\n",gpu.MYRANK);
//gpu.benchmark_dot();
gpu.tick("dot normal");
for(int i = 0; i < 100; i++)
{
gpu.dot(A,B,out);
}
gpu.tock("dot normal");
//std::vector<MPI_Request> requests;
MPI_Request *requests = (MPI_Request*)malloc(sizeof(MPI_Request)*gpu.MPI_SIZE-1);
MPI_Request request_send;
std::vector<Matrix*> recv_buffer;
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
MPI_Request request;
requests[i] = request;
}
int received_count = 0;
for(int i = 0; i < 100; i++)
{
for(int i = 0; i < recv_buffer.size(); i++)
hipFree(recv_buffer[i]->data);
recv_buffer.clear();
out1 = empty(reduced_left,reduced_right);
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
recv_buffer.push_back(empty(reduced_left,reduced_right));
}
gpu.tick("all to all custom");
//cout << "a1 rows" << A1->rows << endl;
gpu.dot(A1,B1,out1);
recv_buffer[gpu.MYRANK]= out1;
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Isend(out1->data, out1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &request_send);
}
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Irecv(recv1->data, recv1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &requests[i]);
}
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Wait(&requests[i],MPI_STATUS_IGNORE);
}
received_count = 0;
while(received_count < gpu.MPI_SIZE-1)
{
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
int received = 0;
if(gpu.MYRANK == i) { continue; }
MPI_Test(&requests[i],&received,&status);
if(received == 1)
{
out1 = hStack(out1,recv1);
received_count++;
}
}
}
gpu.tick("all to all custom");
}
gpu.tock("all to all custom");
int destination = gpu.MYRANK + 1;
int source = gpu.MYRANK - 1;
if(destination == gpu.MPI_SIZE){destination = 0; }
if(source < 0){ source = gpu.MPI_SIZE - 1;}
for(int i = 0; i < 100; i++)
{
out1 = empty(reduced_left,reduced_right);
recv1 = empty(reduced_left,reduced_right);
gpu.tick("chain custom");
gpu.dot(A1,B1,out1);
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
if(i == 0)
MPI_Isend(out1->data, out1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
else
MPI_Isend(recv1->data, recv1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
MPI_Recv(recv1->data, recv1->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD, &status);
//MPI_Wait(&requests[i],&status);
out1 = hStack(out1,recv1);
}
gpu.tick("chain custom");
}
gpu.tock("chain custom");
cout << gpu.MYRANK << endl;
int matrix_idx = gpu.MYRANK;
Matrix** arrOut = (Matrix**)malloc(sizeof(Matrix*)*gpu.MPI_SIZE);
for(int i = 0; i < gpu.MPI_SIZE; i++)
arrOut[i] = empty(reduced_left,reduced_right);
float **h_arrA = (float**)malloc(sizeof(float*)*gpu.MPI_SIZE);
for(int i = 0; i < gpu.MPI_SIZE; i++)
h_arrA[i] = arrOut[i]->data;
float **d_arrA;
hipMalloc((void**) &d_arrA,sizeof(float*)*gpu.MPI_SIZE);
hipMemcpy(d_arrA,h_arrA,sizeof(float*)*gpu.MPI_SIZE,hipMemcpyDefault);
gpu.tick("chain matrix array");
for(int i = 0; i < 100; i++)
{
gpu.dot(A1,B1,arrOut[gpu.MYRANK]);
matrix_idx = gpu.MYRANK;
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
MPI_Isend(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
matrix_idx = (matrix_idx - 1) < 0 ? gpu.MPI_SIZE-1 : (matrix_idx - 1);
MPI_Irecv(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD,&requests[i]);
}
MPI_Waitall(gpu.MPI_SIZE-1,requests,MPI_STATUSES_IGNORE);
//hStackN(d_arrA,arrOut[0]->size, out,gpu.MPI_SIZE);
}
gpu.tock("chain matrix array");
gpu.shutdown();
*/
}
void async_test(int argc, char *argv[])
{
ClusterNet gpu = ClusterNet(argc,argv,1324);
int rows = 512;
int cols = 128;
/*
MPI_Request r = MPI_REQUEST_NULL;
MPI_Request s = MPI_REQUEST_NULL;
Matrix *a = gpu.rand(rows,cols);
Matrix *b = zeros(rows,cols);
if(gpu.MYRANK == 0)
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s);
}
else
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s);
}
MPI_Wait(&s,MPI_STATUS_IGNORE);
MPI_Wait(&r,MPI_STATUS_IGNORE);
gpu.tick("MPI");
for(int i = 0; i < 100; i++)
{
if(gpu.MYRANK == 0)
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s);
}
else
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s);
}
MPI_Wait(&s,MPI_STATUS_IGNORE);
MPI_Wait(&r,MPI_STATUS_IGNORE);
}
gpu.tock("MPI");
*/
if(gpu.MYRANK == 0)
{
hipSetDevice(0);
//hipDeviceEnablePeerAccess(1,0);
hipDeviceDisablePeerAccess(1);
Matrix *A1 = gpu.rand(rows,cols);
Matrix *A2 = gpu.rand(rows,cols);
hipSetDevice(1);
//hipDeviceEnablePeerAccess(0,0);
hipDeviceDisablePeerAccess(0);
Matrix *B1 = gpu.rand(rows,cols);
Matrix *B2 = gpu.rand(rows,cols);
hipSetDevice(0);
hipStream_t s;
hipStreamCreate(&s);
hipSetDevice(1);
hipStream_t s2;
hipStreamCreate(&s2);
hipSetDevice(0);
int access = 0;
hipDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
hipDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
hipSetDevice(0);
gpu.tick("cuda");
for(int i = 0; i < 100; i++)
{
hipMemcpyPeerAsync(B2->data,1,A2->data,0,A2->bytes,s);
hipSetDevice(1);
hipMemcpyPeerAsync(A1->data,0,B1->data,1,B1->bytes,s2);
hipSetDevice(0);
hipStreamSynchronize(s);
hipSetDevice(1);
hipStreamSynchronize(s2);
hipSetDevice(0);
}
gpu.tock("cuda");
}
MPI_Barrier(MPI_COMM_WORLD);
gpu.shutdown_MPI();
}
struct arg_struct
{
ClusterNet *gpus;
WikiMaxoutNet *net;
int device;
};
void *run_net(void * args)
{
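// pthread entry point: selects the GPU requested in arg_struct and runs the
// WikiMaxoutNet instance on that device.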
struct arg_struct *_args = (struct arg_struct*)args;
cout << "device: " << _args->device << endl;
hipSetDevice(_args->device);
_args->net->run();
return 0;
}
void *print_message(void*)
{
ClusterNet gpu = ClusterNet(124345);
WikiMaxoutNet net = WikiMaxoutNet(gpu);
net.run();
return 0;
}
void bandwidth_test_MPI(int argc, char *argv[])
{
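// Measures MPI ring bandwidth: each rank sends a square float matrix to its
// right neighbour and receives from its left neighbour, for growing matrix
// sizes (128*epoch per side), then reports the achieved GB/s.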
ClusterNet *gpu = new ClusterNet(argc,argv,1235,true);
std::vector<MPI_Request*> sends;
std::vector<MPI_Request*> recvs;
std::vector<Matrix*> lSync;
std::vector<Matrix*> lData;
int packages = 1;
float time = 0;
for(int epoch = 1; epoch < 20; epoch++)
{
if(lData.size() > 0)
{
for(int i = 0; i < packages; i++)
{
hipFree(lSync[i]->data);
hipFree(lData[i]->data);
}
lSync.clear();
lData.clear();
}
for(int i = 0; i < packages; i++)
{
lSync.push_back(zeros(128*epoch,128*epoch));
lData.push_back(gpu->rand(128*epoch,128*epoch));
}
for(int j = 0; j < packages; j++)
{
MPI_Request *send_request = new MPI_Request;
MPI_Request *recv_request = new MPI_Request;
sends.push_back(send_request);
recvs.push_back(recv_request);
int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1;
int source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1;
gpu->tick();
for (int i = 0; i < gpu->MPI_SIZE -1; i++)
{
//MPI_Irecv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,999,MPI_COMM_WORLD,recv_request);
//MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,999,MPI_COMM_WORLD,send_request);
//MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD,send_request);
if(i == gpu->MYRANK)
{
MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD);
MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else
{
MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD);
}
}
}
/*
gpu->tick();
for(int i = 0; i < packages; i++)
{
MPI_Wait(sends[i],MPI_STATUS_IGNORE);
MPI_Wait(recvs[i],MPI_STATUS_IGNORE);
}
*/
time = gpu->tock();
for(int i = 0; i < packages; i++)
assert(sum(lData[i]) == sum(lSync[i]));
printdim(lData[0]);
cout << 1000*2*packages*lData[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
gpu->shutdown_MPI();
}
void bandwidth_test_peer()
{
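// Measures direct peer-to-peer copy bandwidth between device 0 and device 1
// using hipMemcpyAsync on per-package streams with peer access enabled, and
// verifies the transfers by comparing matrix sums.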
ClusterNet *gpu = new ClusterNet(1235);
std::vector<Matrix*> lSync0;
std::vector<Matrix*> lData0;
std::vector<Matrix*> lSync1;
std::vector<Matrix*> lData1;
std::vector<hipStream_t> s0s;
std::vector<hipStream_t> s1s;
int packages = 1;
float time = 0;
hipSetDevice(0);
hipDeviceEnablePeerAccess(1,0);
hipSetDevice(1);
hipDeviceEnablePeerAccess(0,0);
for(int i = 0; i < packages; i++)
{
hipStream_t s0;
hipStream_t s1;
hipSetDevice(0);
hipStreamCreate(&s0);
hipSetDevice(1);
hipStreamCreate(&s1);
s0s.push_back(s0);
s1s.push_back(s1);
}
hipSetDevice(0);
int access = 0;
hipDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
hipDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
for(int epoch = 199; epoch < 200; epoch++)
{
if(lSync0.size() > 0)
{
for(int i = 0; i < packages; i++)
{
hipFree(lSync0[i]->data);
hipFree(lData0[i]->data);
hipFree(lSync1[i]->data);
hipFree(lData1[i]->data);
}
lSync0.clear();
lData0.clear();
lSync1.clear();
lData1.clear();
}
for(int i = 0; i < packages; i++)
{
hipSetDevice(0);
lSync0.push_back(zeros(128*epoch,128*epoch));
lData0.push_back(gpu->rand(128*epoch,128*epoch));
hipSetDevice(1);
lSync1.push_back(zeros(128*epoch,128*epoch));
lData1.push_back(gpu->rand(128*epoch,128*epoch));
}
hipSetDevice(0);
gpu->tick();
for(int j = 0; j < packages; j++)
{
hipMemcpyAsync(lSync1[j]->data,lData0[j]->data,lData0[j]->bytes,hipMemcpyDefault, s0s[j]);
hipSetDevice(1);
hipMemcpyAsync(lSync0[j]->data,lData1[j]->data,lData1[j]->bytes,hipMemcpyDefault,s1s[j]);
hipSetDevice(0);
}
for(int i = 0; i < packages; i++)
{
hipStreamSynchronize(s0s[i]);
hipStreamSynchronize(s1s[i]);
}
time = gpu->tock();
for(int i = 0; i < packages; i++)
//cout << sum(lData0[i]) << " vs. " << sum(lSync1[i]) << endl;
assert(sum(lData0[i]) == sum(lSync1[i]));
for(int i = 0; i < packages; i++)
assert(sum(lData1[i]) == sum(lSync0[i]));
cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
}
void bandwidth_test_kernel()
{
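// Measures effective bandwidth when an add() kernel on one device reads an
// operand that lives on the other device through peer access (no explicit
// copies are issued).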
ClusterNet *gpu = new ClusterNet(1235);
std::vector<Matrix*> lSync0;
std::vector<Matrix*> lData0;
std::vector<Matrix*> lSync1;
std::vector<Matrix*> lData1;
std::vector<hipStream_t> s0s;
std::vector<hipStream_t> s1s;
int packages = 10;
float time = 0;
hipSetDevice(0);
hipDeviceEnablePeerAccess(1,0);
hipSetDevice(1);
hipDeviceEnablePeerAccess(0,0);
for(int i = 0; i < packages; i++)
{
hipStream_t s0;
hipStream_t s1;
hipSetDevice(0);
hipStreamCreate(&s0);
hipSetDevice(1);
hipStreamCreate(&s1);
s0s.push_back(s0);
s1s.push_back(s1);
}
hipSetDevice(0);
int access = 0;
hipDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
hipDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
for(int epoch = 1; epoch < 200; epoch++)
{
if(lSync0.size() > 0)
{
for(int i = 0; i < packages; i++)
{
hipFree(lSync0[i]->data);
hipFree(lData0[i]->data);
hipFree(lSync1[i]->data);
hipFree(lData1[i]->data);
}
lSync0.clear();
lData0.clear();
lSync1.clear();
lData1.clear();
}
for(int i = 0; i < packages; i++)
{
hipSetDevice(0);
lSync0.push_back(zeros(128*epoch,128*epoch));
lData0.push_back(gpu->rand(128*epoch,128*epoch));
hipSetDevice(1);
lSync1.push_back(zeros(128*epoch,128*epoch));
lData1.push_back(gpu->rand(128*epoch,128*epoch));
}
hipSetDevice(0);
gpu->tick();
for(int j = 0; j < packages; j++)
{
add(lSync0[j],lData1[j],lSync0[j]);
hipSetDevice(1);
add(lSync1[j],lData0[j],lSync1[j]);
hipSetDevice(0);
}
hipDeviceSynchronize();
hipSetDevice(1);
hipDeviceSynchronize();
hipSetDevice(0);
time = gpu->tock();
/*
for(int i = 0; i < packages; i++)
assert(sum(lData0[i]) == sum(lSync1[i]));
for(int i = 0; i < packages; i++)
assert(sum(lData1[i]) == sum(lSync0[i]));
*/
printdim(lSync0[0]);
cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
}
int main(int argc, char *argv[])
{
//bandwidth_test_peer();
//bandwidth_test_MPI(argc,argv);
//bandwidth_test_kernel();
//ClusterNet *gpu = new ClusterNet(234);
/*
Matrix *rdm = gpu->rand_numbers(10,10);
printmat(rdm);
*/
/*
ClusterNet *gpu = new ClusterNet(234);
int out_rows = 128;
int out_cols = 800;
int inner = 784;
Matrix *A = gpu->rand(out_rows,inner);
Matrix *B = gpu->rand(inner,out_cols);
Matrix *out1 = zeros(out_rows,out_cols);
Matrix *charA = empty_char(out_rows,inner);
Matrix *charB = empty_char(inner,out_cols);
Matrix *out2 = empty(out_rows,out_cols);
Matrix *out3 = empty(out_rows,out_cols);
gpu->tick();
for(int i = 0; i < 100; i++)
gpu->dot(A,B,out3);
gpu->tock();
float maxA = max(abs(A));
float maxB = max(abs(B));
gpu->compression_8bit(A,maxA,charA);
gpu->compression_8bit(B,maxB,charB);
//printmat(A);
//printmat(gpu->decompression_8bit(charA,maxA));
//printmat(B);
//printmat(gpu->decompression_8bit(charB,maxB));
//cout << sum(gpuSqrt(square(sub(B,gpu->decompression_8bit(charB,maxB)))))/(float)B->size << endl;
//cout << sum(gpuSqrt(square(sub(A,gpu->decompression_8bit(charA,maxA)))))/(float)B->size << endl;
//gpu->compression_8bit(A,maxA,charA);
//printmat(out1);
//printmat(out1,60,65,70,80);
gpu->tick();
for(int i = 0; i < 100; i++)
{
fill_matrix(out1,0.0f);
gpu->dot8bit(charA,charB,maxA,maxB,out1);
}
gpu->tock();
gpu->tick();
for(int i = 0; i < 100; i++)
gpu->dot8bit_shared(charA,charB,maxA,maxB,out2);
gpu->tock();
//printmat(gpu->decompression_8bit(charB,maxB));
//printmat(out1,60,65,70,80);
//printmat(out2,60,65,70,80);
//printmat(out1);
//printmat(out2);
//printsum(out1);
//printsum(out2);
cout << sum(gpuSqrt(square(sub(out1,out2))))/(float)out1->size << endl;
cout << sum(gpuSqrt(square(sub(out1,out3))))/(float)out1->size << endl;
cout << sum(gpuSqrt(square(sub(out2,out3))))/(float)out1->size << endl;
//cout << "max A " << maxA <<endl;
//cout << "max B " << maxB <<endl;
*/
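// Active experiment: distribute MNIST row-wise across the MPI ranks and train
// an input layer followed by two 2048-unit logistic layers and a 10-way softmax,
// with dropout and per-epoch learning-rate decay, via the Layer/BatchAllocator API.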
ClusterNet *gpu = new ClusterNet(argc,argv,123635,true);
//Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5");
//Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5");
Matrix *X = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/X.hdf5");
Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/y.hdf5");
BatchAllocator b = BatchAllocator();
b.init(X,y,(1.0-0.85715),128,128,*gpu, Single_GPU);
Layer *l0 = new Layer(X->cols,128,Input,gpu);
//l0->PARALLELISM = DataParallelism;
Layer *l1 = new Layer(2048, Logistic, l0);
//l1->PARALLELISM = DataParallelism;
Layer *l2 = new Layer(2048, Logistic, l1);
//l2->PARALLELISM = DataParallelism;
Layer *l3 = new Layer(10, Softmax, l2);
//l3->PARALLELISM = DataParallelism;
l0->DROPOUT = 0.2f;
l0->set_hidden_dropout(0.5f);
cout << gpu->MYRANK << endl;
float decay = 0.99f;
gpu->tick();
gpu->tick("pass");
for(int epoch = 0; epoch < 100; epoch++)
{
cout << "EPOCH: " << epoch + 1 << endl;
b.propagate_through_layers(l0,Training);
b.propagate_through_layers(l0,Trainerror);
b.propagate_through_layers(l0,CVerror);
l0->learning_rate_decay(decay);
if(epoch == 75)
{
l0->dropout_decay();
decay = 0.85f;
}
}
gpu->tock("pass");
gpu->tock();
gpu->shutdown_MPI();
/*
hipSetDevice(0);
Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5");
Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5");
ClusterNet gpu = ClusterNet(1235);
BatchAllocator b = BatchAllocator();
std::vector<int> layers;
layers.push_back(1200);
layers.push_back(1200);
std::vector<float> dropout;
dropout.push_back(0.2f);
dropout.push_back(0.5f);
dropout.push_back(0.5f);
BatchAllocator allocator = BatchAllocator();
allocator.init(X,y,(1.0-0.8571429),128,256,gpu, Single_GPU);
DeepNeuralNetwork net = DeepNeuralNetwork(layers,Classification, gpu, allocator, 10);
net.EPOCHS = 500;
net.TRANSITION_EPOCH = 75;
net.LEARNING_RATE = 0.003;
net.UPDATE_TYPE = RMSProp;
net.DROPOUT = dropout;
//net.MAIN_UNIT = Double_Rectified_Linear;
net.train();
*/
//hipSetDevice(1);
//ClusterNet *gpus = new ClusterNet(123635);
//WikiMaxoutNet_PCIe net = WikiMaxoutNet_PCIe(gpus);
//net.run();
/*
hipSetDevice(0);
struct arg_struct *args0 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus0 = new ClusterNet(23452345);
WikiMaxoutNet *net0 = new WikiMaxoutNet(gpus0[0]);
args0->gpus = gpus0;
args0->net = net0;
args0->device = 0;
net0->run();
pthread_t t0;
pthread_create(&t0, NULL, &run_net, args0);
hipSetDevice(1);
struct arg_struct *args1 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus1 = new ClusterNet(23452345);
WikiMaxoutNet *net1 = new WikiMaxoutNet(gpus1[0]);
args1->gpus = gpus1;
args1->net = net1;
args1->device = 1;
pthread_t t1;
//pthread_create(&t1, NULL, &run_net, args1);
hipSetDevice(2);
struct arg_struct *args2 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus2 = new ClusterNet(23452345);
WikiMaxoutNet *net2 = new WikiMaxoutNet(gpus2[0]);
args2->gpus = gpus2;
args2->net = net2;
args2->device = 2;
pthread_t t2;
//pthread_create(&t2, NULL, &run_net, args2);
cout << "rolfen kek!" << endl;
void* result0;
void* result1;
void* result2;
pthread_join(t0,&result0);
//pthread_join(t1,&result1);
//pthread_join(t2,&result2);
*/
}
| 49b0003fa68506f42cd8b1e04009952f57443c8f.cu | #include <stdio.h>
#include <cublas_v2.h>
#include <util.cuh>
#include <basicOps.cuh>
#include <mpi.h>
#include <cuda.h>
#include <assert.h>
#include <util.cuh>
#include <clusterNet.h>
#include <time.h>
#include <batchAllocator.h>
#include <DeepNeuralNetwork.h>
#include <WikiMaxoutNet.h>
#include <WikiMaxoutNet_PCIe.h>
#include <WikiMaxoutNet_PCIe2.h>
#include <WikiNetDist.h>
#include <Layer.h>
using std::cout;
using std::endl;
void run_neural_network()
{
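// Trains a 784-1024-10 logistic/softmax network on MNIST with input and hidden
// dropout, Nesterov-style momentum and RMSProp updates, printing train and
// cross-validation error after every epoch.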
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpu = ClusterNet(12345);
cout << X->rows << endl;
int hidden_size = 1024;
Matrix *w1 = gpu.sparseInitWeight(784,hidden_size);
Matrix *w2 = gpu.sparseInitWeight(hidden_size,10);
Matrix *m1 = zeros(784,hidden_size);
Matrix *m2 = zeros(hidden_size,10);
Matrix *ms1 = zeros(784,hidden_size);
Matrix *ms2 = zeros(hidden_size,10);
Matrix *grad_w1_ms = zeros(784,hidden_size);
Matrix *grad_w2_ms = zeros(hidden_size,10);
Matrix *grad_w2 = empty(hidden_size,10);
Matrix *grad_w1 = empty(784,hidden_size);
float cv_error = 0;
float cv_size = 0.1428571f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, cv_size, 128, 512);
clock_t t1,t2;
t1=clock();
//code goes here
int epochs = 100;
gpu.tick();
float learning_rate = 0.003;
//size_t free = 0;
//size_t total = 0;
float momentum = 0.5;
for(int EPOCH = 0; EPOCH < epochs; EPOCH++)
{
std::cout << "EPOCH: " << EPOCH + 1 << std::endl;
//cudaMemGetInfo(&free, &total);
//std::cout << free << std::endl;
momentum += 0.01;
if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.allocate_next_batch_async();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
add(w1,m1,w1);
add(w2,m2,w2);
Matrix *d0 = gpu.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpu.dot(d0, w1);
logistic(z1, z1);
Matrix *d1 = gpu.dropout(z1,0.5);
Matrix *a2 = gpu.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2 = gpu.dotT(e1, w2);
gpu.Tdot(z1,e1,grad_w2);
logisticGrad(z1,z1);
mul(e2,z1,e2);
gpu.Tdot(b.CURRENT_BATCH,e2,grad_w1);
b.allocate_next_batch_async();
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
cudaFree(e1->data);
cudaFree(e2->data);
cudaFree(z1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(t->data);
cudaFree(d0->data);
cudaFree(d1->data);
b.replace_current_batch_with_next();
}
//Matrix *sum_value = sum(w1);
//std::cout << "weight 1 Sum: " << to_host(sum_value)->data[0] << std::endl;
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *a1 = gpu.dot(b.CURRENT_BATCH,w1);
logistic(a1, a1);
//Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpu.dot(a1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
cudaFree(a1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
//cudaFree(d0->data);
//cudaFree(d1->data);
b.replace_current_batch_with_next();
}
std::cout << "Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *a1 = gpu.dot(d0,w1);
logistic(a1, a1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpu.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_cv_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
cudaFree(a1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
cudaFree(d0->data);
cudaFree(d1->data);
b.replace_current_cv_batch_with_next();
}
std::cout << "Cross validation error: " << cv_error << std::endl;
}
cudaThreadSynchronize();
t2=clock();
float diff ((float)t2-(float)t1);
float mseconds = (diff / CLOCKS_PER_SEC) * 1000;
std::cout<<mseconds<<std::endl;
gpu.tock();
b.finish_batch_allocator();
//gpu.tock("batch replace");
//gpu.tock("async batch allocate");
//gpu.tock("feedforward");
printf("Finished!\n");
}
void run_maxout_network()
{
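// Trains a maxout network on MNIST: 512 hidden units grouped with maxout size 8,
// dropout on input and hidden activations, RMSProp-with-momentum updates.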
cudaSetDevice(0);
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpus = ClusterNet(12345);
int hiddenunits = 512;
int maxout_Size = 8;
int batch_size = 128;
Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits);
Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10);
Matrix *b1 = zeros(1,hiddenunits);
Matrix *b2 = zeros(1,10);
Matrix *m1 = zeros(784,hiddenunits);
Matrix *m2 = zeros(hiddenunits/maxout_Size,10);
Matrix *mb1 = zeros(1,hiddenunits);
Matrix *mb2 = zeros(1,10);
Matrix *ms1 = zeros(784,hiddenunits);
Matrix *ms2 = zeros(hiddenunits/maxout_Size,10);
Matrix *msb1 = zeros(1,hiddenunits);
Matrix *msb2 = zeros(1,10);
Matrix *grad_w1 = zeros(784,hiddenunits);
Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10);
Matrix *grad_b1 = zeros(1,hiddenunits);
Matrix *grad_b2 = zeros(1,10);
float cv_error = 0.0f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, 0.2, batch_size, 512);
int epochs = 1000;
float learning_rate = 0.001;
float momentum = 0.5;
for(int EPOCH = 1; EPOCH < epochs; EPOCH++)
{
cout << "EPOCH: " << EPOCH << endl;
//momentum += 0.01;
//if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
//feedforward
Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpus.dot(d0, w1);
addMatrixVector(z1,b1,z1);
Matrix **a_paired = maxout(z1,maxout_Size);
Matrix *a1 = a_paired[0];
Matrix *a1_idx = a_paired[1];
Matrix *d1 = gpus.dropout(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
addMatrixVector(a2,b2,a2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
b.allocate_next_batch_async();
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2_partial = gpus.dotT(e1, w2);
Matrix *e2 = empty(b.CURRENT_BATCH->rows,e2_partial->cols*maxout_Size);
Matrix *aB = ones(1,b.CURRENT_BATCH->rows);
gpus.Tdot(a1,e1,grad_w2);
gpus.dot(aB,e1,grad_b2);
expand_to_maxout_grad(e2_partial, a1_idx,e2);
gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1);
gpus.dot(aB,e2,grad_b1);
//weight updates
//RMSProp
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
/*
scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1);
scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2);
scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1);
scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2);
//classical momentum
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
sub(m1,grad_w1,m1);
sub(m2,grad_w2,m2);
sub(mb1,grad_b1,mb1);
sub(mb2,grad_b2,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
*/
/*
sub(w1,grad_w1,w1);
sub(w2,grad_w2,w2);
sub(b1,grad_b1,b1);
sub(b2,grad_b2,b2);
*/
cudaFree(e1->data);
cudaFree(e2->data);
cudaFree(e2_partial->data);
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a1_idx->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(t->data);
cudaFree(d0->data);
cudaFree(d1->data);
cudaFree(aB->data);
free(a_paired);
b.replace_current_batch_with_next();
}
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix **a1_pair = maxout(z1,maxout_Size);
Matrix *a1 = a1_pair[0];
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a1_pair[1]->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
cudaFree(d0->data);
cudaFree(d1->data);
free(a1_pair);
b.replace_current_batch_with_next();
}
std::cout << "MAXOUT Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix **a1_pair = maxout(z1,maxout_Size);
Matrix *a1 = a1_pair[0];
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a1_pair[1]->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
cudaFree(d0->data);
cudaFree(d1->data);
free(a1_pair);
b.replace_current_cv_batch_with_next();
}
std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl;
}
}
void run_normal_net()
{
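// Trains a plain 784-1024-10 logistic network (maxout size 1) on MNIST; several
// alternative update rules are kept below as commented-out variants, the active
// one being RMSProp with Nesterov weight updates.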
cudaSetDevice(2);
Matrix *X = read_hdf5("/home/tim/mnist_full_X.hdf5");
Matrix *y = read_hdf5("/home/tim/mnist_full_y.hdf5");
ClusterNet gpus = ClusterNet(12345);
int hiddenunits = 1024;
int maxout_Size = 1;
int batch_size = 128;
Matrix *w1 = gpus.uniformSqrtWeight(784,hiddenunits);
Matrix *w2 = gpus.uniformSqrtWeight(hiddenunits/maxout_Size,10);
Matrix *b1 = zeros(1,hiddenunits);
Matrix *b2 = zeros(1,10);
Matrix *m1 = zeros(784,hiddenunits);
Matrix *m2 = zeros(hiddenunits/maxout_Size,10);
Matrix *mb1 = zeros(1,hiddenunits);
Matrix *mb2 = zeros(1,10);
Matrix *ms1 = zeros(784,hiddenunits);
Matrix *ms2 = zeros(hiddenunits/maxout_Size,10);
Matrix *msb1 = zeros(1,hiddenunits);
Matrix *msb2 = zeros(1,10);
Matrix *grad_w1 = zeros(784,hiddenunits);
Matrix *grad_w2 = zeros(hiddenunits/maxout_Size,10);
Matrix *grad_b1 = zeros(1,hiddenunits);
Matrix *grad_b2 = zeros(1,10);
float cv_error = 0.0f;
float train_error = 0.0f;
BatchAllocator b = BatchAllocator();
b.init(X, y, 0.4, batch_size, 512);
int epochs = 500;
float learning_rate = 0.000001;
float momentum = 0.5;
for(int EPOCH = 1; EPOCH < epochs; EPOCH++)
{
cout << "EPOCH: " << EPOCH << endl;
momentum += 0.01;
if(momentum > 0.95) momentum = 0.95;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
//nesterov updates
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
//feedforward
Matrix *d0 = gpus.dropout(b.CURRENT_BATCH,0.2);
Matrix *z1 = gpus.dot(d0, w1);
addMatrixVector(z1,b1,z1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = gpus.dropout(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
addMatrixVector(a2,b2,a2);
Matrix *out = softmax(a2);
Matrix *t = create_t_matrix(b.CURRENT_BATCH_Y,10);
b.allocate_next_batch_async();
//backprop
Matrix *e1 = sub(out, t);
Matrix *e2 = gpus.dotT(e1, w2);
Matrix *aB = ones(1,b.CURRENT_BATCH->rows);
gpus.Tdot(a1,e1,grad_w2);
gpus.dot(aB,e1,grad_b2);
//rectified_linear_derivative(a1,a1);
logisticGrad(a1,a1);
mul(e2,a1,e2);
gpus.Tdot(b.CURRENT_BATCH,e2,grad_w1);
gpus.dot(aB,e2,grad_b1);
/*
//about equal to momentum update + nesterov update -> is momentum applied to gradient+momentum better?
RMSprop_with_momentum_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
/*
//slow and generally worse error, but sometimes better results in the end
RMSprop_with_momentum_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_momentum_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
RMSprop_with_nesterov_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_nesterov_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
/*
//slower but just as good as nesterov momentum
RMSprop_with_weight_update(ms1,grad_w1,w1,m1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(ms2,grad_w2,w2,m2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(msb1,grad_b1,b1,mb1,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
RMSprop_with_weight_update(msb2,grad_b2,b2,mb2,0.9f,learning_rate,b.CURRENT_BATCH->rows, momentum);
*/
/*
scalarMul(grad_w1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w1);
scalarMul(grad_w2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_w2);
scalarMul(grad_b1,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b1);
scalarMul(grad_b2,learning_rate/(float)b.CURRENT_BATCH->rows,grad_b2);
//classical momentum
scalarMul(m1,momentum,m1);
scalarMul(m2,momentum,m2);
scalarMul(mb1,momentum,mb1);
scalarMul(mb2,momentum,mb2);
sub(m1,grad_w1,m1);
sub(m2,grad_w2,m2);
sub(mb1,grad_b1,mb1);
sub(mb2,grad_b2,mb2);
add(w1,m1,w1);
add(w2,m2,w2);
add(b1,mb1,b1);
add(b2,mb2,b2);
*/
/*
sub(w1,grad_w1,w1);
sub(w2,grad_w2,w2);
sub(b1,grad_b1,b1);
sub(b2,grad_b2,b2);
*/
cudaFree(e1->data);
cudaFree(e2->data);
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(t->data);
cudaFree(d0->data);
cudaFree(d1->data);
cudaFree(aB->data);
b.replace_current_batch_with_next();
}
train_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES; i++)
{
b.broadcast_batch_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
train_error += (b.CURRENT_BATCH->rows - sum_value)/ (1.0f * b.CURRENT_BATCH->rows *b.TOTAL_BATCHES) ;
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
cudaFree(d0->data);
cudaFree(d1->data);
b.replace_current_batch_with_next();
}
std::cout << "MAXOUT Train error: " << train_error << std::endl;
cv_error = 0;
for(int i = 0; i < b.TOTAL_BATCHES_CV; i++)
{
b.broadcast_batch_cv_to_processes();
Matrix *d0 = scalarMul(b.CURRENT_BATCH_CV,0.8);
Matrix *z1 = gpus.dot(d0,w1);
Matrix *a1 = logistic(z1);
//Matrix *a1 = rectified_linear(z1);
Matrix *d1 = scalarMul(a1,0.5);
Matrix *a2 = gpus.dot(d1,w2);
Matrix *out = softmax(a2);
Matrix *result = argmax(out);
Matrix *eq = equal(result,b.CURRENT_BATCH_CV_Y);
b.allocate_next_batch_async();
float sum_value = sum(eq);
cv_error += (b.CURRENT_BATCH_CV->rows - sum_value)/ (1.0f * b.CURRENT_BATCH_CV->rows *b.TOTAL_BATCHES_CV) ;
cudaFree(z1->data);
cudaFree(a1->data);
cudaFree(a2->data);
cudaFree(out->data);
cudaFree(result->data);
cudaFree(eq->data);
cudaFree(d0->data);
cudaFree(d1->data);
b.replace_current_cv_batch_with_next();
}
std::cout << "MAXOUT Cross validation error: " << cv_error << std::endl;
}
}
void MPI_benchmark_P2P(int argc, char *argv[])
{
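// Assigns one GPU per local MPI rank, then compares timings of local GEMMs,
// reduced-size GEMMs, PCIe transfers between ranks on the same node, and
// RDMA transfers between ranks 0 and 3.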
char name[100];
int myrank, length, size;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Get_processor_name(name, &length);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Status status;
int local_rank = myrank % 4;
int gpus;
cudaGetDeviceCount(&gpus);
int mygpu_id;
int your_gpu_id;
if(myrank == 0)
{
mygpu_id = 0;
if(gpus > 1)
your_gpu_id = 1;
else
your_gpu_id = 0;
MPI_Send(&your_gpu_id,1, MPI_INT,1,0,MPI_COMM_WORLD);
}
else
{
MPI_Recv(&mygpu_id,1,MPI_INT,myrank-1,0,MPI_COMM_WORLD,&status);
if(gpus > mygpu_id+1)
your_gpu_id = mygpu_id + 1;
else
your_gpu_id = 0;
if(myrank < size-1)
MPI_Send(&your_gpu_id,1, MPI_INT,myrank+1,0,MPI_COMM_WORLD);
}
cudaSetDevice(mygpu_id);
int batch_size = 128;
int inner_dim = 10000;
int outer_dim = 15000;
ClusterNet gpu = ClusterNet();
Matrix *A = gpu.rand(batch_size,inner_dim);
Matrix *B = gpu.rand(inner_dim,outer_dim);
Matrix *out = empty(batch_size,outer_dim);
Matrix *rec = empty(batch_size,outer_dim);
Matrix *A1 = gpu.rand(batch_size/2,inner_dim);
Matrix *B1 = gpu.rand(inner_dim,outer_dim);
Matrix *rec1 = empty(batch_size/2,outer_dim);
Matrix *out1 = empty(batch_size/2,outer_dim);
Matrix *A2 = gpu.rand(batch_size,inner_dim);
Matrix *B2 = gpu.rand(inner_dim,outer_dim/2);
Matrix *rec2 = empty(batch_size,outer_dim/2);
Matrix *out2 = empty(batch_size,outer_dim/2);
gpu.tick("Direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B, out);
//add(A, B, out);
}
gpu.tock("Direct compute");
gpu.tick("partial batch direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A1,B1, out1);
//add(A, B, out);
}
gpu.tock("partial batch direct compute");
gpu.tick("partial units direct compute");
for(int i = 0; i< 100; i++)
{
gpu.dot(A2,B2, out2);
//add(A, B, out);
}
gpu.tock("partial units direct compute");
gpu.tick("PCIe transfer");
for(int i = 0; i< 100; i++)
{
if(local_rank == 0 && gpus > 1)
{
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else if(local_rank == 1 && gpus > 1)
{
//add(A2,B, out);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
}
}
gpu.tock("PCIe transfer");
gpu.tick("PCIe dot");
for(int i = 0; i< 100; i++)
{
if(local_rank == 0 && gpus > 1)
{
gpu.dot(A2,B2,out2);
MPI_Send(out1->data, out1->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else if(local_rank == 1 && gpus > 1)
{
gpu.dot(A2,B2,out2);
MPI_Recv(rec1->data, rec1->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out2,rec2,rec);
}
}
gpu.tock("PCIe dot");
gpu.tick("RDMA transfer");
for(int i = 0; i< 100; i++)
{
if(myrank == 0)
{
MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD);
}
else if(myrank == 3)
{
//add(A2,B, out);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
}
}
gpu.tock("RDMA transfer");
gpu.tick("RDMA dot");
for(int i = 0; i< 100; i++)
{
if(myrank == 0)
{
gpu.dot(A2,B2,out2);
MPI_Send(out->data, out->size, MPI_FLOAT, 3, 100, MPI_COMM_WORLD);
}
else if(myrank == 3)
{
//add(A2,B, out);
gpu.dot(A2,B2,out2);
MPI_Recv(rec->data, rec->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out2,rec2,rec);
}
}
gpu.tock("RDMA dot");
MPI_Finalize();
}
void MPI_benchmark(int argc, char *argv[])
{
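// Compares a full local GEMM against splitting the work across two MPI ranks:
// each rank computes a slice of the result, and the slices are exchanged over
// MPI and stacked back together.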
int myrank;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
ClusterNet gpu = ClusterNet();
int batch_rows = 128;
int w_in = 10000;
int w_out = 8000;
//dot
Matrix *B = gpu.rand(w_in,w_out);
Matrix *A = gpu.rand(batch_rows,w_in);
assert(test_matrix(A,batch_rows,w_in));
assert(test_matrix(B,w_in,w_out));
Matrix *out = empty(batch_rows, w_out);
Matrix *B1 = gpu.rand(w_in,w_out/2);
Matrix *B2 = gpu.rand(w_in,w_out/2);
Matrix *D = empty(batch_rows,w_out/2);
Matrix *A1 = gpu.rand(batch_rows/2,w_in);
Matrix *big_out = gpu.rand(batch_rows/2,w_out);
Matrix *grand_out = empty(batch_rows, w_out);
Matrix *C = gpu.rand(batch_rows/2,w_in);
Matrix *C_out = empty(batch_rows/2,w_out);
Matrix *E = gpu.rand(batch_rows/4,w_in);
Matrix *E_out = empty(batch_rows/4,w_out);
Matrix *E_merge = empty(batch_rows/2,w_out);
Matrix *E_merge2 = empty(batch_rows/2,w_out);
//add
/*
B = gpu.rand(w_in,w_out);
A = gpu.rand(w_in,w_out);
out = empty(w_in, w_out);
A1 = gpu.rand(w_in/2,w_out);
Matrix *A2 = gpu.rand(w_in/2,w_out);
D = empty(w_in/2,w_out);
*/
cudaEvent_t* startstop = tick();
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B, out);
//add(A, B, out);
}
printf("Direct compute:\n");
tock(startstop);
out = empty(batch_rows,w_out/2);
Matrix *out2 = empty(batch_rows,w_out/2);
startstop = tick();
for(int i = 0; i< 100; i++)
{
gpu.dot(A,B1, out);
gpu.dot(A,B2, out2);
vStack(out,out2,grand_out);
}
printf("Direct compute x2:\n");
tock(startstop);
Matrix *mergemat = empty(batch_rows, w_out);
out = empty(batch_rows,w_out/2);
startstop = tick();
//out = empty(w_in/2,w_out);
for(int i = 0; i < 100; i++)
{
if(myrank == 0)
{
gpu.dot(A,B1, out);
//add(A1, B,out);
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
}
else
{
gpu.dot(A,B2, out);
//add(A2,B, out);
MPI_Recv(D->data, D->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out,D, mergemat);
}
}
if(myrank == 1)
{
printf("GPUDirect RDMA:\n");
tock(startstop);
}
out = empty(batch_rows/2,w_out);
startstop = tick();
gpu.tick("aa");
//out = empty(w_in/2,w_out);
for(int i = 0; i < 100; i++)
{
gpu.tick("dot");
gpu.dot(C,B, out);
gpu.tick("dot");
if(myrank == 0)
{
//add(A1, B,out);
gpu.tick("send");
MPI_Send(out->data, out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD);
gpu.tick("send");
}
else
{
//add(A2,B, out);
gpu.tick("receive");
MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
vStack(out,C_out, grand_out);
gpu.tick("receive");
}
if(myrank == 1)
{
//add(A1, B,out);
gpu.tick("send");
MPI_Send(out->data, out->size, MPI_FLOAT, 0, 100, MPI_COMM_WORLD);
gpu.tick("send");
}
else
{
//add(A2,B, out);
gpu.tick("receive");
MPI_Recv(C_out->data, C_out->size, MPI_FLOAT, 1, 100, MPI_COMM_WORLD, &status);
vStack(out,C_out, grand_out);
gpu.tick("receive");
}
}
gpu.tock("dot");
if(myrank == 1)
{
printf("GPUDirect RDMA batch:\n");
tock(startstop);
gpu.tock("receive");
gpu.tock("aa");
}
else
{
gpu.tock("send");
}
MPI_Finalize();
}
void dotMPI_test(int argc, char *argv[])
{
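// The entire benchmark body is commented out; it compared dotMPI batch/unit
// slicing against hand-rolled all-to-all, ring, and matrix-array exchanges.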
/*
ClusterNet gpu = ClusterNet(argc, argv, 123465);
int inner = 2000;
int outer = 1200;
int batch_size = 128;
int reduced_left = 128;
int reduced_right = 400;
Matrix *A = gpu.rand(batch_size,inner);
Matrix *B = gpu.rand(inner,outer);
Matrix *A1 = gpu.rand(reduced_left,inner);
Matrix *B1 = gpu.rand(inner,reduced_right);
Matrix *out = empty(batch_size,outer);
Matrix *out1 = empty(reduced_left,reduced_right);
Matrix *recv1 = empty(reduced_left,reduced_right);
Matrix *recv2 = empty(reduced_left,reduced_right);
Matrix *recv3 = empty(reduced_left,reduced_right);
MPI_Status status;
gpu.tick("dot mpi batch");
for(int i = 0; i < 100; i++)
{
gpu.dotMPI_batchSlice(A,B);
}
gpu.tock("dot mpi batch");
gpu.tick("dot mpi unit");
for(int i = 0; i < 100; i++)
{
gpu.dotMPI_unitSlice(A,B);
}
gpu.tock("dot mpi unit");
printf("My rank: %i\n",gpu.MYRANK);
//gpu.benchmark_dot();
gpu.tick("dot normal");
for(int i = 0; i < 100; i++)
{
gpu.dot(A,B,out);
}
gpu.tock("dot normal");
//std::vector<MPI_Request> requests;
MPI_Request *requests = (MPI_Request*)malloc(sizeof(MPI_Request)*gpu.MPI_SIZE-1);
MPI_Request request_send;
std::vector<Matrix*> recv_buffer;
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
MPI_Request request;
requests[i] = request;
}
int received_count = 0;
for(int i = 0; i < 100; i++)
{
for(int i = 0; i < recv_buffer.size(); i++)
cudaFree(recv_buffer[i]->data);
recv_buffer.clear();
out1 = empty(reduced_left,reduced_right);
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
recv_buffer.push_back(empty(reduced_left,reduced_right));
}
gpu.tick("all to all custom");
//cout << "a1 rows" << A1->rows << endl;
gpu.dot(A1,B1,out1);
recv_buffer[gpu.MYRANK]= out1;
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Isend(out1->data, out1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &request_send);
}
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Irecv(recv1->data, recv1->size, MPI_FLOAT, i, 100, MPI_COMM_WORLD, &requests[i]);
}
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
if(gpu.MYRANK == i) { continue; }
MPI_Wait(&requests[i],MPI_STATUS_IGNORE);
}
received_count = 0;
while(received_count < gpu.MPI_SIZE-1)
{
for(int i = 0; i < gpu.MPI_SIZE; i++)
{
int received = 0;
if(gpu.MYRANK == i) { continue; }
MPI_Test(&requests[i],&received,&status);
if(received == 1)
{
out1 = hStack(out1,recv1);
received_count++;
}
}
}
gpu.tick("all to all custom");
}
gpu.tock("all to all custom");
int destination = gpu.MYRANK + 1;
int source = gpu.MYRANK - 1;
if(destination == gpu.MPI_SIZE){destination = 0; }
if(source < 0){ source = gpu.MPI_SIZE - 1;}
for(int i = 0; i < 100; i++)
{
out1 = empty(reduced_left,reduced_right);
recv1 = empty(reduced_left,reduced_right);
gpu.tick("chain custom");
gpu.dot(A1,B1,out1);
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
if(i == 0)
MPI_Isend(out1->data, out1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
else
MPI_Isend(recv1->data, recv1->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
MPI_Recv(recv1->data, recv1->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD, &status);
//MPI_Wait(&requests[i],&status);
out1 = hStack(out1,recv1);
}
gpu.tick("chain custom");
}
gpu.tock("chain custom");
cout << gpu.MYRANK << endl;
int matrix_idx = gpu.MYRANK;
Matrix** arrOut = (Matrix**)malloc(sizeof(Matrix*)*gpu.MPI_SIZE);
for(int i = 0; i < gpu.MPI_SIZE; i++)
arrOut[i] = empty(reduced_left,reduced_right);
float **h_arrA = (float**)malloc(sizeof(float*)*gpu.MPI_SIZE);
for(int i = 0; i < gpu.MPI_SIZE; i++)
h_arrA[i] = arrOut[i]->data;
float **d_arrA;
cudaMalloc((void**) &d_arrA,sizeof(float*)*gpu.MPI_SIZE);
cudaMemcpy(d_arrA,h_arrA,sizeof(float*)*gpu.MPI_SIZE,cudaMemcpyDefault);
gpu.tick("chain matrix array");
for(int i = 0; i < 100; i++)
{
gpu.dot(A1,B1,arrOut[gpu.MYRANK]);
matrix_idx = gpu.MYRANK;
for(int i = 0; i < gpu.MPI_SIZE-1; i++)
{
MPI_Isend(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, destination, 100, MPI_COMM_WORLD, &request_send);
matrix_idx = (matrix_idx - 1) < 0 ? gpu.MPI_SIZE-1 : (matrix_idx - 1);
MPI_Irecv(arrOut[matrix_idx]->data, arrOut[matrix_idx]->size, MPI_FLOAT, source, 100, MPI_COMM_WORLD,&requests[i]);
}
MPI_Waitall(gpu.MPI_SIZE-1,requests,MPI_STATUSES_IGNORE);
//hStackN(d_arrA,arrOut[0]->size, out,gpu.MPI_SIZE);
}
gpu.tock("chain matrix array");
gpu.shutdown();
*/
}
void async_test(int argc, char *argv[])
{
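// Benchmarks asynchronous GPU-to-GPU copies on rank 0: times 100 rounds of
// cudaMemcpyPeerAsync between device 0 and device 1 on separate streams.
// Peer access is explicitly disabled first, so the copies cannot use direct P2P.
// The MPI ping-pong variant just below is commented out.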
ClusterNet gpu = ClusterNet(argc,argv,1324);
int rows = 512;
int cols = 128;
/*
MPI_Request r = MPI_REQUEST_NULL;
MPI_Request s = MPI_REQUEST_NULL;
Matrix *a = gpu.rand(rows,cols);
Matrix *b = zeros(rows,cols);
if(gpu.MYRANK == 0)
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s);
}
else
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s);
}
MPI_Wait(&s,MPI_STATUS_IGNORE);
MPI_Wait(&r,MPI_STATUS_IGNORE);
gpu.tick("MPI");
for(int i = 0; i < 100; i++)
{
if(gpu.MYRANK == 0)
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,1,0,MPI_COMM_WORLD,&s);
}
else
{
MPI_Irecv(b->data,b->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&r);
MPI_Isend(a->data,a->size,MPI_FLOAT,0,0,MPI_COMM_WORLD,&s);
}
MPI_Wait(&s,MPI_STATUS_IGNORE);
MPI_Wait(&r,MPI_STATUS_IGNORE);
}
gpu.tock("MPI");
*/
if(gpu.MYRANK == 0)
{
cudaSetDevice(0);
//cudaDeviceEnablePeerAccess(1,0);
cudaDeviceDisablePeerAccess(1);
Matrix *A1 = gpu.rand(rows,cols);
Matrix *A2 = gpu.rand(rows,cols);
cudaSetDevice(1);
//cudaDeviceEnablePeerAccess(0,0);
cudaDeviceDisablePeerAccess(0);
Matrix *B1 = gpu.rand(rows,cols);
Matrix *B2 = gpu.rand(rows,cols);
cudaSetDevice(0);
cudaStream_t s;
cudaStreamCreate(&s);
cudaSetDevice(1);
cudaStream_t s2;
cudaStreamCreate(&s2);
cudaSetDevice(0);
int access = 0;
cudaDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
cudaDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
cudaSetDevice(0);
gpu.tick("cuda");
for(int i = 0; i < 100; i++)
{
cudaMemcpyPeerAsync(B2->data,1,A2->data,0,A2->bytes,s);
cudaSetDevice(1);
cudaMemcpyPeerAsync(A1->data,0,B1->data,1,B1->bytes,s2);
cudaSetDevice(0);
cudaStreamSynchronize(s);
cudaSetDevice(1);
cudaStreamSynchronize(s2);
cudaSetDevice(0);
}
gpu.tock("cuda");
}
MPI_Barrier(MPI_COMM_WORLD);
gpu.shutdown_MPI();
}
struct arg_struct
{
ClusterNet *gpus;
WikiMaxoutNet *net;
int device;
};
void *run_net(void * args)
{
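// pthread entry point: selects the GPU requested in arg_struct and runs the
// WikiMaxoutNet instance on that device.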
struct arg_struct *_args = (struct arg_struct*)args;
cout << "device: " << _args->device << endl;
cudaSetDevice(_args->device);
_args->net->run();
return 0;
}
void *print_message(void*)
{
ClusterNet gpu = ClusterNet(124345);
WikiMaxoutNet net = WikiMaxoutNet(gpu);
net.run();
return 0;
}
void bandwidth_test_MPI(int argc, char *argv[])
{
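// Measures MPI ring bandwidth: each rank sends a square float matrix to its
// right neighbour and receives from its left neighbour, for growing matrix
// sizes (128*epoch per side), then reports the achieved GB/s.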
ClusterNet *gpu = new ClusterNet(argc,argv,1235,true);
std::vector<MPI_Request*> sends;
std::vector<MPI_Request*> recvs;
std::vector<Matrix*> lSync;
std::vector<Matrix*> lData;
int packages = 1;
float time = 0;
for(int epoch = 1; epoch < 20; epoch++)
{
if(lData.size() > 0)
{
for(int i = 0; i < packages; i++)
{
cudaFree(lSync[i]->data);
cudaFree(lData[i]->data);
}
lSync.clear();
lData.clear();
}
for(int i = 0; i < packages; i++)
{
lSync.push_back(zeros(128*epoch,128*epoch));
lData.push_back(gpu->rand(128*epoch,128*epoch));
}
for(int j = 0; j < packages; j++)
{
MPI_Request *send_request = new MPI_Request;
MPI_Request *recv_request = new MPI_Request;
sends.push_back(send_request);
recvs.push_back(recv_request);
int target = gpu->MYRANK +1 == gpu->MPI_SIZE ? 0 : gpu->MYRANK+1;
int source = gpu->MYRANK-1 == -1 ? gpu->MPI_SIZE-1 : gpu->MYRANK-1;
gpu->tick();
for (int i = 0; i < gpu->MPI_SIZE -1; i++)
{
//MPI_Irecv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,999,MPI_COMM_WORLD,recv_request);
//MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,999,MPI_COMM_WORLD,send_request);
//MPI_Isend(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD,send_request);
if(i == gpu->MYRANK)
{
MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD);
MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else
{
MPI_Recv(lSync[j]->data,lSync[j]->size,MPI_FLOAT,source,j,MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(lData[j]->data,lData[j]->size,MPI_FLOAT,target,j,MPI_COMM_WORLD);
}
}
}
/*
gpu->tick();
for(int i = 0; i < packages; i++)
{
MPI_Wait(sends[i],MPI_STATUS_IGNORE);
MPI_Wait(recvs[i],MPI_STATUS_IGNORE);
}
*/
time = gpu->tock();
for(int i = 0; i < packages; i++)
assert(sum(lData[i]) == sum(lSync[i]));
printdim(lData[0]);
cout << 1000*2*packages*lData[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
gpu->shutdown_MPI();
}
void bandwidth_test_peer()
{
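// Measures direct peer-to-peer copy bandwidth between device 0 and device 1
// using cudaMemcpyAsync on per-package streams with peer access enabled, and
// verifies the transfers by comparing matrix sums.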
ClusterNet *gpu = new ClusterNet(1235);
std::vector<Matrix*> lSync0;
std::vector<Matrix*> lData0;
std::vector<Matrix*> lSync1;
std::vector<Matrix*> lData1;
std::vector<cudaStream_t> s0s;
std::vector<cudaStream_t> s1s;
int packages = 1;
float time = 0;
cudaSetDevice(0);
cudaDeviceEnablePeerAccess(1,0);
cudaSetDevice(1);
cudaDeviceEnablePeerAccess(0,0);
for(int i = 0; i < packages; i++)
{
cudaStream_t s0;
cudaStream_t s1;
cudaSetDevice(0);
cudaStreamCreate(&s0);
cudaSetDevice(1);
cudaStreamCreate(&s1);
s0s.push_back(s0);
s1s.push_back(s1);
}
cudaSetDevice(0);
int access = 0;
cudaDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
cudaDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
for(int epoch = 199; epoch < 200; epoch++)
{
if(lSync0.size() > 0)
{
for(int i = 0; i < packages; i++)
{
cudaFree(lSync0[i]->data);
cudaFree(lData0[i]->data);
cudaFree(lSync1[i]->data);
cudaFree(lData1[i]->data);
}
lSync0.clear();
lData0.clear();
lSync1.clear();
lData1.clear();
}
for(int i = 0; i < packages; i++)
{
cudaSetDevice(0);
lSync0.push_back(zeros(128*epoch,128*epoch));
lData0.push_back(gpu->rand(128*epoch,128*epoch));
cudaSetDevice(1);
lSync1.push_back(zeros(128*epoch,128*epoch));
lData1.push_back(gpu->rand(128*epoch,128*epoch));
}
cudaSetDevice(0);
gpu->tick();
for(int j = 0; j < packages; j++)
{
cudaMemcpyAsync(lSync1[j]->data,lData0[j]->data,lData0[j]->bytes,cudaMemcpyDefault, s0s[j]);
cudaSetDevice(1);
cudaMemcpyAsync(lSync0[j]->data,lData1[j]->data,lData1[j]->bytes,cudaMemcpyDefault,s1s[j]);
cudaSetDevice(0);
}
for(int i = 0; i < packages; i++)
{
cudaStreamSynchronize(s0s[i]);
cudaStreamSynchronize(s1s[i]);
}
time = gpu->tock();
for(int i = 0; i < packages; i++)
//cout << sum(lData0[i]) << " vs. " << sum(lSync1[i]) << endl;
assert(sum(lData0[i]) == sum(lSync1[i]));
for(int i = 0; i < packages; i++)
assert(sum(lData1[i]) == sum(lSync0[i]));
cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
}
void bandwidth_test_kernel()
{
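// Measures effective bandwidth when an add() kernel on one device reads an
// operand that lives on the other device through peer access (no explicit
// copies are issued).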
ClusterNet *gpu = new ClusterNet(1235);
std::vector<Matrix*> lSync0;
std::vector<Matrix*> lData0;
std::vector<Matrix*> lSync1;
std::vector<Matrix*> lData1;
std::vector<cudaStream_t> s0s;
std::vector<cudaStream_t> s1s;
int packages = 10;
float time = 0;
cudaSetDevice(0);
cudaDeviceEnablePeerAccess(1,0);
cudaSetDevice(1);
cudaDeviceEnablePeerAccess(0,0);
for(int i = 0; i < packages; i++)
{
cudaStream_t s0;
cudaStream_t s1;
cudaSetDevice(0);
cudaStreamCreate(&s0);
cudaSetDevice(1);
cudaStreamCreate(&s1);
s0s.push_back(s0);
s1s.push_back(s1);
}
cudaSetDevice(0);
int access = 0;
cudaDeviceCanAccessPeer(&access,0,1);
cout << access << endl;
cudaDeviceCanAccessPeer(&access,1,0);
cout << access << endl;
for(int epoch = 1; epoch < 200; epoch++)
{
if(lSync0.size() > 0)
{
for(int i = 0; i < packages; i++)
{
cudaFree(lSync0[i]->data);
cudaFree(lData0[i]->data);
cudaFree(lSync1[i]->data);
cudaFree(lData1[i]->data);
}
lSync0.clear();
lData0.clear();
lSync1.clear();
lData1.clear();
}
for(int i = 0; i < packages; i++)
{
cudaSetDevice(0);
lSync0.push_back(zeros(128*epoch,128*epoch));
lData0.push_back(gpu->rand(128*epoch,128*epoch));
cudaSetDevice(1);
lSync1.push_back(zeros(128*epoch,128*epoch));
lData1.push_back(gpu->rand(128*epoch,128*epoch));
}
cudaSetDevice(0);
gpu->tick();
for(int j = 0; j < packages; j++)
{
add(lSync0[j],lData1[j],lSync0[j]);
cudaSetDevice(1);
add(lSync1[j],lData0[j],lSync1[j]);
cudaSetDevice(0);
}
cudaDeviceSynchronize();
cudaSetDevice(1);
cudaDeviceSynchronize();
cudaSetDevice(0);
time = gpu->tock();
/*
for(int i = 0; i < packages; i++)
assert(sum(lData0[i]) == sum(lSync1[i]));
for(int i = 0; i < packages; i++)
assert(sum(lData1[i]) == sum(lSync0[i]));
*/
printdim(lSync0[0]);
cout << 1000*2*packages*lData0[0]->bytes/1024./1024./1024./time << " GB/s" << endl;
}
}
int main(int argc, char *argv[])
{
//bandwidth_test_peer();
//bandwidth_test_MPI(argc,argv);
//bandwidth_test_kernel();
//ClusterNet *gpu = new ClusterNet(234);
/*
Matrix *rdm = gpu->rand_numbers(10,10);
printmat(rdm);
*/
/*
ClusterNet *gpu = new ClusterNet(234);
int out_rows = 128;
int out_cols = 800;
int inner = 784;
Matrix *A = gpu->rand(out_rows,inner);
Matrix *B = gpu->rand(inner,out_cols);
Matrix *out1 = zeros(out_rows,out_cols);
Matrix *charA = empty_char(out_rows,inner);
Matrix *charB = empty_char(inner,out_cols);
Matrix *out2 = empty(out_rows,out_cols);
Matrix *out3 = empty(out_rows,out_cols);
gpu->tick();
for(int i = 0; i < 100; i++)
gpu->dot(A,B,out3);
gpu->tock();
float maxA = max(abs(A));
float maxB = max(abs(B));
gpu->compression_8bit(A,maxA,charA);
gpu->compression_8bit(B,maxB,charB);
//printmat(A);
//printmat(gpu->decompression_8bit(charA,maxA));
//printmat(B);
//printmat(gpu->decompression_8bit(charB,maxB));
//cout << sum(gpuSqrt(square(sub(B,gpu->decompression_8bit(charB,maxB)))))/(float)B->size << endl;
//cout << sum(gpuSqrt(square(sub(A,gpu->decompression_8bit(charA,maxA)))))/(float)B->size << endl;
//gpu->compression_8bit(A,maxA,charA);
//printmat(out1);
//printmat(out1,60,65,70,80);
gpu->tick();
for(int i = 0; i < 100; i++)
{
fill_matrix(out1,0.0f);
gpu->dot8bit(charA,charB,maxA,maxB,out1);
}
gpu->tock();
gpu->tick();
for(int i = 0; i < 100; i++)
gpu->dot8bit_shared(charA,charB,maxA,maxB,out2);
gpu->tock();
//printmat(gpu->decompression_8bit(charB,maxB));
//printmat(out1,60,65,70,80);
//printmat(out2,60,65,70,80);
//printmat(out1);
//printmat(out2);
//printsum(out1);
//printsum(out2);
cout << sum(gpuSqrt(square(sub(out1,out2))))/(float)out1->size << endl;
cout << sum(gpuSqrt(square(sub(out1,out3))))/(float)out1->size << endl;
cout << sum(gpuSqrt(square(sub(out2,out3))))/(float)out1->size << endl;
//cout << "max A " << maxA <<endl;
//cout << "max B " << maxB <<endl;
*/
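// Active experiment: distribute MNIST row-wise across the MPI ranks and train
// an input layer followed by two 2048-unit logistic layers and a 10-way softmax,
// with dropout and per-epoch learning-rate decay, via the Layer/BatchAllocator API.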
ClusterNet *gpu = new ClusterNet(argc,argv,123635,true);
//Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5");
//Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5");
Matrix *X = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/X.hdf5");
Matrix *y = gpu->distribute_rows_hdf5_file("/home/tim/data/mnist/y.hdf5");
BatchAllocator b = BatchAllocator();
b.init(X,y,(1.0-0.85715),128,128,*gpu, Single_GPU);
Layer *l0 = new Layer(X->cols,128,Input,gpu);
//l0->PARALLELISM = DataParallelism;
Layer *l1 = new Layer(2048, Logistic, l0);
//l1->PARALLELISM = DataParallelism;
Layer *l2 = new Layer(2048, Logistic, l1);
//l2->PARALLELISM = DataParallelism;
Layer *l3 = new Layer(10, Softmax, l2);
//l3->PARALLELISM = DataParallelism;
l0->DROPOUT = 0.2f;
l0->set_hidden_dropout(0.5f);
cout << gpu->MYRANK << endl;
float decay = 0.99f;
gpu->tick();
gpu->tick("pass");
for(int epoch = 0; epoch < 100; epoch++)
{
cout << "EPOCH: " << epoch + 1 << endl;
b.propagate_through_layers(l0,Training);
b.propagate_through_layers(l0,Trainerror);
b.propagate_through_layers(l0,CVerror);
l0->learning_rate_decay(decay);
if(epoch == 75)
{
l0->dropout_decay();
decay = 0.85f;
}
}
gpu->tock("pass");
gpu->tock();
gpu->shutdown_MPI();
/*
cudaSetDevice(0);
Matrix *X = read_hdf5("/home/tim/data/mnist/X.hdf5");
Matrix *y = read_hdf5("/home/tim/data/mnist/y.hdf5");
ClusterNet gpu = ClusterNet(1235);
BatchAllocator b = BatchAllocator();
std::vector<int> layers;
layers.push_back(1200);
layers.push_back(1200);
std::vector<float> dropout;
dropout.push_back(0.2f);
dropout.push_back(0.5f);
dropout.push_back(0.5f);
BatchAllocator allocator = BatchAllocator();
allocator.init(X,y,(1.0-0.8571429),128,256,gpu, Single_GPU);
DeepNeuralNetwork net = DeepNeuralNetwork(layers,Classification, gpu, allocator, 10);
net.EPOCHS = 500;
net.TRANSITION_EPOCH = 75;
net.LEARNING_RATE = 0.003;
net.UPDATE_TYPE = RMSProp;
net.DROPOUT = dropout;
//net.MAIN_UNIT = Double_Rectified_Linear;
net.train();
*/
//cudaSetDevice(1);
//ClusterNet *gpus = new ClusterNet(123635);
//WikiMaxoutNet_PCIe net = WikiMaxoutNet_PCIe(gpus);
//net.run();
/*
cudaSetDevice(0);
struct arg_struct *args0 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus0 = new ClusterNet(23452345);
WikiMaxoutNet *net0 = new WikiMaxoutNet(gpus0[0]);
args0->gpus = gpus0;
args0->net = net0;
args0->device = 0;
net0->run();
pthread_t t0;
pthread_create(&t0, NULL, &run_net, args0);
cudaSetDevice(1);
struct arg_struct *args1 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus1 = new ClusterNet(23452345);
WikiMaxoutNet *net1 = new WikiMaxoutNet(gpus1[0]);
args1->gpus = gpus1;
args1->net = net1;
args1->device = 1;
pthread_t t1;
//pthread_create(&t1, NULL, &run_net, args1);
cudaSetDevice(2);
struct arg_struct *args2 = (arg_struct*)malloc(sizeof(arg_struct));
ClusterNet *gpus2 = new ClusterNet(23452345);
WikiMaxoutNet *net2 = new WikiMaxoutNet(gpus2[0]);
args2->gpus = gpus2;
args2->net = net2;
args2->device = 2;
pthread_t t2;
//pthread_create(&t2, NULL, &run_net, args2);
cout << "rolfen kek!" << endl;
void* result0;
void* result1;
void* result2;
pthread_join(t0,&result0);
//pthread_join(t1,&result1);
//pthread_join(t2,&result2);
*/
}
|
bfcb8d46f3ecb6fb2a12b6a7e0b2a75dc4d9340b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "query_cross_entropy.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <contrib/libs/cub/cub/util_ptx.cuh>
#include <cassert>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): multiple docs per thread to reduce sync overhead
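// Computes per-document first and second derivatives for the QueryCrossEntropy
// loss, a blend of the plain pointwise logloss (llp) and a logloss evaluated at
// a per-query shifted approx (llmax). The shift is fitted for each query inside
// the block via shared-memory reductions: a bisection phase followed by damped
// Newton steps.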
template<int BlockSize, bool IsSingleClassBlock>
__forceinline__ __device__ void QueryCrossEntropySingleBlockImpl(const float alpha,
const float* targets,
const float* weights,
const float* values,
const int offset,
const int size,
const int* qids,
const ui32* qOffsets,
const bool* isSingleClassFlags,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
__shared__ float sharedDer[BlockSize];
__shared__ float sharedDer2[BlockSize];
isSingleClassFlags += offset;
qids += offset;
if (ders) {
ders += offset;
}
if (ders2llp) {
ders2llp += offset;
}
if (ders2llmax) {
ders2llmax += offset;
}
const float MAX_SHIFT = 20;
const int tid = threadIdx.x;
const int loadIdx = tid < size ? offset + tid : 0;
const bool isSingleClass = tid < size ? isSingleClassFlags[tid] : true;
const int tidQid = tid < size ? Ldg(qids + tid) : -1;
const ui32 queryOffset = tid < size ? Ldg(qOffsets + tidQid) : 0;
const int querySize = tid < size ? Ldg(qOffsets + tidQid + 1) - queryOffset : 0;
const int localIdx = tid < size ? offset + tid - queryOffset : 0;
const float clazz = tid < size ? Ldg(targets + loadIdx) : 0;
const float cursor = tid < size ? Ldg(values + loadIdx) : 0;
const float w = tid < size ? Ldg(weights + loadIdx) : 0;
float left = -MAX_SHIFT;
float right = MAX_SHIFT;
float bestShift = (left + right) / 2;
int reduceSize = 0;
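// Only blocks that contain non-single-class queries need to fit the per-query shift.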
if (!IsSingleClassBlock) {
{
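// Starting stride for the per-query segmented reductions, derived from the
// longest query in this block.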
sharedDer[tid] = querySize;
__syncthreads();
for (int s = BlockSize >> 1; s > 0; s >>= 1) {
if (tid < s) {
sharedDer[tid] = max(sharedDer[tid], sharedDer[tid + s]);
}
__syncthreads();
}
reduceSize = (1 << int(ceil(log2(sharedDer[0])) - 1));
__syncthreads();
}
float midDer = 0;
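// Bisection: 8 iterations narrow [left, right] around the root of the
// aggregated derivative of the shifted loglikelihood w.r.t. the shift.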
#pragma unroll
for (int i = 0; i < 8; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb((isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f));
sharedDer[tid] = w * (clazz - p);
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
}
__syncthreads();
}
midDer = sharedDer[tid - localIdx];
if (midDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift = (left + right) / 2;
__syncthreads();
}
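// Refinement: 5 damped Newton steps on the shift, clamped to stay inside the
// current [left, right] bracket.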
#pragma unroll
for (int i = 0; i < 5; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb(isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f);
__syncthreads();
sharedDer[tid] = w * (clazz - p);
sharedDer2[tid] = w * (1.0f - p) * p;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
float currentDer = sharedDer[tid - localIdx];
if (currentDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift += currentDer / (sharedDer2[tid - localIdx] + 1e-9f);
if (bestShift > right) {
bestShift = 0.1f * left + 0.9f * right;
}
if (bestShift < left) {
bestShift = 0.9f * left + 0.1f * right;
}
__syncthreads();
}
}
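// Evaluate the blended loss and its derivatives at the original and the
// shifted approx.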
const float shiftedApprox = cursor + bestShift;
const float expVal = __expf(cursor);
const float expShiftedVal = __expf(shiftedApprox);
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1.0f + expVal) : cursor;
const float llp = (tid < size) ? (clazz * cursor - logExpValPlusOne) : 0;
const float logExpValPlusOneShifted = isfinite(expShiftedVal) ? __logf(1.0f + expShiftedVal) : shiftedApprox;
const float llmax = (tid < size) ? (clazz * shiftedApprox - logExpValPlusOneShifted) : 0;
const float docScore = (1.0f - alpha) * llp + (isSingleClass ? 0 : alpha * llmax);
sharedDer[tid] = w * docScore;
__syncthreads();
float blockScore = FastInBlockReduce(tid, sharedDer, BlockSize);
if (tid == 0) {
atomicAdd(functionValue, blockScore);
}
}
const float prob = ClipProb(isfinite(expVal + 1.0f) ? expVal / (1.0f + expVal) : 1.0f);
const float shiftedProb = ClipProb(isfinite(expShiftedVal + 1.0f) ? expShiftedVal / (1.0f + expShiftedVal) : 1.0f);
if (ders && (tid < size)) {
const float derllp = clazz - prob;
const float derllmax = isSingleClass ? 0 : clazz - shiftedProb;
ders[tid] = w * ((1.0f - alpha) * derllp + alpha * derllmax);
}
if (ders2llp && (tid < size)) {
ders2llp[tid] = w * (1.0f - alpha) * prob * (1.0f - prob);
}
float der2llmax = isSingleClass ? 0 : w * alpha * shiftedProb * (1.0f - shiftedProb);
if (ders2llmax && (tid < size)) {
ders2llmax[tid] = der2llmax;
}
if (groupDers2) {
float groupDer2 = 0;
if (!IsSingleClassBlock) {
__syncthreads();
sharedDer2[tid] = der2llmax;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
if (localIdx == 0 && tid < size) {
groupDer2 = sharedDer2[tid - localIdx];
}
}
if (localIdx == 0 && tid < size) {
groupDers2[tidQid] = groupDer2;
}
}
}
template<int BlockSize>
__global__ void QueryCrossEntropyImpl(volatile int* qidCursor,
const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const int* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int size,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
while (true) {
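// Each block claims the next run of whole queries (covering at most BlockSize docs) by advancing the global qidCursor with atomicCAS.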
int taskQid = 0;
int offset = 0;
int nextTaskOffset = 0;
{
__shared__ int sharedTaskQid;
__shared__ int sharedTaskOffset;
__shared__ int sharedNextTaskOffset;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true) {
if (taskQid >= qCount) {
break;
}
offset = qOffsets[taskQid];
nextTaskOffset = min(offset + BlockSize, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
nextTaskOffset = qOffsets[nextTaskQid];
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedTaskQid = taskQid;
sharedTaskOffset = offset;
sharedNextTaskOffset = nextTaskOffset;
}
__syncthreads();
taskQid = sharedTaskQid;
offset = sharedTaskOffset;
nextTaskOffset = sharedNextTaskOffset;
__syncthreads();
}
if (taskQid >= qCount) {
return;
}
const int blockSize = nextTaskOffset - offset;
//we assume that docs are sorted by the isSingleClass mask,
//otherwise this will be slower for adv-pools
//first part - queries with pairs
//second part - all other queries
bool isSingleClassBlock = threadIdx.x < blockSize ? Ldg(isSingleClassQueries + offset + threadIdx.x) : true;
{
__shared__ float sharedFlags[BlockSize];
sharedFlags[threadIdx.x] = isSingleClassBlock ? 1.0f : 0.0f;
using TOp = TCudaMultiply<float>;
float tmp = FastInBlockReduce<float, TOp>(threadIdx.x, sharedFlags, BlockSize);
if (threadIdx.x == 0) {
sharedFlags[0] = tmp;
}
__syncthreads();
isSingleClassBlock = sharedFlags[0] > 0;
__syncthreads();
}
#define COMPUTE_SINGLE_GROUP(IsSingleClassQuery) \
QueryCrossEntropySingleBlockImpl<BlockSize, IsSingleClassQuery>(alpha, \
targets, weights, values,\
offset, blockSize,\
qids, qOffsets,\
isSingleClassQueries,\
functionValue,\
ders,\
ders2llp,\
ders2llmax,\
groupDers2);
if (isSingleClassBlock) {
COMPUTE_SINGLE_GROUP(true);
} else {
COMPUTE_SINGLE_GROUP(false);
}
__syncthreads();
}
}
void QueryCrossEntropy(int* qidCursor, const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const ui32* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int docCount,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(qidCursor, 0, 1, stream);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
hipLaunchKernelGGL(( QueryCrossEntropyImpl<blockSize>) , dim3(maxBlocksPerSm * smCount), dim3(blockSize), 0, stream, qidCursor, qCount, alpha,
targets, weights, values,
(int*)qids, isSingleClassQueries, qOffsets,
docCount,
functionValue,
ders, ders2llp, ders2llmax, groupDers2);
}
__global__ void ComputeQueryLogitMatrixSizesImpl(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSizes) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const bool isSingleClassFlag = i < qCount ? Ldg(isSingleClassQuery + queryOffsets[i]) : true;
const ui32 qSize = (i < qCount && !isSingleClassFlag) ? queryOffsets[i + 1] - queryOffsets[i] : 0;
if (i <= qCount) {
matrixSizes[i] = qSize * (qSize - 1) / 2;
}
}
void ComputeQueryLogitMatrixSizes(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSize,
TCudaStream stream) {
const ui32 blockSize = 256;
//matrix count is qCount + 1 (for last index)
const ui32 numBlocks = (qCount + blockSize) / blockSize;
hipLaunchKernelGGL(( ComputeQueryLogitMatrixSizesImpl), dim3(numBlocks), dim3(blockSize), 0, stream, queryOffsets, isSingleClassQuery, qCount, matrixSize);
}
template<int BlockSize, int ThreadsPerQuery>
__global__ void MakePairsQueryLogitImpl(const ui32* queryOffsets,
const ui32* matrixOffsets,
const bool* isSingleClassQuery,
ui32 queryCount,
uint2* pairs) {
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 queryOffset = qid < queryCount ? queryOffsets[qid] : 0;
const bool singleClassFlag = qid < queryCount ? isSingleClassQuery[queryOffset] : true;
ui32 querySize = (qid < queryCount && !singleClassFlag) ? queryOffsets[qid + 1] - queryOffset : 0;
ui32 matrixOffset = qid < queryCount ? matrixOffsets[qid] : 0;
const int x = threadIdx.x & (ThreadsPerQuery - 1);
const ui32 matrixSize = querySize * (querySize - 1) / 2;
pairs += matrixOffset;
for (int i = x; i < matrixSize; i += ThreadsPerQuery) {
uint2 pair = GetPair(i);
pair.x += queryOffset;
pair.y += queryOffset;
pairs[i] = pair;
}
}
void MakeQueryLogitPairs(const ui32* qOffsets,
const ui32* matrixOffset,
const bool* isSingleFlags,
double meanQuerySize,
ui32 qCount,
uint2* pairs,
TCudaStream stream) {
const int blockSize = 128;
#define MAKE_PAIRS(threadsPerQuery) \
const int numBlocks = (qCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
hipLaunchKernelGGL(( MakePairsQueryLogitImpl<blockSize, threadsPerQuery>) , dim3(numBlocks), dim3(blockSize), 0, stream , qOffsets, matrixOffset, isSingleFlags, qCount, pairs); \
}
if (meanQuerySize < 4) {
MAKE_PAIRS(4)
} else if (meanQuerySize < 8) {
MAKE_PAIRS(8)
} else if (meanQuerySize < 16) {
MAKE_PAIRS(16)
} else {
MAKE_PAIRS(32)
}
#undef MAKE_PAIRS
}
template<int BlockSize, int ThreadsPerQuery>
__global__ void MakeIsSingleClassFlagsImpl(const int* queryOffsets, int queryCount,
const ui32* loadIndices, const float* targets,
bool* isSingleClassQuery) {
int bias = queryCount ? Ldg(queryOffsets) : 0;
auto workingTile = tiled_partition<ThreadsPerQuery>(this_thread_block());
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ ui32 results[BlockSize];
const int queryOffset = (qid < queryCount) ? (queryOffsets[qid] - bias) : 0;
const int querySize = (qid < queryCount) ? (queryOffsets[qid + 1] - bias - queryOffset) : 0;
const ui32 firstIdx = qid < queryCount ? loadIndices[queryOffset] : 0;
float firstTarget = Ldg(targets + firstIdx);
int isSingleClass = 1;
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
const ui32 loadIdx = loadIndices[queryOffset + i];
float docTarget = Ldg(targets + loadIdx);
if (abs(firstTarget - docTarget) > 1e-5f) {
isSingleClass = 0;
}
}
using TOp = TCudaMultiply<int>;
isSingleClass = TileReduce<int, ThreadsPerQuery, TOp>(workingTile, isSingleClass);
if (workingTile.thread_rank() == 0) {
results[localQid] = isSingleClass;
workingTile.sync();
}
isSingleClass = results[localQid];
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
isSingleClassQuery[queryOffset + i] = isSingleClass == 1;
}
}
void MakeIsSingleClassFlags(const float* targets, const ui32* loadIndices,
const ui32* queryOffsets,
ui32 queryCount,
double meanQuerySize,
bool* isSingleClassQuery,
TCudaStream stream) {
const int blockSize = 128;
#define RUN_KERNEL(threadsPerQuery) \
const int numBlocks = (queryCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
hipLaunchKernelGGL(( MakeIsSingleClassFlagsImpl<blockSize, threadsPerQuery>) , dim3(numBlocks), dim3(blockSize), 0, stream , (int*)queryOffsets, queryCount, loadIndices, targets, isSingleClassQuery); \
}
if (meanQuerySize < 2) {
RUN_KERNEL(2)
} else if (meanQuerySize < 4) {
RUN_KERNEL(4)
} else if (meanQuerySize < 8) {
RUN_KERNEL(8)
} else if (meanQuerySize < 16) {
RUN_KERNEL(16)
} else {
RUN_KERNEL(32)
}
#undef RUN_KERNEL
}
//for stochastic gradient
__global__ void FillPairDer2AndRemapPairDocumentsImpl(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pair.x = Ldg(docIds + pair.x);
pair.y = Ldg(docIds + pair.y);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
pairs[i] = pair;
}
}
void FillPairDer2AndRemapPairDocuments(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( FillPairDer2AndRemapPairDocumentsImpl), dim3(numBlocks), dim3(blockSize),0, stream , ders2, groupDers2, docIds, qids, pairCount, pairDer2, pairs);
}
}
}
| bfcb8d46f3ecb6fb2a12b6a7e0b2a75dc4d9340b.cu | #include "query_cross_entropy.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <contrib/libs/cub/cub/util_ptx.cuh>
#include <cassert>
#include <cstdio>
using namespace cooperative_groups;
namespace NKernel {
//TODO(noxoomo): multiple docs per thread to reduce sync overhead
template<int BlockSize, bool IsSingleClassBlock>
__forceinline__ __device__ void QueryCrossEntropySingleBlockImpl(const float alpha,
const float* targets,
const float* weights,
const float* values,
const int offset,
const int size,
const int* qids,
const ui32* qOffsets,
const bool* isSingleClassFlags,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
__shared__ float sharedDer[BlockSize];
__shared__ float sharedDer2[BlockSize];
isSingleClassFlags += offset;
qids += offset;
if (ders) {
ders += offset;
}
if (ders2llp) {
ders2llp += offset;
}
if (ders2llmax) {
ders2llmax += offset;
}
const float MAX_SHIFT = 20;
const int tid = threadIdx.x;
const int loadIdx = tid < size ? offset + tid : 0;
const bool isSingleClass = tid < size ? isSingleClassFlags[tid] : true;
const int tidQid = tid < size ? Ldg(qids + tid) : -1;
const ui32 queryOffset = tid < size ? Ldg(qOffsets + tidQid) : 0;
const int querySize = tid < size ? Ldg(qOffsets + tidQid + 1) - queryOffset : 0;
const int localIdx = tid < size ? offset + tid - queryOffset : 0;
const float clazz = tid < size ? Ldg(targets + loadIdx) : 0;
const float cursor = tid < size ? Ldg(values + loadIdx) : 0;
const float w = tid < size ? Ldg(weights + loadIdx) : 0;
float left = -MAX_SHIFT;
float right = MAX_SHIFT;
float bestShift = (left + right) / 2;
int reduceSize = 0;
if (!IsSingleClassBlock) {
{
sharedDer[tid] = querySize;
__syncthreads();
for (int s = BlockSize >> 1; s > 0; s >>= 1) {
if (tid < s) {
sharedDer[tid] = max(sharedDer[tid], sharedDer[tid + s]);
}
__syncthreads();
}
reduceSize = (1 << int(ceil(log2(sharedDer[0])) - 1));
__syncthreads();
}
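// First pass: 8 bisection steps on the per-query reduced gradient to bracket the optimal shift.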
float midDer = 0;
#pragma unroll
for (int i = 0; i < 8; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb((isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f));
sharedDer[tid] = w * (clazz - p);
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
}
__syncthreads();
}
midDer = sharedDer[tid - localIdx];
if (midDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift = (left + right) / 2;
__syncthreads();
}
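// Refine with 5 Newton steps on the reduced derivatives, pulling the shift back inside the (left, right) bracket if it overshoots.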
#pragma unroll
for (int i = 0; i < 5; ++i) {
const float tmp = __expf(cursor + bestShift);
const float p = ClipProb(isfinite(1.0f + tmp) ? (tmp / (1.0f + tmp)) : 1.0f);
__syncthreads();
sharedDer[tid] = w * (clazz - p);
sharedDer2[tid] = w * (1.0f - p) * p;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer[tid] += sharedDer[tid + s];
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
float currentDer = sharedDer[tid - localIdx];
if (currentDer > 0) {
left = bestShift;
} else {
right = bestShift;
}
bestShift += currentDer / (sharedDer2[tid - localIdx] + 1e-9f);
if (bestShift > right) {
bestShift = 0.1f * left + 0.9f * right;
}
if (bestShift < left) {
bestShift = 0.9f * left + 0.1f * right;
}
__syncthreads();
}
}
const float shiftedApprox = cursor + bestShift;
const float expVal = __expf(cursor);
const float expShiftedVal = __expf(shiftedApprox);
if (functionValue) {
const float logExpValPlusOne = isfinite(expVal) ? __logf(1.0f + expVal) : cursor;
const float llp = (tid < size) ? (clazz * cursor - logExpValPlusOne) : 0;
const float logExpValPlusOneShifted = isfinite(expShiftedVal) ? __logf(1.0f + expShiftedVal) : shiftedApprox;
const float llmax = (tid < size) ? (clazz * shiftedApprox - logExpValPlusOneShifted) : 0;
const float docScore = (1.0f - alpha) * llp + (isSingleClass ? 0 : alpha * llmax);
sharedDer[tid] = w * docScore;
__syncthreads();
float blockScore = FastInBlockReduce(tid, sharedDer, BlockSize);
if (tid == 0) {
atomicAdd(functionValue, blockScore);
}
}
const float prob = ClipProb(isfinite(expVal + 1.0f) ? expVal / (1.0f + expVal) : 1.0f);
const float shiftedProb = ClipProb(isfinite(expShiftedVal + 1.0f) ? expShiftedVal / (1.0f + expShiftedVal) : 1.0f);
if (ders && (tid < size)) {
const float derllp = clazz - prob;
const float derllmax = isSingleClass ? 0 : clazz - shiftedProb;
ders[tid] = w * ((1.0f - alpha) * derllp + alpha * derllmax);
}
if (ders2llp && (tid < size)) {
ders2llp[tid] = w * (1.0f - alpha) * prob * (1.0f - prob);
}
float der2llmax = isSingleClass ? 0 : w * alpha * shiftedProb * (1.0f - shiftedProb);
if (ders2llmax && (tid < size)) {
ders2llmax[tid] = der2llmax;
}
if (groupDers2) {
float groupDer2 = 0;
if (!IsSingleClassBlock) {
__syncthreads();
sharedDer2[tid] = der2llmax;
__syncthreads();
for (int s = reduceSize; s > 0; s >>= 1) {
if ((localIdx < s) && ((localIdx + s) < querySize)) {
sharedDer2[tid] += sharedDer2[tid + s];
}
__syncthreads();
}
if (localIdx == 0 && tid < size) {
groupDer2 = sharedDer2[tid - localIdx];
}
}
if (localIdx == 0 && tid < size) {
groupDers2[tidQid] = groupDer2;
}
}
}
template<int BlockSize>
__global__ void QueryCrossEntropyImpl(volatile int* qidCursor,
const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const int* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int size,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2) {
while (true) {
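// Each block claims the next run of whole queries (covering at most BlockSize docs) by advancing the global qidCursor with atomicCAS.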
int taskQid = 0;
int offset = 0;
int nextTaskOffset = 0;
{
__shared__ int sharedTaskQid;
__shared__ int sharedTaskOffset;
__shared__ int sharedNextTaskOffset;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true) {
if (taskQid >= qCount) {
break;
}
offset = qOffsets[taskQid];
nextTaskOffset = min(offset + BlockSize, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
nextTaskOffset = qOffsets[nextTaskQid];
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedTaskQid = taskQid;
sharedTaskOffset = offset;
sharedNextTaskOffset = nextTaskOffset;
}
__syncthreads();
taskQid = sharedTaskQid;
offset = sharedTaskOffset;
nextTaskOffset = sharedNextTaskOffset;
__syncthreads();
}
if (taskQid >= qCount) {
return;
}
const int blockSize = nextTaskOffset - offset;
//we assume that docs are sorted by the isSingleClass mask,
//otherwise this will be slower for adv-pools
//first part - queries with pairs
//second part - all other queries
bool isSingleClassBlock = threadIdx.x < blockSize ? Ldg(isSingleClassQueries + offset + threadIdx.x) : true;
{
__shared__ float sharedFlags[BlockSize];
sharedFlags[threadIdx.x] = isSingleClassBlock ? 1.0f : 0.0f;
using TOp = TCudaMultiply<float>;
float tmp = FastInBlockReduce<float, TOp>(threadIdx.x, sharedFlags, BlockSize);
if (threadIdx.x == 0) {
sharedFlags[0] = tmp;
}
__syncthreads();
isSingleClassBlock = sharedFlags[0] > 0;
__syncthreads();
}
#define COMPUTE_SINGLE_GROUP(IsSingleClassQuery) \
QueryCrossEntropySingleBlockImpl<BlockSize, IsSingleClassQuery>(alpha, \
targets, weights, values,\
offset, blockSize,\
qids, qOffsets,\
isSingleClassQueries,\
functionValue,\
ders,\
ders2llp,\
ders2llmax,\
groupDers2);
if (isSingleClassBlock) {
COMPUTE_SINGLE_GROUP(true);
} else {
COMPUTE_SINGLE_GROUP(false);
}
__syncthreads();
}
}
void QueryCrossEntropy(int* qidCursor, const int qCount,
const float alpha,
const float* targets,
const float* weights,
const float* values,
const ui32* qids,
const bool* isSingleClassQueries,
const ui32* qOffsets,
const int docCount,
float* functionValue,
float* ders,
float* ders2llp,
float* ders2llmax,
float* groupDers2,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(qidCursor, 0, 1, stream);
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
QueryCrossEntropyImpl<blockSize> <<<maxBlocksPerSm * smCount, blockSize, 0, stream>>>(qidCursor, qCount, alpha,
targets, weights, values,
(int*)qids, isSingleClassQueries, qOffsets,
docCount,
functionValue,
ders, ders2llp, ders2llmax, groupDers2);
}
__global__ void ComputeQueryLogitMatrixSizesImpl(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSizes) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
const bool isSingleClassFlag = i < qCount ? Ldg(isSingleClassQuery + queryOffsets[i]) : true;
const ui32 qSize = (i < qCount && !isSingleClassFlag) ? queryOffsets[i + 1] - queryOffsets[i] : 0;
if (i <= qCount) {
matrixSizes[i] = qSize * (qSize - 1) / 2;
}
}
void ComputeQueryLogitMatrixSizes(const ui32* queryOffsets,
const bool* isSingleClassQuery,
ui32 qCount,
ui32* matrixSize,
TCudaStream stream) {
const ui32 blockSize = 256;
//matrix count is qCount + 1 (for last index)
const ui32 numBlocks = (qCount + blockSize) / blockSize;
ComputeQueryLogitMatrixSizesImpl<<<numBlocks, blockSize, 0, stream>>>(queryOffsets, isSingleClassQuery, qCount, matrixSize);
}
template<int BlockSize, int ThreadsPerQuery>
__global__ void MakePairsQueryLogitImpl(const ui32* queryOffsets,
const ui32* matrixOffsets,
const bool* isSingleClassQuery,
ui32 queryCount,
uint2* pairs) {
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 queryOffset = qid < queryCount ? queryOffsets[qid] : 0;
const bool singleClassFlag = qid < queryCount ? isSingleClassQuery[queryOffset] : true;
ui32 querySize = (qid < queryCount && !singleClassFlag) ? queryOffsets[qid + 1] - queryOffset : 0;
ui32 matrixOffset = qid < queryCount ? matrixOffsets[qid] : 0;
const int x = threadIdx.x & (ThreadsPerQuery - 1);
const ui32 matrixSize = querySize * (querySize - 1) / 2;
pairs += matrixOffset;
for (int i = x; i < matrixSize; i += ThreadsPerQuery) {
uint2 pair = GetPair(i);
pair.x += queryOffset;
pair.y += queryOffset;
pairs[i] = pair;
}
}
void MakeQueryLogitPairs(const ui32* qOffsets,
const ui32* matrixOffset,
const bool* isSingleFlags,
double meanQuerySize,
ui32 qCount,
uint2* pairs,
TCudaStream stream) {
const int blockSize = 128;
#define MAKE_PAIRS(threadsPerQuery) \
const int numBlocks = (qCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
MakePairsQueryLogitImpl<blockSize, threadsPerQuery> <<< numBlocks, blockSize, 0, stream >>> (qOffsets, matrixOffset, isSingleFlags, qCount, pairs); \
}
if (meanQuerySize < 4) {
MAKE_PAIRS(4)
} else if (meanQuerySize < 8) {
MAKE_PAIRS(8)
} else if (meanQuerySize < 16) {
MAKE_PAIRS(16)
} else {
MAKE_PAIRS(32)
}
#undef MAKE_PAIRS
}
template<int BlockSize, int ThreadsPerQuery>
__global__ void MakeIsSingleClassFlagsImpl(const int* queryOffsets, int queryCount,
const ui32* loadIndices, const float* targets,
bool* isSingleClassQuery) {
int bias = queryCount ? Ldg(queryOffsets) : 0;
auto workingTile = tiled_partition<ThreadsPerQuery>(this_thread_block());
const int queriesPerBlock = BlockSize / ThreadsPerQuery;
const int localQid = threadIdx.x / ThreadsPerQuery;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ ui32 results[BlockSize];
const int queryOffset = (qid < queryCount) ? (queryOffsets[qid] - bias) : 0;
const int querySize = (qid < queryCount) ? (queryOffsets[qid + 1] - bias - queryOffset) : 0;
const ui32 firstIdx = qid < queryCount ? loadIndices[queryOffset] : 0;
float firstTarget = Ldg(targets + firstIdx);
int isSingleClass = 1;
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
const ui32 loadIdx = loadIndices[queryOffset + i];
float docTarget = Ldg(targets + loadIdx);
if (abs(firstTarget - docTarget) > 1e-5f) {
isSingleClass = 0;
}
}
using TOp = TCudaMultiply<int>;
isSingleClass = TileReduce<int, ThreadsPerQuery, TOp>(workingTile, isSingleClass);
if (workingTile.thread_rank() == 0) {
results[localQid] = isSingleClass;
workingTile.sync();
}
isSingleClass = results[localQid];
for (int i = workingTile.thread_rank(); i < querySize; i += ThreadsPerQuery) {
isSingleClassQuery[queryOffset + i] = isSingleClass == 1;
}
}
void MakeIsSingleClassFlags(const float* targets, const ui32* loadIndices,
const ui32* queryOffsets,
ui32 queryCount,
double meanQuerySize,
bool* isSingleClassQuery,
TCudaStream stream) {
const int blockSize = 128;
#define RUN_KERNEL(threadsPerQuery) \
const int numBlocks = (queryCount * threadsPerQuery + blockSize - 1) / blockSize; \
if (numBlocks > 0) { \
MakeIsSingleClassFlagsImpl<blockSize, threadsPerQuery> <<< numBlocks, blockSize, 0, stream >>> ((int*)queryOffsets, queryCount, loadIndices, targets, isSingleClassQuery); \
}
if (meanQuerySize < 2) {
RUN_KERNEL(2)
} else if (meanQuerySize < 4) {
RUN_KERNEL(4)
} else if (meanQuerySize < 8) {
RUN_KERNEL(8)
} else if (meanQuerySize < 16) {
RUN_KERNEL(16)
} else {
RUN_KERNEL(32)
}
#undef RUN_KERNEL
}
//for stochastic gradient
__global__ void FillPairDer2AndRemapPairDocumentsImpl(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pair.x = Ldg(docIds + pair.x);
pair.y = Ldg(docIds + pair.y);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
pairs[i] = pair;
}
}
void FillPairDer2AndRemapPairDocuments(const float* ders2,
const float* groupDers2,
const ui32* docIds,
const ui32* qids,
ui32 pairCount,
float* pairDer2,
uint2* pairs,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
FillPairDer2AndRemapPairDocumentsImpl<<< numBlocks, blockSize,0, stream >>>(ders2, groupDers2, docIds, qids, pairCount, pairDer2, pairs);
}
}
}
|
b12aa61196597aa58f64d48695833bd5a168f87a.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
#define TILE_WIDTH 16 //Chapter 4 page 98 from 3rd Edition of book.
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH]; //Chapter 4 page 90-91 from 3rd Edition of book.
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Cvalue = 0;
for(int ch = 0; ch < (TILE_WIDTH + numAColumns - 1) / TILE_WIDTH; ch++) {
if (Row < numARows && ch * TILE_WIDTH + tx < numAColumns) {
Ads[ty][tx] = A[Row * numAColumns + ch * TILE_WIDTH + tx];
}
else {
Ads[ty][tx] = 0.0;
}
if (ch * TILE_WIDTH + ty < numBRows && Col < numBColumns) {
Bds[ty][tx] = B[(ch * TILE_WIDTH + ty) * numBColumns + Col];
}
else {
Bds[ty][tx] = 0.0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++) {
Cvalue += Ads[ty][k] * Bds[k][tx];
}
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) {
C[((by*blockDim.y + ty)*numCColumns) + (bx*blockDim.x) + tx] = Cvalue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int allocSizeC = numCRows * numCColumns * sizeof(float);
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
int allocSizeA = sizeof(float) * numARows * numAColumns;
int allocSizeB = sizeof(float) * numBRows * numBColumns;
hipMalloc((void **)&deviceA, allocSizeA);
hipMalloc((void **)&deviceB, allocSizeB);
hipMalloc((void **)&deviceC, allocSizeC);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceA, hostA, allocSizeA, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, allocSizeB, hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(16, 16, 1);
dim3 DimGrid((numBColumns - 1) / 16 + 1, (numARows - 1) / 16 + 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( matrixMultiplyShared) , dim3(DimGrid), dim3(DimBlock) , 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostC, deviceC, allocSizeC, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
| b12aa61196597aa58f64d48695833bd5a168f87a.cu | #include <wb.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
// Compute C = A * B
__global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
//@@ Insert code to implement tiled matrix multiplication here
//@@ You have to use shared memory to write this kernel
#define TILE_WIDTH 16 //Chapter 4 page 98 from 3rd Edition of book.
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH]; //Chapter 4 page 90-91 from 3rd Edition of book.
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Cvalue = 0;
for(int ch = 0; ch < (TILE_WIDTH + numAColumns - 1) / TILE_WIDTH; ch++) {
if (Row < numARows && ch * TILE_WIDTH + tx < numAColumns) {
Ads[ty][tx] = A[Row * numAColumns + ch * TILE_WIDTH + tx];
}
else {
Ads[ty][tx] = 0.0;
}
if (ch * TILE_WIDTH + ty < numBRows && Col < numBColumns) {
Bds[ty][tx] = B[(ch * TILE_WIDTH + ty) * numBColumns + Col];
}
else {
Bds[ty][tx] = 0.0;
}
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++) {
Cvalue += Ads[ty][k] * Bds[k][tx];
}
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) {
C[((by*blockDim.y + ty)*numCColumns) + (bx*blockDim.x) + tx] = Cvalue;
}
}
int main(int argc, char **argv) {
wbArg_t args;
float *hostA; // The A matrix
float *hostB; // The B matrix
float *hostC; // The output C matrix
float *deviceA;
float *deviceB;
float *deviceC;
int numARows; // number of rows in the matrix A
int numAColumns; // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set
// this)
hostC = NULL;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns);
hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns);
//@@ Set numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
//@@ Allocate the hostC matrix
wbTime_stop(Generic, "Importing data and creating memory on host");
int allocSizeC = numCRows * numCColumns * sizeof(float);
hostC = (float*)malloc(numCRows * numCColumns * sizeof(float));
wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns);
wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
int allocSizeA = sizeof(float) * numARows * numAColumns;
int allocSizeB = sizeof(float) * numBRows * numBColumns;
cudaMalloc((void **)&deviceA, allocSizeA);
cudaMalloc((void **)&deviceB, allocSizeB);
cudaMalloc((void **)&deviceC, allocSizeC);
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceA, hostA, allocSizeA, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, allocSizeB, cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
dim3 DimBlock(16, 16, 1);
dim3 DimGrid((numBColumns - 1) / 16 + 1, (numARows - 1) / 16 + 1, 1);
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
matrixMultiplyShared <<<DimGrid, DimBlock >>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostC, deviceC, allocSizeC, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostC, numCRows, numCColumns);
free(hostA);
free(hostB);
free(hostC);
return 0;
}
|
f28eed9d369eae61bbdae73be48d0b0bcc9c30f8.hip | // !!! This is a file automatically generated by hipify!!!
//Compute the 1D FFT using "fftwf_plan_dft_1d".
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#include <cufftw.h>
#define SIGNAL_SIZE 10
int main()
{
int i;
cuFloatComplex *h_xn;
cuFloatComplex *h_Xk;
fftwf_complex *in,*out;
//Allocate memory for h_xn on the host
h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*SIGNAL_SIZE);
//Allocate memory for h_Xk on the host
h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*SIGNAL_SIZE);
//Assign values to x[n]
for(i=0;i<SIGNAL_SIZE;i++)
{
//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
h_xn[i] = make_cuFloatComplex((float)(i+1),(float)(0.0));
}
//Print the input values x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(i = 0; i<SIGNAL_SIZE;i++)
{
printf(" x[%d] = (%f) + (%f)\n",i,cuCrealf(h_xn[i]),cuCimagf(h_xn[i]));
}
//Allocate memory for "in" on the device
hipMalloc((void**)&in,sizeof(hipfftComplex)*SIGNAL_SIZE);
//Allocate memory for "out" on the device
hipMalloc((void**)&out,sizeof(hipfftComplex)*SIGNAL_SIZE);
//Copy the data from h_xn >>> in
hipMemcpy(in,h_xn,sizeof(cuFloatComplex)*SIGNAL_SIZE,hipMemcpyHostToDevice);
//CUFFT plan
fftw_plan plan;
//hipfftHandle plan;
plan = fftwf_plan_dft_1d(SIGNAL_SIZE,in,out,FFTW_FORWARD,FFTW_ESTIMATE);
//hipfftPlan1d(&plan, SIGNAL_SIZE, HIPFFT_C2C, 1);
//Execute the FFT
fftwf_execute(plan);
//Copy the data from out >>> h_Xk
hipMemcpy(h_Xk,out,sizeof(hipfftComplex)*SIGNAL_SIZE,hipMemcpyDeviceToHost);
//Print the output values X[k]
printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
for(i = 0; i<SIGNAL_SIZE;i++)
{
printf(" X[%d] = (%f) + (%f)\n",i,cuCrealf(h_Xk[i]),cuCimagf(h_Xk[i]));
}
//Destroy the plan
fftwf_destroy_plan(plan);
//Free the allocated memory
free(h_xn);
free(h_Xk);
hipFree(in);
hipFree(out);
}
| f28eed9d369eae61bbdae73be48d0b0bcc9c30f8.cu | //Calculo de la FFT 1D usando "fftwf_plan_dft_1d".
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#include <cufftw.h>
#define SIGNAL_SIZE 10
int main()
{
int i;
cuFloatComplex *h_xn;
cuFloatComplex *h_Xk;
fftwf_complex *in,*out;
//Allocate memory for h_xn on the host
h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*SIGNAL_SIZE);
//Allocate memory for h_Xk on the host
h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*SIGNAL_SIZE);
//Assign values to x[n]
for(i=0;i<SIGNAL_SIZE;i++)
{
//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
h_xn[i] = make_cuFloatComplex((float)(i+1),(float)(0.0));
}
//Print the input values x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(i = 0; i<SIGNAL_SIZE;i++)
{
printf(" x[%d] = (%f) + (%f)\n",i,cuCrealf(h_xn[i]),cuCimagf(h_xn[i]));
}
//Allocate memory for "in" on the device
cudaMalloc((void**)&in,sizeof(cufftComplex)*SIGNAL_SIZE);
//Allocate memory for "out" on the device
cudaMalloc((void**)&out,sizeof(cufftComplex)*SIGNAL_SIZE);
//Copy the data from h_xn >>> in
cudaMemcpy(in,h_xn,sizeof(cuFloatComplex)*SIGNAL_SIZE,cudaMemcpyHostToDevice);
//CUFFT plan
fftw_plan plan;
//cufftHandle plan;
plan = fftwf_plan_dft_1d(SIGNAL_SIZE,in,out,FFTW_FORWARD,FFTW_ESTIMATE);
//cufftPlan1d(&plan, SIGNAL_SIZE, CUFFT_C2C, 1);
//Execute the FFT
fftwf_execute(plan);
//Copy the data from out >>> h_Xk
cudaMemcpy(h_Xk,out,sizeof(cufftComplex)*SIGNAL_SIZE,cudaMemcpyDeviceToHost);
//Print the output values X[k]
printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
for(i = 0; i<SIGNAL_SIZE;i++)
{
printf(" X[%d] = (%f) + (%f)\n",i,cuCrealf(h_Xk[i]),cuCimagf(h_Xk[i]));
}
//Destroy the plan
fftwf_destroy_plan(plan);
//Free the allocated memory
free(h_xn);
free(h_Xk);
cudaFree(in);
cudaFree(out);
}
|
be57ed61aadb4b54c77679b4ac479992a0ae55b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ge_fmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
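// For each (matrix size, block shape) pair: allocate device buffers, do one launch plus 10 warm-up launches of ge_fmax, then time a loop of 1000 launches and print [time_us, (block), (matrix)].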
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
REAL *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((ge_fmax), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((ge_fmax), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((ge_fmax), dim3(gridBlock), dim3(threadBlock), 0, 0, sd, fd, a, offset_a, ld_a, b, offset_b, ld_b, c, offset_c, ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | be57ed61aadb4b54c77679b4ac479992a0ae55b5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ge_fmax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
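// For each (matrix size, block shape) pair: allocate device buffers, do one launch plus 10 warm-up launches of ge_fmax, then time a loop of 1000 launches and print [time_us, (block), (matrix)].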
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int fd = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
REAL *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ge_fmax<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ge_fmax<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ge_fmax<<<gridBlock,threadBlock>>>(sd,fd,a,offset_a,ld_a,b,offset_b,ld_b,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
559ae3179351c698c2ef74d749148c33913a88ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define DEFER_MAX 16
#define D_BLOCK_QUEUE_SIZE 2048
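// Low-degree vertices (degree < DEFER_MAX) are expanded inline; higher-degree vertices are pushed to a per-block shared-memory queue (while space remains) and expanded by the whole block after the main loop.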
__global__ void kernel_cuda_frontier_numbers_defer(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *still_running,
int iteration)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
__shared__ int queue_size;
__shared__ int next_queue[D_BLOCK_QUEUE_SIZE];
if (threadIdx.x == 0)
{
queue_size = 0;
}
__syncthreads();
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && result[vertex] == iteration)
{
if (v_adj_length[vertex] < DEFER_MAX || queue_size >= D_BLOCK_QUEUE_SIZE - blockDim.x)
{
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = iteration + 1;
*still_running = true;
}
}
}
else
{
// Add to queue (atomicAdd returns original value)
int position = atomicAdd(&queue_size, 1);
next_queue[position] = vertex;
}
}
__syncthreads();
}
// Process outliers
for (int v = 0; v < queue_size; v += blockDim.x)
{
if (v + threadIdx.x < queue_size)
{
int vertex = next_queue[v + threadIdx.x];
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = iteration + 1;
*still_running = true;
}
}
}
}
}
int bfs_cuda_frontier_numbers_defer(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_still_running;
int kernel_runs = 0;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
hipMalloc(&k_v_adj_list, sizeof(int) * num_edges);
hipMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
hipMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
hipMalloc(&k_result, sizeof(int) * num_vertices);
hipMalloc(&k_still_running, sizeof(bool) * 1);
hipMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
hipMemcpy(k_result, result, sizeof(int) * num_vertices, hipMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
do
{
*still_running = false;
hipMemcpy(k_still_running, still_running, sizeof(bool) * 1, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_cuda_frontier_numbers_defer), dim3(BLOCKS), dim3(THREADS), 0, 0,
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_still_running,
kernel_runs);
kernel_runs++;
hipMemcpy(still_running, k_still_running, sizeof(bool) * 1, hipMemcpyDeviceToHost);
} while (*still_running);
hipDeviceSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
hipMemcpy(result, k_result, sizeof(int) * num_vertices, hipMemcpyDeviceToHost);
hipFree(k_v_adj_list);
hipFree(k_v_adj_begin);
hipFree(k_v_adj_length);
hipFree(k_result);
hipFree(k_still_running);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
| 559ae3179351c698c2ef74d749148c33913a88ed.cu | // Accelerating large graph algorithms on the GPU using CUDA
// http://dl.acm.org/citation.cfm?id=1782200
#define DEFER_MAX 16
#define D_BLOCK_QUEUE_SIZE 2048
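// Low-degree vertices (degree < DEFER_MAX) are expanded inline; higher-degree vertices are pushed to a per-block shared-memory queue (while space remains) and expanded by the whole block after the main loop.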
__global__ void kernel_cuda_frontier_numbers_defer(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int *result,
bool *still_running,
int iteration)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int num_threads = blockDim.x * gridDim.x;
__shared__ int queue_size;
__shared__ int next_queue[D_BLOCK_QUEUE_SIZE];
if (threadIdx.x == 0)
{
queue_size = 0;
}
__syncthreads();
for (int v = 0; v < num_vertices; v += num_threads)
{
int vertex = v + tid;
if (vertex < num_vertices && result[vertex] == iteration)
{
if (v_adj_length[vertex] < DEFER_MAX || queue_size >= D_BLOCK_QUEUE_SIZE - blockDim.x)
{
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = iteration + 1;
*still_running = true;
}
}
}
else
{
// Add to queue (atomicAdd returns original value)
int position = atomicAdd(&queue_size, 1);
next_queue[position] = vertex;
}
}
__syncthreads();
}
// Process outliers
for (int v = 0; v < queue_size; v += blockDim.x)
{
if (v + threadIdx.x < queue_size)
{
int vertex = next_queue[v + threadIdx.x];
for (int n = 0; n < v_adj_length[vertex]; n++)
{
int neighbor = v_adj_list[v_adj_begin[vertex] + n];
if (result[neighbor] == MAX_DIST)
{
result[neighbor] = iteration + 1;
*still_running = true;
}
}
}
}
}
int bfs_cuda_frontier_numbers_defer(
int *v_adj_list,
int *v_adj_begin,
int *v_adj_length,
int num_vertices,
int num_edges,
int start_vertex,
int *result)
{
int *k_v_adj_list;
int *k_v_adj_begin;
int *k_v_adj_length;
int *k_result;
bool *k_still_running;
int kernel_runs = 0;
fill_n(result, num_vertices, MAX_DIST);
result[start_vertex] = 0;
bool *still_running = new bool[1];
cudaMalloc(&k_v_adj_list, sizeof(int) * num_edges);
cudaMalloc(&k_v_adj_begin, sizeof(int) * num_vertices);
cudaMalloc(&k_v_adj_length, sizeof(int) * num_vertices);
cudaMalloc(&k_result, sizeof(int) * num_vertices);
cudaMalloc(&k_still_running, sizeof(bool) * 1);
cudaMemcpy(k_v_adj_list, v_adj_list, sizeof(int) * num_edges, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_begin, v_adj_begin, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_v_adj_length, v_adj_length, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
cudaMemcpy(k_result, result, sizeof(int) * num_vertices, cudaMemcpyHostToDevice);
// --- START MEASURE TIME ---
struct timeval t1, t2;
gettimeofday(&t1, NULL);
do
{
*still_running = false;
cudaMemcpy(k_still_running, still_running, sizeof(bool) * 1, cudaMemcpyHostToDevice);
kernel_cuda_frontier_numbers_defer<<<BLOCKS, THREADS>>>(
k_v_adj_list,
k_v_adj_begin,
k_v_adj_length,
num_vertices,
k_result,
k_still_running,
kernel_runs);
kernel_runs++;
cudaMemcpy(still_running, k_still_running, sizeof(bool) * 1, cudaMemcpyDeviceToHost);
} while (*still_running);
cudaThreadSynchronize();
gettimeofday(&t2, NULL);
long long time = get_elapsed_time(&t1, &t2);
if (report_time)
{
printf("%s,%i,%i,%i,%i,%lld\n", __FILE__, num_vertices, num_edges, BLOCKS, THREADS, time);
}
// --- END MEASURE TIME ---
cudaMemcpy(result, k_result, sizeof(int) * num_vertices, cudaMemcpyDeviceToHost);
cudaFree(k_v_adj_list);
cudaFree(k_v_adj_begin);
cudaFree(k_v_adj_length);
cudaFree(k_result);
cudaFree(k_still_running);
// printf("%i kernel runs\n", kernel_runs);
return time;
}
|
afadb5668f60b15eb9cf5669af2838fbdbc30acd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/table_utilities.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <random>
#include <type_traits>
#include "../fixture/benchmark_fixture.hpp"
#include "../synchronization/synchronization.hpp"
using namespace cudf;
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
template <class Float, FunctorType ft>
struct Functor<Float, ft, typename std::enable_if_t<std::is_floating_point<Float>::value>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
return x;
}
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) { A[c][index] = F::f(A[c][index]); }
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType>
void operator()(mutable_column_device_view source_column,
int work_per_thread,
hipStream_t stream = 0)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
hipLaunchKernelGGL(( host_dispatching_kernel<functor_type, ColumnType>)
, dim3(grid_size), dim3(block_size), 0, stream, source_column);
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
template <FunctorType functor_type>
struct RowHandle {
template <typename T>
__device__ void operator()(mutable_column_device_view source, cudf::size_type index)
{
using F = Functor<T, functor_type>;
source.data<T>()[index] = F::f(source.data<T>()[index]);
}
};
// This is for DEVICE_DISPATCHING
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(mutable_table_device_view source)
{
const cudf::size_type n_rows = source.num_rows();
cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
while (index < n_rows) {
for (cudf::size_type i = 0; i < source.num_columns(); i++) {
cudf::type_dispatcher(
source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
}
index += blockDim.x * gridDim.x;
} // while
}
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(mutable_table_view input, T** d_ptr, int work_per_thread)
{
const cudf::size_type n_rows = input.num_rows();
const cudf::size_type n_cols = input.num_columns();
cudf::detail::grid_1d grid_config{n_rows, block_size};
int grid_size = grid_config.num_blocks;
if (dispatching_type == HOST_DISPATCHING) {
// std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
for (int c = 0; c < n_cols; c++) {
auto d_column = mutable_column_device_view::create(input.column(c));
cudf::type_dispatcher(
d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
}
} else if (dispatching_type == DEVICE_DISPATCHING) {
auto d_table_view = mutable_table_device_view::create(input);
auto f = device_dispatching_kernel<functor_type>;
// Launch the kernel
hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, *d_table_view);
} else if (dispatching_type == NO_DISPATCHING) {
auto f = no_dispatching_kernel<functor_type, T>;
// Launch the kernel
hipLaunchKernelGGL(( f), dim3(grid_size), dim3(block_size), 0, 0, d_ptr, n_rows, n_cols);
}
}
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(benchmark::State& state)
{
const cudf::size_type source_size = static_cast<cudf::size_type>(state.range(1));
const cudf::size_type n_cols = static_cast<cudf::size_type>(state.range(0));
const cudf::size_type work_per_thread = static_cast<cudf::size_type>(state.range(2));
auto data = cudf::test::make_counting_transform_iterator(0, [](auto i) { return i; });
std::vector<cudf::test::fixed_width_column_wrapper<TypeParam>> source_column_wrappers;
std::vector<cudf::mutable_column_view> source_columns;
for (int i = 0; i < n_cols; ++i) {
source_column_wrappers.push_back(
cudf::test::fixed_width_column_wrapper<TypeParam>(data, data + source_size));
source_columns.push_back(source_column_wrappers[i]);
}
cudf::mutable_table_view source_table{source_columns};
// For no dispatching
std::vector<rmm::device_vector<TypeParam>> h_vec(n_cols,
rmm::device_vector<TypeParam>(source_size, 0));
std::vector<TypeParam*> h_vec_p(n_cols);
for (int c = 0; c < n_cols; c++) { h_vec_p[c] = h_vec[c].data().get(); }
rmm::device_vector<TypeParam*> d_vec(n_cols);
if (dispatching_type == NO_DISPATCHING) {
CUDA_TRY(hipMemcpy(
d_vec.data().get(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, hipMemcpyHostToDevice));
}
// Warm up
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data().get(), work_per_thread);
CUDA_TRY(hipDeviceSynchronize());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
launch_kernel<functor_type, dispatching_type>(
source_table, d_vec.data().get(), work_per_thread);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {
};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
| afadb5668f60b15eb9cf5669af2838fbdbc30acd.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/cudf_gtest.hpp>
#include <tests/utilities/table_utilities.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <random>
#include <type_traits>
#include "../fixture/benchmark_fixture.hpp"
#include "../synchronization/synchronization.hpp"
using namespace cudf;
enum DispatchingType { HOST_DISPATCHING, DEVICE_DISPATCHING, NO_DISPATCHING };
enum FunctorType { BANDWIDTH_BOUND, COMPUTE_BOUND };
template <class NotFloat, FunctorType ft, class DisableNotFloat = void>
struct Functor {
static __device__ NotFloat f(NotFloat x) { return x; }
};
template <class Float, FunctorType ft>
struct Functor<Float, ft, typename std::enable_if_t<std::is_floating_point<Float>::value>> {
static __device__ Float f(Float x)
{
if (ft == BANDWIDTH_BOUND) {
return x + static_cast<Float>(1) - static_cast<Float>(1);
} else {
for (int i = 0; i < 1000; i++) {
x = (x * x + static_cast<Float>(1)) - x * x - static_cast<Float>(1);
}
return x;
}
return x;
}
};
constexpr int block_size = 256;
// This is for NO_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void no_dispatching_kernel(T** A, cudf::size_type n_rows, cudf::size_type n_cols)
{
using F = Functor<T, functor_type>;
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < n_rows) {
for (int c = 0; c < n_cols; c++) { A[c][index] = F::f(A[c][index]); }
index += blockDim.x * gridDim.x;
}
}
// This is for HOST_DISPATCHING
template <FunctorType functor_type, class T>
__global__ void host_dispatching_kernel(mutable_column_device_view source_column)
{
using F = Functor<T, functor_type>;
T* A = source_column.data<T>();
cudf::size_type index = blockIdx.x * blockDim.x + threadIdx.x;
while (index < source_column.size()) {
A[index] = F::f(A[index]);
index += blockDim.x * gridDim.x;
}
}
template <FunctorType functor_type>
struct ColumnHandle {
template <typename ColumnType>
void operator()(mutable_column_device_view source_column,
int work_per_thread,
cudaStream_t stream = 0)
{
cudf::detail::grid_1d grid_config{source_column.size(), block_size};
int grid_size = grid_config.num_blocks;
// Launch the kernel.
host_dispatching_kernel<functor_type, ColumnType>
<<<grid_size, block_size, 0, stream>>>(source_column);
}
};
// The following is for DEVICE_DISPATCHING:
// The dispatching is done on device. The loop loops over
// each row (across different columns). Type is dispatched each time
// a column is visited so the total number of dispatching is
// n_rows * n_cols.
template <FunctorType functor_type>
struct RowHandle {
template <typename T>
__device__ void operator()(mutable_column_device_view source, cudf::size_type index)
{
using F = Functor<T, functor_type>;
source.data<T>()[index] = F::f(source.data<T>()[index]);
}
};
// This is for DEVICE_DISPATCHING
template <FunctorType functor_type>
__global__ void device_dispatching_kernel(mutable_table_device_view source)
{
const cudf::size_type n_rows = source.num_rows();
cudf::size_type index = threadIdx.x + blockIdx.x * blockDim.x;
while (index < n_rows) {
for (cudf::size_type i = 0; i < source.num_columns(); i++) {
cudf::type_dispatcher(
source.column(i).type(), RowHandle<functor_type>{}, source.column(i), index);
}
index += blockDim.x * gridDim.x;
} // while
}
template <FunctorType functor_type, DispatchingType dispatching_type, class T>
void launch_kernel(mutable_table_view input, T** d_ptr, int work_per_thread)
{
const cudf::size_type n_rows = input.num_rows();
const cudf::size_type n_cols = input.num_columns();
cudf::detail::grid_1d grid_config{n_rows, block_size};
int grid_size = grid_config.num_blocks;
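  // Three code paths: HOST_DISPATCHING resolves the column type on the host once per column,
  // DEVICE_DISPATCHING re-dispatches the type on the device for every row, and NO_DISPATCHING
  // skips the type dispatcher entirely and operates on raw device pointers.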
if (dispatching_type == HOST_DISPATCHING) {
// std::vector<cudf::util::cuda::scoped_stream> v_stream(n_cols);
for (int c = 0; c < n_cols; c++) {
auto d_column = mutable_column_device_view::create(input.column(c));
cudf::type_dispatcher(
d_column->type(), ColumnHandle<functor_type>{}, *d_column, work_per_thread);
}
} else if (dispatching_type == DEVICE_DISPATCHING) {
auto d_table_view = mutable_table_device_view::create(input);
auto f = device_dispatching_kernel<functor_type>;
// Launch the kernel
f<<<grid_size, block_size>>>(*d_table_view);
} else if (dispatching_type == NO_DISPATCHING) {
auto f = no_dispatching_kernel<functor_type, T>;
// Launch the kernel
f<<<grid_size, block_size>>>(d_ptr, n_rows, n_cols);
}
}
template <class TypeParam, FunctorType functor_type, DispatchingType dispatching_type>
void type_dispatcher_benchmark(benchmark::State& state)
{
const cudf::size_type source_size = static_cast<cudf::size_type>(state.range(1));
const cudf::size_type n_cols = static_cast<cudf::size_type>(state.range(0));
const cudf::size_type work_per_thread = static_cast<cudf::size_type>(state.range(2));
auto data = cudf::test::make_counting_transform_iterator(0, [](auto i) { return i; });
std::vector<cudf::test::fixed_width_column_wrapper<TypeParam>> source_column_wrappers;
std::vector<cudf::mutable_column_view> source_columns;
for (int i = 0; i < n_cols; ++i) {
source_column_wrappers.push_back(
cudf::test::fixed_width_column_wrapper<TypeParam>(data, data + source_size));
source_columns.push_back(source_column_wrappers[i]);
}
cudf::mutable_table_view source_table{source_columns};
// For no dispatching
std::vector<rmm::device_vector<TypeParam>> h_vec(n_cols,
rmm::device_vector<TypeParam>(source_size, 0));
std::vector<TypeParam*> h_vec_p(n_cols);
for (int c = 0; c < n_cols; c++) { h_vec_p[c] = h_vec[c].data().get(); }
rmm::device_vector<TypeParam*> d_vec(n_cols);
if (dispatching_type == NO_DISPATCHING) {
CUDA_TRY(cudaMemcpy(
d_vec.data().get(), h_vec_p.data(), sizeof(TypeParam*) * n_cols, cudaMemcpyHostToDevice));
}
// Warm up
launch_kernel<functor_type, dispatching_type>(source_table, d_vec.data().get(), work_per_thread);
CUDA_TRY(cudaDeviceSynchronize());
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
launch_kernel<functor_type, dispatching_type>(
source_table, d_vec.data().get(), work_per_thread);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * source_size * n_cols * 2 *
sizeof(TypeParam));
}
class TypeDispatcher : public cudf::benchmark {
};
#define TBM_BENCHMARK_DEFINE(name, TypeParam, functor_type, dispatching_type) \
BENCHMARK_DEFINE_F(TypeDispatcher, name)(::benchmark::State & state) \
{ \
type_dispatcher_benchmark<TypeParam, functor_type, dispatching_type>(state); \
} \
BENCHMARK_REGISTER_F(TypeDispatcher, name) \
->RangeMultiplier(2) \
->Ranges({{1, 8}, {1 << 10, 1 << 26}, {1, 1}}) \
->UseManualTime();
TBM_BENCHMARK_DEFINE(fp64_bandwidth_host, double, BANDWIDTH_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_device, double, BANDWIDTH_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_bandwidth_no, double, BANDWIDTH_BOUND, NO_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_host, double, COMPUTE_BOUND, HOST_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_device, double, COMPUTE_BOUND, DEVICE_DISPATCHING);
TBM_BENCHMARK_DEFINE(fp64_compute_no, double, COMPUTE_BOUND, NO_DISPATCHING);
|
986b7a6d15f6cbb8b0af835ffc6597a6b9e731f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "cudacpp\DeviceVector.h"
template<typename type, int size>
__global__ void setKernel(type* c, type val)
{
auto idx = threadIdx.x * size;
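  // Each thread fills its own run of `size` consecutive elements with `val`.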
#pragma unroll(size)
for (auto i = 0; i < size; i++) {
c[idx] = val;
idx++;
}
} | 986b7a6d15f6cbb8b0af835ffc6597a6b9e731f8.cu |
//#include "cudacpp\DeviceVector.h"
template<typename type, int size>
__global__ void setKernel(type* c, type val)
{
auto idx = threadIdx.x * size;
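  // Each thread fills its own run of `size` consecutive elements with `val`.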
#pragma unroll(size)
for (auto i = 0; i < size; i++) {
c[idx] = val;
idx++;
}
} |
a88ec20a8ca860f63270d07bf93c9c35848120fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* tex2d_addressing_device.cu
*
* Microdemo for 2D texturing from device memory.
*
* Build with: nvcc -I ../chLib <options> tex2d_addressing_device.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <float.h>
#include <assert.h>
#include <chError.h>
texture<float2, 2, hipReadModeElementType> tex;
extern "C" __global__ void
TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment )
{
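    // Grid-stride loops over the output surface: value.x/value.y record the texture
    // coordinate that was sampled, value.z/value.w record the float2 texel fetched there.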
for ( int row = blockIdx.y*blockDim.y + threadIdx.y;
row < Height;
row += blockDim.y*gridDim.y )
{
float4 *outrow = (float4 *) ((char *) out+row*Pitch);
for ( int col = blockIdx.x*blockDim.x + threadIdx.x;
col < Width;
col += blockDim.x*gridDim.x )
{
float4 value;
float2 texvalue;
value.x = base.x+(float)col*increment.x;
value.y = base.y+(float)row*increment.y;
texvalue = tex2D( tex, value.x, value.y);
value.z = texvalue.x;
value.w = texvalue.y;
outrow[col] = value;
}
}
}
template<class T>
void
CreateAndPrintTex(
T *initTex,
size_t inWidth, size_t inHeight,
size_t outWidth, size_t outHeight,
float2 base, float2 increment,
hipTextureFilterMode filterMode,
hipTextureAddressMode addressModeX,
hipTextureAddressMode addressModeY )
{
T *texContents = 0;
T *texDevice = 0;
size_t texPitch;
float4 *outHost = 0, *outDevice = 0;
hipError_t status;
size_t outPitch;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<T>();
dim3 blocks, threads;
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( inWidth*inHeight*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int row = 0; row < inHeight; row++ ) {
T *rowptr = texContents + row*inWidth;
for ( int col = 0; col < outHeight; col++ ) {
T value;
value.x = (float) col;
value.y = (float) row;
rowptr[col] = value;
}
}
}
cuda(MallocPitch( &texDevice,
&texPitch,
inWidth*sizeof(T),
inHeight));
cuda(Memcpy2D( texDevice, texPitch,
texContents, inWidth*sizeof(T),
inWidth*sizeof(T),
inHeight,
hipMemcpyHostToDevice));
cuda(BindTexture2D( NULL,
&tex,
texDevice,
&channelDesc,
inWidth,
inHeight,
texPitch ));
outPitch = outWidth*sizeof(float4);
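    // Round the output row pitch up to the next 64-byte boundary.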
outPitch = (outPitch+0x3f)&~0x3f;
cuda(HostAlloc( (void **) &outHost, outWidth*outPitch, hipHostMallocMapped));
cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 ));
tex.filterMode = filterMode;
tex.addressMode[0] = addressModeX;
tex.addressMode[1] = addressModeY;
blocks.x = 2;
blocks.y = 1;
threads.x = 64; threads.y = 4;
hipLaunchKernelGGL(( TexReadout), dim3(blocks),dim3(threads), 0, 0, outDevice, outWidth, outPitch, outHeight, base, increment );
cuda(DeviceSynchronize());
for ( int row = 0; row < outHeight; row++ ) {
float4 *outrow = (float4 *) ((char *) outHost + row*outPitch);
for ( int col = 0; col < outWidth; col++ ) {
printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w );
}
printf( "\n" );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
hipFree( texDevice );
hipHostFree( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
hipError_t status;
cuda(SetDeviceFlags(hipDeviceMapHost));
cuda(Free(0));
// go through once each with linear and point filtering
do {
tex.normalized = false;
tex.filterMode = hipFilterModePoint;
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
float2 base, increment;
base.x = 0.0f;//-1.0f;
base.y = 0.0f;//-1.0f;
increment.x = 1.0f;
increment.y = 1.0f;
// CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
} while ( tex.filterMode == hipFilterModeLinear );
ret = 0;
Error:
return ret;
}
| a88ec20a8ca860f63270d07bf93c9c35848120fa.cu | /*
*
* tex2d_addressing_device.cu
*
* Microdemo for 2D texturing from device memory.
*
* Build with: nvcc -I ../chLib <options> tex2d_addressing_device.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include <float.h>
#include <assert.h>
#include <chError.h>
texture<float2, 2, cudaReadModeElementType> tex;
extern "C" __global__ void
TexReadout( float4 *out, size_t Width, size_t Pitch, size_t Height, float2 base, float2 increment )
{
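    // Grid-stride loops over the output surface: value.x/value.y record the texture
    // coordinate that was sampled, value.z/value.w record the float2 texel fetched there.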
for ( int row = blockIdx.y*blockDim.y + threadIdx.y;
row < Height;
row += blockDim.y*gridDim.y )
{
float4 *outrow = (float4 *) ((char *) out+row*Pitch);
for ( int col = blockIdx.x*blockDim.x + threadIdx.x;
col < Width;
col += blockDim.x*gridDim.x )
{
float4 value;
float2 texvalue;
value.x = base.x+(float)col*increment.x;
value.y = base.y+(float)row*increment.y;
texvalue = tex2D( tex, value.x, value.y);
value.z = texvalue.x;
value.w = texvalue.y;
outrow[col] = value;
}
}
}
template<class T>
void
CreateAndPrintTex(
T *initTex,
size_t inWidth, size_t inHeight,
size_t outWidth, size_t outHeight,
float2 base, float2 increment,
cudaTextureFilterMode filterMode,
cudaTextureAddressMode addressModeX,
cudaTextureAddressMode addressModeY )
{
T *texContents = 0;
T *texDevice = 0;
size_t texPitch;
float4 *outHost = 0, *outDevice = 0;
cudaError_t status;
size_t outPitch;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<T>();
dim3 blocks, threads;
// use caller-provided array, if any, to initialize texture
if ( initTex ) {
texContents = initTex;
}
else {
// default is to initialize with identity elements
texContents = (T *) malloc( inWidth*inHeight*sizeof(T) );
if ( ! texContents )
goto Error;
for ( int row = 0; row < inHeight; row++ ) {
T *rowptr = texContents + row*inWidth;
for ( int col = 0; col < outHeight; col++ ) {
T value;
value.x = (float) col;
value.y = (float) row;
rowptr[col] = value;
}
}
}
cuda(MallocPitch( &texDevice,
&texPitch,
inWidth*sizeof(T),
inHeight));
cuda(Memcpy2D( texDevice, texPitch,
texContents, inWidth*sizeof(T),
inWidth*sizeof(T),
inHeight,
cudaMemcpyHostToDevice));
cuda(BindTexture2D( NULL,
&tex,
texDevice,
&channelDesc,
inWidth,
inHeight,
texPitch ));
outPitch = outWidth*sizeof(float4);
outPitch = (outPitch+0x3f)&~0x3f;
cuda(HostAlloc( (void **) &outHost, outWidth*outPitch, cudaHostAllocMapped));
cuda(HostGetDevicePointer( (void **) &outDevice, outHost, 0 ));
tex.filterMode = filterMode;
tex.addressMode[0] = addressModeX;
tex.addressMode[1] = addressModeY;
blocks.x = 2;
blocks.y = 1;
threads.x = 64; threads.y = 4;
TexReadout<<<blocks,threads>>>( outDevice, outWidth, outPitch, outHeight, base, increment );
cuda(DeviceSynchronize());
for ( int row = 0; row < outHeight; row++ ) {
float4 *outrow = (float4 *) ((char *) outHost + row*outPitch);
for ( int col = 0; col < outWidth; col++ ) {
printf( "(%.1f, %.1f) ", outrow[col].z, outrow[col].w );
}
printf( "\n" );
}
printf( "\n" );
Error:
if ( ! initTex ) free( texContents );
cudaFree( texDevice );
cudaFreeHost( outHost );
}
int
main( int argc, char *argv[] )
{
int ret = 1;
cudaError_t status;
cuda(SetDeviceFlags(cudaDeviceMapHost));
cuda(Free(0));
// go through once each with linear and point filtering
do {
tex.normalized = false;
tex.filterMode = cudaFilterModePoint;
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
float2 base, increment;
base.x = 0.0f;//-1.0f;
base.y = 0.0f;//-1.0f;
increment.x = 1.0f;
increment.y = 1.0f;
// CreateAndPrintTex<float2>( NULL, 8, 8, 8, 8, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
CreateAndPrintTex<float2>( NULL, 256, 256, 256, 256, base, increment, tex.filterMode, tex.addressMode[0], tex.addressMode[1] );
} while ( tex.filterMode == cudaFilterModeLinear );
ret = 0;
Error:
return ret;
}
|
51a78d768e0f990b2821f0cd279151077a520f3c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "segmentMax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_vec = NULL;
hipMalloc(&d_vec, XSIZE*YSIZE);
float *segmentMaxes = NULL;
hipMalloc(&segmentMaxes, XSIZE*YSIZE);
const int length = 1;
const int HighLength = 1;
const int HighSegmentLength = 1;
const int threadsHigh = 1;
const int LowSegmentLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
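 // iXSIZE/iYSIZE are now padded up to multiples of the block dimensions so the grid divides evenly.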
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
 hipFree(0);
 hipLaunchKernelGGL((segmentMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
hipDeviceSynchronize();
 for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
 hipLaunchKernelGGL((segmentMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
}
auto start = steady_clock::now();
 for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
 hipLaunchKernelGGL((segmentMax), dim3(gridBlock),dim3(threadBlock), 0, 0, d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 51a78d768e0f990b2821f0cd279151077a520f3c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "segmentMax.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_vec = NULL;
cudaMalloc(&d_vec, XSIZE*YSIZE);
float *segmentMaxes = NULL;
cudaMalloc(&segmentMaxes, XSIZE*YSIZE);
const int length = 1;
const int HighLength = 1;
const int HighSegmentLength = 1;
const int threadsHigh = 1;
const int LowSegmentLength = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
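 // iXSIZE/iYSIZE are now padded up to multiples of the block dimensions so the grid divides evenly.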
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
segmentMax<<<gridBlock,threadBlock>>>(d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
segmentMax<<<gridBlock,threadBlock>>>(d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
segmentMax<<<gridBlock,threadBlock>>>(d_vec,segmentMaxes,length,HighLength,HighSegmentLength,threadsHigh,LowSegmentLength);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
edaaa6bd6d602711078b672ee465531e9400f7de.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cutlass/wmma_matrix.h>
#ifdef CUTLASS_USE_WMMA_API
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cutlass/gemm/gemm.h>
#include <tools/test/perf/gemm/gemm_profiler.h>
#include <tools/test/perf/gemm/cutlass_dispatch.h>
#include <tools/test/perf/gemm/gemm_perf_testbed.h>
#include <cutlass/gemm/wmma_gemm_traits.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Traits>
struct WmmaGemmDispatch {
typedef cutlass::gemm::Gemm<Traits> Gemm;
typedef typename Gemm::Params Params;
/// Indicate warp-level GEMM
static bool const kThreadMultiplyAdd = false;
static cutlass::MatrixLayout::Kind const kLayoutA = Traits::kLayoutA;
static cutlass::MatrixLayout::Kind const kLayoutB = Traits::kLayoutB;
//
// Data members
//
/// Params argument
Params params;
//
// Methods
//
WmmaGemmDispatch() {}
/// Initializes params object
WmmaGemmDispatch(int m, int n, int k, float alpha, half const* d_a, int lda,
half const* d_b, int ldb, float beta, float const* d_c, int ldc,
float* d_d, int ldd) {
params.initialize(m, n, k, alpha, d_a, lda, d_b, ldb, beta, d_c, ldc, d_d, ldd);
}
/// Initializes params object
WmmaGemmDispatch(Params const& _params) : params(_params) {}
/// Launches kernel
hipError_t operator()() { return Gemm::launch(params); }
/// Determines if problem is aligned (assuming no padding)
static bool is_problem_aligned(
int m,
int n,
int k) {
bool aligned = true;
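    // With no padding, the extent along each operand's contiguous dimension (and along C)
    // must be a multiple of the scalars loaded per LDG instruction.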
if (kLayoutA == cutlass::MatrixLayout::kColumnMajor) {
aligned = aligned && !(m % Gemm::Traits::GemmConfig::kScalarsPerLdgA);
}
else {
aligned = aligned && !(k % Gemm::Traits::GemmConfig::kScalarsPerLdgA);
}
if (kLayoutB == cutlass::MatrixLayout::kColumnMajor) {
aligned = aligned && !(k % Gemm::Traits::GemmConfig::kScalarsPerLdgB);
}
else {
aligned = aligned && !(n % Gemm::Traits::GemmConfig::kScalarsPerLdgB);
}
aligned = aligned && !(m % Gemm::Traits::GemmConfig::kScalarsPerLdgC);
return aligned;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace perf {
////////////////////////////////////////////////////////////////////////////////////////////////////
int profile_wmma_gemm(TestbenchOutput &output, TestbenchOptions const &options) {
typedef perf::GemmProfiler<cutlass::half_t, cutlass::half_t, float, float, float> GemmProfiler;
int results = 0;
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_nt", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_nn", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_tn", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_tt", options);
}
return results;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace perf
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // defined CUTLASS_USE_WMMA_API
| edaaa6bd6d602711078b672ee465531e9400f7de.cu | /***************************************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cutlass/wmma_matrix.h>
#ifdef CUTLASS_USE_WMMA_API
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <cutlass/gemm/gemm.h>
#include <tools/test/perf/gemm/gemm_profiler.h>
#include <tools/test/perf/gemm/cutlass_dispatch.h>
#include <tools/test/perf/gemm/gemm_perf_testbed.h>
#include <cutlass/gemm/wmma_gemm_traits.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename Traits>
struct WmmaGemmDispatch {
typedef cutlass::gemm::Gemm<Traits> Gemm;
typedef typename Gemm::Params Params;
/// Indicate warp-level GEMM
static bool const kThreadMultiplyAdd = false;
static cutlass::MatrixLayout::Kind const kLayoutA = Traits::kLayoutA;
static cutlass::MatrixLayout::Kind const kLayoutB = Traits::kLayoutB;
//
// Data members
//
/// Params argument
Params params;
//
// Methods
//
WmmaGemmDispatch() {}
/// Initializes params object
WmmaGemmDispatch(int m, int n, int k, float alpha, half const* d_a, int lda,
half const* d_b, int ldb, float beta, float const* d_c, int ldc,
float* d_d, int ldd) {
params.initialize(m, n, k, alpha, d_a, lda, d_b, ldb, beta, d_c, ldc, d_d, ldd);
}
/// Initializes params object
WmmaGemmDispatch(Params const& _params) : params(_params) {}
/// Launches kernel
cudaError_t operator()() { return Gemm::launch(params); }
/// Determines if problem is aligned (assuming no padding)
static bool is_problem_aligned(
int m,
int n,
int k) {
bool aligned = true;
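    // With no padding, the extent along each operand's contiguous dimension (and along C)
    // must be a multiple of the scalars loaded per LDG instruction.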
if (kLayoutA == cutlass::MatrixLayout::kColumnMajor) {
aligned = aligned && !(m % Gemm::Traits::GemmConfig::kScalarsPerLdgA);
}
else {
aligned = aligned && !(k % Gemm::Traits::GemmConfig::kScalarsPerLdgA);
}
if (kLayoutB == cutlass::MatrixLayout::kColumnMajor) {
aligned = aligned && !(k % Gemm::Traits::GemmConfig::kScalarsPerLdgB);
}
else {
aligned = aligned && !(n % Gemm::Traits::GemmConfig::kScalarsPerLdgB);
}
aligned = aligned && !(m % Gemm::Traits::GemmConfig::kScalarsPerLdgC);
return aligned;
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////
namespace perf {
////////////////////////////////////////////////////////////////////////////////////////////////////
int profile_wmma_gemm(TestbenchOutput &output, TestbenchOptions const &options) {
typedef perf::GemmProfiler<cutlass::half_t, cutlass::half_t, float, float, float> GemmProfiler;
int results = 0;
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_nt", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kColumnMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_nn", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_tn", options);
}
if (!results) {
typedef cutlass::gemm::WmmaGemmTraits<cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kRowMajor>
WmmaGemmTraits;
typedef WmmaGemmDispatch<WmmaGemmTraits> Dispatch;
profile_gemm<Dispatch, GemmProfiler>(output, "wmma_gemm_tt", options);
}
return results;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace perf
////////////////////////////////////////////////////////////////////////////////////////////////////
#endif // defined CUTLASS_USE_WMMA_API
|
9f57e6a0130d2afefb6ab695a87eda48260b6b4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <[email protected]> on 3/21/2018.
//
#include "ResultSet.h"
#include <ops/declarable/helpers/matrix_diag.h>
#include <Status.h>
#include <ShapeUtils.h>
#include <ShapeUtils.h>
#include <TAD.h>
#include <cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void matrixDiagKernel(void const* inputBuffer, void* outputBuffer, Nd4jLong numTads, Nd4jLong inputLength,
Nd4jLong* tadOnlyInputShapeInfo, Nd4jLong *tadInputOffsets,
Nd4jLong* tadOnlyOutputShapeInfo, Nd4jLong *tadOutputOffsets) {
int totalThreads = blockDim.x;
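    // Blocks stride over the diagonals (TADs); within each, threads stride over the
    // diagonal elements and write input[j] to position (j, j) of the matching output tile.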
for (Nd4jLong i = blockIdx.x; i < numTads; i += gridDim.x) {
auto yOffset = tadInputOffsets[i];
auto xOffset = tadOutputOffsets[i];
for (Nd4jLong j = threadIdx.x; j < inputLength; j += totalThreads) {
Nd4jLong coords[2] = {j, j};
Nd4jLong tadOffset = shape::getOffset(0, shape::shapeOf(tadOnlyOutputShapeInfo), shape::stride(tadOnlyOutputShapeInfo), coords, 2);
//shape::getIndexOffset(j, tadOnlyOutputShapeInfo, inputLength)
*(reinterpret_cast<T*>(outputBuffer) + xOffset + tadOffset) = *(reinterpret_cast<T const*>(inputBuffer) + yOffset + shape::getIndexOffset(j, tadOnlyInputShapeInfo, inputLength));
}
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
template <typename T>
static int _matrixDiag(nd4j::LaunchContext * context, const NDArray* input, NDArray* output) {
hipStream_t* stream = context->getCudaStream();
//auto listOut = output->allTensorsAlongDimension({output->rankOf() - 2, output->rankOf() - 1});
//auto listDiag = input->allTensorsAlongDimension({input->rankOf() - 1});
//auto repeatDelta = shape::prodLong(newShape.data(), rank) / this->lengthOf();
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(input->getShapeInfo(), dimsToExclude); //this->tensorsAlongDimension({dimension});
//printf("Repeat delta %lld, numTads %lld\n", repeatDelta, numTads);
//tadOnlyInputShapeInfo, tadInputOffsets, tadOnlyOutputShapeInfo, tadOutputOffsets;
std::vector<int> inputDims({input->rankOf() - 1});
std::vector<int> outputDims({output->rankOf() - 2, output->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), inputDims);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), outputDims);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
if (!output->isActualOnDeviceSide())
output->syncToDevice();
// create cuda stream and LaunchContext
hipError_t cudaResult;
dim3 launchDims(256, 512, 8192);
hipLaunchKernelGGL(( matrixDiagKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, input->getSpecialBuffer(), output->getSpecialBuffer(), numTads, input->sizeAt(-1), packX.specialShapeInfo(), packX.specialOffsets(), packZ.specialShapeInfo(), packZ.specialOffsets());
return Status::OK();
}
int matrixDiag(nd4j::LaunchContext * context, const NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _matrixDiag, (context, input, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int _matrixDiag, (nd4j::LaunchContext * context, const NDArray* input, NDArray* output), LIBND4J_TYPES);
}
}
} | 9f57e6a0130d2afefb6ab695a87eda48260b6b4c.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <[email protected]> on 3/21/2018.
//
#include "ResultSet.h"
#include <ops/declarable/helpers/matrix_diag.h>
#include <Status.h>
#include <ShapeUtils.h>
#include <ShapeUtils.h>
#include <TAD.h>
#include <cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void matrixDiagKernel(void const* inputBuffer, void* outputBuffer, Nd4jLong numTads, Nd4jLong inputLength,
Nd4jLong* tadOnlyInputShapeInfo, Nd4jLong *tadInputOffsets,
Nd4jLong* tadOnlyOutputShapeInfo, Nd4jLong *tadOutputOffsets) {
int totalThreads = blockDim.x;
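    // Blocks stride over the diagonals (TADs); within each, threads stride over the
    // diagonal elements and write input[j] to position (j, j) of the matching output tile.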
for (Nd4jLong i = blockIdx.x; i < numTads; i += gridDim.x) {
auto yOffset = tadInputOffsets[i];
auto xOffset = tadOutputOffsets[i];
for (Nd4jLong j = threadIdx.x; j < inputLength; j += totalThreads) {
Nd4jLong coords[2] = {j, j};
Nd4jLong tadOffset = shape::getOffset(0, shape::shapeOf(tadOnlyOutputShapeInfo), shape::stride(tadOnlyOutputShapeInfo), coords, 2);
//shape::getIndexOffset(j, tadOnlyOutputShapeInfo, inputLength)
*(reinterpret_cast<T*>(outputBuffer) + xOffset + tadOffset) = *(reinterpret_cast<T const*>(inputBuffer) + yOffset + shape::getIndexOffset(j, tadOnlyInputShapeInfo, inputLength));
}
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page: https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
template <typename T>
static int _matrixDiag(nd4j::LaunchContext * context, const NDArray* input, NDArray* output) {
cudaStream_t* stream = context->getCudaStream();
//auto listOut = output->allTensorsAlongDimension({output->rankOf() - 2, output->rankOf() - 1});
//auto listDiag = input->allTensorsAlongDimension({input->rankOf() - 1});
//auto repeatDelta = shape::prodLong(newShape.data(), rank) / this->lengthOf();
std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(input->rankOf(), {input->rankOf() - 1});
const Nd4jLong numTads = ShapeUtils::getNumOfSubArrs(input->getShapeInfo(), dimsToExclude); //this->tensorsAlongDimension({dimension});
//printf("Repeat delta %lld, numTads %lld\n", repeatDelta, numTads);
//tadOnlyInputShapeInfo, tadInputOffsets, tadOnlyOutputShapeInfo, tadOutputOffsets;
std::vector<int> inputDims({input->rankOf() - 1});
std::vector<int> outputDims({output->rankOf() - 2, output->rankOf() - 1});
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), inputDims);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), outputDims);
if (!input->isActualOnDeviceSide())
input->syncToDevice();
if (!output->isActualOnDeviceSide())
output->syncToDevice();
// create cuda stream and LaunchContext
cudaError_t cudaResult;
dim3 launchDims(256, 512, 8192);
matrixDiagKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(input->getSpecialBuffer(), output->getSpecialBuffer(), numTads, input->sizeAt(-1), packX.specialShapeInfo(), packX.specialOffsets(), packZ.specialShapeInfo(), packZ.specialOffsets());
return Status::OK();
}
int matrixDiag(nd4j::LaunchContext * context, const NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _matrixDiag, (context, input, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int _matrixDiag, (nd4j::LaunchContext * context, const NDArray* input, NDArray* output), LIBND4J_TYPES);
}
}
} |
bb6f11acad506660b409916b97e7a5812a419afe.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <vector>
#include <tuple>
#include "dali/core/geom/mat.h"
#include "dali/kernels/scratch.h"
#include "dali/core/tensor_shape.h"
#include "dali/kernels/common/copy.h"
#include "dali/test/mat2tensor.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/kernels/test/kernel_test_utils.h"
#include "dali/kernels/imgproc/pointwise/linear_transformation_gpu.h"
#include "dali/test/cv_mat_utils.h"
#include "dali/kernels/imgproc/roi.h"
namespace dali {
namespace kernels {
namespace test {
namespace {
constexpr int kNDims = 3;
constexpr int kNChannelsIn = 5;
constexpr int kNChannelsOut = 2;
} // namespace
template <class InputOutputTypes>
class LinearTransformationGpuTest : public ::testing::Test {
using In = typename InputOutputTypes::In;
using Out = typename InputOutputTypes::Out;
public:
LinearTransformationGpuTest() {
input_host_.resize(dataset_size(in_shapes_));
}
void SetUp() final {
std::mt19937_64 rng;
UniformRandomFill(input_host_, rng, 0., 10.);
calc_output();
CUDA_CALL(hipMalloc(&input_device_, sizeof(In) * dataset_size(in_shapes_)));
CUDA_CALL(hipMemcpy(input_device_, input_host_.data(), input_host_.size() * sizeof(In),
hipMemcpyDefault));
CUDA_CALL(hipMalloc(&output_, dataset_size(out_shapes_) * sizeof(Out)));
CUDA_CALL(hipDeviceSynchronize());
}
void TearDown() final {
CUDA_CALL(hipFree(input_device_));
CUDA_CALL(hipFree(output_));
}
In *input_device_;
Out *output_;
std::vector<In> input_host_;
std::vector<float> ref_output_;
std::vector<TensorShape<kNDims>> in_shapes_ = {{4, 3, kNChannelsIn}, {4, 3, kNChannelsIn}};
std::vector<TensorShape<kNDims>> out_shapes_ = {{4, 3, kNChannelsOut}, {4, 3, kNChannelsOut}};
mat<kNChannelsOut, kNChannelsIn, float> mat_{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}}};
vec<kNChannelsOut, float> vec_{42, 69};
std::vector<mat<kNChannelsOut, kNChannelsIn, float>> vmat_ = {mat_, mat_ + 1.f};
std::vector<vec<kNChannelsOut, float>> vvec_ = {vec_, vec_ + 1.f};
std::vector<Roi<2>> rois_ = {{{1, 1}, {2, 2}},
{{0, 1}, {1, 2}}};
void calc_output() {
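    // Host reference: for every pixel, out[j] = vec_[j] + sum_k in[k] * mat_(j, k).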
for (size_t i = 0; i < input_host_.size(); i += kNChannelsIn) {
for (size_t j = 0; j < kNChannelsOut; j++) {
float res = vec_.v[j];
for (size_t k = 0; k < kNChannelsIn; k++) {
res += static_cast<float>(input_host_[i + k]) * mat_.at(j, k);
}
ref_output_.push_back(res);
}
}
}
size_t dataset_size(const std::vector<TensorShape<kNDims>> &shapes) {
int ret = 0;
for (const auto &sh : shapes) {
ret += volume(sh);
}
return ret;
}
};
using TestTypes = std::tuple<float>;
/* Cause the line below takes RIDICULOUSLY long time to compile */
// using TestTypes = std::tuple<uint8_t, int8_t, uint16_t, int16_t, int32_t, float>;
INPUT_OUTPUT_TYPED_TEST_SUITE(LinearTransformationGpuTest, TestTypes);
namespace {
template <class GtestTypeParam>
using TheKernel = LinearTransformationGpu<typename GtestTypeParam::Out, typename GtestTypeParam::In,
kNChannelsOut, kNChannelsIn, kNDims - 1>;
} // namespace
TYPED_TEST(LinearTransformationGpuTest, check_kernel) {
check_kernel<TheKernel<TypeParam>>();
}
TYPED_TEST(LinearTransformationGpuTest, setup_test) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
ASSERT_EQ(this->out_shapes_.size(), static_cast<size_t>(reqs.output_shapes[0].num_samples()))
<< "Kernel::Setup provides incorrect shape";
for (size_t i = 0; i < this->out_shapes_.size(); i++) {
EXPECT_EQ(this->out_shapes_[i], reqs.output_shapes[0][i])
<< "Kernel::Setup provides incorrect shape";
}
}
TYPED_TEST(LinearTransformationGpuTest, setup_test_with_roi) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_),
make_cspan(this->rois_));
auto ref_shape = ShapeFromRoi(this->rois_[0], kNChannelsOut);
ASSERT_EQ(ref_shape, reqs.output_shapes[0][0]);
}
TYPED_TEST(LinearTransformationGpuTest, run_test) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
ScratchpadAllocator sa;
sa.Reserve(reqs.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
OutListGPU<typename TypeParam::Out, kNDims> out(
this->output_, reqs.output_shapes[0].template to_static<kNDims>());
kernel.Run(ctx, out, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
CUDA_CALL(hipDeviceSynchronize());
auto res = copy<mm::memory_kind::host>(out[0]);
auto ref_tv = TensorView<StorageCPU, typename TypeParam::Out>(this->ref_output_.data(),
this->out_shapes_[0]);
Check(res.first, ref_tv, EqualUlp());
}
TYPED_TEST(LinearTransformationGpuTest, run_test_with_roi) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in,
make_cspan(this->vmat_), make_cspan(this->vvec_),
make_cspan(this->rois_));
ScratchpadAllocator sa;
sa.Reserve(reqs.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
OutListGPU<typename TypeParam::Out, kNDims> out(
this->output_, reqs.output_shapes[0].template to_static<kNDims>());
kernel.Run(ctx, out, in,
make_cspan(this->vmat_), make_cspan(this->vvec_), make_cspan(this->rois_));
CUDA_CALL(hipDeviceSynchronize());
auto res = copy<mm::memory_kind::host>(out[0]);
auto mat = testing::copy_to_mat<kNChannelsOut>(
this->rois_[0],
this->ref_output_.data(),
this->out_shapes_[0][0],
this->out_shapes_[0][1]);
Check(view_as_tensor<typename TypeParam::Out>(mat), res.first, EqualUlp());
}
} // namespace test
} // namespace kernels
} // namespace dali
| bb6f11acad506660b409916b97e7a5812a419afe.cu | // Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <vector>
#include <tuple>
#include "dali/core/geom/mat.h"
#include "dali/kernels/scratch.h"
#include "dali/core/tensor_shape.h"
#include "dali/kernels/common/copy.h"
#include "dali/test/mat2tensor.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/kernels/test/kernel_test_utils.h"
#include "dali/kernels/imgproc/pointwise/linear_transformation_gpu.h"
#include "dali/test/cv_mat_utils.h"
#include "dali/kernels/imgproc/roi.h"
namespace dali {
namespace kernels {
namespace test {
namespace {
constexpr int kNDims = 3;
constexpr int kNChannelsIn = 5;
constexpr int kNChannelsOut = 2;
} // namespace
template <class InputOutputTypes>
class LinearTransformationGpuTest : public ::testing::Test {
using In = typename InputOutputTypes::In;
using Out = typename InputOutputTypes::Out;
public:
LinearTransformationGpuTest() {
input_host_.resize(dataset_size(in_shapes_));
}
void SetUp() final {
std::mt19937_64 rng;
UniformRandomFill(input_host_, rng, 0., 10.);
calc_output();
CUDA_CALL(cudaMalloc(&input_device_, sizeof(In) * dataset_size(in_shapes_)));
CUDA_CALL(cudaMemcpy(input_device_, input_host_.data(), input_host_.size() * sizeof(In),
cudaMemcpyDefault));
CUDA_CALL(cudaMalloc(&output_, dataset_size(out_shapes_) * sizeof(Out)));
CUDA_CALL(cudaDeviceSynchronize());
}
void TearDown() final {
CUDA_CALL(cudaFree(input_device_));
CUDA_CALL(cudaFree(output_));
}
In *input_device_;
Out *output_;
std::vector<In> input_host_;
std::vector<float> ref_output_;
std::vector<TensorShape<kNDims>> in_shapes_ = {{4, 3, kNChannelsIn}, {4, 3, kNChannelsIn}};
std::vector<TensorShape<kNDims>> out_shapes_ = {{4, 3, kNChannelsOut}, {4, 3, kNChannelsOut}};
mat<kNChannelsOut, kNChannelsIn, float> mat_{{{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}}};
vec<kNChannelsOut, float> vec_{42, 69};
std::vector<mat<kNChannelsOut, kNChannelsIn, float>> vmat_ = {mat_, mat_ + 1.f};
std::vector<vec<kNChannelsOut, float>> vvec_ = {vec_, vec_ + 1.f};
std::vector<Roi<2>> rois_ = {{{1, 1}, {2, 2}},
{{0, 1}, {1, 2}}};
void calc_output() {
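    // Host reference: for every pixel, out[j] = vec_[j] + sum_k in[k] * mat_(j, k).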
for (size_t i = 0; i < input_host_.size(); i += kNChannelsIn) {
for (size_t j = 0; j < kNChannelsOut; j++) {
float res = vec_.v[j];
for (size_t k = 0; k < kNChannelsIn; k++) {
res += static_cast<float>(input_host_[i + k]) * mat_.at(j, k);
}
ref_output_.push_back(res);
}
}
}
size_t dataset_size(const std::vector<TensorShape<kNDims>> &shapes) {
int ret = 0;
for (const auto &sh : shapes) {
ret += volume(sh);
}
return ret;
}
};
using TestTypes = std::tuple<float>;
/* Cause the line below takes RIDICULOUSLY long time to compile */
// using TestTypes = std::tuple<uint8_t, int8_t, uint16_t, int16_t, int32_t, float>;
INPUT_OUTPUT_TYPED_TEST_SUITE(LinearTransformationGpuTest, TestTypes);
namespace {
template <class GtestTypeParam>
using TheKernel = LinearTransformationGpu<typename GtestTypeParam::Out, typename GtestTypeParam::In,
kNChannelsOut, kNChannelsIn, kNDims - 1>;
} // namespace
TYPED_TEST(LinearTransformationGpuTest, check_kernel) {
check_kernel<TheKernel<TypeParam>>();
}
TYPED_TEST(LinearTransformationGpuTest, setup_test) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
ASSERT_EQ(this->out_shapes_.size(), static_cast<size_t>(reqs.output_shapes[0].num_samples()))
<< "Kernel::Setup provides incorrect shape";
for (size_t i = 0; i < this->out_shapes_.size(); i++) {
EXPECT_EQ(this->out_shapes_[i], reqs.output_shapes[0][i])
<< "Kernel::Setup provides incorrect shape";
}
}
TYPED_TEST(LinearTransformationGpuTest, setup_test_with_roi) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_),
make_cspan(this->rois_));
auto ref_shape = ShapeFromRoi(this->rois_[0], kNChannelsOut);
ASSERT_EQ(ref_shape, reqs.output_shapes[0][0]);
}
TYPED_TEST(LinearTransformationGpuTest, run_test) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
ScratchpadAllocator sa;
sa.Reserve(reqs.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
OutListGPU<typename TypeParam::Out, kNDims> out(
this->output_, reqs.output_shapes[0].template to_static<kNDims>());
kernel.Run(ctx, out, in, make_cspan(this->vmat_), make_cspan(this->vvec_));
CUDA_CALL(cudaDeviceSynchronize());
auto res = copy<mm::memory_kind::host>(out[0]);
auto ref_tv = TensorView<StorageCPU, typename TypeParam::Out>(this->ref_output_.data(),
this->out_shapes_[0]);
Check(res.first, ref_tv, EqualUlp());
}
TYPED_TEST(LinearTransformationGpuTest, run_test_with_roi) {
TheKernel<TypeParam> kernel;
KernelContext ctx;
ctx.gpu.stream = 0;
InListGPU<typename TypeParam::In, kNDims> in(this->input_device_, this->in_shapes_);
auto reqs = kernel.Setup(ctx, in,
make_cspan(this->vmat_), make_cspan(this->vvec_),
make_cspan(this->rois_));
ScratchpadAllocator sa;
sa.Reserve(reqs.scratch_sizes);
auto scratchpad = sa.GetScratchpad();
ctx.scratchpad = &scratchpad;
OutListGPU<typename TypeParam::Out, kNDims> out(
this->output_, reqs.output_shapes[0].template to_static<kNDims>());
kernel.Run(ctx, out, in,
make_cspan(this->vmat_), make_cspan(this->vvec_), make_cspan(this->rois_));
CUDA_CALL(cudaDeviceSynchronize());
auto res = copy<mm::memory_kind::host>(out[0]);
auto mat = testing::copy_to_mat<kNChannelsOut>(
this->rois_[0],
this->ref_output_.data(),
this->out_shapes_[0][0],
this->out_shapes_[0][1]);
Check(view_as_tensor<typename TypeParam::Out>(mat), res.first, EqualUlp());
}
} // namespace test
} // namespace kernels
} // namespace dali
|
edc46077f05a44dff1db56cdb570a371f4c40ed4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_calc_dt_kernel_min [2][2];
static int dims_calc_dt_kernel_min_h [2][2] = {0};
//user function
__device__
void calc_dt_kernel_min_gpu(const ACC<double>& dt_min,
double* dt_min_val) {
*dt_min_val = MIN(*dt_min_val, dt_min(0,0,0));
}
__global__ void ops_calc_dt_kernel_min(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
double arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d] = INFINITY_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_calc_dt_kernel_min[0][0] + idx_z * 1*1 * dims_calc_dt_kernel_min[0][0] * dims_calc_dt_kernel_min[0][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_calc_dt_kernel_min[0][0], dims_calc_dt_kernel_min[0][1], arg0);
calc_dt_kernel_min_gpu(argp0, arg1_l);
}
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_MIN>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg1_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_calc_dt_kernel_min_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,98)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(98,"calc_dt_kernel_min");
OPS_kernels[98].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != dims_calc_dt_kernel_min_h[0][0] || ydim0 != dims_calc_dt_kernel_min_h[0][1]) {
dims_calc_dt_kernel_min_h[0][0] = xdim0;
dims_calc_dt_kernel_min_h[0][1] = ydim0;
cutilSafeCall(hipMemcpyToSymbol( dims_calc_dt_kernel_min, dims_calc_dt_kernel_min_h, sizeof(dims_calc_dt_kernel_min)));
}
#if defined(OPS_LAZY) && !defined(OPS_MPI)
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1)*((z_size-1)/OPS_block_size_z +1);
int maxblocks = nblocks;
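// Stage per-block partial results for the MIN reduction: the host-side buffer below is
// initialised to +infinity, copied to the device, and combined with MIN on the host
// once the kernel has run.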
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = INFINITY_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[98].mpi_time += t2-t1;
}
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_calc_dt_kernel_min), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)arg1.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg1h[d] = MIN(arg1h[d],((double *)arg1.data)[d+b*1]);
}
}
arg1.data = (char *)arg1h;
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[98].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[98].mpi_time += t2-t1;
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 98;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 98;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->function = ops_par_loop_calc_dt_kernel_min_execute;
if (OPS_diags > 1) {
ops_timing_realloc(98,"calc_dt_kernel_min");
}
ops_enqueue_kernel(desc);
}
#endif
| edc46077f05a44dff1db56cdb570a371f4c40ed4.cu | //
// auto-generated by ops.py
//
__constant__ int dims_calc_dt_kernel_min [2][2];
static int dims_calc_dt_kernel_min_h [2][2] = {0};
//user function
__device__
void calc_dt_kernel_min_gpu(const ACC<double>& dt_min,
double* dt_min_val) {
*dt_min_val = MIN(*dt_min_val, dt_min(0,0,0));
}
__global__ void ops_calc_dt_kernel_min(
double* __restrict arg0,
double* __restrict arg1,
int size0,
int size1,
int size2 ){
double arg1_l[1];
for (int d=0; d<1; d++) arg1_l[d] = INFINITY_double;
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_calc_dt_kernel_min[0][0] + idx_z * 1*1 * dims_calc_dt_kernel_min[0][0] * dims_calc_dt_kernel_min[0][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_calc_dt_kernel_min[0][0], dims_calc_dt_kernel_min[0][1], arg0);
calc_dt_kernel_min_gpu(argp0, arg1_l);
}
for (int d=0; d<1; d++)
ops_reduction_cuda<OPS_MIN>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y)*1],arg1_l[d]);
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_calc_dt_kernel_min_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,98)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(98,"calc_dt_kernel_min");
OPS_kernels[98].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 2,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != dims_calc_dt_kernel_min_h[0][0] || ydim0 != dims_calc_dt_kernel_min_h[0][1]) {
dims_calc_dt_kernel_min_h[0][0] = xdim0;
dims_calc_dt_kernel_min_h[0][1] = ydim0;
cutilSafeCall(cudaMemcpyToSymbol( dims_calc_dt_kernel_min, dims_calc_dt_kernel_min_h, sizeof(dims_calc_dt_kernel_min)));
}
#if defined(OPS_LAZY) && !defined(OPS_MPI)
ops_block block = desc->block;
#endif
#ifdef OPS_MPI
double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index);
#else
double *arg1h = (double *)(((ops_reduction)args[1].data)->data);
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1)*((z_size-1)/OPS_block_size_z +1);
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
reduct_size = MAX(reduct_size,sizeof(double)*1);
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg1.data = OPS_reduct_h + reduct_bytes;
arg1.data_d = OPS_reduct_d + reduct_bytes;
for (int b=0; b<maxblocks; b++)
for (int d=0; d<1; d++) ((double *)arg1.data)[d+b*1] = INFINITY_double;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double));
mvReductArraysToDevice(reduct_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[98].mpi_time += t2-t1;
}
int nshared = 0;
int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z;
nshared = MAX(nshared,sizeof(double)*1);
nshared = MAX(nshared*nthread,reduct_size*nthread);
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_calc_dt_kernel_min<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)arg1.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg1h[d] = MIN(arg1h[d],((double *)arg1.data)[d+b*1]);
}
}
arg1.data = (char *)arg1h;
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[98].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[98].mpi_time += t2-t1;
OPS_kernels[98].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calc_dt_kernel_min(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 98;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 98;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->function = ops_par_loop_calc_dt_kernel_min_execute;
if (OPS_diags > 1) {
ops_timing_realloc(98,"calc_dt_kernel_min");
}
ops_enqueue_kernel(desc);
}
#endif
|
6ce4a9d0ee7f49cde96abbbf9d35f9bfed3cccfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void sgemm_nn_vec_128x128(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
__shared__ float share[128*8*2 + 128*8*2 + 4];
*param_C = share[0];
}
| 6ce4a9d0ee7f49cde96abbbf9d35f9bfed3cccfe.cu |
extern "C" __global__ void sgemm_nn_vec_128x128(
float* param_C,
const float* param_A,
const float* param_B,
float param_alpha,
float param_beta,
int param_flags,
int param_lda,
int param_ldb,
int param_ldc,
int param_m,
int param_n,
int param_k,
int param_ldaz,
int param_ldbz,
int param_ldcz,
int param_batch_loops
) {
__shared__ float share[128*8*2 + 128*8*2 + 4];
*param_C = share[0];
}
|
c7586b2224faf55e1ffd511cbd36cd73f060e432.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include "../utilities.hpp"
#include "../utilities.cuh"
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum class two_pass
{
SIZE_ONLY = 0, ///< calculate the size only
EXECUTE_OP ///< run the string operation
};
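// Typical flow (see replace() below): the functor is first instantiated with
// two_pass::SIZE_ONLY inside a transform iterator to build the offsets column,
// then re-run with two_pass::EXECUTE_OP via thrust::for_each_n to write the chars.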
/**
* @brief Function logic for the replace API.
*
* This will perform a replace operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_fn
{
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t max_repl;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto max_n = max_repl;
if( max_n < 0 )
max_n = d_str.length(); // max possible replacements
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while( (position >= 0) && (max_n > 0) )
{
if( Pass==two_pass::SIZE_ONLY )
bytes += d_repl.size_bytes() - d_target.size_bytes();
else // EXECUTE_OP
{
size_type curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes() );
--max_n;
}
if( Pass==two_pass::EXECUTE_OP ) // copy what's left (or right, depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
return bytes;
}
};
} // namespace
//
std::unique_ptr<column> replace( strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS( target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS( target.size()>0, "Parameter target must not be empty string.");
string_view d_target(target.data(),target.size());
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_fn<two_pass::SIZE_ONLY>{d_strings, d_target, d_repl, maxrepl} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_fn<two_pass::EXECUTE_OP>{d_strings, d_target, d_repl, maxrepl, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
namespace
{
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_slice_fn
{
column_device_view const d_strings;
string_view const d_repl;
size_type start, stop;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type begin = ((start < 0) || (start > length) ? length : start);
size_type end = ((stop < 0) || (stop > length) ? length : stop);
begin = d_str.byte_offset(begin);
end = d_str.byte_offset(end);
bytes += d_repl.size_bytes() - (end - begin);
if( Pass==two_pass::EXECUTE_OP )
{
out_ptr = copy_and_increment( out_ptr, in_ptr, begin );
out_ptr = copy_string( out_ptr, d_repl );
out_ptr = copy_and_increment( out_ptr, in_ptr + end, d_str.size_bytes() - end );
}
return bytes;
}
};
}
std::unique_ptr<column> replace_slice( strings_column_view const& strings,
string_scalar const& repl,
size_type start, size_type stop = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
if( stop > 0 )
CUDF_EXPECTS( start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_slice_fn<two_pass::SIZE_ONLY>{d_strings, d_repl, start, stop} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_slice_fn<two_pass::EXECUTE_OP>{d_strings, d_repl, start, stop, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
namespace
{
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_multi_fn
{
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type size = d_str.size_bytes();
size_type bytes = size, spos = 0, lpos = 0;
while( spos < size )
{ // check each character against each target
for( int tgt_idx=0; tgt_idx < d_targets.size(); ++tgt_idx )
{
string_view d_tgt = d_targets.element<string_view>(tgt_idx);
if( (d_tgt.size_bytes() <= (size-spos)) && // check fit
(d_tgt.compare(in_ptr+spos, d_tgt.size_bytes())==0) ) // does it match
{ // found one
string_view d_repl;
if( d_repls.size()==1 )
d_repl = d_repls.element<string_view>(0);
else
d_repl = d_repls.element<string_view>(tgt_idx);
if( Pass==two_pass::SIZE_ONLY )
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
else
{
out_ptr = copy_and_increment(out_ptr,in_ptr+lpos,spos-lpos);
out_ptr = copy_string(out_ptr,d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes()-1;
break;
}
}
++spos;
}
if( Pass==two_pass::EXECUTE_OP ) // copy remainder
memcpy(out_ptr,in_ptr+lpos,size-lpos);
return bytes;
}
};
}
std::unique_ptr<column> replace( strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
if( strings_count==0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( ((targets.size() > 0) && (targets.null_count()==0)), "Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS( ((repls.size() > 0) && (repls.null_count()==0)), "Parameters repls must not be empty and must not have nulls");
if( repls.size() > 1 )
CUDF_EXPECTS( repls.size()==targets.size(), "Sizes for targets and repls must match");
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
auto targets_column = column_device_view::create(targets.parent(),stream);
auto d_targets = *targets_column;
auto repls_column = column_device_view::create(repls.parent(),stream);
auto d_repls = *repls_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_multi_fn<two_pass::SIZE_ONLY>{d_strings, d_targets, d_repls} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_multi_fn<two_pass::EXECUTE_OP>{d_strings, d_targets, d_repls, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
std::unique_ptr<column> replace_nulls( strings_column_view const& strings,
string_scalar const& repl = string_scalar(""),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
[d_strings, d_repl] __device__ (size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes() : d_strings.element<string_view>(idx).size_bytes();
} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__ (size_type idx) {
string_view d_str = d_repl;
if( !d_strings.is_null(idx) )
d_str = d_strings.element<string_view>(idx);
memcpy( d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes() );
});
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
0, rmm::device_buffer{}, stream, mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace( strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
return detail::replace(strings, target, repl, maxrepl, mr );
}
std::unique_ptr<column> replace_slice( strings_column_view const& strings,
string_scalar const& repl,
size_type start, size_type stop,
rmm::mr::device_memory_resource* mr)
{
return detail::replace_slice(strings, repl, start, stop, mr);
}
std::unique_ptr<column> replace( strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
return detail::replace(strings, targets, repls, mr);
}
std::unique_ptr<column> replace_nulls( strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr)
{
return detail::replace_nulls(strings, repl, mr);
}
} // namespace strings
} // namespace cudf
| c7586b2224faf55e1ffd511cbd36cd73f060e432.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/strings/replace.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include "../utilities.hpp"
#include "../utilities.cuh"
namespace cudf
{
namespace strings
{
namespace detail
{
namespace
{
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum class two_pass
{
SIZE_ONLY = 0, ///< calculate the size only
EXECUTE_OP ///< run the string operation
};
/**
* @brief Function logic for the replace API.
*
* This will perform a replace operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_fn
{
column_device_view const d_strings;
string_view const d_target;
string_view const d_repl;
int32_t max_repl;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto max_n = max_repl;
if( max_n < 0 )
max_n = d_str.length(); // max possible replacements
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
auto position = d_str.find(d_target);
size_type last_pos = 0;
while( (position >= 0) && (max_n > 0) )
{
if( Pass==two_pass::SIZE_ONLY )
bytes += d_repl.size_bytes() - d_target.size_bytes();
else // EXECUTE_OP
{
size_type curr_pos = d_str.byte_offset(position);
out_ptr = copy_and_increment(out_ptr, in_ptr + last_pos, curr_pos - last_pos); // copy left
out_ptr = copy_string(out_ptr, d_repl); // copy repl
last_pos = curr_pos + d_target.size_bytes();
}
position = d_str.find(d_target, position + d_target.size_bytes() );
--max_n;
}
if( Pass==two_pass::EXECUTE_OP ) // copy whats left (or right depending on your point of view)
memcpy(out_ptr, in_ptr + last_pos, d_str.size_bytes() - last_pos);
return bytes;
}
};
} // namespace
//
std::unique_ptr<column> replace( strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
CUDF_EXPECTS( target.is_valid(), "Parameter target must be valid.");
CUDF_EXPECTS( target.size()>0, "Parameter target must not be empty string.");
string_view d_target(target.data(),target.size());
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_fn<two_pass::SIZE_ONLY>{d_strings, d_target, d_repl, maxrepl} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_fn<two_pass::EXECUTE_OP>{d_strings, d_target, d_repl, maxrepl, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
namespace
{
/**
* @brief Function logic for the replace_slice API.
*
* This will perform a replace_slice operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_slice_fn
{
column_device_view const d_strings;
string_view const d_repl;
size_type start, stop;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0; // null string
string_view d_str = d_strings.element<string_view>(idx);
auto length = d_str.length();
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type bytes = d_str.size_bytes();
size_type begin = ((start < 0) || (start > length) ? length : start);
size_type end = ((stop < 0) || (stop > length) ? length : stop);
begin = d_str.byte_offset(begin);
end = d_str.byte_offset(end);
bytes += d_repl.size_bytes() - (end - begin);
if( Pass==two_pass::EXECUTE_OP )
{
out_ptr = copy_and_increment( out_ptr, in_ptr, begin );
out_ptr = copy_string( out_ptr, d_repl );
out_ptr = copy_and_increment( out_ptr, in_ptr + end, d_str.size_bytes() - end );
}
return bytes;
}
};
}
std::unique_ptr<column> replace_slice( strings_column_view const& strings,
string_scalar const& repl,
size_type start, size_type stop = -1,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
if( stop > 0 )
CUDF_EXPECTS( start <= stop, "Parameter start must be less than or equal to stop.");
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_slice_fn<two_pass::SIZE_ONLY>{d_strings, d_repl, start, stop} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto offsets_view = offsets_column->view();
auto d_offsets = offsets_view.data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_slice_fn<two_pass::EXECUTE_OP>{d_strings, d_repl, start, stop, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
namespace
{
/**
* @brief Function logic for the replace_multi API.
*
* This will perform the multi-replace operation on each string.
*/
template <two_pass Pass=two_pass::SIZE_ONLY>
struct replace_multi_fn
{
column_device_view const d_strings;
column_device_view const d_targets;
column_device_view const d_repls;
const int32_t* d_offsets{};
char* d_chars{};
__device__ size_type operator()(size_type idx)
{
if( d_strings.is_null(idx) )
return 0;
string_view d_str = d_strings.element<string_view>(idx);
char* out_ptr = nullptr;
if( Pass==two_pass::EXECUTE_OP )
out_ptr = d_chars + d_offsets[idx];
const char* in_ptr = d_str.data();
size_type size = d_str.size_bytes();
size_type bytes = size, spos = 0, lpos = 0;
while( spos < size )
{ // check each character against each target
for( int tgt_idx=0; tgt_idx < d_targets.size(); ++tgt_idx )
{
string_view d_tgt = d_targets.element<string_view>(tgt_idx);
if( (d_tgt.size_bytes() <= (size-spos)) && // check fit
(d_tgt.compare(in_ptr+spos, d_tgt.size_bytes())==0) ) // does it match
{ // found one
string_view d_repl;
if( d_repls.size()==1 )
d_repl = d_repls.element<string_view>(0);
else
d_repl = d_repls.element<string_view>(tgt_idx);
if( Pass==two_pass::SIZE_ONLY )
bytes += d_repl.size_bytes() - d_tgt.size_bytes();
else
{
out_ptr = copy_and_increment(out_ptr,in_ptr+lpos,spos-lpos);
out_ptr = copy_string(out_ptr,d_repl);
lpos = spos + d_tgt.size_bytes();
}
spos += d_tgt.size_bytes()-1;
break;
}
}
++spos;
}
if( Pass==two_pass::EXECUTE_OP ) // copy remainder
memcpy(out_ptr,in_ptr+lpos,size-lpos);
return bytes;
}
};
}
std::unique_ptr<column> replace( strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
if( strings_count==0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( ((targets.size() > 0) && (targets.null_count()==0)), "Parameters targets must not be empty and must not have nulls");
CUDF_EXPECTS( ((repls.size() > 0) && (repls.null_count()==0)), "Parameters repls must not be empty and must not have nulls");
if( repls.size() > 1 )
CUDF_EXPECTS( repls.size()==targets.size(), "Sizes for targets and repls must match");
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
auto targets_column = column_device_view::create(targets.parent(),stream);
auto d_targets = *targets_column;
auto repls_column = column_device_view::create(repls.parent(),stream);
auto d_repls = *repls_column;
// copy the null mask
rmm::device_buffer null_mask = copy_bitmask( strings.parent(), stream, mr);
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
replace_multi_fn<two_pass::SIZE_ONLY>{d_strings, d_targets, d_repls} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
replace_multi_fn<two_pass::EXECUTE_OP>{d_strings, d_targets, d_repls, d_offsets, d_chars} );
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
strings.null_count(), std::move(null_mask), stream, mr);
}
std::unique_ptr<column> replace_nulls( strings_column_view const& strings,
string_scalar const& repl = string_scalar(""),
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0 )
{
size_type strings_count = strings.size();
if( strings_count == 0 )
return make_empty_strings_column(mr,stream);
CUDF_EXPECTS( repl.is_valid(), "Parameter repl must be valid.");
string_view d_repl(repl.data(),repl.size());
auto strings_column = column_device_view::create(strings.parent(),stream);
auto d_strings = *strings_column;
// build offsets column
auto offsets_transformer_itr = thrust::make_transform_iterator( thrust::make_counting_iterator<int32_t>(0),
[d_strings, d_repl] __device__ (size_type idx) {
return d_strings.is_null(idx) ? d_repl.size_bytes() : d_strings.element<string_view>(idx).size_bytes();
} );
auto offsets_column = make_offsets_child_column(offsets_transformer_itr,
offsets_transformer_itr+strings_count,
mr, stream);
auto d_offsets = offsets_column->view().data<int32_t>();
// build chars column
size_type bytes = thrust::device_pointer_cast(d_offsets)[strings_count];
auto chars_column = strings::detail::create_chars_child_column( strings_count, strings.null_count(), bytes, mr, stream );
auto d_chars = chars_column->mutable_view().data<char>();
thrust::for_each_n(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<size_type>(0), strings_count,
[d_strings, d_repl, d_offsets, d_chars] __device__ (size_type idx) {
string_view d_str = d_repl;
if( !d_strings.is_null(idx) )
d_str = d_strings.element<string_view>(idx);
memcpy( d_chars + d_offsets[idx], d_str.data(), d_str.size_bytes() );
});
//
return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
0, rmm::device_buffer{}, stream, mr);
}
} // namespace detail
// external API
std::unique_ptr<column> replace( strings_column_view const& strings,
string_scalar const& target,
string_scalar const& repl,
int32_t maxrepl,
rmm::mr::device_memory_resource* mr)
{
return detail::replace(strings, target, repl, maxrepl, mr );
}
std::unique_ptr<column> replace_slice( strings_column_view const& strings,
string_scalar const& repl,
size_type start, size_type stop,
rmm::mr::device_memory_resource* mr)
{
return detail::replace_slice(strings, repl, start, stop, mr);
}
std::unique_ptr<column> replace( strings_column_view const& strings,
strings_column_view const& targets,
strings_column_view const& repls,
rmm::mr::device_memory_resource* mr)
{
return detail::replace(strings, targets, repls, mr);
}
std::unique_ptr<column> replace_nulls( strings_column_view const& strings,
string_scalar const& repl,
rmm::mr::device_memory_resource* mr)
{
return detail::replace_nulls(strings, repl, mr);
}
} // namespace strings
} // namespace cudf
|
13c73235dde6e208fb97390f1f20a9f2aeb13856.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file dnn/src/cuda/relayout/kern_contiguous.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout/kern_contiguous.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
// dst is contiguous
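// The switch below dispatches on element size only; a contiguous copy just moves bytes,
// so every 1-, 2- or 4-byte dtype reuses the Byte, Float16 or Int32 instantiation respectively.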
void copy_last_contiguous(
const TensorND& dst, const TensorND& src, size_t contiguous_size,
hipStream_t stream) {
ElemwiseOpParamN<2> param;
param[0] = dst;
param[1] = src;
#define RUN(_dt) \
do { \
typedef DTypeTrait<dtype::_dt>::ctype ctype; \
param[0].layout.dtype = param[1].layout.dtype = dtype::_dt(); \
param.init_from_given_tensor(); \
param.assert_initialized(); \
contiguous_intl::UserOpInvoker<ctype, 2>(param, stream, contiguous_size); \
return; \
} while (0)
switch (dst.layout.dtype.size()) {
case 1:
RUN(Byte);
case 2:
RUN(Float16);
case 4:
RUN(Int32);
}
megdnn_assert(0, "bad dtype size");
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 13c73235dde6e208fb97390f1f20a9f2aeb13856.cu | /**
* \file dnn/src/cuda/relayout/kern_contiguous.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "src/cuda/elemwise_helper.cuh"
#include "src/cuda/query_blocksize.cuh"
#include "src/cuda/relayout/kern_contiguous.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
// dst is contiguous
void copy_last_contiguous(
const TensorND& dst, const TensorND& src, size_t contiguous_size,
cudaStream_t stream) {
ElemwiseOpParamN<2> param;
param[0] = dst;
param[1] = src;
#define RUN(_dt) \
do { \
typedef DTypeTrait<dtype::_dt>::ctype ctype; \
param[0].layout.dtype = param[1].layout.dtype = dtype::_dt(); \
param.init_from_given_tensor(); \
param.assert_initialized(); \
contiguous_intl::UserOpInvoker<ctype, 2>(param, stream, contiguous_size); \
return; \
} while (0)
switch (dst.layout.dtype.size()) {
case 1:
RUN(Byte);
case 2:
RUN(Float16);
case 4:
RUN(Int32);
}
megdnn_assert(0, "bad dtype size");
}
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
ccf3247ac2a958e873828ab3a0091c64396854c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#define HANDLE_ERROR(x) checkCudaError(x, __LINE__)
void checkCudaError(hipError_t msg, int x)
{
if (msg != hipSuccess) {
fprintf(stderr, "line: %d %s\n", x, hipGetErrorString(msg));
exit(1);
}
return;
}
int main()
{
hipDeviceProp_t prop;
int count;
HANDLE_ERROR(hipGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
HANDLE_ERROR(hipGetDeviceProperties(&prop, i));
std::cout << " --- General Information for device " << i << " ---" << std::endl;
std::cout << "Name: " << prop.name << std::endl;
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
std::cout << "Clock rate: " << prop.clockRate << std::endl;
std::cout << "Device copy overlap: ";
if (prop.deviceOverlap)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "Kernel execition timeout: ";
if (prop.kernelExecTimeoutEnabled)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "GPU integrated: " << prop.integrated << std::endl;
std::cout << std::endl;
std::cout << " --- Memory Information for device " << i << " ---"<< std::endl;
std::cout << "Total global mem: " << prop.totalGlobalMem << std::endl;
std::cout << "Total constant mem: " << prop.totalConstMem << std::endl;
std::cout << "Max mem pitch: " << prop.memPitch << std::endl;
std::cout << "Texture Alignment: " << prop.textureAlignment << std::endl;
std::cout << std::endl;
std::cout << " --- MP Information for device " << i << " ---" << std::endl;
std::cout << "Multiprocessor count: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared mem per mp: " << prop.sharedMemPerBlock << std::endl;
std::cout << "Registers per mp: " << prop.regsPerBlock << std::endl;
std::cout << "Threads in warp: " << prop.warpSize << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "Max thread dimensions: (" << prop.maxThreadsDim[0] << ", "
<< prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")" << std::endl;
std::cout << "Max grid dimensions: (" << prop.maxGridSize[0] << ", "
<< prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")" << std::endl;
std::cout << std::endl;
std::cout << " ---------------------------------- " << std::endl << std::endl;
}
return 0;
} | ccf3247ac2a958e873828ab3a0091c64396854c5.cu | #include <iostream>
#include <cuda.h>
#define HANDLE_ERROR(x) checkCudaError(x, __LINE__)
void checkCudaError(cudaError_t msg, int x)
{
if (msg != cudaSuccess) {
fprintf(stderr, "line: %d %s\n", x, cudaGetErrorString(msg));
exit(1);
}
return;
}
int main()
{
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));
std::cout << " --- General Information for device " << i << " ---" << std::endl;
std::cout << "Name: " << prop.name << std::endl;
std::cout << "Compute Capability: " << prop.major << "." << prop.minor << std::endl;
std::cout << "Clock rate: " << prop.clockRate << std::endl;
std::cout << "Device copy overlap: ";
if (prop.deviceOverlap)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "Kernel execition timeout: ";
if (prop.kernelExecTimeoutEnabled)
std::cout << "Enabled" << std::endl;
else
std::cout << "Disabled" << std::endl;
std::cout << "GPU integrated: " << prop.integrated << std::endl;
std::cout << std::endl;
std::cout << " --- Memory Information for device " << i << " ---"<< std::endl;
std::cout << "Total global mem: " << prop.totalGlobalMem << std::endl;
std::cout << "Total constant mem: " << prop.totalConstMem << std::endl;
std::cout << "Max mem pitch: " << prop.memPitch << std::endl;
std::cout << "Texture Alignment: " << prop.textureAlignment << std::endl;
std::cout << std::endl;
std::cout << " --- MP Information for device " << i << " ---" << std::endl;
std::cout << "Multiprocessor count: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared mem per mp: " << prop.sharedMemPerBlock << std::endl;
std::cout << "Registers per mp: " << prop.regsPerBlock << std::endl;
std::cout << "Threads in warp: " << prop.warpSize << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "Max thread dimensions: (" << prop.maxThreadsDim[0] << ", "
<< prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")" << std::endl;
std::cout << "Max grid dimensions: (" << prop.maxGridSize[0] << ", "
<< prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")" << std::endl;
std::cout << std::endl;
std::cout << " ---------------------------------- " << std::endl << std::endl;
}
return 0;
} |
882cba4c3d078c81cab0471c9c250a92f9fe579f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ann.h"
namespace ann {
__global__ void
kernel(int n, float *arr){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= n) return;
arr[idx] *= 2.0f;
}
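// Feed-forward for one layer: thread `idx` owns neuron `idx` of layer `layer_id`
// (the trailing bias neuron is skipped), accumulates z = sum_k w_kj * a_k over the
// previous layer and stores a = sigmoid(z).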
__global__ void
kernel_feedforward(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *w_arr
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
for(int k = 0; k < neuron_count_prev; k++){
z += w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k];
// printf("w_arr[%d] * a_arr[%d] = %.20f\n",
// sw[layer_id-1] + k*(neuron_count - 1) + idx ,
// s[layer_id-1] + k,
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]);
// printf("%.10f * %.10f = %.10f\n", w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx ],
// a_arr[s[layer_id-1] + k],
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]
// );
}
z_arr[s[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_arr[s[layer_id] + idx] = a;
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
}
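// Output-layer gradient: g_j = f'(z_j) * (a_j - t_j), the delta of a squared-error
// cost with the logistic sigmoid f, whose derivative is exp(-z) / (1 + exp(-z))^2.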
__global__ void
kernel_calc_gL(
int layer_id,
int *l,
int *s,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
float z = z_arr[s[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
gjl[s[layer_id] + idx] = f_deriv*(a_arr[s[layer_id] + idx] - t_arr[idx]);
}
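// Hidden-layer gradient (backpropagation): g_j^l = f'(z_j^l) * sum_k w_jk * g_k^(l+1),
// summing over the non-bias neurons of the next layer.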
__global__ void
kernel_calc_gjL(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl,
float *w_arr
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
//float f_deriv=expf(-z_arr[s[layer_id] + idx]) / powf((1 + expf(-z_arr[s[layer_id] + idx])),2.0f);
float z = z_arr[s[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
float sum = 0;
for (int k = 0; k < neuron_count_next-1; k++) {
sum += w_arr[sw[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl[s[layer_id + 1] + k];
}
gjl[s[layer_id] + idx] = f_deriv*sum;
// printf("Kernelis %d - %.20f\n", s[layer_id] + idx, gjl[s[layer_id] + idx]);
}
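// Weight update with momentum: dw = -eta * a_i * g_j^(l+1) + alpha * dw_prev, then w += dw.
// One thread per neuron of layer `layer_id` (bias included); each thread walks its
// outgoing weights to the next layer.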
__global__ void
kernel_weight_update(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl,
float *w_arr,
float *dw_arr,
float eta,
float alpha
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count) return;
float a = a_arr[s[layer_id] + idx];
for(int k = 0; k < neuron_count_next-1; k++){
float grad=/*a_arr[s[layer_id] + idx]*/a*gjl[s[layer_id + 1] + k];
dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]=
-eta*grad+
alpha*dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
w_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]+=
dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
}
}
// CUDA2
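// "_2" variants: the same passes mapped onto a 2D thread block. The extra thread
// dimension strides over the summation index and the partial sums are combined in
// dynamically sized shared memory (feedforward and gjL); the weight update only
// stripes its inner loop across threads, so it needs no reduction.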
__global__ void
kernel_feedforward_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *w_ext_arr
){
extern __shared__ int sm[];
float *sm_z = (float*)&sm[0];
int h = blockDim.x;
int h2 = blockDim.y;
int lidx = threadIdx.x;
int pidx = threadIdx.y;
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
int index0=sw_ext[layer_id-1];
int index1=s_ext[layer_id-1];
for(int k = pidx; k < neuron_count_prev; k+=h2){
z += w_ext_arr[index0 + k*(neuron_count - 1) + idx]*a_ext_arr[index1 + k];
}
sm_z[pidx*h + lidx] = z;
__syncthreads();
if(pidx == 0){
z = 0;
for(int i = 0; i < h2; i++)
z += sm_z[i*h + lidx];
z_ext_arr[s_ext[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_ext_arr[s_ext[layer_id] + idx] = a;
}
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
}
__global__ void
kernel_calc_gL_2(
int layer_id,
int *l,
int *s_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
gjl_ext[s_ext[layer_id] + idx] = f_deriv*(a_ext_arr[s_ext[layer_id] + idx] - t_arr[idx]);
}
__global__ void
kernel_calc_gjL_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext,
float *w_ext_arr
){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx = threadIdx.y;
int lidx = threadIdx.x;
extern __shared__ int sm[];
float *sm_g = (float*)&sm[0];
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
float sum = 0;
for (int k = lidx; k < neuron_count_next-1; k+=h) {
sum += w_ext_arr[sw_ext[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl_ext[s_ext[layer_id + 1] + k];
}
sm_g[pidx*h + lidx] = sum;
__syncthreads();
if(lidx == 0){
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv = expf(-z) / (tmp*tmp);
sum = 0;
for(int i = 0; i < h; i++)
sum += sm_g[pidx*h + i];
gjl_ext[s_ext[layer_id] + idx] = f_deriv*sum;
}
}
__global__ void
kernel_weight_update_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext,
float *w_ext_arr,
float *dw_ext_arr,
float eta,
float alpha
){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx=threadIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count) return;
float a = a_ext_arr[s_ext[layer_id] + idx];
int index0 = s_ext[layer_id + 1] + pidx;
int index1 = sw_ext[layer_id] + idx*(neuron_count_next - 1) + pidx;
for(int k = pidx; k < neuron_count_next-1; k+=h){
float grad = a*gjl_ext[index0];
index0 += h;
float dw = dw_ext_arr[index1] = -eta*grad + alpha*dw_ext_arr[index1];
w_ext_arr[index1] += dw;
index1 += h;
}
}
}
void run_cuda_sample(){
int deviceCount = 0;
checkCudaErrors( hipGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( hipSetDevice(0) );
int n = 11; // number of elements
float *arr = new float[n];
for(int i = 0; i < n; i++)
arr[i] = i;
int h = 4; // number of threads in block
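// Round the element count up to a whole number of blocks; note this launches one
// extra (idle) block when n is already a multiple of h, which the idx >= n guard
// in the kernel tolerates.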
int g = (n + (h-n%h))/h; // number of grids
printf("n=%d, h=%d, g=%d\n", n, h, g);
int bc_arr = sizeof(float)*n;
float *dv_arr = NULL;
checkCudaErrors( hipMalloc((void **)&dv_arr, bc_arr) );
checkCudaErrors( hipMemcpy(dv_arr, arr, bc_arr, hipMemcpyHostToDevice) );
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
hipLaunchKernelGGL(( ann::kernel), dim3(grid_dim), dim3(block_dim), 0, 0, n, dv_arr);
checkCudaErrors( hipMemcpy(arr, dv_arr, bc_arr, hipMemcpyDeviceToHost) );
for(int i = 0; i < n; i++)
printf("[%d] = %f\n", i, arr[i]);
checkCudaErrors( hipFree(dv_arr) );
checkCudaErrors(hipDeviceReset());
}
//
//AnnCUDA
//
void AnnCUDA::prepare( Topology *top){
cTopology = top;
l = new int[top->getLayerCount()];
s = new int[top->getLayerCount()];
int neuronCount = cTopology->obtainNeuronCount();
int weightCount = cTopology->obtainWeightCount();
a_arr = new float[neuronCount];
z_arr = new float[neuronCount];
W = new int[top->getLayerCount()];
sw = new int[top->getLayerCount()];
w_arr = new float[weightCount];
dw_arr = new float[weightCount];
t_arr = new float[top->getLayerSize(top->getLayerCount() - 1)];
gjl = new float[neuronCount];
// cuda
int deviceCount = 0;
checkCudaErrors( hipGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( hipSetDevice(0) );
dv_l = NULL; bc_l = sizeof(int)*top->getLayerCount();
dv_s = NULL; bc_s = sizeof(int)*top->getLayerCount();
dv_a_arr = NULL; bc_a_arr = sizeof(float)*neuronCount;
dv_z_arr = NULL; bc_z_arr = sizeof(float)*neuronCount;
dv_W = NULL; bc_W = sizeof(int)*top->getLayerCount();
dv_sw = NULL; bc_sw = sizeof(int)*top->getLayerCount();
dv_w_arr = NULL; bc_w_arr = sizeof(float)*weightCount;
dv_dw_arr = NULL; bc_dw_arr = sizeof(float)*weightCount;
dv_t_arr = NULL; bc_t_arr = sizeof(float)*top->getLayerSize(top->getLayerCount() - 1);
dv_gjl = NULL; bc_gjl = sizeof(float)*neuronCount;
checkCudaErrors( hipMalloc((void **)&dv_l, bc_l) );
checkCudaErrors( hipMalloc((void **)&dv_s, bc_s) );
checkCudaErrors( hipMalloc((void **)&dv_a_arr, bc_a_arr) );
checkCudaErrors( hipMalloc((void **)&dv_z_arr, bc_z_arr) );
checkCudaErrors( hipMalloc((void **)&dv_W, bc_W) );
checkCudaErrors( hipMalloc((void **)&dv_sw, bc_sw) );
checkCudaErrors( hipMalloc((void **)&dv_w_arr, bc_w_arr) );
checkCudaErrors( hipMalloc((void **)&dv_dw_arr, bc_dw_arr) );
checkCudaErrors( hipMalloc((void **)&dv_t_arr, bc_t_arr) );
checkCudaErrors( hipMalloc((void **)&dv_gjl, bc_gjl) );
}
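// init: builds the index tables used by every kernel:
// l[i] - neurons in layer i (incl. bias), s[i] - offset of layer i in a_arr/z_arr,
// W[i] - weight count between layers i and i+1, sw[i] - offset of layer i in w_arr.
// Weights are initialised uniformly in (-1, 1); momentum terms start at 0.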
void AnnCUDA::init(FILE *pFile=NULL){
L = cTopology->getLayerCount();
Random *rnd = new Random();
//Number of neurons in each layer (including the bias neuron)
for (int i = 0; i < L; i++) {
l[i] = cTopology->getLayerSize(i) + 1;
}
//Start index of each layer
for (int i = 0; i < L; i++) {
s[i] = 0;
for (int j = i; j > 0; j--) {
s[i] += l[j - 1];
}
}
//Bias neurons
for (int i = 0; i < L - 1; i++) {
a_arr[s[i + 1] - 1] = 1;
}
//Number of weights in the l-th layer
for (int i = 0; i < L - 1; i++) {
W[i] = l[i] * (l[i + 1] - 1);
sw[i] = 0;
if (i != 0) {
for (int j = 0; j < i; j++) {
sw[i] += W[j];
}
}
}
for (int i = 0; i < L - 1; i++)
for (int j = 0; j < W[i]; j++) {
w_arr[sw[i] + j] =(rnd->next()*2-1); // (double)rand() / double(RAND_MAX);
dw_arr[sw[i] + j] = 0.0;
}
checkCudaErrors( hipMemcpy(dv_w_arr, w_arr, bc_w_arr, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_dw_arr, dw_arr, bc_dw_arr, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_l, l, bc_l, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_s, s, bc_s, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_sw, sw, bc_sw, hipMemcpyHostToDevice) );
}
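// train: one stochastic gradient step for a single (input a, target b) pair:
// forward pass, output/hidden gradients, then one kernel_weight_update launch per layer.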
void AnnCUDA::train(float *a, float *b, float alpha, float eta){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_arr[i] = a[i];
}
for (int j = 0; j < cTopology->obtainNeuronCount(); j++) {
z_arr[j] = 0;
}
calc_feedForward();
// for (int i = 0; i < 10; i++) {
// printf("a[%d] = %.10f\n", i, a_arr[i]);
// }
// for (int i = 0; i < 7; i++) {
// printf("a[%d] = %.10f\n", i, a_arr[i]);
// }
// printf("\n");
// for (int i = 0; i < 7; i++) {
// printf("z[%d] = %.10f\n", i, z_arr[i]);
// }
for (int i = 0; i < cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++) {
t_arr[i] = b[i];
}
calc_gjl();
// //back propagation:
// for (int i = 0; i <L - 1; i++) {//over layers
// for (int j = 0; j < l[i]; j++) {//over neurons
// for (int k = 0; k < l[i + 1] - 1; k++) {//over next-layer neurons
// dw_arr[sw[i] + k + j*(l[i + 1] - 1)] = delta_w(w_gradient(i, j, k), dw_arr[sw[i] + k + j*(l[i + 1] - 1)], alpha, eta);
// w_arr[sw[i] + k + j*(l[i + 1] - 1)] += dw_arr[sw[i] + k + j*(l[i + 1] - 1)];
// }
// }
// }
// checkCudaErrors( hipMemcpy(dv_a_arr, a_arr, bc_a_arr, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(dv_gjl, gjl, bc_gjl, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(dv_w_arr, w_arr, bc_w_arr, hipMemcpyHostToDevice) );
for (int i = 0; i < L-1; i++) {// loop over layers
int neuron_count = l[i];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
// printf("%s\n", "A masyvas");
// for (int j = 0; j < 7; j++) {
// printf("a[%d] = %.20f\n", j, a_arr[j]);
// }
hipLaunchKernelGGL(( ann::kernel_weight_update), dim3(grid_dim), dim3(block_dim), 0, 0,
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl,
dv_w_arr,
dv_dw_arr,
eta,
alpha
);
}
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
void AnnCUDA::finishTraining(){
checkCudaErrors( hipMemcpy(w_arr, dv_w_arr, bc_w_arr, hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(dw_arr, dv_dw_arr, bc_dw_arr, hipMemcpyDeviceToHost) );
}
void AnnCUDA::feedForward(float *a, float *b){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_arr[i] = a[i];
}
for (int j = 0; j < cTopology->obtainNeuronCount(); j++) {
z_arr[j] = 0;
}
calc_feedForward();
checkCudaErrors( hipMemcpy(a_arr, dv_a_arr, bc_a_arr, hipMemcpyDeviceToHost) );
for (int i = 0; i<cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++){
b[i] = a_arr[s[L - 1] + i];
//printf("b[%d] = %.10f\n", i, b[i]);
}
}
void AnnCUDA::calc_feedForward(){
checkCudaErrors( hipMemcpy(dv_a_arr, a_arr, bc_a_arr, hipMemcpyHostToDevice) );
for (int i = 1; i < L; i++) {// loop over layers
// printf("current layer_id = %d\n", i);
int neuron_count = l[i];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
hipLaunchKernelGGL(( ann::kernel_feedforward), dim3(grid_dim), dim3(block_dim), 0, 0,
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_w_arr
);
}
}
void AnnCUDA::calc_gjl(){
checkCudaErrors( hipMemcpy(dv_t_arr, t_arr, bc_t_arr, hipMemcpyHostToDevice) );
// int last_layer_id=cTopology->getLayerCount()-1;
int last_layer_id=L-1;
int neuron_count = l[last_layer_id];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
hipLaunchKernelGGL(( ann::kernel_calc_gL), dim3(grid_dim), dim3(block_dim), 0, 0,
last_layer_id,
dv_l,
dv_s,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl
);
//Unclear whether this loop should go down to i >= 0 or i >= 1
for (int i = L - 2; i >= 1; i--) {
neuron_count = l[i];
h = 32; // number of threads in block
g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
hipLaunchKernelGGL(( ann::kernel_calc_gjL), dim3(grid_dim), dim3(block_dim), 0, 0,
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl,
dv_w_arr
);
}
// checkCudaErrors( hipMemcpy(gjl, dv_gjl, bc_gjl, hipMemcpyDeviceToHost) );
}
float AnnCUDA::delta_w(float grad, float dw, float alpha, float eta) {
return -eta*grad + alpha*dw;
}
float AnnCUDA::gL(float a, float z, float t) {
float w = f_deriv(z) * (a - t);
return w;
}
float AnnCUDA::f(float x) {
//return atanf(x)/M_PI + 0.5;
float y = 1 + exp(-x);
return 1 / y;
}
float AnnCUDA::f_deriv(float x) {
//return 1.0 / (1.0+ x*x);
return exp(-x) / pow((1 + exp(-x)), 2);
}
float AnnCUDA::w_gradient(int layer_id, int w_i, int w_j) {
return a_arr[s[layer_id] + w_i] * gjl[s[layer_id + 1] + w_j];
}
float AnnCUDA::obtainError(float *b){
checkCudaErrors( hipMemcpy(a_arr, dv_a_arr, bc_a_arr, hipMemcpyDeviceToHost) );
float error = 0;
for(int i = 0; i < l[L-1] - 1; i++){
float tmp = b[i] - a_arr[s[L-1] + i];
error += tmp*tmp;
//printf("a_arr[%d] = %.10f\n", s[L-1] + i, a_arr[s[L-1] + i]);
}
return error;
}
void AnnCUDA::destroy(){
delete[] l;
l = NULL;
delete[] s;
s = NULL;
delete[] a_arr;
a_arr = NULL;
delete[] z_arr;
z_arr = NULL;
delete[] W;
W = NULL;
delete[] sw;
sw = NULL;
delete[] w_arr;
w_arr = NULL;
delete[] dw_arr;
dw_arr = NULL;
delete[] t_arr;
t_arr = NULL;
delete[] gjl;
gjl = NULL;
checkCudaErrors( hipFree(dv_l) );
checkCudaErrors( hipFree(dv_s) );
checkCudaErrors( hipFree(dv_a_arr) );
checkCudaErrors( hipFree(dv_z_arr) );
checkCudaErrors( hipFree(dv_W) );
checkCudaErrors( hipFree(dv_sw) );
checkCudaErrors( hipFree(dv_w_arr) );
checkCudaErrors( hipFree(dv_dw_arr) );
checkCudaErrors( hipFree(dv_t_arr) );
checkCudaErrors( hipFree(dv_gjl) );
checkCudaErrors(hipDeviceReset());
}
float* AnnCUDA::getWeights(){
return w_arr;
}
void AnnCUDA::print_out(){
printf("z = %e\n", z_arr[s[L-1]+0]);
printf("g = %e\n", gjl[s[L-1]+0]);
for(int i = 0; i < l[L-2]; i++){
if(i < l[L-2]) printf("[%d] z=%e, a=%e, w=%e, grad = %e\n", i, z_arr[s[L-2]+i], a_arr[s[L-2]+i], w_arr[sw[L-2] + i*(l[L-1]-1)], a_arr[s[L-2]+i]*gjl[s[L-1]+0]);
}
}
void AnnCUDA::setWeights(float *t_w_arr) {
w_arr=t_w_arr;
checkCudaErrors( hipMemcpy(dv_w_arr, w_arr, bc_w_arr, hipMemcpyHostToDevice) );
}
void AnnCUDA::printf_Network(string filename){
FILE * pFile;
const char * c = filename.c_str();
pFile = fopen(c, "wb");
cTopology->printTopology(pFile);
int weightCount = cTopology->obtainWeightCount();
double *w_arr_dbl = new double[weightCount];
double *dw_arr_dbl = new double[weightCount];
for(int i = 0; i < weightCount; i++){
w_arr_dbl[i] = (double)w_arr[i];
dw_arr_dbl[i] = (double)dw_arr[i];
}
fwrite (w_arr_dbl , sizeof(double), weightCount, pFile);
fwrite (dw_arr_dbl , sizeof(double), weightCount, pFile);
fclose (pFile);
}
//******************Cuda 2.0***************************************
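// AnnCUDA2: same network as AnnCUDA, but every layer and per-layer weight block
// is padded up to a multiple of 32 (the "_ext" arrays), and the kernels run with
// 2-D thread blocks of h x h2 = 32 x 32 using shared-memory reductions.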
void AnnCUDA2::prepare( Topology *top){
h = 32;
h2 = 32;
cTopology = top;
l = new int[top->getLayerCount()];
l_ext = new int[top->getLayerCount()];
s_ext = new int[top->getLayerCount()];
int neuronCount = cTopology->obtainNeuronCount();
int weightCount = cTopology->obtainWeightCount();
int neuronCount_ext = obtainNeuronCountExt(cTopology);
int weightCount_ext = obtainWeightCountExt(cTopology);
// printf("neuronCount = %d\n", neuronCount);
// printf("neuronCount2 = %d\n", neuronCount2);
// printf("weightCount = %d\n", weightCount);
// printf("weightCount2 = %d\n", weightCount2);
a_ext_arr = new float[neuronCount_ext];
z_ext_arr = new float[neuronCount_ext];
sw_ext = new int[top->getLayerCount()];
w_ext_arr = new float[weightCount_ext];
dw_ext_arr = new float[weightCount_ext];
t_arr = new float[top->getLayerSize(top->getLayerCount() - 1)];
gjl_ext = new float[neuronCount_ext];
// cuda
int deviceCount = 0;
checkCudaErrors( hipGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( hipSetDevice(0) );
dv_l = NULL; bc_l = sizeof(int)*top->getLayerCount();
dv_s_ext = NULL; bc_s_ext = sizeof(int)*top->getLayerCount();
dv_a_ext_arr = NULL; bc_a_ext_arr = sizeof(float)*neuronCount_ext;
dv_z_ext_arr = NULL; bc_z_ext_arr = sizeof(float)*neuronCount_ext;
dv_sw_ext = NULL; bc_sw_ext = sizeof(int)*top->getLayerCount();
dv_w_ext_arr = NULL; bc_w_ext_arr = sizeof(float)*weightCount_ext;
dv_dw_ext_arr = NULL; bc_dw_ext_arr = sizeof(float)*weightCount_ext;
dv_t_arr = NULL; bc_t_arr = sizeof(float)*top->getLayerSize(top->getLayerCount() - 1);
dv_gjl_ext = NULL; bc_gjl_ext = sizeof(float)*neuronCount_ext;
checkCudaErrors( hipMalloc((void **)&dv_l, bc_l) );
checkCudaErrors( hipMalloc((void **)&dv_s_ext, bc_s_ext) );
checkCudaErrors( hipMalloc((void **)&dv_a_ext_arr, bc_a_ext_arr) );
checkCudaErrors( hipMalloc((void **)&dv_z_ext_arr, bc_z_ext_arr) );
checkCudaErrors( hipMalloc((void **)&dv_sw_ext, bc_sw_ext) );
checkCudaErrors( hipMalloc((void **)&dv_w_ext_arr, bc_w_ext_arr) );
checkCudaErrors( hipMalloc((void **)&dv_dw_ext_arr, bc_dw_ext_arr) );
checkCudaErrors( hipMalloc((void **)&dv_t_arr, bc_t_arr) );
checkCudaErrors( hipMalloc((void **)&dv_gjl_ext, bc_gjl_ext) );
}
void AnnCUDA2::init(FILE *pFile=NULL){
L = cTopology->getLayerCount();
int *W = new int[L];
int *W_ext = new int[L];
Random *rnd = new Random();
//Number of neurons in each layer (including the bias neuron)
for (int i = 0; i < L; i++) {
int neuron_count = cTopology -> getLayerSize(i) + 1;
l[i] = neuron_count;
l_ext[i] = neuron_count + (neuron_count % 32 ? 32 - neuron_count % 32 : 0); // pad to a multiple of 32, consistent with obtainNeuronCountExt
}
//Start index of each layer
for (int i = 0; i < L; i++) {
s_ext[i] = 0;
for (int j = i; j > 0; j--) {
s_ext[i] += l_ext[j - 1];
}
}
//Bias neurons
for (int i = 0; i < L - 1; i++) {
a_ext_arr[s_ext[i] + l[i] - 1] = 1;
}
//Number of weights in the l-th layer
for (int i = 0; i < L - 1; i++) {
W[i] = l[i] * (l[i + 1] - 1);
W_ext[i] = W[i];
if (W_ext[i] % 32 != 0) {
W_ext[i] += (32 - W_ext[i] % 32);
}
sw_ext[i] = 0;
if (i != 0) {
for (int j = 0; j < i; j++) {
sw_ext[i] += W_ext[j];
}
}
}
for (int i = 0; i < L - 1; i++)
for (int j = 0; j < W_ext[i]; j++) {
if (j < W[i]){
w_ext_arr[sw_ext[i] + j] =(rnd->next()*2-1);
}
else{
w_ext_arr[sw_ext[i] + j] = 0.0;
}
dw_ext_arr[sw_ext[i] + j] = 0.0;
}
delete [] W;
delete [] W_ext;
checkCudaErrors( hipMemcpy(dv_w_ext_arr, w_ext_arr, bc_w_ext_arr, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_dw_ext_arr, dw_ext_arr, bc_dw_ext_arr, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_l, l, bc_l, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_s_ext, s_ext, bc_s_ext, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(dv_sw_ext, sw_ext, bc_sw_ext, hipMemcpyHostToDevice) );
}
void AnnCUDA2::train(float *a, float *b, float alpha, float eta){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_ext_arr[i] = a[i];
}
for (int j = 0; j < obtainNeuronCountExt(cTopology); j++) {
z_ext_arr[j] = 0;
}
calc_feedForward();
for (int i = 0; i < cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++) {
t_arr[i] = b[i];
}
calc_gjl();
for (int i = 0; i < L-1; i++) {// loop over layers
int neuron_count = l[i];
int g = (neuron_count + (h2-neuron_count%h2))/h2; // number of blocks
dim3 grid_dim(1, g, 1);
dim3 block_dim(h, h2, 1);
hipLaunchKernelGGL(( ann::kernel_weight_update_2), dim3(grid_dim), dim3(block_dim), 0, 0,
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext,
dv_w_ext_arr,
dv_dw_ext_arr,
eta,
alpha
);
// checkCudaErrors( hipDeviceSynchronize() );
}
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
void AnnCUDA2::finishTraining(){
checkCudaErrors( hipMemcpy(w_ext_arr, dv_w_ext_arr, bc_w_ext_arr, hipMemcpyDeviceToHost) );
checkCudaErrors( hipMemcpy(dw_ext_arr, dv_dw_ext_arr, bc_dw_ext_arr, hipMemcpyDeviceToHost) );
}
void AnnCUDA2::feedForward(float *a, float *b){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_ext_arr[i] = a[i];
}
for (int j = 0; j < obtainNeuronCountExt(cTopology); j++) {
z_ext_arr[j] = 0;
}
calc_feedForward();
checkCudaErrors( hipMemcpy(a_ext_arr, dv_a_ext_arr, bc_a_ext_arr, hipMemcpyDeviceToHost) );
for (int i = 0; i < l[L - 1] - 1; i++){ // copy only the output neurons, excluding the bias slot
b[i] = a_ext_arr[s_ext[L - 1] + i];
//printf("b[%d] = %.10f\n", i, b[i]);
}
}
void AnnCUDA2::calc_feedForward(){
checkCudaErrors( hipMemcpy(dv_a_ext_arr, a_ext_arr, bc_a_ext_arr, hipMemcpyHostToDevice) );
for (int i = 1; i < L; i++) {// loop over layers
int neuron_count = l[i];
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, h2, 1);
int bc_sm = sizeof(float)*h*h2;
hipLaunchKernelGGL(( ann::kernel_feedforward_2), dim3(grid_dim), dim3(block_dim), bc_sm, 0,
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_w_ext_arr
);
// checkCudaErrors( hipDeviceSynchronize() );
}
}
void AnnCUDA2::calc_gjl(){
checkCudaErrors( hipMemcpy(dv_t_arr, t_arr, bc_t_arr, hipMemcpyHostToDevice) );
int last_layer_id = L-1;
int neuron_count = l[last_layer_id];
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
hipLaunchKernelGGL(( ann::kernel_calc_gL_2), dim3(grid_dim), dim3(block_dim), 0, 0,
last_layer_id,
dv_l,
dv_s_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext
);
//checkCudaErrors( hipDeviceSynchronize() );
for (int i = L - 2; i >= 1; i--) {
neuron_count = l[i];
g = (neuron_count + (h2-neuron_count%h2))/h2; // number of blocks
dim3 grid_dim(1, g, 1);
dim3 block_dim(h, h2, 1);
int bc_sm = sizeof(float)*h*h2;
hipLaunchKernelGGL(( ann::kernel_calc_gjL_2), dim3(grid_dim), dim3(block_dim),bc_sm, 0,
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext,
dv_w_ext_arr
);
}
}
float AnnCUDA2::obtainError(float *b){
checkCudaErrors( hipMemcpy(a_ext_arr, dv_a_ext_arr, bc_a_ext_arr, hipMemcpyDeviceToHost) );
float error = 0;
for(int i = 0; i < l[L-1] - 1; i++){
float tmp = b[i] - a_ext_arr[s_ext[L-1] + i];
error += tmp*tmp;
//printf("a_arr[%d] = %.10f\n", s[L-1] + i, a_arr[s[L-1] + i]);
}
return error;
}
void AnnCUDA2::destroy(){
delete[] l;
l = NULL;
delete[] l_ext;
l_ext = NULL;
delete[] s_ext;
s_ext = NULL;
delete[] a_ext_arr;
a_ext_arr = NULL;
delete[] z_ext_arr;
z_ext_arr = NULL;
delete[] sw_ext;
sw_ext = NULL;
delete[] w_ext_arr;
w_ext_arr = NULL;
delete[] dw_ext_arr;
dw_ext_arr = NULL;
delete[] t_arr;
t_arr = NULL;
delete[] gjl_ext;
gjl_ext = NULL;
checkCudaErrors( hipFree(dv_l) );
checkCudaErrors( hipFree(dv_s_ext) );
checkCudaErrors( hipFree(dv_a_ext_arr) );
checkCudaErrors( hipFree(dv_z_ext_arr) );
checkCudaErrors( hipFree(dv_sw_ext) );
checkCudaErrors( hipFree(dv_w_ext_arr) );
checkCudaErrors( hipFree(dv_dw_ext_arr) );
checkCudaErrors( hipFree(dv_t_arr) );
checkCudaErrors( hipFree(dv_gjl_ext) );
checkCudaErrors(hipDeviceReset());
}
float* AnnCUDA2::getWeights(){
return w_ext_arr;
}
float* AnnCUDA2::getA(){
return a_ext_arr;
}
void AnnCUDA2::print_out(){
printf("z = %e\n", z_ext_arr[s_ext[L-1]+0]);
printf("g = %e\n", gjl_ext[s_ext[L-1]+0]);
for(int i = 0; i < l[L-2]; i++){
if(i < l[L-2]) printf("[%d] z=%e, a=%e, w=%e, grad = %e\n",
i, z_ext_arr[s_ext[L-2]+i],
a_ext_arr[s_ext[L-2]+i],
w_ext_arr[sw_ext[L-2] + i*(l[L-1]-1)],
a_ext_arr[s_ext[L-2]+i]*gjl_ext[s_ext[L-1]+0]);
}
}
void AnnCUDA2::setWeights(float *t_w_arr) {
int prev_count = 0;
for (int i = 0; i < cTopology->getLayerCount() - 1; i++) {
for (int j = 0; j < l[i]*(l[i+1]-1); j++) {
int index_w = sw_ext[i] + j;
int index_t = prev_count + j;
w_ext_arr[index_w] = t_w_arr[index_t];
}
prev_count += l[i]*(l[i+1]-1);
}
checkCudaErrors( hipMemcpy(dv_w_ext_arr, w_ext_arr, bc_w_ext_arr, hipMemcpyHostToDevice) );
}
void AnnCUDA2::printf_Network(string filename){
FILE * pFile;
const char * c = filename.c_str();
pFile = fopen(c, "wb");
cTopology->printTopology(pFile);
int weightCount = cTopology->obtainWeightCount();
double *w_arr_dbl = new double[weightCount];
double *dw_arr_dbl = new double[weightCount];
int sw_index = 0;
for(int layer_id = 0; layer_id < L - 1; layer_id++){
for(int k = 0; k < l[layer_id]*(l[layer_id+1]-1); k++){
w_arr_dbl[sw_index+k] = (double)w_ext_arr[sw_ext[layer_id]+k];
dw_arr_dbl[sw_index+k] = (double)dw_ext_arr[sw_ext[layer_id]+k];
}
sw_index += l[layer_id]*(l[layer_id+1]-1);
}
fwrite (w_arr_dbl , sizeof(double), weightCount, pFile);
fwrite (dw_arr_dbl , sizeof(double), weightCount, pFile);
fclose (pFile);
}
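// Helpers for the padded ("_ext") layout: total neuron and weight counts with
// each layer's block rounded up to the next multiple of 32.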
/* static */
int AnnCUDA2::obtainNeuronCountExt(Topology *top){
int count = 0;
for (int i = 0; i < top->getLayerCount(); i++){
int neuron_count = top->getLayerSize(i)+1;
count += neuron_count;
if (neuron_count % 32 != 0)
count += 32 - neuron_count % 32;
}
return count;
}
/* static */
int AnnCUDA2::obtainWeightCountExt(Topology *top){
int count = 0;
for (int i = 0; i < top->getLayerCount()-1; i++){
int weight_count = (top->getLayerSize(i)+1)*top->getLayerSize(i+1); //((*ml)[i] + 1)*(*ml)[i+1];
count += weight_count;
if (weight_count % 32 != 0)
count += 32 - weight_count % 32;
}
return count;
}
| 882cba4c3d078c81cab0471c9c250a92f9fe579f.cu | #include "ann.h"
namespace ann {
__global__ void
kernel(int n, float *arr){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
if(idx >= n) return;
arr[idx] *= 2.0f;
}
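// kernel_feedforward (v1): one thread per neuron of the current layer; each
// thread accumulates the full dot product over the previous layer serially,
// then applies the logistic activation.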
__global__ void
kernel_feedforward(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *w_arr
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
for(int k = 0; k < neuron_count_prev; k++){
z += w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k];
// printf("w_arr[%d] * a_arr[%d] = %.20f\n",
// sw[layer_id-1] + k*(neuron_count - 1) + idx ,
// s[layer_id-1] + k,
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]);
// printf("%.10f * %.10f = %.10f\n", w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx ],
// a_arr[s[layer_id-1] + k],
// w_arr[sw[layer_id-1] + k*(neuron_count - 1) + idx]*a_arr[s[layer_id-1] + k]
// );
}
z_arr[s[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_arr[s[layer_id] + idx] = a;
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
}
__global__ void
kernel_calc_gL(
int layer_id,
int *l,
int *s,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
float z = z_arr[s[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
gjl[s[layer_id] + idx] = f_deriv*(a_arr[s[layer_id] + idx] - t_arr[idx]);
}
__global__ void
kernel_calc_gjL(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl,
float *w_arr
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
//float f_deriv=expf(-z_arr[s[layer_id] + idx]) / powf((1 + expf(-z_arr[s[layer_id] + idx])),2.0f);
float z = z_arr[s[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
float sum = 0;
for (int k = 0; k < neuron_count_next-1; k++) {
sum += w_arr[sw[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl[s[layer_id + 1] + k];
}
gjl[s[layer_id] + idx] = f_deriv*sum;
// printf("Kernelis %d - %.20f\n", s[layer_id] + idx, gjl[s[layer_id] + idx]);
}
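// kernel_weight_update (v1): one thread per neuron of layer `layer_id`; each
// thread loops over that neuron's outgoing weights and applies the momentum
// update dw = -eta*grad + alpha*dw, w += dw.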
__global__ void
kernel_weight_update(
int layer_id,
int *l,
int *s,
int *sw,
float *z_arr,
float *a_arr,
float *t_arr,
float *gjl,
float *w_arr,
float *dw_arr,
float eta,
float alpha
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count) return;
float a = a_arr[s[layer_id] + idx];
for(int k = 0; k < neuron_count_next-1; k++){
float grad=/*a_arr[s[layer_id] + idx]*/a*gjl[s[layer_id + 1] + k];
dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]=
-eta*grad+
alpha*dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
w_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k]+=
dw_arr[sw[layer_id] + idx*(neuron_count_next - 1) + k];
}
}
// CUDA2
__global__ void
kernel_feedforward_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *w_ext_arr
){
extern __shared__ int sm[];
float *sm_z = (float*)&sm[0];
int h = blockDim.x;
int h2 = blockDim.y;
int lidx = threadIdx.x;
int pidx = threadIdx.y;
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
int neuron_count_prev = l[layer_id-1];
//printf("layer = %d idx = %d count = %d\n", layer_id, idx, neuron_count-1);
if(idx >= neuron_count-1) return;
float z = 0;
int index0=sw_ext[layer_id-1];
int index1=s_ext[layer_id-1];
for(int k = pidx; k < neuron_count_prev; k+=h2){
z += w_ext_arr[index0 + k*(neuron_count - 1) + idx]*a_ext_arr[index1 + k];
}
sm_z[pidx*h + lidx] = z;
__syncthreads();
if(pidx == 0){
z = 0;
for(int i = 0; i < h2; i++)
z += sm_z[i*h + lidx];
z_ext_arr[s_ext[layer_id] + idx] = z;
float a = 1.0 / (1.0 + expf(-z));
a_ext_arr[s_ext[layer_id] + idx] = a;
}
// printf("index = %d z = %.5f\n", s[layer_id] + idx, z);
// printf("a = %.20f\n", a);
}
__global__ void
kernel_calc_gL_2(
int layer_id,
int *l,
int *s_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext
){
volatile int idx = threadIdx.x + blockDim.x*blockIdx.x;
int neuron_count = l[layer_id];
if(idx >= neuron_count-1) return;
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv=expf(-z) / (tmp*tmp);
gjl_ext[s_ext[layer_id] + idx] = f_deriv*(a_ext_arr[s_ext[layer_id] + idx] - t_arr[idx]);
}
__global__ void
kernel_calc_gjL_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext,
float *w_ext_arr
){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx = threadIdx.y;
int lidx = threadIdx.x;
extern __shared__ int sm[];
float *sm_g = (float*)&sm[0];
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
float sum = 0;
for (int k = lidx; k < neuron_count_next-1; k+=h) {
sum += w_ext_arr[sw_ext[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl_ext[s_ext[layer_id + 1] + k];
}
sm_g[pidx*h + lidx] = sum;
__syncthreads();
if(lidx == 0){
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv = expf(-z) / (tmp*tmp);
sum = 0;
for(int i = 0; i < h; i++)
sum += sm_g[pidx*h + i];
gjl_ext[s_ext[layer_id] + idx] = f_deriv*sum;
}
}
__global__ void
kernel_weight_update_2(
int layer_id,
int *l,
int *s_ext,
int *sw_ext,
float *z_ext_arr,
float *a_ext_arr,
float *t_arr,
float *gjl_ext,
float *w_ext_arr,
float *dw_ext_arr,
float eta,
float alpha
){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx=threadIdx.x;
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count) return;
float a = a_ext_arr[s_ext[layer_id] + idx];
int index0 = s_ext[layer_id + 1] + pidx;
int index1 = sw_ext[layer_id] + idx*(neuron_count_next - 1) + pidx;
for(int k = pidx; k < neuron_count_next-1; k+=h){
float grad = a*gjl_ext[index0];
index0 += h;
float dw = dw_ext_arr[index1] = -eta*grad + alpha*dw_ext_arr[index1];
w_ext_arr[index1] += dw;
index1 += h;
}
}
}
void run_cuda_sample(){
int deviceCount = 0;
checkCudaErrors( cudaGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( cudaSetDevice(0) );
int n = 11; // number of elements
float *arr = new float[n];
for(int i = 0; i < n; i++)
arr[i] = i;
int h = 4; // number of threads in block
int g = (n + (h-n%h))/h; // number of blocks
printf("n=%d, h=%d, g=%d\n", n, h, g);
int bc_arr = sizeof(float)*n;
float *dv_arr = NULL;
checkCudaErrors( cudaMalloc((void **)&dv_arr, bc_arr) );
checkCudaErrors( cudaMemcpy(dv_arr, arr, bc_arr, cudaMemcpyHostToDevice) );
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
ann::kernel<<<grid_dim, block_dim>>>(n, dv_arr);
checkCudaErrors( cudaMemcpy(arr, dv_arr, bc_arr, cudaMemcpyDeviceToHost) );
for(int i = 0; i < n; i++)
printf("[%d] = %f\n", i, arr[i]);
checkCudaErrors( cudaFree(dv_arr) );
checkCudaErrors(cudaDeviceReset());
}
//
//AnnCUDA
//
void AnnCUDA::prepare( Topology *top){
cTopology = top;
l = new int[top->getLayerCount()];
s = new int[top->getLayerCount()];
int neuronCount = cTopology->obtainNeuronCount();
int weightCount = cTopology->obtainWeightCount();
a_arr = new float[neuronCount];
z_arr = new float[neuronCount];
W = new int[top->getLayerCount()];
sw = new int[top->getLayerCount()];
w_arr = new float[weightCount];
dw_arr = new float[weightCount];
t_arr = new float[top->getLayerSize(top->getLayerCount() - 1)];
gjl = new float[neuronCount];
// cuda
int deviceCount = 0;
checkCudaErrors( cudaGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( cudaSetDevice(0) );
dv_l = NULL; bc_l = sizeof(int)*top->getLayerCount();
dv_s = NULL; bc_s = sizeof(int)*top->getLayerCount();
dv_a_arr = NULL; bc_a_arr = sizeof(float)*neuronCount;
dv_z_arr = NULL; bc_z_arr = sizeof(float)*neuronCount;
dv_W = NULL; bc_W = sizeof(int)*top->getLayerCount();
dv_sw = NULL; bc_sw = sizeof(int)*top->getLayerCount();
dv_w_arr = NULL; bc_w_arr = sizeof(float)*weightCount;
dv_dw_arr = NULL; bc_dw_arr = sizeof(float)*weightCount;
dv_t_arr = NULL; bc_t_arr = sizeof(float)*top->getLayerSize(top->getLayerCount() - 1);
dv_gjl = NULL; bc_gjl = sizeof(float)*neuronCount;
checkCudaErrors( cudaMalloc((void **)&dv_l, bc_l) );
checkCudaErrors( cudaMalloc((void **)&dv_s, bc_s) );
checkCudaErrors( cudaMalloc((void **)&dv_a_arr, bc_a_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_z_arr, bc_z_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_W, bc_W) );
checkCudaErrors( cudaMalloc((void **)&dv_sw, bc_sw) );
checkCudaErrors( cudaMalloc((void **)&dv_w_arr, bc_w_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_dw_arr, bc_dw_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_t_arr, bc_t_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_gjl, bc_gjl) );
}
void AnnCUDA::init(FILE *pFile=NULL){
L = cTopology->getLayerCount();
Random *rnd = new Random();
//Number of neurons in each layer (including the bias neuron)
for (int i = 0; i < L; i++) {
l[i] = cTopology->getLayerSize(i) + 1;
}
//Start index of each layer
for (int i = 0; i < L; i++) {
s[i] = 0;
for (int j = i; j > 0; j--) {
s[i] += l[j - 1];
}
}
//Bias neurons
for (int i = 0; i < L - 1; i++) {
a_arr[s[i + 1] - 1] = 1;
}
//Number of weights in the l-th layer
for (int i = 0; i < L - 1; i++) {
W[i] = l[i] * (l[i + 1] - 1);
sw[i] = 0;
if (i != 0) {
for (int j = 0; j < i; j++) {
sw[i] += W[j];
}
}
}
for (int i = 0; i < L - 1; i++)
for (int j = 0; j < W[i]; j++) {
w_arr[sw[i] + j] =(rnd->next()*2-1); // (double)rand() / double(RAND_MAX);
dw_arr[sw[i] + j] = 0.0;
}
checkCudaErrors( cudaMemcpy(dv_w_arr, w_arr, bc_w_arr, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_dw_arr, dw_arr, bc_dw_arr, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_l, l, bc_l, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_s, s, bc_s, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_sw, sw, bc_sw, cudaMemcpyHostToDevice) );
}
void AnnCUDA::train(float *a, float *b, float alpha, float eta){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_arr[i] = a[i];
}
for (int j = 0; j < cTopology->obtainNeuronCount(); j++) {
z_arr[j] = 0;
}
calc_feedForward();
// for (int i = 0; i < 10; i++) {
// printf("a[%d] = %.10f\n", i, a_arr[i]);
// }
// for (int i = 0; i < 7; i++) {
// printf("a[%d] = %.10f\n", i, a_arr[i]);
// }
// printf("\n");
// for (int i = 0; i < 7; i++) {
// printf("z[%d] = %.10f\n", i, z_arr[i]);
// }
for (int i = 0; i < cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++) {
t_arr[i] = b[i];
}
calc_gjl();
// //back propagation:
// for (int i = 0; i <L - 1; i++) {//over layers
// for (int j = 0; j < l[i]; j++) {//over neurons
// for (int k = 0; k < l[i + 1] - 1; k++) {//over next-layer neurons
// dw_arr[sw[i] + k + j*(l[i + 1] - 1)] = delta_w(w_gradient(i, j, k), dw_arr[sw[i] + k + j*(l[i + 1] - 1)], alpha, eta);
// w_arr[sw[i] + k + j*(l[i + 1] - 1)] += dw_arr[sw[i] + k + j*(l[i + 1] - 1)];
// }
// }
// }
// checkCudaErrors( cudaMemcpy(dv_a_arr, a_arr, bc_a_arr, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(dv_gjl, gjl, bc_gjl, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(dv_w_arr, w_arr, bc_w_arr, cudaMemcpyHostToDevice) );
for (int i = 0; i < L-1; i++) {// loop over layers
int neuron_count = l[i];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
// printf("%s\n", "A masyvas");
// for (int j = 0; j < 7; j++) {
// printf("a[%d] = %.20f\n", j, a_arr[j]);
// }
ann::kernel_weight_update<<<grid_dim, block_dim>>>(
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl,
dv_w_arr,
dv_dw_arr,
eta,
alpha
);
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
void AnnCUDA::finishTraining(){
checkCudaErrors( cudaMemcpy(w_arr, dv_w_arr, bc_w_arr, cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(dw_arr, dv_dw_arr, bc_dw_arr, cudaMemcpyDeviceToHost) );
}
void AnnCUDA::feedForward(float *a, float *b){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_arr[i] = a[i];
}
for (int j = 0; j < cTopology->obtainNeuronCount(); j++) {
z_arr[j] = 0;
}
calc_feedForward();
checkCudaErrors( cudaMemcpy(a_arr, dv_a_arr, bc_a_arr, cudaMemcpyDeviceToHost) );
for (int i = 0; i<cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++){
b[i] = a_arr[s[L - 1] + i];
//printf("b[%d] = %.10f\n", i, b[i]);
}
}
void AnnCUDA::calc_feedForward(){
checkCudaErrors( cudaMemcpy(dv_a_arr, a_arr, bc_a_arr, cudaMemcpyHostToDevice) );
for (int i = 1; i < L; i++) {// loop over layers
// printf("current layer_id = %d\n", i);
int neuron_count = l[i];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
ann::kernel_feedforward<<<grid_dim, block_dim>>>(
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_w_arr
);
}
}
void AnnCUDA::calc_gjl(){
checkCudaErrors( cudaMemcpy(dv_t_arr, t_arr, bc_t_arr, cudaMemcpyHostToDevice) );
// int last_layer_id=cTopology->getLayerCount()-1;
int last_layer_id=L-1;
int neuron_count = l[last_layer_id];
int h = 32; // number of threads in block
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
ann::kernel_calc_gL<<<grid_dim, block_dim>>>(
last_layer_id,
dv_l,
dv_s,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl
);
//Unclear whether this loop should go down to i >= 0 or i >= 1
for (int i = L - 2; i >= 1; i--) {
neuron_count = l[i];
h = 32; // number of threads in block
g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
ann::kernel_calc_gjL<<<grid_dim, block_dim>>>(
i,
dv_l,
dv_s,
dv_sw,
dv_z_arr,
dv_a_arr,
dv_t_arr,
dv_gjl,
dv_w_arr
);
}
// checkCudaErrors( cudaMemcpy(gjl, dv_gjl, bc_gjl, cudaMemcpyDeviceToHost) );
}
float AnnCUDA::delta_w(float grad, float dw, float alpha, float eta) {
return -eta*grad + alpha*dw;
}
float AnnCUDA::gL(float a, float z, float t) {
float w = f_deriv(z) * (a - t);
return w;
}
float AnnCUDA::f(float x) {
//return atanf(x)/M_PI + 0.5;
float y = 1 + exp(-x);
return 1 / y;
}
float AnnCUDA::f_deriv(float x) {
//return 1.0 / (1.0+ x*x);
return exp(-x) / pow((1 + exp(-x)), 2);
}
float AnnCUDA::w_gradient(int layer_id, int w_i, int w_j) {
return a_arr[s[layer_id] + w_i] * gjl[s[layer_id + 1] + w_j];
}
float AnnCUDA::obtainError(float *b){
checkCudaErrors( cudaMemcpy(a_arr, dv_a_arr, bc_a_arr, cudaMemcpyDeviceToHost) );
float error = 0;
for(int i = 0; i < l[L-1] - 1; i++){
float tmp = b[i] - a_arr[s[L-1] + i];
error += tmp*tmp;
//printf("a_arr[%d] = %.10f\n", s[L-1] + i, a_arr[s[L-1] + i]);
}
return error;
}
void AnnCUDA::destroy(){
delete[] l;
l = NULL;
delete[] s;
s = NULL;
delete[] a_arr;
a_arr = NULL;
delete[] z_arr;
z_arr = NULL;
delete[] W;
W = NULL;
delete[] sw;
sw = NULL;
delete[] w_arr;
w_arr = NULL;
delete[] dw_arr;
dw_arr = NULL;
delete[] t_arr;
t_arr = NULL;
delete[] gjl;
gjl = NULL;
checkCudaErrors( cudaFree(dv_l) );
checkCudaErrors( cudaFree(dv_s) );
checkCudaErrors( cudaFree(dv_a_arr) );
checkCudaErrors( cudaFree(dv_z_arr) );
checkCudaErrors( cudaFree(dv_W) );
checkCudaErrors( cudaFree(dv_sw) );
checkCudaErrors( cudaFree(dv_w_arr) );
checkCudaErrors( cudaFree(dv_dw_arr) );
checkCudaErrors( cudaFree(dv_t_arr) );
checkCudaErrors( cudaFree(dv_gjl) );
checkCudaErrors(cudaDeviceReset());
}
float* AnnCUDA::getWeights(){
return w_arr;
}
void AnnCUDA::print_out(){
printf("z = %e\n", z_arr[s[L-1]+0]);
printf("g = %e\n", gjl[s[L-1]+0]);
for(int i = 0; i < l[L-2]; i++){
if(i < l[L-2]) printf("[%d] z=%e, a=%e, w=%e, grad = %e\n", i, z_arr[s[L-2]+i], a_arr[s[L-2]+i], w_arr[sw[L-2] + i*(l[L-1]-1)], a_arr[s[L-2]+i]*gjl[s[L-1]+0]);
}
}
void AnnCUDA::setWeights(float *t_w_arr) {
w_arr=t_w_arr;
checkCudaErrors( cudaMemcpy(dv_w_arr, w_arr, bc_w_arr, cudaMemcpyHostToDevice) );
}
void AnnCUDA::printf_Network(string filename){
FILE * pFile;
const char * c = filename.c_str();
pFile = fopen(c, "wb");
cTopology->printTopology(pFile);
int weightCount = cTopology->obtainWeightCount();
double *w_arr_dbl = new double[weightCount];
double *dw_arr_dbl = new double[weightCount];
for(int i = 0; i < weightCount; i++){
w_arr_dbl[i] = (double)w_arr[i];
dw_arr_dbl[i] = (double)dw_arr[i];
}
fwrite (w_arr_dbl , sizeof(double), weightCount, pFile);
fwrite (dw_arr_dbl , sizeof(double), weightCount, pFile);
fclose (pFile);
}
//******************Cuda 2.0***************************************
void AnnCUDA2::prepare( Topology *top){
h = 32;
h2 = 32;
cTopology = top;
l = new int[top->getLayerCount()];
l_ext = new int[top->getLayerCount()];
s_ext = new int[top->getLayerCount()];
int neuronCount = cTopology->obtainNeuronCount();
int weightCount = cTopology->obtainWeightCount();
int neuronCount_ext = obtainNeuronCountExt(cTopology);
int weightCount_ext = obtainWeightCountExt(cTopology);
// printf("neuronCount = %d\n", neuronCount);
// printf("neuronCount2 = %d\n", neuronCount2);
// printf("weightCount = %d\n", weightCount);
// printf("weightCount2 = %d\n", weightCount2);
a_ext_arr = new float[neuronCount_ext];
z_ext_arr = new float[neuronCount_ext];
sw_ext = new int[top->getLayerCount()];
w_ext_arr = new float[weightCount_ext];
dw_ext_arr = new float[weightCount_ext];
t_arr = new float[top->getLayerSize(top->getLayerCount() - 1)];
gjl_ext = new float[neuronCount_ext];
// cuda
int deviceCount = 0;
checkCudaErrors( cudaGetDeviceCount(&deviceCount));
if(deviceCount == 0){
printf("*** there is no CUDE device\n");
return;
}
checkCudaErrors( cudaSetDevice(0) );
dv_l = NULL; bc_l = sizeof(int)*top->getLayerCount();
dv_s_ext = NULL; bc_s_ext = sizeof(int)*top->getLayerCount();
dv_a_ext_arr = NULL; bc_a_ext_arr = sizeof(float)*neuronCount_ext;
dv_z_ext_arr = NULL; bc_z_ext_arr = sizeof(float)*neuronCount_ext;
dv_sw_ext = NULL; bc_sw_ext = sizeof(int)*top->getLayerCount();
dv_w_ext_arr = NULL; bc_w_ext_arr = sizeof(float)*weightCount_ext;
dv_dw_ext_arr = NULL; bc_dw_ext_arr = sizeof(float)*weightCount_ext;
dv_t_arr = NULL; bc_t_arr = sizeof(float)*top->getLayerSize(top->getLayerCount() - 1);
dv_gjl_ext = NULL; bc_gjl_ext = sizeof(float)*neuronCount_ext;
checkCudaErrors( cudaMalloc((void **)&dv_l, bc_l) );
checkCudaErrors( cudaMalloc((void **)&dv_s_ext, bc_s_ext) );
checkCudaErrors( cudaMalloc((void **)&dv_a_ext_arr, bc_a_ext_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_z_ext_arr, bc_z_ext_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_sw_ext, bc_sw_ext) );
checkCudaErrors( cudaMalloc((void **)&dv_w_ext_arr, bc_w_ext_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_dw_ext_arr, bc_dw_ext_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_t_arr, bc_t_arr) );
checkCudaErrors( cudaMalloc((void **)&dv_gjl_ext, bc_gjl_ext) );
}
void AnnCUDA2::init(FILE *pFile=NULL){
L = cTopology->getLayerCount();
int *W = new int[L];
int *W_ext = new int[L];
Random *rnd = new Random();
//Number of neurons in each layer (including the bias neuron)
for (int i = 0; i < L; i++) {
int neuron_count = cTopology -> getLayerSize(i) + 1;
l[i] = neuron_count;
l_ext[i] = neuron_count + (neuron_count % 32 ? 32 - neuron_count % 32 : 0); // pad to a multiple of 32, consistent with obtainNeuronCountExt
}
//Start index of each layer
for (int i = 0; i < L; i++) {
s_ext[i] = 0;
for (int j = i; j > 0; j--) {
s_ext[i] += l_ext[j - 1];
}
}
//Bias neurons
for (int i = 0; i < L - 1; i++) {
a_ext_arr[s_ext[i] + l[i] - 1] = 1;
}
//Number of weights in the l-th layer
for (int i = 0; i < L - 1; i++) {
W[i] = l[i] * (l[i + 1] - 1);
W_ext[i] = W[i];
if (W_ext[i] % 32 != 0) {
W_ext[i] += (32 - W_ext[i] % 32);
}
sw_ext[i] = 0;
if (i != 0) {
for (int j = 0; j < i; j++) {
sw_ext[i] += W_ext[j];
}
}
}
for (int i = 0; i < L - 1; i++)
for (int j = 0; j < W_ext[i]; j++) {
if (j < W[i]){
w_ext_arr[sw_ext[i] + j] =(rnd->next()*2-1);
}
else{
w_ext_arr[sw_ext[i] + j] = 0.0;
}
dw_ext_arr[sw_ext[i] + j] = 0.0;
}
delete [] W;
delete [] W_ext;
checkCudaErrors( cudaMemcpy(dv_w_ext_arr, w_ext_arr, bc_w_ext_arr, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_dw_ext_arr, dw_ext_arr, bc_dw_ext_arr, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_l, l, bc_l, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_s_ext, s_ext, bc_s_ext, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(dv_sw_ext, sw_ext, bc_sw_ext, cudaMemcpyHostToDevice) );
}
void AnnCUDA2::train(float *a, float *b, float alpha, float eta){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_ext_arr[i] = a[i];
}
for (int j = 0; j < obtainNeuronCountExt(cTopology); j++) {
z_ext_arr[j] = 0;
}
calc_feedForward();
for (int i = 0; i < cTopology->getLayerSize(cTopology->getLayerCount() - 1); i++) {
t_arr[i] = b[i];
}
calc_gjl();
for (int i = 0; i < L-1; i++) {// loop over layers
int neuron_count = l[i];
int g = (neuron_count + (h2-neuron_count%h2))/h2; // number of blocks
dim3 grid_dim(1, g, 1);
dim3 block_dim(h, h2, 1);
ann::kernel_weight_update_2<<<grid_dim, block_dim>>>(
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext,
dv_w_ext_arr,
dv_dw_ext_arr,
eta,
alpha
);
// checkCudaErrors( cudaDeviceSynchronize() );
}
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
void AnnCUDA2::finishTraining(){
checkCudaErrors( cudaMemcpy(w_ext_arr, dv_w_ext_arr, bc_w_ext_arr, cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaMemcpy(dw_ext_arr, dv_dw_ext_arr, bc_dw_ext_arr, cudaMemcpyDeviceToHost) );
}
void AnnCUDA2::feedForward(float *a, float *b){
for (int i = 0; i < cTopology->getLayerSize(0); i++) {
a_ext_arr[i] = a[i];
}
for (int j = 0; j < obtainNeuronCountExt(cTopology); j++) {
z_ext_arr[j] = 0;
}
calc_feedForward();
checkCudaErrors( cudaMemcpy(a_ext_arr, dv_a_ext_arr, bc_a_ext_arr, cudaMemcpyDeviceToHost) );
for (int i = 0; i < l[L - 1] - 1; i++){ // copy only the output neurons, excluding the bias slot
b[i] = a_ext_arr[s_ext[L - 1] + i];
//printf("b[%d] = %.10f\n", i, b[i]);
}
}
void AnnCUDA2::calc_feedForward(){
checkCudaErrors( cudaMemcpy(dv_a_ext_arr, a_ext_arr, bc_a_ext_arr, cudaMemcpyHostToDevice) );
for (int i = 1; i < L; i++) {// loop over layers
int neuron_count = l[i];
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, h2, 1);
int bc_sm = sizeof(float)*h*h2;
ann::kernel_feedforward_2<<<grid_dim, block_dim, bc_sm>>>(
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_w_ext_arr
);
// checkCudaErrors( cudaDeviceSynchronize() );
}
}
void AnnCUDA2::calc_gjl(){
checkCudaErrors( cudaMemcpy(dv_t_arr, t_arr, bc_t_arr, cudaMemcpyHostToDevice) );
int last_layer_id = L-1;
int neuron_count = l[last_layer_id];
int g = (neuron_count + (h-neuron_count%h))/h; // number of blocks
dim3 grid_dim(g, 1, 1);
dim3 block_dim(h, 1, 1);
ann::kernel_calc_gL_2<<<grid_dim, block_dim>>>(
last_layer_id,
dv_l,
dv_s_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext
);
//checkCudaErrors( cudaDeviceSynchronize() );
for (int i = L - 2; i >= 1; i--) {
neuron_count = l[i];
g = (neuron_count + (h2-neuron_count%h2))/h2; // number of blocks
dim3 grid_dim(1, g, 1);
dim3 block_dim(h, h2, 1);
int bc_sm = sizeof(float)*h*h2;
ann::kernel_calc_gjL_2<<<grid_dim, block_dim,bc_sm>>>(
i,
dv_l,
dv_s_ext,
dv_sw_ext,
dv_z_ext_arr,
dv_a_ext_arr,
dv_t_arr,
dv_gjl_ext,
dv_w_ext_arr
);
}
}
float AnnCUDA2::obtainError(float *b){
checkCudaErrors( cudaMemcpy(a_ext_arr, dv_a_ext_arr, bc_a_ext_arr, cudaMemcpyDeviceToHost) );
float error = 0;
for(int i = 0; i < l[L-1] - 1; i++){
float tmp = b[i] - a_ext_arr[s_ext[L-1] + i];
error += tmp*tmp;
//printf("a_arr[%d] = %.10f\n", s[L-1] + i, a_arr[s[L-1] + i]);
}
return error;
}
void AnnCUDA2::destroy(){
delete[] l;
l = NULL;
delete[] l_ext;
l_ext = NULL;
delete[] s_ext;
s_ext = NULL;
delete[] a_ext_arr;
a_ext_arr = NULL;
delete[] z_ext_arr;
z_ext_arr = NULL;
delete[] sw_ext;
sw_ext = NULL;
delete[] w_ext_arr;
w_ext_arr = NULL;
delete[] dw_ext_arr;
dw_ext_arr = NULL;
delete[] t_arr;
t_arr = NULL;
delete[] gjl_ext;
gjl_ext = NULL;
checkCudaErrors( cudaFree(dv_l) );
checkCudaErrors( cudaFree(dv_s_ext) );
checkCudaErrors( cudaFree(dv_a_ext_arr) );
checkCudaErrors( cudaFree(dv_z_ext_arr) );
checkCudaErrors( cudaFree(dv_sw_ext) );
checkCudaErrors( cudaFree(dv_w_ext_arr) );
checkCudaErrors( cudaFree(dv_dw_ext_arr) );
checkCudaErrors( cudaFree(dv_t_arr) );
checkCudaErrors( cudaFree(dv_gjl_ext) );
checkCudaErrors(cudaDeviceReset());
}
float* AnnCUDA2::getWeights(){
return w_ext_arr;
}
float* AnnCUDA2::getA(){
return a_ext_arr;
}
void AnnCUDA2::print_out(){
printf("z = %e\n", z_ext_arr[s_ext[L-1]+0]);
printf("g = %e\n", gjl_ext[s_ext[L-1]+0]);
for(int i = 0; i < l[L-2]; i++){
if(i < l[L-2]) printf("[%d] z=%e, a=%e, w=%e, grad = %e\n",
i, z_ext_arr[s_ext[L-2]+i],
a_ext_arr[s_ext[L-2]+i],
w_ext_arr[sw_ext[L-2] + i*(l[L-1]-1)],
a_ext_arr[s_ext[L-2]+i]*gjl_ext[s_ext[L-1]+0]);
}
}
void AnnCUDA2::setWeights(float *t_w_arr) {
int prev_count = 0;
for (int i = 0; i < cTopology->getLayerCount() - 1; i++) {
for (int j = 0; j < l[i]*(l[i+1]-1); j++) {
int index_w = sw_ext[i] + j;
int index_t = prev_count + j;
w_ext_arr[index_w] = t_w_arr[index_t];
}
prev_count += l[i]*(l[i+1]-1);
}
checkCudaErrors( cudaMemcpy(dv_w_ext_arr, w_ext_arr, bc_w_ext_arr, cudaMemcpyHostToDevice) );
}
void AnnCUDA2::printf_Network(string filename){
FILE * pFile;
const char * c = filename.c_str();
pFile = fopen(c, "wb");
cTopology->printTopology(pFile);
int weightCount = cTopology->obtainWeightCount();
double *w_arr_dbl = new double[weightCount];
double *dw_arr_dbl = new double[weightCount];
int sw_index = 0;
for(int layer_id = 0; layer_id < L - 1; layer_id++){
for(int k = 0; k < l[layer_id]*(l[layer_id+1]-1); k++){
w_arr_dbl[sw_index+k] = (double)w_ext_arr[sw_ext[layer_id]+k];
dw_arr_dbl[sw_index+k] = (double)dw_ext_arr[sw_ext[layer_id]+k];
}
sw_index += l[layer_id]*(l[layer_id+1]-1);
}
fwrite (w_arr_dbl , sizeof(double), weightCount, pFile);
fwrite (dw_arr_dbl , sizeof(double), weightCount, pFile);
fclose (pFile);
}
/* static */
int AnnCUDA2::obtainNeuronCountExt(Topology *top){
int count = 0;
for (int i = 0; i < top->getLayerCount(); i++){
int neuron_count = top->getLayerSize(i)+1;
count += neuron_count;
if (neuron_count % 32 != 0)
count += 32 - neuron_count % 32;
}
return count;
}
/* static */
int AnnCUDA2::obtainWeightCountExt(Topology *top){
int count = 0;
for (int i = 0; i < top->getLayerCount()-1; i++){
int weight_count = (top->getLayerSize(i)+1)*top->getLayerSize(i+1); //((*ml)[i] + 1)*(*ml)[i+1];
count += weight_count;
if (weight_count % 32 != 0)
count += 32 - weight_count % 32;
}
return count;
}
|
06e21175faef1e48de365d164e6f65a7ab973f45.hip | // !!! This is a file automatically generated by hipify!!!
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include <hip/hip_runtime.h>
#include <cstdio>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
static void learn();
static void clear();
static unsigned int classify(double data[28][28]);
static void loaddata()
{
mnist_load("data/train-images-idx3-ubyte", "data/train-labels-idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images-idx3-ubyte", "data/t10k-labels-idx1-ubyte",
&test_set, &test_cnt);
}
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
if (hipInit(0) != hipSuccess) {
fprintf(stderr, "hipInit failed\n");
return 1;
}
loaddata();
learn();
test();
clear();
return 0;
}
///////////////////////////////////////////////////////////////////////////////
#include <cstdlib>
#include <vector>
#include <memory>
#include <hipblas.h> // hipblasHandle_t / hipblasCreate / hipblasSnrm2 below are declared in hipblas.h
const static float dt = 1.0E-01f;
const static float threshold = 1.0E-02f;
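// Layer: device-side buffers for one layer of the network.
// output/preact hold activations and pre-activations (O floats each),
// bias/weight hold the parameters (N and M*N floats), and the d_* buffers
// hold the corresponding gradients used during back-propagation.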
struct Layer {
float *output;
float *preact;
float *bias;
float *weight;
float *d_output;
float *d_preact;
float *d_weight;
const int M, N, O;
Layer(int M, int N, int O)
: M(M), N(N), O(O)
{
float h_bias[N];
float h_weight[N][M];
output = NULL;
preact = NULL;
bias = NULL;
weight = NULL;
for (int i = 0; i < N; ++i) {
h_bias[i] = 0.5f - float(rand()) / float(RAND_MAX);
/*h_bias[i] = 0.0f;*/
for (int j = 0; j < M; ++j) {
h_weight[i][j] = 0.5f - float(rand()) / float(RAND_MAX);
/*h_weight[i][j] = 0.05f;*/
}
}
hipMalloc(&output, sizeof(float) * O);
hipMalloc(&preact, sizeof(float) * O);
hipMalloc(&bias, sizeof(float) * N);
hipMalloc(&weight, sizeof(float) * M * N);
hipMalloc(&d_output, sizeof(float) * O);
hipMalloc(&d_preact, sizeof(float) * O);
hipMalloc(&d_weight, sizeof(float) * M * N);
hipMemcpy(bias, h_bias, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(weight, h_weight, sizeof(float) * M * N, hipMemcpyHostToDevice);
}
~Layer()
{
hipFree(output);
hipFree(preact);
hipFree(bias);
hipFree(weight);
hipFree(d_output);
hipFree(d_preact);
hipFree(d_weight);
}
void setOutput(float *data)
{
hipMemcpy(output, data, sizeof(float) * O, hipMemcpyHostToDevice);
}
void clear()
{
hipMemset(output, 0x00, sizeof(float) * O);
hipMemset(preact, 0x00, sizeof(float) * O);
}
void bp_clear()
{
hipMemset(d_output, 0x00, sizeof(float) * O);
hipMemset(d_preact, 0x00, sizeof(float) * O);
hipMemset(d_weight, 0x00, sizeof(float) * M * N);
}
};
static hipblasHandle_t blas;
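// Network layout: 28x28 input -> 6 feature maps of 24x24 (5x5 convolution)
// -> 6x6x6 after 4x4 subsampling -> 10 fully connected outputs.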
static Layer l_input(0, 0, 28*28), l_c1(5*5, 6, 24*24*6), l_s1(4*4, 1, 6*6*6), l_f(6*6*6, 10, 10);
__device__ float step_function(float v)
{
return 1 / (1 + exp(-v));
}
__global__ void apply_step_function(float *input, float *output, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
output[idx] = step_function(input[idx]);
}
}
__global__ void makeError(float *err, float *output, unsigned int Y, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]);
}
}
__global__ void apply_grad(float *output, float *grad, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
output[idx] += dt * grad[idx];
}
}
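// The forward/backward kernels below share one pattern: the total work N is
// split evenly across the threads, each flattened index n is decomposed into
// multi-dimensional coordinates with repeated "/=" and "%", and overlapping
// contributions are accumulated with atomicAdd.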
__global__ void preact_c1(float input[28][28], float preact[6][24][24], float weight[6][5][5])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 5*5*6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 5);
const int i2 = ((idx /= 5 ) % 5);
const int i3 = ((idx /= 5 ) % 6);
const int i4 = ((idx /= 6 ) % 24);
const int i5 = ((idx /= 24 ) % 24);
atomicAdd(&preact[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]);
}
}
__global__ void bias_c1(float preact[6][24][24], float bias[6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
preact[i1][i2][i3] += bias[i1];
}
}
__global__ void preact_s1(float input[6][24][24], float preact[6][6][6], float weight[1][4][4])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 4*4*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 4);
const int i2 = ((idx /= 4 ) % 4);
const int i3 = ((idx /= 4 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
atomicAdd(&preact[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]);
}
}
__global__ void bias_s1(float preact[6][6][6], float bias[1])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
preact[i1][i2][i3] += bias[0];
}
}
__global__ void preact_f(float input[6][6][6], float preact[10], float weight[10][6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
atomicAdd(&preact[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]);
}
}
__global__ void bias_f(float preact[10], float bias[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
preact[idx] += bias[idx];
}
}
__global__ void bp_weight_f(float d_weight[10][6][6][6], float d_preact[10], float p_output[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
d_weight[i1][i2][i3][i4] = d_preact[i1] * p_output[i2][i3][i4];
}
}
__global__ void bp_bias_f(float bias[10], float d_preact[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
bias[idx] += dt * d_preact[idx];
}
}
__global__ void bp_output_s1(float d_output[6][6][6], float n_weight[10][6][6][6], float nd_preact[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
atomicAdd(&d_output[i2][i3][i4], n_weight[i1][i2][i3][i4] * nd_preact[i1]);
}
}
__global__ void bp_preact_s1(float d_preact[6][6][6], float d_output[6][6][6], float preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const float o = step_function(preact[i1][i2][i3]);
d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o);
}
}
__global__ void bp_weight_s1(float d_weight[1][4][4], float d_preact[6][6][6], float p_output[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 1*4*4*6*6*6;
const float d = pow(6.0f, 3.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 1);
const int i2 = ((idx /= 1 ) % 4);
const int i3 = ((idx /= 4 ) % 4);
const int i4 = ((idx /= 4 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
const int i6 = ((idx /= 6 ) % 6);
atomicAdd(&d_weight[i1][i2][i3], d_preact[i4][i5][i6] * p_output[i4][i5 * 4 + i2][i6 * 4 + i3]);
}
}
__global__ void bp_bias_s1(float bias[1], float d_preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
const float d = pow(6.0f, 3.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
atomicAdd(&bias[0], dt * d_preact[i1][i2][i3] / d);
}
}
__global__ void bp_output_c1(float d_output[6][24][24], float n_weight[1][4][4], float nd_preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 1*4*4*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 1);
const int i2 = ((idx /= 1 ) % 4);
const int i3 = ((idx /= 4 ) % 4);
const int i4 = ((idx /= 4 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
const int i6 = ((idx /= 6 ) % 6);
atomicAdd(&d_output[i4][i5 * 4 + i2][i6 * 4 + i3], n_weight[i1][i2][i3] * nd_preact[i4][i5][i6]);
}
}
__global__ void bp_preact_c1(float d_preact[6][24][24], float d_output[6][24][24], float preact[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
const float o = step_function(preact[i1][i2][i3]);
d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o);
}
}
__global__ void bp_weight_c1(float d_weight[6][5][5], float d_preact[6][24][24], float p_output[28][28])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*5*5*24*24;
const float d = pow(24.0f, 2.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 5);
const int i3 = ((idx /= 5 ) % 5);
const int i4 = ((idx /= 5 ) % 24);
const int i5 = ((idx /= 24 ) % 24);
atomicAdd(&d_weight[i1][i2][i3], d_preact[i1][i4][i5] * p_output[i4 + i2][i5 + i3] / d);
}
}
__global__ void bp_bias_c1(float bias[6], float d_preact[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
const float d = pow(24.0f, 2.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
atomicAdd(&bias[i1], dt * d_preact[i1][i2][i3] / d);
}
}
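// Forward pass: 28x28 input -> convolution layer c1 (6x24x24) -> subsampling layer s1 (6x6x6) -> fully connected layer f (10 outputs)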
static void propagate(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
l_input.setOutput((float *)input);
hipLaunchKernelGGL(( preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
hipLaunchKernelGGL(( bias_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.preact, l_c1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_c1.preact, l_c1.output, l_c1.O);
hipLaunchKernelGGL(( preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
hipLaunchKernelGGL(( bias_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.preact, l_s1.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_s1.preact, l_s1.output, l_s1.O);
hipLaunchKernelGGL(( preact_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
hipLaunchKernelGGL(( bias_f), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.bias);
hipLaunchKernelGGL(( apply_step_function), dim3(64), dim3(64), 0, 0, l_f.preact, l_f.output, l_f.O);
}
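// Training loop: forward-propagate every sample, measure the output error, then back-propagate layer by layer and apply the weight gradients until the mean error drops below threshold or the iteration budget runs out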
static void learn()
{
hipblasCreate(&blas);
float err;
int iter = 240;
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp;
propagate(train_set[i].data);
hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
hipblasSnrm2(blas, 10, l_f.d_preact, 1, &tmp);
err += tmp;
}
err /= train_cnt;
fprintf(stdout, "error: %e\n", err);
if (err < threshold)
break;
for (int i = 0; i < train_cnt; ++i) {
propagate(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
hipLaunchKernelGGL(( makeError), dim3(10), dim3(1), 0, 0, l_f.d_preact, l_f.output, train_set[i].label, 10);
hipLaunchKernelGGL(( bp_weight_f), dim3(64), dim3(64), 0, 0, (float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
hipLaunchKernelGGL(( bp_bias_f), dim3(64), dim3(64), 0, 0, l_f.bias, l_f.d_preact);
hipLaunchKernelGGL(( bp_output_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
hipLaunchKernelGGL(( bp_preact_s1), dim3(64), dim3(64), 0, 0, (float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
hipLaunchKernelGGL(( bp_weight_s1), dim3(64), dim3(64), 0, 0, (float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
hipLaunchKernelGGL(( bp_bias_s1), dim3(64), dim3(64), 0, 0, l_s1.bias, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_output_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
hipLaunchKernelGGL(( bp_preact_c1), dim3(64), dim3(64), 0, 0, (float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
hipLaunchKernelGGL(( bp_weight_c1), dim3(64), dim3(64), 0, 0, (float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
hipLaunchKernelGGL(( bp_bias_c1), dim3(64), dim3(64), 0, 0, l_c1.bias, (float (*)[24][24])l_c1.d_preact);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_f.weight, l_f.d_weight, l_f.M * l_f.N);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
hipLaunchKernelGGL(( apply_grad), dim3(64), dim3(64), 0, 0, l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
}
}
}
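// Forward-propagate one image and return the index of the strongest output unit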
static unsigned int classify(double data[28][28])
{
float res[10];
propagate(data);
unsigned int max = 0;
hipMemcpy(res, l_f.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
static void clear()
{
}
| 06e21175faef1e48de365d164e6f65a7ab973f45.cu | #define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include <cuda.h>
#include <cstdio>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
static void learn();
static void clear();
static unsigned int classify(double data[28][28]);
static void loaddata()
{
mnist_load("data/train-images-idx3-ubyte", "data/train-labels-idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images-idx3-ubyte", "data/t10k-labels-idx1-ubyte",
&test_set, &test_cnt);
}
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
if (cuInit(0) != CUDA_SUCCESS) {
fprintf(stderr, "cuInit failed\n");
return 1;
}
loaddata();
learn();
test();
clear();
return 0;
}
///////////////////////////////////////////////////////////////////////////////
#include <cstdlib>
#include <vector>
#include <memory>
#include <cublas_v2.h>
const static float dt = 1.0E-01f;
const static float threshold = 1.0E-02f;
struct Layer {
float *output;
float *preact;
float *bias;
float *weight;
float *d_output;
float *d_preact;
float *d_weight;
const int M, N, O;
Layer(int M, int N, int O)
: M(M), N(N), O(O)
{
float h_bias[N];
float h_weight[N][M];
output = NULL;
preact = NULL;
bias = NULL;
weight = NULL;
for (int i = 0; i < N; ++i) {
h_bias[i] = 0.5f - float(rand()) / float(RAND_MAX);
/*h_bias[i] = 0.0f;*/
for (int j = 0; j < M; ++j) {
h_weight[i][j] = 0.5f - float(rand()) / float(RAND_MAX);
/*h_weight[i][j] = 0.05f;*/
}
}
cudaMalloc(&output, sizeof(float) * O);
cudaMalloc(&preact, sizeof(float) * O);
cudaMalloc(&bias, sizeof(float) * N);
cudaMalloc(&weight, sizeof(float) * M * N);
cudaMalloc(&d_output, sizeof(float) * O);
cudaMalloc(&d_preact, sizeof(float) * O);
cudaMalloc(&d_weight, sizeof(float) * M * N);
cudaMemcpy(bias, h_bias, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(weight, h_weight, sizeof(float) * M * N, cudaMemcpyHostToDevice);
}
~Layer()
{
cudaFree(output);
cudaFree(preact);
cudaFree(bias);
cudaFree(weight);
cudaFree(d_output);
cudaFree(d_preact);
cudaFree(d_weight);
}
void setOutput(float *data)
{
cudaMemcpy(output, data, sizeof(float) * O, cudaMemcpyHostToDevice);
}
void clear()
{
cudaMemset(output, 0x00, sizeof(float) * O);
cudaMemset(preact, 0x00, sizeof(float) * O);
}
void bp_clear()
{
cudaMemset(d_output, 0x00, sizeof(float) * O);
cudaMemset(d_preact, 0x00, sizeof(float) * O);
cudaMemset(d_weight, 0x00, sizeof(float) * M * N);
}
};
static cublasHandle_t blas;
static Layer l_input(0, 0, 28*28), l_c1(5*5, 6, 24*24*6), l_s1(4*4, 1, 6*6*6), l_f(6*6*6, 10, 10);
__device__ float step_function(float v)
{
return 1 / (1 + exp(-v));
}
__global__ void apply_step_function(float *input, float *output, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
output[idx] = step_function(input[idx]);
}
}
__global__ void makeError(float *err, float *output, unsigned int Y, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
err[idx] = ((Y == idx ? 1.0f : 0.0f) - output[idx]);
}
}
__global__ void apply_grad(float *output, float *grad, const int N)
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
output[idx] += dt * grad[idx];
}
}
__global__ void preact_c1(float input[28][28], float preact[6][24][24], float weight[6][5][5])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 5*5*6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 5);
const int i2 = ((idx /= 5 ) % 5);
const int i3 = ((idx /= 5 ) % 6);
const int i4 = ((idx /= 6 ) % 24);
const int i5 = ((idx /= 24 ) % 24);
atomicAdd(&preact[i3][i4][i5], weight[i3][i1][i2] * input[i4 + i1][i5 + i2]);
}
}
__global__ void bias_c1(float preact[6][24][24], float bias[6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
preact[i1][i2][i3] += bias[i1];
}
}
__global__ void preact_s1(float input[6][24][24], float preact[6][6][6], float weight[1][4][4])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 4*4*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 4);
const int i2 = ((idx /= 4 ) % 4);
const int i3 = ((idx /= 4 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
atomicAdd(&preact[i3][i4][i5], weight[0][i1][i2] * input[i3][i4 * 4 + i1][i5 * 4 + i2]);
}
}
__global__ void bias_s1(float preact[6][6][6], float bias[1])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
preact[i1][i2][i3] += bias[0];
}
}
__global__ void preact_f(float input[6][6][6], float preact[10], float weight[10][6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
atomicAdd(&preact[i1], weight[i1][i2][i3][i4] * input[i2][i3][i4]);
}
}
__global__ void bias_f(float preact[10], float bias[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
preact[idx] += bias[idx];
}
}
__global__ void bp_weight_f(float d_weight[10][6][6][6], float d_preact[10], float p_output[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
d_weight[i1][i2][i3][i4] = d_preact[i1] * p_output[i2][i3][i4];
}
}
__global__ void bp_bias_f(float bias[10], float d_preact[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
bias[idx] += dt * d_preact[idx];
}
}
__global__ void bp_output_s1(float d_output[6][6][6], float n_weight[10][6][6][6], float nd_preact[10])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 10*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 10);
const int i2 = ((idx /= 10 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const int i4 = ((idx /= 6 ) % 6);
atomicAdd(&d_output[i2][i3][i4], n_weight[i1][i2][i3][i4] * nd_preact[i1]);
}
}
__global__ void bp_preact_s1(float d_preact[6][6][6], float d_output[6][6][6], float preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
const float o = step_function(preact[i1][i2][i3]);
d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o);
}
}
__global__ void bp_weight_s1(float d_weight[1][4][4], float d_preact[6][6][6], float p_output[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 1*4*4*6*6*6;
const float d = pow(6.0f, 3.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 1);
const int i2 = ((idx /= 1 ) % 4);
const int i3 = ((idx /= 4 ) % 4);
const int i4 = ((idx /= 4 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
const int i6 = ((idx /= 6 ) % 6);
atomicAdd(&d_weight[i1][i2][i3], d_preact[i4][i5][i6] * p_output[i4][i5 * 4 + i2][i6 * 4 + i3]);
}
}
__global__ void bp_bias_s1(float bias[1], float d_preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*6*6;
const float d = pow(6.0f, 3.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 6);
const int i3 = ((idx /= 6 ) % 6);
atomicAdd(&bias[0], dt * d_preact[i1][i2][i3] / d);
}
}
__global__ void bp_output_c1(float d_output[6][24][24], float n_weight[1][4][4], float nd_preact[6][6][6])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 1*4*4*6*6*6;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 1);
const int i2 = ((idx /= 1 ) % 4);
const int i3 = ((idx /= 4 ) % 4);
const int i4 = ((idx /= 4 ) % 6);
const int i5 = ((idx /= 6 ) % 6);
const int i6 = ((idx /= 6 ) % 6);
atomicAdd(&d_output[i4][i5 * 4 + i2][i6 * 4 + i3], n_weight[i1][i2][i3] * nd_preact[i4][i5][i6]);
}
}
__global__ void bp_preact_c1(float d_preact[6][24][24], float d_output[6][24][24], float preact[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
const float o = step_function(preact[i1][i2][i3]);
d_preact[i1][i2][i3] = d_output[i1][i2][i3] * o * (1 - o);
}
}
__global__ void bp_weight_c1(float d_weight[6][5][5], float d_preact[6][24][24], float p_output[28][28])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*5*5*24*24;
const float d = pow(24.0f, 2.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 5);
const int i3 = ((idx /= 5 ) % 5);
const int i4 = ((idx /= 5 ) % 24);
const int i5 = ((idx /= 24 ) % 24);
atomicAdd(&d_weight[i1][i2][i3], d_preact[i1][i4][i5] * p_output[i4 + i2][i5 + i3] / d);
}
}
__global__ void bp_bias_c1(float bias[6], float d_preact[6][24][24])
{
const int pos = blockIdx.x * blockDim.x + threadIdx.x;
const int size = blockDim.x * gridDim.x;
const int N = 6*24*24;
const float d = pow(24.0f, 2.0f);
for (int n = N * pos / size; n < N * (pos+1) / size; ++n) {
int idx = n;
const int i1 = ((idx /= 1 ) % 6);
const int i2 = ((idx /= 6 ) % 24);
const int i3 = ((idx /= 24 ) % 24);
atomicAdd(&bias[i1], dt * d_preact[i1][i2][i3] / d);
}
}
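// Forward pass: 28x28 input -> convolution layer c1 (6x24x24) -> subsampling layer s1 (6x6x6) -> fully connected layer f (10 outputs)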
static void propagate(double data[28][28])
{
float input[28][28];
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_c1.clear();
l_s1.clear();
l_f.clear();
l_input.setOutput((float *)input);
preact_c1<<<64, 64>>>((float (*)[28])l_input.output, (float (*)[24][24])l_c1.preact, (float (*)[5][5])l_c1.weight);
bias_c1<<<64, 64>>>((float (*)[24][24])l_c1.preact, l_c1.bias);
apply_step_function<<<64, 64>>>(l_c1.preact, l_c1.output, l_c1.O);
preact_s1<<<64, 64>>>((float (*)[24][24])l_c1.output, (float (*)[6][6])l_s1.preact, (float (*)[4][4])l_s1.weight);
bias_s1<<<64, 64>>>((float (*)[6][6])l_s1.preact, l_s1.bias);
apply_step_function<<<64, 64>>>(l_s1.preact, l_s1.output, l_s1.O);
preact_f<<<64, 64>>>((float (*)[6][6])l_s1.output, l_f.preact, (float (*)[6][6][6])l_f.weight);
bias_f<<<64, 64>>>(l_f.preact, l_f.bias);
apply_step_function<<<64, 64>>>(l_f.preact, l_f.output, l_f.O);
}
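// Training loop: forward-propagate every sample, measure the output error, then back-propagate layer by layer and apply the weight gradients until the mean error drops below threshold or the iteration budget runs out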
static void learn()
{
cublasCreate(&blas);
float err;
int iter = 240;
while (iter < 0 || iter-- > 0) {
err = 0.0f;
for (int i = 0; i < train_cnt; ++i) {
float tmp;
propagate(train_set[i].data);
makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
cublasSnrm2(blas, 10, l_f.d_preact, 1, &tmp);
err += tmp;
}
err /= train_cnt;
fprintf(stdout, "error: %e\n", err);
if (err < threshold)
break;
for (int i = 0; i < train_cnt; ++i) {
propagate(train_set[i].data);
l_f.bp_clear();
l_s1.bp_clear();
l_c1.bp_clear();
makeError<<<10, 1>>>(l_f.d_preact, l_f.output, train_set[i].label, 10);
bp_weight_f<<<64, 64>>>((float (*)[6][6][6])l_f.d_weight, l_f.d_preact, (float (*)[6][6])l_s1.output);
bp_bias_f<<<64, 64>>>(l_f.bias, l_f.d_preact);
bp_output_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_output, (float (*)[6][6][6])l_f.weight, l_f.d_preact);
bp_preact_s1<<<64, 64>>>((float (*)[6][6])l_s1.d_preact, (float (*)[6][6])l_s1.d_output, (float (*)[6][6])l_s1.preact);
bp_weight_s1<<<64, 64>>>((float (*)[4][4])l_s1.d_weight, (float (*)[6][6])l_s1.d_preact, (float (*)[24][24])l_c1.output);
bp_bias_s1<<<64, 64>>>(l_s1.bias, (float (*)[6][6])l_s1.d_preact);
bp_output_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_output, (float (*)[4][4])l_s1.weight, (float (*)[6][6])l_s1.d_preact);
bp_preact_c1<<<64, 64>>>((float (*)[24][24])l_c1.d_preact, (float (*)[24][24])l_c1.d_output, (float (*)[24][24])l_c1.preact);
bp_weight_c1<<<64, 64>>>((float (*)[5][5])l_c1.d_weight, (float (*)[24][24])l_c1.d_preact, (float (*)[28])l_input.output);
bp_bias_c1<<<64, 64>>>(l_c1.bias, (float (*)[24][24])l_c1.d_preact);
apply_grad<<<64, 64>>>(l_f.weight, l_f.d_weight, l_f.M * l_f.N);
apply_grad<<<64, 64>>>(l_s1.weight, l_s1.d_weight, l_s1.M * l_s1.N);
apply_grad<<<64, 64>>>(l_c1.weight, l_c1.d_weight, l_c1.M * l_c1.N);
}
}
}
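// Forward-propagate one image and return the index of the strongest output unit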
static unsigned int classify(double data[28][28])
{
float res[10];
propagate(data);
unsigned int max = 0;
cudaMemcpy(res, l_f.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
static void clear()
{
}
|
dd7e4ebb9662807a4bfd027591f8bb7d1b69a957.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Print each element-wise sum; the original sum/mean check is left commented out below
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
//printf("final result: %f\n", sum/(double)n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} | dd7e4ebb9662807a4bfd027591f8bb7d1b69a957.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Print each element-wise sum; the original sum/mean check is left commented out below
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
//printf("final result: %f\n", sum/(double)n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
41fbac2e16dc10713cdcff0b7c0f5d391fc5ac85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "networktables/NetworkTable.h" //networktables
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include "opencv2/cudalegacy.hpp"
#include <algorithm>
#include <thread>
#include <chrono>
#include <sys/socket.h>
#include <unistd.h>
#include <stdlib.h>
#include <netinet/in.h>
#include <string.h>
#include <arpa/inet.h>
using namespace std;
using namespace cv;
using namespace cv::cuda;
inline uint getFirstIndex(uchar, uchar, uchar);
shared_ptr<NetworkTable> myNetworkTable; //our networktable for reading/writing
string netTableAddress = "10.29.76.2";
const int sizeX = 640;
const int sizeY = 480;
const int fps = 15;
//TODO: String formatter
//Driver Station at 10.29.76.212 IP
const string STREAM_STRING = "appsrc ! videoconvert ! video/x-raw, format=(string)I420, width=(int)640, height=(int)480 ! omxh264enc bitrate=600000 ! video/x-h264, stream-format=(string)byte-stream ! h264parse ! rtph264pay ! udpsink host=10.0.0.60 port=5801 sync=true ";
const string DEBUG_STRING = "appsrc ! videoconvert ! video/x-raw, format=(string)I420, width=(int)640, height=(int)480 ! omxh264enc bitrate=600000 ! video/x-h264, stream-format=(string)byte-stream ! h264parse ! rtph264pay ! udpsink host=10.0.0.60 port=5802 sync=true ";
VideoWriter debug;
//const Mat camera_matrix = (cv::Mat_<float>(3,3) << 786.42, 0, 297.35, 0 , 780.45, 214.74, 0, 0, 1);
const Mat camera_matrix = (cv::Mat_<float>(3,3) << 665.126, 0, 328.04, 0 , 662.07, 244.97, 0, 0, 1);
//const Mat dist_coeffs = (cv::Mat_<float>(1,5) << 0, 0, 0, 0, 0);
const Mat dist_coeffs = (cv::Mat_<float>(1,5) << 0.11068, -0.8106, -0.004023, 0.00622, 1.1949);
const Mat model_points = (cv::Mat_<Point3f>(1,6) << /*Point3d(-5.377,-5.32,0),*/ Point3d(-7.313,-4.819,0), Point3d(-5.936,0.5,0), Point3d(-4,0,0), /*Point3d(5.377,-5.32,0),*/ Point3d(4,0,0),Point3d(5.936,0.5,0),Point3d(7.313,-4.819,0));
Scalar hsv_min(7,0,24); //good settings
//Scalar hsv_min(39,14,104); //bad settings
Scalar hsv_max(180,255,255);
const int minArea = 229;
const double minSolidity = 0.92;
const double expectedAspectRation = 3.51;
const double aspectRatioTolerance = 152;
uchar *LUMBGR2HSV;
uchar *d_LUMBGR2HSV;
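// Precompute a full 256x256x256 BGR -> HSV lookup table on the device, one thread per BGR triple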
__global__
void kernelconvert(uchar *LUT)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if (i < 256 && j < 256 && k < 256) {
uchar _b = i;
uchar _g = j;
uchar _r = k;
float b = (float)_b / 255.0;
float g = (float)_g / 255.0;
float r = (float)_r / 255.0;
float h, s, v;
float _min = min(min(b, g), r);
v = max(max(b, g), r);
float chroma = v - _min;
if (v != 0)
s = chroma / v; // s
else
s = 0;
// hue is undefined when chroma == 0 (grey/black); use 0 so every LUT entry gets written
if (chroma == 0)
h = 0;
else if (r == v)
h = (g - b) / chroma;
else if (g == v)
h = 2 + (b - r) / chroma;
else
h = 4 + (r - g) / chroma;
h *= 30;
if (h < 0) h += 180;
s *= 255;
v *= 255;
uint index = 3 * 256 * 256 * i + 256 * 3 * j + 3 * k;
LUT[index] = (uchar)h;
LUT[index + 1] = (uchar)s; //height, width Saturation
LUT[index + 2] = (uchar)v; //height, width Value
}
}
__global__
void kernelSwap(PtrStepSz<uchar3> src, PtrStepSz<uchar3> dst, uchar *LUT) {
uint x = (blockIdx.x * blockDim.x) + threadIdx.x;
uint y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= src.cols || y >= src.rows) return;
uchar3 v = src(y,x);
uint index = 3 * 256 * 256 * v.x + 256 * 3 * v.y + 3 * v.z;
dst(y,x).x = LUT[index];
dst(y,x).y = LUT[index+1];
dst(y,x).z = LUT[index+2];
}
inline uint getFirstIndex(uchar b, uchar g, uchar r) {
return 3 * 256 * 256 * b + 256 * 3 * g + 3 * r;
}
void initializeLUM() {
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc((void **)&LUMBGR2HSV, 256*256*256*3, hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_LUMBGR2HSV, (void *) LUMBGR2HSV, 0);
dim3 threads_per_block(8, 8,8);
dim3 numBlocks(32,32,32);
hipLaunchKernelGGL(kernelconvert, numBlocks, threads_per_block, 0, 0, d_LUMBGR2HSV);
}
void BGR2HSV_LUM(GpuMat src, GpuMat dst) {
const int m = 32;
int numRows = src.rows, numCols = src.cols;
if (numRows == 0 || numCols == 0) return;
// Attention! Cols Vs. Rows are reversed
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
const dim3 blockSize(m, m, 1);
hipLaunchKernelGGL(kernelSwap, gridSize, blockSize, 0, 0, src, dst, d_LUMBGR2HSV);
}
__global__ void inRange_kernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst,
int lbc0, int ubc0, int lbc1, int ubc1, int lbc2, int ubc2) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows) return;
uchar3 v = src(y, x);
if (v.x >= lbc0 && v.x <= ubc0 && v.y >= lbc1 && v.y <= ubc1 && v.z >= lbc2 && v.z <= ubc2)
dst(y, x) = 255;
else
dst(y, x) = 0;
}
void inRange_gpu(cv::cuda::GpuMat &src, cv::Scalar &lowerb, cv::Scalar &upperb,
cv::cuda::GpuMat &dst) {
const int m = 32;
int numRows = src.rows, numCols = src.cols;
if (numRows == 0 || numCols == 0) return;
// Attention! Cols Vs. Rows are reversed
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
const dim3 blockSize(m, m, 1);
hipLaunchKernelGGL(( inRange_kernel), dim3(gridSize), dim3(blockSize), 0, 0, src, dst, lowerb[0], upperb[0], lowerb[1], upperb[1],
lowerb[2], upperb[2]);
}
Mat getHsvMasked(Mat frame) {
GpuMat frame_gpu, mask_gpu;
frame_gpu.upload(frame);
BGR2HSV_LUM(frame_gpu, frame_gpu);
mask_gpu.create(frame_gpu.rows, frame_gpu.cols, CV_8U);
//Mat inHSV(frame_gpu);
//imshow("HSV", inHSV);
inRange_gpu(frame_gpu, hsv_min, hsv_max, mask_gpu);
Mat mask(mask_gpu);
//imshow("threshold",mask);
//waitKey(1);
return mask;
}
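// Filter contours by area, aspect ratio and solidity; return the surviving min-area rectangles sorted left to right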
vector<RotatedRect> getPotentialTargets(Mat mask) {
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(mask,contours,hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
vector<RotatedRect> targets;
//cout << "Contours Found: "<<contours.size() << "\n";
for(int i = 0; i < contours.size(); i++) {
int area = contourArea(contours[i]);
if(area > minArea) {
//cout << "Area " << area << "\n";
RotatedRect rect = minAreaRect(contours[i]);
//use shorter side as width when calculating aspect ratio
int height = (rect.size.height > rect.size.width) ? rect.size.height : rect.size.width;
int width = (rect.size.height > rect.size.width) ? rect.size.width : rect.size.height;
if(abs((float)((float)height/(float)width) - expectedAspectRation) < aspectRatioTolerance) {
vector<Point> hull;
convexHull(contours[i], hull);
int hull_area = contourArea(hull);
float solidity = float(area)/hull_area;
if(solidity > minSolidity) {
//cout << "Center of Potential Target: " << rect.center.x << ", " << rect.center.y << " Aspect " << (float)((float)height/(float)width) << "\n";
//cout << "solidity " << solidity << "\n";
targets.push_back(rect);
}
}
}
}
//sort based on left to right
sort(targets.begin(), targets.end(), [](const RotatedRect& a, const RotatedRect& b) {
return a.center.x < b.center.x;
});
return targets;
}
int getStripType(RotatedRect strip) {
if(strip.size.height > strip.size.width) {
return 1;
} else {
return 2;
}
}
class VisionTarget {
public:
RotatedRect left;
RotatedRect right;
int targetType;
int getCenterX() {
return (left.center.x + right.center.x)/2;
}
vector<Point2d> leftTargetPointsClockwiseFromLowest() {
vector<Point2d> points;
Point2f pts[4];
left.points(pts);
for (int i = 1 ; i < 4 ; i++) //ignore the bottom most point
{
points.push_back((Point2d)pts[i]);
}
return points;
}
vector<Point2d> rightTargetPointsClockwiseFromLowest() {
vector<Point2d> points;
Point2f pts[4];
right.points(pts);
for (int i = 1 ; i < 4 ; i++) //ignore the bottom most point
{
points.push_back((Point2d)pts[i]);
}
return points;
}
vector<Point2d> eightPointImageDescriptor() {
vector<Point2d> points;
vector<Point2d> leftPoints = leftTargetPointsClockwiseFromLowest();
vector<Point2d> rightPoints = rightTargetPointsClockwiseFromLowest();
points.reserve(6); //the three top points on each rectangle
points.insert(points.end(), leftPoints.begin(), leftPoints.end());
points.insert(points.end(), rightPoints.begin(), rightPoints.end());
return points;
}
};
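// Pair adjacent candidate strips into a vision target and keep the pair whose centre is closest to the image centre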
VisionTarget getVisionTarget(vector<RotatedRect> potentialTargets) {
vector<VisionTarget> targets;
VisionTarget Target;
Target.targetType = 0;
if(potentialTargets.size() > 1) {
for(int i = 0; i < potentialTargets.size()-1; i++) {
if(getStripType(potentialTargets[i]) == 2 && getStripType(potentialTargets[i+1]) == 1) {
VisionTarget temp;
temp.right = potentialTargets[i+1];
temp.left = potentialTargets[i];
targets.push_back(temp);
}
}
//do this in O(n)
sort(targets.begin(), targets.end(), [](VisionTarget a, VisionTarget b) {
return abs(a.getCenterX()-sizeX/2) < abs(b.getCenterX()-sizeX/2);
});
if(targets.size() > 0) {
Target = targets[0];
Target.targetType = 1;
}
}
return Target;
}
vector<cv::Point2d> getImagePointsFromFrame(Mat* frame) {
Mat mask;
Scalar color(0,0,255);
vector<cv::Point2d> image_points;
mask = getHsvMasked(*frame);
vector<RotatedRect> targets = getPotentialTargets(mask);
if(targets.size() >= 1) {
VisionTarget target = getVisionTarget(targets);
if(target.targetType == 1) {
image_points = target.eightPointImageDescriptor();
for(Point2f p : image_points) {
circle(*frame, p, 5,color,5,LINE_8);
}
}
}
debug.write(*frame);
return image_points;
}
Vec3d getEulerAngles(Mat rotation_vector){
Mat rotation3x3;
Vec3d eulerAngles;
Rodrigues(rotation_vector, rotation3x3);
Mat cameraMatrix,rotMatrix,transVect,rotMatrixX,rotMatrixY,rotMatrixZ;
double* _r = rotation3x3.ptr<double>();
double projMatrix[12] = {_r[0],_r[1],_r[2],0,
_r[3],_r[4],_r[5],0,
_r[6],_r[7],_r[8],0};
decomposeProjectionMatrix( Mat(3,4,CV_64FC1,projMatrix),
cameraMatrix,
rotMatrix,
transVect,
rotMatrixX,
rotMatrixY,
rotMatrixZ,
eulerAngles);
return eulerAngles;
}
void getRotationAndTranslationVectors(Mat* frame,Mat* rotation_vector,Mat* translation_vector, bool* newVector) {
vector<cv::Point2d> image_points;
image_points = getImagePointsFromFrame(frame);
if(image_points.size() != 6) { //only need top 3 points of each target
*newVector = false;
return;
}
Mat image_points_matrix = Mat(image_points);
dist_coeffs.convertTo(dist_coeffs,CV_32F);
//maybe make this ransac tomorrow
*newVector = cv::solvePnP(model_points,image_points_matrix,camera_matrix,dist_coeffs,*rotation_vector, *translation_vector, false, SOLVEPNP_ITERATIVE);
}
void processFrameThread(Mat* frame,Mat* rotation_vector,Mat* translation_vector, bool* newImage, bool* newVector) {
for(; ; ) {
if(*newImage == false) continue;
getRotationAndTranslationVectors(frame,rotation_vector,translation_vector, newVector);
if(*newVector) {
Vec3d orientation = getEulerAngles(*rotation_vector);
string s = to_string((*translation_vector).at<double>(2,0)) + ";" + to_string((*translation_vector).at<double>(1,0)) + ";" + to_string((*translation_vector).at<double>(0,0)) + ";" + to_string(orientation[1]) + ";\n";
cout << s;
myNetworkTable -> PutNumber ("Z Displacement", (*translation_vector).at<double>(2,0));
myNetworkTable -> PutNumber ("Y Displacement", (*translation_vector).at<double>(1,0));
myNetworkTable -> PutNumber ("X Displacement", (*translation_vector).at<double>(0,0));
myNetworkTable -> PutNumber ("Yaw", orientation[1]);
myNetworkTable -> Flush();
}
*newImage = false;
}
}
int main(int argc, char** argv)
{
setDevice(0);
initializeLUM();
char setting_script[100];
sprintf (setting_script, "bash good_settings.sh %d", 1);
system (setting_script);
VideoCapture capture("/dev/video1");
//VideoWriter video;
Mat rotation_vector; // Rotation in axis-angle form
Mat translation_vector;
Mat frame;
bool newImage = false;
bool newVector = false;
debug.open(DEBUG_STRING, 0,15,cv::Size(sizeX, sizeY), true);
capture.set(CAP_PROP_FRAME_WIDTH, sizeX);
capture.set(CAP_PROP_FRAME_HEIGHT, sizeY);
capture.set(CAP_PROP_FPS, fps);
NetworkTable::SetClientMode();
NetworkTable::SetIPAddress(llvm::StringRef(netTableAddress));
NetworkTable::Initialize();
myNetworkTable = NetworkTable::GetTable("JetsonData");
//Start processing thread
thread process (processFrameThread,&frame,&rotation_vector,&translation_vector,&newImage, &newVector);
long i = 0;
for (; ; )
{
capture.read(frame);
if (frame.empty()) {
break;
}
//imwrite("/home/ubuntu/VisionProcessing/calibrateImage" + std::to_string(i) + ".jpg",frame);
i++;
if(i%50 == 0) {
myNetworkTable -> PutNumber ("IsAliveCounter", (i%1000)/10.0);
myNetworkTable -> Flush();
}
//std::this_thread::sleep_for(std::chrono::milliseconds(500));
//video.write(frame);
newImage = true;
}
}
| 41fbac2e16dc10713cdcff0b7c0f5d391fc5ac85.cu | #include <stdio.h>
#include "networktables/NetworkTable.h" //networktables
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/cuda.hpp>
#include "opencv2/cudalegacy.hpp"
#include <algorithm>
#include <thread>
#include <chrono>
#include <sys/socket.h>
#include <unistd.h>
#include <stdlib.h>
#include <netinet/in.h>
#include <string.h>
#include <arpa/inet.h>
using namespace std;
using namespace cv;
using namespace cv::cuda;
inline uint getFirstIndex(uchar, uchar, uchar);
shared_ptr<NetworkTable> myNetworkTable; //our networktable for reading/writing
string netTableAddress = "10.29.76.2";
const int sizeX = 640;
const int sizeY = 480;
const int fps = 15;
//TODO: String formatter
//Driver Station at 10.29.76.212 IP
const string STREAM_STRING = "appsrc ! videoconvert ! video/x-raw, format=(string)I420, width=(int)640, height=(int)480 ! omxh264enc bitrate=600000 ! video/x-h264, stream-format=(string)byte-stream ! h264parse ! rtph264pay ! udpsink host=10.0.0.60 port=5801 sync=true ";
const string DEBUG_STRING = "appsrc ! videoconvert ! video/x-raw, format=(string)I420, width=(int)640, height=(int)480 ! omxh264enc bitrate=600000 ! video/x-h264, stream-format=(string)byte-stream ! h264parse ! rtph264pay ! udpsink host=10.0.0.60 port=5802 sync=true ";
VideoWriter debug;
//const Mat camera_matrix = (cv::Mat_<float>(3,3) << 786.42, 0, 297.35, 0 , 780.45, 214.74, 0, 0, 1);
const Mat camera_matrix = (cv::Mat_<float>(3,3) << 665.126, 0, 328.04, 0 , 662.07, 244.97, 0, 0, 1);
//const Mat dist_coeffs = (cv::Mat_<float>(1,5) << 0, 0, 0, 0, 0);
const Mat dist_coeffs = (cv::Mat_<float>(1,5) << 0.11068, -0.8106, -0.004023, 0.00622, 1.1949);
const Mat model_points = (cv::Mat_<Point3f>(1,6) << /*Point3d(-5.377,-5.32,0),*/ Point3d(-7.313,-4.819,0), Point3d(-5.936,0.5,0), Point3d(-4,0,0), /*Point3d(5.377,-5.32,0),*/ Point3d(4,0,0),Point3d(5.936,0.5,0),Point3d(7.313,-4.819,0));
Scalar hsv_min(7,0,24); //good settings
//Scalar hsv_min(39,14,104); //bad settings
Scalar hsv_max(180,255,255);
const int minArea = 229;
const double minSolidity = 0.92;
const double expectedAspectRation = 3.51;
const double aspectRatioTolerance = 152;
uchar *LUMBGR2HSV;
uchar *d_LUMBGR2HSV;
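// Precompute a full 256x256x256 BGR -> HSV lookup table on the device, one thread per BGR triple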
__global__
void kernelconvert(uchar *LUT)
{
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
uint k = (blockIdx.z * blockDim.z) + threadIdx.z;
if (i < 256 && j < 256 && k < 256) {
uchar _b = i;
uchar _g = j;
uchar _r = k;
float b = (float)_b / 255.0;
float g = (float)_g / 255.0;
float r = (float)_r / 255.0;
float h, s, v;
float _min = min(min(b, g), r);
v = max(max(b, g), r);
float chroma = v - _min;
if (v != 0)
s = chroma / v; // s
else
s = 0;
// hue is undefined when chroma == 0 (grey/black); use 0 so every LUT entry gets written
if (chroma == 0)
h = 0;
else if (r == v)
h = (g - b) / chroma;
else if (g == v)
h = 2 + (b - r) / chroma;
else
h = 4 + (r - g) / chroma;
h *= 30;
if (h < 0) h += 180;
s *= 255;
v *= 255;
uint index = 3 * 256 * 256 * i + 256 * 3 * j + 3 * k;
LUT[index] = (uchar)h;
LUT[index + 1] = (uchar)s; //height, width Saturation
LUT[index + 2] = (uchar)v; //height, width Value
}
}
__global__
void kernelSwap(PtrStepSz<uchar3> src, PtrStepSz<uchar3> dst, uchar *LUT) {
uint x = (blockIdx.x * blockDim.x) + threadIdx.x;
uint y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= src.cols || y >= src.rows) return;
uchar3 v = src(y,x);
uint index = 3 * 256 * 256 * v.x + 256 * 3 * v.y + 3 * v.z;
dst(y,x).x = LUT[index];
dst(y,x).y = LUT[index+1];
dst(y,x).z = LUT[index+2];
}
inline uint getFirstIndex(uchar b, uchar g, uchar r) {
return 3 * 256 * 256 * b + 256 * 3 * g + 3 * r;
}
void initializeLUM() {
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc((void **)&LUMBGR2HSV, 256*256*256*3, cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_LUMBGR2HSV, (void *) LUMBGR2HSV, 0);
dim3 threads_per_block(8, 8,8);
dim3 numBlocks(32,32,32);
kernelconvert << <numBlocks, threads_per_block >> >(d_LUMBGR2HSV);
}
void BGR2HSV_LUM(GpuMat src, GpuMat dst) {
const int m = 32;
int numRows = src.rows, numCols = src.cols;
if (numRows == 0 || numCols == 0) return;
// Attention! Cols Vs. Rows are reversed
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
const dim3 blockSize(m, m, 1);
kernelSwap << <gridSize, blockSize >> >(src, dst, d_LUMBGR2HSV);
}
__global__ void inRange_kernel(const cv::cuda::PtrStepSz<uchar3> src, cv::cuda::PtrStepSzb dst,
int lbc0, int ubc0, int lbc1, int ubc1, int lbc2, int ubc2) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows) return;
uchar3 v = src(y, x);
if (v.x >= lbc0 && v.x <= ubc0 && v.y >= lbc1 && v.y <= ubc1 && v.z >= lbc2 && v.z <= ubc2)
dst(y, x) = 255;
else
dst(y, x) = 0;
}
void inRange_gpu(cv::cuda::GpuMat &src, cv::Scalar &lowerb, cv::Scalar &upperb,
cv::cuda::GpuMat &dst) {
const int m = 32;
int numRows = src.rows, numCols = src.cols;
if (numRows == 0 || numCols == 0) return;
// Attention! Cols Vs. Rows are reversed
const dim3 gridSize(ceil((float)numCols / m), ceil((float)numRows / m), 1);
const dim3 blockSize(m, m, 1);
inRange_kernel<<<gridSize, blockSize>>>(src, dst, lowerb[0], upperb[0], lowerb[1], upperb[1],
lowerb[2], upperb[2]);
}
Mat getHsvMasked(Mat frame) {
GpuMat frame_gpu, mask_gpu;
frame_gpu.upload(frame);
BGR2HSV_LUM(frame_gpu, frame_gpu);
mask_gpu.create(frame_gpu.rows, frame_gpu.cols, CV_8U);
//Mat inHSV(frame_gpu);
//imshow("HSV", inHSV);
inRange_gpu(frame_gpu, hsv_min, hsv_max, mask_gpu);
Mat mask(mask_gpu);
//imshow("threshold",mask);
//waitKey(1);
return mask;
}
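// Filter contours by area, aspect ratio and solidity; return the surviving min-area rectangles sorted left to right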
vector<RotatedRect> getPotentialTargets(Mat mask) {
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(mask,contours,hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
vector<RotatedRect> targets;
//cout << "Contours Found: "<<contours.size() << "\n";
for(int i = 0; i < contours.size(); i++) {
int area = contourArea(contours[i]);
if(area > minArea) {
//cout << "Area " << area << "\n";
RotatedRect rect = minAreaRect(contours[i]);
//use shorter side as width when calculating aspect ratio
int height = (rect.size.height > rect.size.width) ? rect.size.height : rect.size.width;
int width = (rect.size.height > rect.size.width) ? rect.size.width : rect.size.height;
if(abs((float)((float)height/(float)width) - expectedAspectRation) < aspectRatioTolerance) {
vector<Point> hull;
convexHull(contours[i], hull);
int hull_area = contourArea(hull);
float solidity = float(area)/hull_area;
if(solidity > minSolidity) {
//cout << "Center of Potential Target: " << rect.center.x << ", " << rect.center.y << " Aspect " << (float)((float)height/(float)width) << "\n";
//cout << "solidity " << solidity << "\n";
targets.push_back(rect);
}
}
}
}
//sort based on left to right
sort(targets.begin(), targets.end(), [](const RotatedRect& a, const RotatedRect& b) {
return a.center.x < b.center.x;
});
return targets;
}
int getStripType(RotatedRect strip) {
if(strip.size.height > strip.size.width) {
return 1;
} else {
return 2;
}
}
class VisionTarget {
public:
RotatedRect left;
RotatedRect right;
int targetType;
int getCenterX() {
return (left.center.x + right.center.x)/2;
}
vector<Point2d> leftTargetPointsClockwiseFromLowest() {
vector<Point2d> points;
Point2f pts[4];
left.points(pts);
for (int i = 1 ; i < 4 ; i++) //ignore the bottom most point
{
points.push_back((Point2d)pts[i]);
}
return points;
}
vector<Point2d> rightTargetPointsClockwiseFromLowest() {
vector<Point2d> points;
Point2f pts[4];
right.points(pts);
for (int i = 1 ; i < 4 ; i++) //ignore the bottom most point
{
points.push_back((Point2d)pts[i]);
}
return points;
}
vector<Point2d> eightPointImageDescriptor() {
vector<Point2d> points;
vector<Point2d> leftPoints = leftTargetPointsClockwiseFromLowest();
vector<Point2d> rightPoints = rightTargetPointsClockwiseFromLowest();
points.reserve(6); //the three top points on each rectangle
points.insert(points.end(), leftPoints.begin(), leftPoints.end());
points.insert(points.end(), rightPoints.begin(), rightPoints.end());
return points;
}
};
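// Pair adjacent candidate strips into a vision target and keep the pair whose centre is closest to the image centre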
VisionTarget getVisionTarget(vector<RotatedRect> potentialTargets) {
vector<VisionTarget> targets;
VisionTarget Target;
Target.targetType = 0;
if(potentialTargets.size() > 1) {
for(int i = 0; i < potentialTargets.size()-1; i++) {
if(getStripType(potentialTargets[i]) == 2 && getStripType(potentialTargets[i+1]) == 1) {
VisionTarget temp;
temp.right = potentialTargets[i+1];
temp.left = potentialTargets[i];
targets.push_back(temp);
}
}
//do this in O(n)
sort(targets.begin(), targets.end(), [](VisionTarget a, VisionTarget b) {
return abs(a.getCenterX()-sizeX/2) < abs(b.getCenterX()-sizeX/2);
});
if(targets.size() > 0) {
Target = targets[0];
Target.targetType = 1;
}
}
return Target;
}
vector<cv::Point2d> getImagePointsFromFrame(Mat* frame) {
Mat mask;
Scalar color(0,0,255);
vector<cv::Point2d> image_points;
mask = getHsvMasked(*frame);
vector<RotatedRect> targets = getPotentialTargets(mask);
if(targets.size() >= 1) {
VisionTarget target = getVisionTarget(targets);
if(target.targetType == 1) {
image_points = target.eightPointImageDescriptor();
for(Point2f p : image_points) {
circle(*frame, p, 5,color,5,LINE_8);
}
}
}
debug.write(*frame);
return image_points;
}
Vec3d getEulerAngles(Mat rotation_vector){
Mat rotation3x3;
Vec3d eulerAngles;
Rodrigues(rotation_vector, rotation3x3);
Mat cameraMatrix,rotMatrix,transVect,rotMatrixX,rotMatrixY,rotMatrixZ;
double* _r = rotation3x3.ptr<double>();
double projMatrix[12] = {_r[0],_r[1],_r[2],0,
_r[3],_r[4],_r[5],0,
_r[6],_r[7],_r[8],0};
decomposeProjectionMatrix( Mat(3,4,CV_64FC1,projMatrix),
cameraMatrix,
rotMatrix,
transVect,
rotMatrixX,
rotMatrixY,
rotMatrixZ,
eulerAngles);
return eulerAngles;
}
void getRotationAndTranslationVectors(Mat* frame,Mat* rotation_vector,Mat* translation_vector, bool* newVector) {
vector<cv::Point2d> image_points;
image_points = getImagePointsFromFrame(frame);
if(image_points.size() != 6) { //only need top 3 points of each target
*newVector = false;
return;
}
Mat image_points_matrix = Mat(image_points);
dist_coeffs.convertTo(dist_coeffs,CV_32F);
//maybe make this ransac tomorrow
*newVector = cv::solvePnP(model_points,image_points_matrix,camera_matrix,dist_coeffs,*rotation_vector, *translation_vector, false, SOLVEPNP_ITERATIVE);
}
void processFrameThread(Mat* frame,Mat* rotation_vector,Mat* translation_vector, bool* newImage, bool* newVector) {
for(; ; ) {
if(*newImage == false) continue;
getRotationAndTranslationVectors(frame,rotation_vector,translation_vector, newVector);
if(*newVector) {
Vec3d orientation = getEulerAngles(*rotation_vector);
string s = to_string((*translation_vector).at<double>(2,0)) + ";" + to_string((*translation_vector).at<double>(1,0)) + ";" + to_string((*translation_vector).at<double>(0,0)) + ";" + to_string(orientation[1]) + ";\n";
cout << s;
myNetworkTable -> PutNumber ("Z Displacement", (*translation_vector).at<double>(2,0));
myNetworkTable -> PutNumber ("Y Displacement", (*translation_vector).at<double>(1,0));
myNetworkTable -> PutNumber ("X Displacement", (*translation_vector).at<double>(0,0));
myNetworkTable -> PutNumber ("Yaw", orientation[1]);
myNetworkTable -> Flush();
}
*newImage = false;
}
}
int main(int argc, char** argv)
{
setDevice(0);
initializeLUM();
char setting_script[100];
sprintf (setting_script, "bash good_settings.sh %d", 1);
system (setting_script);
VideoCapture capture("/dev/video1");
//VideoWriter video;
Mat rotation_vector; // Rotation in axis-angle form
Mat translation_vector;
Mat frame;
bool newImage = false;
bool newVector = false;
debug.open(DEBUG_STRING, 0,15,cv::Size(sizeX, sizeY), true);
capture.set(CAP_PROP_FRAME_WIDTH, sizeX);
capture.set(CAP_PROP_FRAME_HEIGHT, sizeY);
capture.set(CAP_PROP_FPS, fps);
NetworkTable::SetClientMode();
NetworkTable::SetIPAddress(llvm::StringRef(netTableAddress));
NetworkTable::Initialize();
myNetworkTable = NetworkTable::GetTable("JetsonData");
//Start processing thread
thread process (processFrameThread,&frame,&rotation_vector,&translation_vector,&newImage, &newVector);
long i = 0;
for (; ; )
{
capture.read(frame);
if (frame.empty()) {
break;
}
//imwrite("/home/ubuntu/VisionProcessing/calibrateImage" + std::to_string(i) + ".jpg",frame);
i++;
if(i%50 == 0) {
myNetworkTable -> PutNumber ("IsAliveCounter", (i%1000)/10.0);
myNetworkTable -> Flush();
}
//std::this_thread::sleep_for(std::chrono::milliseconds(500));
//video.write(frame);
newImage = true;
}
}
|
29f2ec8eb92eadaecc96cf38bfc5c969c4c3cbc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
//initialize as k
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( query_ball_point_kernel), dim3(b), dim3(opt_n_threads(m)), 0, stream,
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
| 29f2ec8eb92eadaecc96cf38bfc5c969c4c3cbc6.cu | // Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
// input: new_xyz(b, m, 3) xyz(b, n, 3)
// output: idx(b, m, nsample)
__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
int nsample,
const float *__restrict__ new_xyz,
const float *__restrict__ xyz,
int *__restrict__ idx) {
int batch_index = blockIdx.x;
xyz += batch_index * n * 3;
new_xyz += batch_index * m * 3;
idx += m * nsample * batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
float radius2 = radius * radius;
for (int j = index; j < m; j += stride) {
float new_x = new_xyz[j * 3 + 0];
float new_y = new_xyz[j * 3 + 1];
float new_z = new_xyz[j * 3 + 2];
for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
float x = xyz[k * 3 + 0];
float y = xyz[k * 3 + 1];
float z = xyz[k * 3 + 2];
float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
(new_z - z) * (new_z - z);
if (d2 < radius2) {
//initialize as k
if (cnt == 0) {
for (int l = 0; l < nsample; ++l) {
idx[j * nsample + l] = k;
}
}
idx[j * nsample + cnt] = k;
++cnt;
}
}
}
}
void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
int nsample, const float *new_xyz,
const float *xyz, int *idx) {
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
b, n, m, radius, nsample, new_xyz, xyz, idx);
CUDA_CHECK_ERRORS();
}
|
bba9fa9c477a9444d63e3d7f9bbfabc1f29f4f50.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FindClosestPoint.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *points = NULL;
hipMalloc(&points, XSIZE*YSIZE);
int *closestPoint = NULL;
hipMalloc(&closestPoint, XSIZE*YSIZE);
const int numberPoints = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(FindClosestPoint, dim3(gridBlock), dim3(threadBlock), 0, 0, points, closestPoint, numberPoints);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(FindClosestPoint, dim3(gridBlock), dim3(threadBlock), 0, 0, points, closestPoint, numberPoints);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(FindClosestPoint, dim3(gridBlock), dim3(threadBlock), 0, 0, points, closestPoint, numberPoints);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bba9fa9c477a9444d63e3d7f9bbfabc1f29f4f50.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FindClosestPoint.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float3 *points = NULL;
cudaMalloc(&points, XSIZE*YSIZE);
int *closestPoint = NULL;
cudaMalloc(&closestPoint, XSIZE*YSIZE);
const int numberPoints = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
FindClosestPoint<<<gridBlock,threadBlock>>>(points,closestPoint,numberPoints);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
FindClosestPoint<<<gridBlock,threadBlock>>>(points,closestPoint,numberPoints);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
FindClosestPoint<<<gridBlock,threadBlock>>>(points,closestPoint,numberPoints);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f2b50111d874e798550fa365302f384b5f52ce42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu/mblas/tensor_functions.h"
#include "gpu/mblas/handles.h"
using namespace std;
namespace amunmt {
namespace GPU {
namespace mblas {
thread_local CudaStreamHandler CudaStreamHandler::instance_;
thread_local CublasHandler CublasHandler::instance_;
Tensor& Swap(Tensor& Out, Tensor& In) {
Out.swap(In);
return Out;
}
__global__ void gMean(TensorWrapper<float> out,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> sentenceLengths)
{
// out = batches * states
// in = max sentence length * states * 1 * batches
// mapping = max length * batches
int id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("id = %d in = %lu %lu %lu %lu = %lu %lu \n", id, in.dim(0), in.dim(1), in.dim(2), in.dim(3), in.size(), sizeof(in));
if (id < out.size()) {
unsigned indices[SHAPE_SIZE];
out.id2Indices(id, indices);
//printf("%d -> %lu %lu %lu %lu \n", id, indices[0], indices[1], indices[2], indices[3]);
unsigned batch = indices[0];
unsigned state = indices[1];
float sum = 0.0f;
int counter = 0;
for (unsigned row = 0; row < in.dim(0); ++row) {
bool isWord = row < sentenceLengths[batch];
//printf("batch=%lu startMapInd=%lu mapOffset=%lu -> %d \n", batch, startMapInd, mapOffset, isWord);
if (isWord) {
sum += in(row, state, 0, batch);
++counter;
}
}
sum /= (float) counter;
out[id] = sum;
}
}
void Mean(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned> &sentenceLengths)
{
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
assert(Out.dim(0) == In.dim(3));
assert(Out.dim(1) == In.dim(1));
// mean of each ROW
unsigned batchNum = Out.dim(0) * Out.dim(2) * Out.dim(3);
unsigned stateLength = Out.dim(1);
unsigned sentenceLength = (In.dim(0) * In.dim(2) * In.dim(3)) / batchNum;
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> inWrap(In);
//cerr << "outWrap=" << outWrap.Debug() << endl;
VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
unsigned size = outWrap.size();
unsigned threads = ::min((unsigned)MAX_THREADS, size);
unsigned blocks = (size / threads) + ((size % threads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gMean), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, sentenceLengthsWrap);
HANDLE_ERROR(hipGetLastError());
}
__global__ void gWeightedMean(TensorWrapper<float> out,
const TensorWrapper<float> weights,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> mapping
)
{
int numHypos = weights.dim(0);
int states = in.dim(1);
int srcLen = weights.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < numHypos * states) {
int hypoInd = id / states;
int batchInd = mapping[hypoInd];
int stateInd = id % states;
//printf("hypoInd=%d batchInd=%d stateInd=%d \n", hypoInd, batchInd, stateInd);
float sum = 0.0f;
for (unsigned i = 0; i < srcLen; ++i) {
sum += weights(hypoInd, i) * in(i, stateInd, 0, batchInd);
}
out[id] = sum;
}
}
void WeightedMean(Tensor& Out,const Tensor& Weights, const Tensor& In, const mblas::Vector<unsigned>& mapping)
{
int numHypos = Weights.dim(0);
int states = In.dim(1);
Out.NewSize(numHypos, states);
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> weightsWrap(Weights);
TensorWrapper<float> inWrap(In);
VectorWrapper<unsigned> mappingWrap(mapping);
unsigned size = Out.size();
unsigned nThreads = ::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gWeightedMean), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
outWrap, weightsWrap, inWrap, mappingWrap);
HANDLE_ERROR(hipGetLastError());
/*
cerr << "nBlocks=" << nBlocks << endl;
cerr << "Out=" << outWrap.Debug() << endl;
cerr << "Weights=" << weightsWrap.Debug() << endl;
cerr << "In=" << inWrap.Debug() << endl;
cerr << "mapping=" << mapping.size() << endl;
for (unsigned i = 0; i < mapping.size(); ++i) {
cerr << mapping[i] << " ";
}
cerr << endl << endl;
*/
}
Tensor& Transpose(Tensor& Out, const Tensor& In) {
unsigned m = In.dim(0);
unsigned n = In.dim(1);
Out.NewSize(n, m);
float alpha = 1.0;
float beta = 0.0;
HANDLE_ERROR_CUBLAS(hipblasSgeam(CublasHandler::GetHandle(), HIPBLAS_OP_T, HIPBLAS_OP_T, m, n, &alpha, In.data(), n,
&beta, In.data(), n, Out.data(), m));
return Out;
}
Tensor& Transpose(Tensor& Out) {
thread_local Tensor Temp;
Transpose(Temp, Out);
Swap(Out, Temp);
return Out;
}
Tensor& Concat(Tensor& Out, const Tensor& In) {
unsigned oldSize = Out.size();
Out.Resize(Out.dim(0) + In.dim(0), Out.dim(1));
mblas::copy(In.data(), In.size(), Out.data() + oldSize, hipMemcpyDeviceToDevice);
return Out;
}
Tensor& Copy(Tensor& Out, const Tensor& In) {
Out.NewSize(In.dim(0), In.dim(1), In.dim(2), In.dim(3));
mblas::copy(In.data(), In.size(), Out.data(), hipMemcpyDeviceToDevice);
return Out;
}
__global__ void gPasteRows(TensorWrapper<float> out,
const TensorWrapper<float> in,
int rowNo, int colNo)
{
int inRows = in.dim(0);
int inCols = in.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < inRows * inCols) {
int outCols = out.dim(1);
int inRow = id / inCols;
int inCol = id % inCols;
//out[outID] = in[id];
out(rowNo, inCol + colNo, 0, inRow) = in(inRow, inCol);
}
}
void PasteRows(Tensor& Out, const Tensor& In, const unsigned rowNo, unsigned colNo)
{
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> inWrap(In);
unsigned size = In.size();
unsigned nThreads = ::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gPasteRows), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, rowNo, colNo);
HANDLE_ERROR(hipGetLastError());
}
Tensor& PasteRow(Tensor& Out,
const Tensor& In,
const unsigned r, const unsigned c)
{
unsigned start = r * Out.dim(1) + c;
mblas::copy(In.data(), In.size(), Out.data() + start, hipMemcpyDeviceToDevice);
return Out;
}
Tensor& CopyRow(Tensor& Out,
const Tensor& In,
const unsigned r, const unsigned c) {
unsigned length = In.dim(1) - c;
Out.NewSize(1, length);
unsigned start = r * In.dim(1) + c;
//unsigned end = start + length;
//mblas::copy(In.begin() + start, In.begin() + end, Out.begin());
mblas::copy(In.data() + start, length , Out.data(), hipMemcpyDeviceToDevice);
return Out;
}
__global__ void gCopyRows(TensorWrapper<float> out,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> indicesWrap)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < out.size()) {
unsigned dim[SHAPE_SIZE];
out.id2Indices(id, dim);
unsigned indicesInd = dim[0];
unsigned inRow =indicesWrap[indicesInd];
out(indicesInd, dim[1]) = in(inRow, dim[1]);
}
}
Tensor& CopyRows(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned>& indices)
{
assert(In.dim(1) == Out.dim(1));
assert(Out.dim(0) == indices.size());
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
/*
cerr << "Out=" << Out.Debug(0) << endl;
cerr << "In=" << In.Debug(0) << endl;
cerr << "indices=" << Debug(indices, 2) << endl;
cerr << endl;
*/
unsigned size = Out.size();
unsigned numPairs = indices.size();
TensorWrapper<float> outWrap(Out);
const TensorWrapper<float> inWrap(In);
const VectorWrapper<unsigned> indicesWrap(indices);
//cerr << "size=" << size << endl;
unsigned threads = ::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned blocks = size / threads + ((size % threads == 0) ? 0 : 1);
hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, indicesWrap);
HANDLE_ERROR(hipGetLastError());
return Out;
}
Tensor& Assemble(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned>& indices) {
Out.NewSize(indices.size(), In.dim(1));
//cerr << "Assemble=" << Out.Debug() << " " << In.Debug() << indices.size() << endl;
CopyRows(Out, In, indices);
return Out;
}
__global__ void gSlice(TensorWrapper<float> out,
const TensorWrapper<float> in,
unsigned n, unsigned dim)
{
unsigned row = blockIdx.x;
unsigned inCol = threadIdx.x + dim * n;
unsigned outCol = threadIdx.x;
while (outCol < out.dim(1)) {
out(row, outCol) = in(row, inCol);
inCol += blockDim.x;
outCol += blockDim.x;
}
}
Tensor& Slice(Tensor& Out,
const Tensor& In,
unsigned n, unsigned dim)
{
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
Out.NewSize(In.dim(0), dim);
TensorWrapper<float> outWrap(Out);
const TensorWrapper<float> inWrap(In);
/*
cerr << "outWrap=" << outWrap.Debug() << endl;
cerr << "inWrap=" << inWrap.Debug() << endl;
cerr << "n=" << n << endl;
cerr << "dim=" << dim << endl;
cerr << endl;
*/
unsigned threads = ::min((unsigned)MAX_THREADS, (unsigned)dim);
unsigned blocks = In.dim(0);
hipLaunchKernelGGL(( gSlice), dim3(blocks), dim3(threads), 0, CudaStreamHandler::GetStream(),
outWrap, inWrap, n, dim);
HANDLE_ERROR(hipGetLastError());
return Out;
}
Tensor& Prod(hipblasHandle_t handle, Tensor& C, const Tensor& A, const Tensor& B, bool transB)
{
BEGIN_TIMER("Prod");
assert((A.dim(2) == 1 && A.dim(3) == 1) || (B.dim(2) == 1 && B.dim(3) == 1)); // at least one operand must have trivial 3rd/4th dims
Tensor::value_type alpha = 1.0;
Tensor::value_type beta = 0.0;
unsigned m = A.dim(0) * A.dim(2) * A.dim(3);
unsigned k = A.dim(1);
unsigned mOut = A.dim(0);
unsigned kOut = A.dim(1);
/*
if(transA) {
std::swap(m, k);
std::swap(mOut, kOut);
}
*/
unsigned l = B.dim(0) * B.dim(2) * B.dim(3);
unsigned n = B.dim(1);
unsigned lOut = B.dim(0);
unsigned nOut = B.dim(1);
if(transB) {
std::swap(l, n);
std::swap(lOut, nOut);
}
assert(k == l);
unsigned lda = A.dim(1);
unsigned ldb = B.dim(1);
unsigned ldc = transB ? B.dim(0) * B.dim(2) * B.dim(3) : B.dim(1);
unsigned dim2 = A.dim(2);
if (transB) {
// for GetAlignedSourceContext()
assert(A.dim(2) == 1 && A.dim(3) == 1);
C.NewSize(B.dim(2), nOut, 1, 1);
}
else {
C.NewSize(mOut, nOut, A.dim(2) * B.dim(2), A.dim(3) * B.dim(3));
}
/*
cerr << "C=" << C.Debug(0) << endl;
cerr << "A=" << A.Debug(0) << endl;
cerr << "B=" << B.Debug(0) << endl;
cerr << "transB=" << transB << endl;
cerr << m << " " << n << " " << k << endl;
cerr << lda << " " << ldb << " " << ldc << endl;
cerr << endl;
*/
bool transA = false;
hipblasOperation_t opA = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t opB = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
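// cuBLAS/hipBLAS work in column-major order; swapping the operand order (B first, A second) makes the
// routine compute C^T in column-major layout, which is exactly the row-major product C = A * B.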
HANDLE_ERROR_CUBLAS(hipblasSgemm(handle, opB, opA,
n, m, k,
&alpha,
B.data(), ldb,
A.data(), lda,
&beta,
C.data(), ldc));
PAUSE_TIMER("Prod");
return C;
}
Tensor& Prod(Tensor& C, const Tensor& A, const Tensor& B,
bool transB) {
//std::cerr << "1C=" << C.Debug() << std::endl;
//std::cerr << "1A=" << A.Debug() << std::endl;
//std::cerr << "1B=" << B.Debug() << std::endl;
Tensor &ret = Prod(CublasHandler::GetHandle(), C, A, B, transB);
//std::cerr << "2C=" << C.Debug() << std::endl;
return ret;
}
__global__ void gSoftMax(TensorWrapper<float> out,
const VectorWrapper<unsigned> batchIdsWrap,
const VectorWrapper<unsigned> sentenceLengthsWrap)
{
extern __shared__ float _share[];
unsigned numHypos = out.dim(0);
unsigned maxLength = out.dim(1);
int hypoInd = blockIdx.x;
int origSrcPos = threadIdx.x;
while (hypoInd < numHypos) {
unsigned batch = batchIdsWrap[hypoInd];
unsigned length = sentenceLengthsWrap[batch];
VectorWrapper<float> _max(_share, blockDim.x);
if (origSrcPos < length) {
_max[origSrcPos] = out(hypoInd, origSrcPos);
}
else {
_max[origSrcPos] = LOWEST_FLOAT;
}
for (int tid = 0; tid < length; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < length) {
float value = out(hypoInd, srcPos);
int batch = batchIdsWrap[hypoInd];
value *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0;
if (value > _max[origSrcPos]) {
_max[origSrcPos] = value;
}
}
}
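// Tree reduction over shared memory: halve the number of active threads each step to obtain the row maximum in _max[0].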
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
if(_max[origSrcPos + skip] > _max[origSrcPos])
_max[origSrcPos] = _max[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, blockDim.x);
_sum[origSrcPos] = 0.0f;
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos) = __expf(out(hypoInd, srcPos) - max);
out(hypoInd, srcPos) *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0; // sentencesMappingWrap(srcPos, batch, 0, 0);
_sum[origSrcPos] += out(hypoInd, srcPos);
}
}
__syncthreads();
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
_sum[origSrcPos] += _sum[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos) /= _sum[0];
}
}
__syncthreads();
hypoInd += gridDim.x;
}
}
Tensor& Softmax(Tensor& Out,
const mblas::Vector<unsigned>& batchIds,
const mblas::Vector<unsigned> &sentenceLengths)
{
unsigned numHypos = Out.dim(0);
unsigned maxLength = Out.dim(1);
TensorWrapper<float> outWrap(Out);
const VectorWrapper<unsigned> batchIdsWrap(batchIds);
const VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
int blocks = ::min(MAX_BLOCKS, (int)numHypos);
int threads = ::min(MAX_THREADS, (int)maxLength);
int shared = sizeof(float) * threads;
/*
std::cerr << "Out=" << Out.Debug(2) << std::endl;
std::cerr << "batchIds=" << batchIds.Debug(1) << std::endl;
std::cerr << "sentenceLengths=" << sentenceLengths.Debug(1) << std::endl;
std::cerr << "blocks=" << blocks << std::endl;
std::cerr << "threads=" << threads << std::endl;
*/
hipLaunchKernelGGL(( gSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
outWrap, batchIdsWrap, sentenceLengthsWrap);
HANDLE_ERROR(hipGetLastError());
/*
std::cerr << "Out=" << Out.Debug(2) << std::endl;
std::cerr << std::endl;
*/
return Out;
}
__global__ void gLogSoftMax(TensorWrapper<float> out, unsigned shareSize)
{
extern __shared__ float _share[];
unsigned rows = out.dim(0);
unsigned cols = out.dim(1);
int rowIdx = blockIdx.x;
while (rowIdx < rows) {
//float* _max = _share;
VectorWrapper<float> _max(_share, shareSize);
_max[threadIdx.x] = out(rowIdx, threadIdx.x);
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
const float &val = out(rowIdx, id);
if (val > _max[threadIdx.x]) {
_max[threadIdx.x] = val;
}
}
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x])
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = exp(row[id] - max);
float &val = out(rowIdx, id);
val = __expf(val - max);
_sum[threadIdx.x] += val;
}
}
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = log(row[id]/_sum[0]);
float &val = out(rowIdx, id);
val = __logf(val /_sum[0]);
}
}
__syncthreads();
rowIdx += gridDim.x;
}
}
Tensor& LogSoftmax(Tensor& Out)
{
TensorWrapper<float> outWrap(Out);
int blocks = ::min(MAX_BLOCKS, (int)Out.dim(0));
int threads = ::min(MAX_THREADS, (int)Out.dim(1));
int shared = sizeof(float) * threads;
hipLaunchKernelGGL(( gLogSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
Out, threads);
HANDLE_ERROR(hipGetLastError());
return Out;
}
__global__ void gSetColumn(TensorWrapper<float> in, int noColumn, float value) {
int n_rows = in.dim(0);
int rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
if (rowNumber < n_rows) {
in(rowNumber, noColumn) = value;
}
}
void SetColumn(Tensor& In, int noColumn, float value) {
int nRows = In.dim(0);
int nBlocks = nRows / MAX_THREADS + ((nRows % MAX_THREADS == 0) ? 0 : 1);
int nThreads = ::min(MAX_THREADS, nRows);
TensorWrapper<float> inWrap(In);
hipLaunchKernelGGL(( gSetColumn), dim3(nBlocks), dim3(nThreads), 0, mblas::CudaStreamHandler::GetStream(),
inWrap, noColumn, value);
HANDLE_ERROR(hipGetLastError());
}
__global__ void gFill(TensorWrapper<float> in, float val) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in.size()) {
in[index] = val;
}
}
void Fill(Tensor& In, float value) {
unsigned size = In.size();
if (value) {
int nThreads = ::min(MAX_THREADS, (int)size);
int nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
TensorWrapper<float> inWrap(In);
hipLaunchKernelGGL(( gFill), dim3(nBlocks), dim3(nThreads), 0, CudaStreamHandler::GetStream(),
inWrap, value);
HANDLE_ERROR(hipGetLastError());
}
else {
HANDLE_ERROR(hipMemsetAsync(In.data(), 0, size * sizeof(float), CudaStreamHandler::GetStream()));
}
}
__global__
void gMapMatrix(TensorWrapper<float> in,
const VectorWrapper<unsigned> sentenceLengthsWrap,
int i)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < in.size()) {
int numCols = in.dim(1);
int batchIdx = tid / numCols;
int col = tid % numCols;
//in[tid] *= mappingWrap(i, batchIdx, 0, 0);
in(batchIdx, col) *= (i < sentenceLengthsWrap[batchIdx] ? 1 : 0);
}
}
void MapMatrix(Tensor& state,
const mblas::Vector<unsigned> &sentenceLengths,
unsigned i)
{
// blank out rows in the state matrix where the word position i does not exist
// mapping is a concatenated array of 1s & 0s, one per word position per sentence in the batch, saying whether that word exists or not.
int batchSize = state.dim(0);
int stateLength = state.dim(1);
int numThreads = ::min((int)state.size(), MAX_THREADS);
int numBlocks = (state.size() / numThreads) + ((state.size() % numThreads == 0) ? 0 : 1);
TensorWrapper<float> stateWrap(state);
VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
hipLaunchKernelGGL(( gMapMatrix), dim3(numBlocks), dim3(numThreads), 0, CudaStreamHandler::GetStream(),
stateWrap, sentenceLengthsWrap, i);
HANDLE_ERROR(hipGetLastError());
/*
cerr << "nBlocks=" << numBlocks << endl;
cerr << "nThreads=" << numThreads << endl;
cerr << "stateWrap=" << stateWrap.Debug() << endl;
cerr << "mapping=" << Debug(mapping, 2) << endl;
cerr << "i=" << i << endl;
cerr << std::endl;
HANDLE_ERROR(hipDeviceSynchronize());
*/
}
__device__ unsigned getIndex(const dim3 &dim, const dim3 &val)
{
unsigned ret = dim.x * val.x + dim.y * val.y + dim.z * val.z;
return ret;
}
__global__ void gLNormalization(TensorWrapper<float> out,
const TensorWrapper<float> in,
const TensorWrapper<float> alphaWrap,
const TensorWrapper<float> betaWrap,
float eps=0.00001)
{
extern __shared__ float _share[];
//printf("blockDim.x=%d gridDim.x=%d \n", blockDim.x, gridDim.x);
// blockDim.x=512 gridDim.x=1
int cols = in.dim(1);
assert(blockIdx.x < in.dim(0));
assert(blockIdx.y < in.dim(2));
assert(blockIdx.z < in.dim(3));
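// Per-thread partial sums live in the upper half of the shared buffer; the same region is reused further down for the squared-deviation sums.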
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
_sum[threadIdx.x] += in(blockIdx.x, id, blockIdx.y, blockIdx.z);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = in(blockIdx.x, id, blockIdx.y, blockIdx.z) - mean;
out(blockIdx.x, id, blockIdx.y, blockIdx.z) = ex;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float &val = out(blockIdx.x, id, blockIdx.y, blockIdx.z);
if (betaWrap.size()) {
val = alphaWrap[id] * (val / sigma) + betaWrap[id];
} else {
val = alphaWrap[id] * (val / sigma);
}
}
}
}
void Normalization(Tensor &out,
const Tensor &in,
const Tensor &alpha,
const Tensor *beta,
float eps)
{
assert(in.dim(0) < MAX_BLOCKS);
assert(in.dim(2) < MAX_BLOCKS);
assert(in.dim(3) < MAX_BLOCKS);
//out.Reshape(in.dim(0), in.dim(1), in.dim(2), in.dim(3));
int numThreads = ::min((unsigned) in.dim(1), (unsigned) MAX_THREADS);
dim3 numBlocks(in.dim(0), in.dim(2), in.dim(3));
int shared = numThreads * sizeof(float) * 2;
TensorWrapper<float> outWrap(out);
const TensorWrapper<float> inWrap(in);
const TensorWrapper<float> alphaWrap(alpha);
TensorWrapper<float> *betaWrap = beta ? new TensorWrapper<float>(*beta) : new TensorWrapper<float>();
hipLaunchKernelGGL(( gLNormalization), dim3(numBlocks), dim3(numThreads), shared, CudaStreamHandler::GetStream(),
outWrap, inWrap, alphaWrap, *betaWrap, eps);
HANDLE_ERROR(hipGetLastError());
/*
//std::cerr << "nBlocks=" << numBlocks << std::endl;
std::cerr << "nThreads=" << numThreads << std::endl;
std::cerr << "outWrap=" << outWrap.Debug() << std::endl;
std::cerr << "inWrap=" << inWrap.Debug() << std::endl;
std::cerr << "alphaWrap=" << alphaWrap.Debug() << std::endl;
std::cerr << "betaWrap=" << betaWrap->Debug() << std::endl;
std::cerr << std::endl;
HANDLE_ERROR(hipDeviceSynchronize());
*/
delete betaWrap;
}
void Normalization(Tensor& out, const Tensor& in, const Tensor& alpha, const Tensor& beta,
float eps)
{
Normalization(out, in, alpha, &beta, eps);
}
void Normalization(Tensor& out, const Tensor& in, const Tensor& alpha, float eps)
{
Normalization(out, in, alpha, nullptr, eps);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void gBeamSizeInit(VectorWrapper<unsigned> hypo2BeamSizeWrap,
VectorWrapper<unsigned> batch2HypoWrap,
VectorWrapper<unsigned> hypo2CandidateWrap,
bool isFirst,
unsigned beamSizeSum,
const VectorWrapper<unsigned> beamSizesWrap)
{
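// Single-threaded bookkeeping kernel: fills the hypo -> beam-size, hypo -> candidate-offset and
// batch -> hypo lookup tables used by the fused softmax / n-best kernels below.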
unsigned hypoInd = 0;
unsigned candidateInd = 0;
unsigned a = 0, b = 0;
//printf("beamSizesWrap.size()=%u \n", beamSizesWrap.size());
for (unsigned batchInd = 0; batchInd < beamSizesWrap.size(); ++batchInd) {
unsigned beamSize = beamSizesWrap[batchInd];
/*
printf("batchInd=%u ", batchInd);
printf("beamSize=%u ", beamSize);
printf("a=%u ", a);
printf("b=%u \n", b);
*/
if (beamSize) {
if (isFirst) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = batchInd;
++b;
candidateInd += beamSize;
}
else {
for (unsigned j = 0; j < beamSize; ++j) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
candidateInd += beamSize;
}
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = hypoInd;
++b;
}
hypoInd += beamSize;
}
}
}
__device__
float GetMaxScore(const TensorWrapper<NthOutBatch> &nBestMatrix)
{
float ret = LOWEST_FLOAT;
for (unsigned i = 0; i < nBestMatrix.dim(1); ++i) {
const NthOutBatch &curr = nBestMatrix[i];
if (curr.score > ret) {
ret = curr.score;
}
}
return ret;
}
__device__
void AddElement(float &minScore,
unsigned &i,
VectorWrapper<NthOutBatch> &vec,
bool forbidUNK,
unsigned vocabInd,
const NthOutBatch &ele)
{
const float score = ele.score;
if (forbidUNK && vocabInd == UNK_ID) {
vec[i].score = LOWEST_FLOAT;
minScore = LOWEST_FLOAT;
}
else {
vec[i] = ele;
if (score < minScore) {
minScore = score;
}
++i;
}
}
__device__
void MergeElement(float &minScore,
VectorWrapper<NthOutBatch> &vec,
unsigned arrSize,
const NthOutBatch &ele)
{
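// Replace the entry that currently holds the minimum score with ele, and track the new minimum while scanning.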
float newMinScore = HIGHEST_FLOAT;
bool found = false;
for (unsigned i = 0; i < arrSize; ++i) {
NthOutBatch &currEle = vec[i];
if (!found && minScore == currEle.score) {
currEle = ele;
found = true;
}
// update min score
if (currEle.score < newMinScore) {
newMinScore = currEle.score;
}
}
minScore = newMinScore;
}
__device__
void MergeElement(float &minScore,
VectorWrapper<NthOutBatch> &vec,
unsigned arrSize,
const NthOutBatch &ele,
bool forbidUNK,
unsigned vocabInd)
{
if (forbidUNK && vocabInd == UNK_ID) {
// do nothing
}
else if (ele.score > minScore) {
// replace element with min score
MergeElement(minScore, vec, arrSize, ele);
/*
printf("arrInd=%d ind=%d vocabId=%d \n",
arrInd,
_max[threadIdx.x].ind,
_max[threadIdx.x].vocabId);
*/
}
}
__device__
void MaxAndSum(float &max, float &tot, const float &val)
{
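// Streaming max and sum of exp(val - max): when a new maximum arrives, rescale the accumulated sum
// by exp(oldMax - newMax) before adding the new term.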
if (val > max) {
float delta = max - val; // val - max; // TODO see LogSoftmaxFn
tot *= __expf(delta);
max = val;
tot += 1; // exp(val - max) = exp(0) = 1
}
else {
tot += __expf(val - max);
}
}
__device__
void NBestAndMaxAndSum(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
TensorWrapper<NthOutBatch> &nBestMatrix,
VectorWrapper<float> &max,
VectorWrapper<float> &sum,
const TensorWrapper<float> &in,
const TensorWrapper<float> &b4Wrap,
const unsigned hypoInd,
const unsigned maxBeamSize,
const bool forbidUNK,
const VectorWrapper<unsigned> &hypo2BeamSizeWrap,
const VectorWrapper<unsigned> &hypo2CandidateWrap,
bool requireProb)
{
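// Each thread scans a strided slice of the vocabulary, keeping its own beamSize-best candidates plus a
// running max and sum of exponentials; the per-thread results are then merged with a tree reduction and
// thread 0 writes the final n-best out.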
assert(max.size() == blockDim.x);
assert(sum.size() == blockDim.x);
VectorWrapper<NthOutBatch> row = nBestMatrix.Row(threadIdx.x);
unsigned vocabSize = in.dim(1);
assert(hypoInd < hypo2BeamSizeWrap.size());
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
float minScore = HIGHEST_FLOAT;
// init
unsigned vocabInd = threadIdx.x;
max[threadIdx.x] = LOWEST_FLOAT;
sum[threadIdx.x] = 0.0f;
unsigned i = 0;
while (vocabInd < vocabSize && i < beamSize) {
const float score = in(hypoInd, vocabInd) + b4Wrap(0, vocabInd);
unsigned arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
AddElement(minScore, i, row, forbidUNK, vocabInd, ele);
// max & sum
if (requireProb) {
MaxAndSum(max[threadIdx.x], sum[threadIdx.x], score);
}
vocabInd += blockDim.x;
}
// MAIN LOOP
while (vocabInd < vocabSize) {
const float score = in(hypoInd, vocabInd) + b4Wrap(0, vocabInd);
unsigned arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
MergeElement(minScore, row, beamSize, ele, forbidUNK, vocabInd);
// max & sum
MaxAndSum(max[threadIdx.x], sum[threadIdx.x], score);
vocabInd += blockDim.x;
} // while (vocabInd < vocabSize) {
// merge nbest from different threads
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
for (unsigned i = 0; i < beamSize; ++i) {
const NthOutBatch &ele = nBestMatrix(threadIdx.x + skip, i);
if (ele.score > minScore) {
MergeElement(minScore, row, beamSize, ele);
}
}
}
len = (len + 1) >> 1;
}
__syncthreads();
if (threadIdx.x == 0) {
// copy to output array
assert(hypoInd < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
const NthOutBatch &curr = nBestMatrix(0, i);
//printf("vocabInd=%u \n", best.vocabInd);
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestCandidatesWrap[candidateInd + i] = curr;
}
}
// top score and sum
if (requireProb) {
unsigned size = max.size();
unsigned len = (size + 1) >> 1;
//printf("size=%i %i \n", size, len);
unsigned ind = threadIdx.x;
float &max0 = max[ind];
float &sum0 = sum[ind];
while (len) {
__syncthreads();
//printf("size=%i %i \n", size, len);
unsigned otherInd = ind + len;
if (otherInd < size) {
const float &maxOther = max[otherInd];
const float &sumOther = sum[otherInd];
if (max0 > maxOther) {
float delta = maxOther - max0;
sum0 = sum0 + __expf(delta) * sumOther;
}
else {
float delta = max0 - maxOther;
sum0 = __expf(delta) * sum0 + sumOther;
max0 = maxOther;
}
}
size = len;
len = (len > 1) ? (len + 1) >> 1 : 0;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__device__
void dLogSoftMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
const TensorWrapper<float> &in,
const TensorWrapper<float> &b4Wrap,
const unsigned hypoInd,
const unsigned maxBeamSize,
const float topScore,
const float sumExp,
const VectorWrapper<unsigned> &hypo2BeamSizeWrap,
const VectorWrapper<unsigned> &hypo2CandidateWrap)
{
unsigned vocabSize = in.dim(1);
// apply partition and log to top
if (threadIdx.x == 0) {
//printf("sum=%f \n", sum[0]);
//printf("val=%f %f \n", in(rowIdx, ele.vocabId, 0, 0), val);
// nbest
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
unsigned startPos = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
NthOutBatch &ele = nBestCandidatesWrap[startPos + i];
float &val = ele.score;
val = __expf(val - topScore);
val = __logf(val /sumExp);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gLogSoftMax(VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const TensorWrapper<float> in,
const TensorWrapper<float> b4Wrap,
unsigned maxBeamSize,
bool forbidUNK,
const VectorWrapper<unsigned> hypo2BeamSizeWrap,
const VectorWrapper<unsigned> hypo2CandidateWrap,
bool requireProb)
{
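// Shared memory layout: blockDim.x floats for per-thread maxima, blockDim.x floats for per-thread sums,
// then a blockDim.x x maxBeamSize block of NthOutBatch candidates.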
extern __shared__ char _sharePtr[];
void *ptrOffset = _sharePtr;
VectorWrapper<float> max((float*)ptrOffset, blockDim.x);
ptrOffset = _sharePtr + sizeof(float) * blockDim.x;
VectorWrapper<float> sum((float*)ptrOffset, blockDim.x);
ptrOffset = _sharePtr + 2 * sizeof(float) * blockDim.x;
TensorWrapper<NthOutBatch> nBestMatrix((NthOutBatch*)ptrOffset, blockDim.x, maxBeamSize, 1, 1);
unsigned hypos = in.dim(0);
unsigned vocabSize = in.dim(1);
unsigned hypoInd = blockIdx.x; // index of previous hypo
while (hypoInd < hypos) {
NBestAndMaxAndSum(nBestCandidatesWrap,
nBestMatrix,
max,
sum,
in,
b4Wrap,
hypoInd,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap,
requireProb);
__syncthreads();
if (requireProb) {
const float topScore = max[0];
const float sumExp = sum[0];
dLogSoftMax(nBestCandidatesWrap,
in,
b4Wrap,
hypoInd,
maxBeamSize,
topScore,
sumExp,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
}
__syncthreads();
hypoInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gNBestPerBatch(VectorWrapper<NthOutBatch> nBestWrap,
VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const TensorWrapper<float> in,
const VectorWrapper<float> costsWrap,
unsigned maxBeamSize,
bool forbidUNK,
bool isFirst,
const VectorWrapper<unsigned> hypo2BeamSizeWrap,
const VectorWrapper<unsigned> batch2HypoWrap,
const VectorWrapper<unsigned> hypo2CandidateWrap)
{
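// For each batch: copy the candidates of its first hypothesis (adding the accumulated path cost), then
// merge the candidates of the remaining hypotheses, keeping the top beamSize by score.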
//unsigned rows = in.dim(0);
unsigned batchSize = batch2HypoWrap.size();
unsigned batchInd = blockIdx.x;
while (batchInd < batchSize) {
assert(batchInd < batch2HypoWrap.size());
assert(batchInd < hypo2BeamSizeWrap.size());
assert(batchInd < nBestWrap.size());
unsigned hypoInd = batch2HypoWrap[batchInd];
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
assert(beamSize);
unsigned nextHypoInd;
if (isFirst) {
nextHypoInd = batchInd * beamSize;
}
else {
nextHypoInd = hypoInd;
}
// candidates from the 1st hypo
float minScore = HIGHEST_FLOAT;
assert(hypoInd < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
float prevCost;
if (isFirst) {
assert(batchInd < costsWrap.size());
prevCost = costsWrap[batchInd];
}
else {
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert(hypoInd < costsWrap.size());
prevCost = costsWrap[hypoInd];
}
assert((nextHypoInd + i) < nBestWrap.size());
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestWrap[nextHypoInd + i] = nBestCandidatesWrap[candidateInd + i];
float &score = nBestWrap[nextHypoInd + i].score;
score += prevCost;
if (score < minScore) {
minScore = score;
}
}
// candidates from other previous hypos
if (!isFirst) {
assert(nextHypoInd < nBestWrap.size());
VectorWrapper<NthOutBatch> offset = nBestWrap.Offset(nextHypoInd);
for (unsigned hypoOffset = 1; hypoOffset < beamSize; ++hypoOffset) {
//printf("hypoInd=%d \n", (hypoInd + hypoOffset));
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert((hypoInd + hypoOffset) < costsWrap.size());
float prevCost = costsWrap[hypoInd + hypoOffset];
assert((hypoInd + hypoOffset) < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd + hypoOffset];
for (unsigned candidateOffset = 0; candidateOffset < beamSize; ++candidateOffset) {
assert((candidateInd + candidateOffset) < nBestCandidatesWrap.size());
NthOutBatch &candidate = nBestCandidatesWrap[candidateInd + candidateOffset];
candidate.score += prevCost;
assert(nextHypoInd < nBestWrap.size());
NthOutBatch *arr = &nBestWrap[nextHypoInd];
if (candidate.score > minScore) {
MergeElement(minScore, offset, beamSize, candidate);
}
}
}
}
batchInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
void LogSoftmaxAndNBest(mblas::Vector<NthOutBatch> &nBest,
const Tensor& in,
const Tensor& b4,
const mblas::Vector<float> &costs,
bool forbidUNK,
unsigned maxBeamSize,
const std::vector<unsigned>& beamSizes,
unsigned beamSizeSum,
bool isFirst,
bool requireProb)
{
//BEGIN_TIMER("LogSoftmax excl kernels");
bool safe = (maxBeamSize * MAX_THREADS) < in.dim(1);
if (!safe) {
cerr << "The target vocab size looks too small for the fused softmax function. If you experience a crash, add '--use-fused-softmax false' when running amun" << endl;
}
//cerr << "in=" << in.Debug(0) << endl;
//cerr << "beamSizes=" << beamSizes.size() << endl;
// create beam size vectors on GPU but exclude empty beams
unsigned batchSize = 0;
unsigned candidateInd = 0;
for (unsigned batchInd = 0; batchInd < beamSizes.size(); ++batchInd) {
unsigned beamSize = beamSizes[batchInd];
//cerr << "(" << beamSize << "," << hypoInd << ") ";
if (beamSize) {
if (isFirst) {
candidateInd += beamSize;
}
else {
candidateInd += beamSize * beamSize;
}
++batchSize;
}
}
mblas::Vector<unsigned> d_beamSizes(beamSizes);
mblas::Vector<unsigned> hypo2BeamSize(in.dim(0));
mblas::Vector<unsigned> hypo2Candidate(in.dim(0));
mblas::Vector<unsigned> batch2Hypo(batchSize);
mblas::Vector<NthOutBatch> nBestCandidates(candidateInd);
/*
cerr << "in=" << in.Debug(1) << endl;
cerr << "beamSizes=" << beamSizes.size() << endl;
cerr << "beamSizeSum=" << beamSizeSum << endl;
cerr << "batchSize=" << batchSize << endl;
cerr << "candidateInd=" << candidateInd << endl;
cerr << "hypo2BeamSize=" << hypo2BeamSize.Debug(0) << endl;
cerr << "hypo2Candidate=" << hypo2Candidate.Debug(0) << endl;
cerr << "batch2Hypo=" << batch2Hypo.Debug(0) << endl;
cerr << "nBest=" << nBest.Debug(0) << endl;
cerr << "nBestCandidates=" << nBestCandidates.Debug(0) << endl;
cerr << endl;
*/
TensorWrapper<float> inWrap(in);
TensorWrapper<float> b4Wrap(b4);
VectorWrapper<unsigned> hypo2BeamSizeWrap(hypo2BeamSize);
VectorWrapper<unsigned> hypo2CandidateWrap(hypo2Candidate);
VectorWrapper<unsigned> batch2HypoWrap(batch2Hypo);
VectorWrapper<NthOutBatch> nBestWrap(nBest);
VectorWrapper<NthOutBatch> nBestCandidatesWrap(nBestCandidates);
VectorWrapper<float> costsWrap(costs);
VectorWrapper<unsigned> beamSizesWrap(d_beamSizes);
//PAUSE_TIMER("LogSoftmax excl kernels");
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//BEGIN_TIMER("gBeamSizeInit");
hipLaunchKernelGGL(( gBeamSizeInit), dim3(1), dim3(1), 0, CudaStreamHandler::GetStream(),
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap,
isFirst,
beamSizeSum,
beamSizesWrap
);
HANDLE_ERROR(hipGetLastError());
//PAUSE_TIMER("gBeamSizeInit");
/*
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 2) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 2) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 2) << endl;
cerr << endl;
*/
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
unsigned blocks = ::min((unsigned) MAX_BLOCKS, in.dim(0));
unsigned threads = ::min((unsigned)MAX_THREADS, in.dim(1));
unsigned shared = sizeof(NthOutBatch) * threads * maxBeamSize
+ 2 * sizeof(float) * threads;
//BEGIN_TIMER("gLogSoftMax");
hipLaunchKernelGGL(( gLogSoftMax), dim3(blocks), dim3(threads), shared, CudaStreamHandler::GetStream(),
nBestCandidatesWrap,
inWrap,
b4Wrap,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap,
requireProb);
HANDLE_ERROR(hipGetLastError());
//PAUSE_TIMER("gLogSoftMax");
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
blocks = ::min((unsigned) MAX_BLOCKS, batchSize);
//BEGIN_TIMER("gNBestPerBatch");
hipLaunchKernelGGL(( gNBestPerBatch), dim3(blocks), dim3(1), 0, CudaStreamHandler::GetStream(),
nBestWrap,
nBestCandidatesWrap,
inWrap,
costsWrap,
maxBeamSize,
forbidUNK,
isFirst,
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap);
HANDLE_ERROR(hipGetLastError());
//PAUSE_TIMER("gNBestPerBatch");
//HANDLE_ERROR( hipStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "3costs=" << Debug(costs, 0) << endl;
}
void TestMemCpy()
{
using namespace std;
cerr << "Starting" << endl;
unsigned NUM = 10;
vector<float> h_vec1(NUM);
for (unsigned i = 0; i < NUM; ++i) {
h_vec1[i] = i * 3;
}
TestMemCpy(NUM, h_vec1.data());
cerr << "Finished" << endl;
}
} // namespace mblas
} // namespace GPU
} // namespace amunmt
| f2b50111d874e798550fa365302f384b5f52ce42.cu | #include "gpu/mblas/tensor_functions.h"
#include "gpu/mblas/handles.h"
using namespace std;
namespace amunmt {
namespace GPU {
namespace mblas {
thread_local CudaStreamHandler CudaStreamHandler::instance_;
thread_local CublasHandler CublasHandler::instance_;
Tensor& Swap(Tensor& Out, Tensor& In) {
Out.swap(In);
return Out;
}
__global__ void gMean(TensorWrapper<float> out,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> sentenceLengths)
{
// out = batches * states
// in = max sentence length * states * 1 * batches
// mapping = max length * batches
int id = threadIdx.x + blockIdx.x * blockDim.x;
//printf("id = %d in = %lu %lu %lu %lu = %lu %lu \n", id, in.dim(0), in.dim(1), in.dim(2), in.dim(3), in.size(), sizeof(in));
if (id < out.size()) {
unsigned indices[SHAPE_SIZE];
out.id2Indices(id, indices);
//printf("%d -> %lu %lu %lu %lu \n", id, indices[0], indices[1], indices[2], indices[3]);
unsigned batch = indices[0];
unsigned state = indices[1];
float sum = 0.0f;
int counter = 0;
for (unsigned row = 0; row < in.dim(0); ++row) {
bool isWord = row < sentenceLengths[batch];
//printf("batch=%lu startMapInd=%lu mapOffset=%lu -> %d \n", batch, startMapInd, mapOffset, isWord);
if (isWord) {
sum += in(row, state, 0, batch);
++counter;
}
}
sum /= (float) counter;
out[id] = sum;
}
}
void Mean(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned> &sentenceLengths)
{
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
assert(Out.dim(0) == In.dim(3));
assert(Out.dim(1) == In.dim(1));
// mean of each ROW
unsigned batchNum = Out.dim(0) * Out.dim(2) * Out.dim(3);
unsigned stateLength = Out.dim(1);
unsigned sentenceLength = (In.dim(0) * In.dim(2) * In.dim(3)) / batchNum;
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> inWrap(In);
//cerr << "outWrap=" << outWrap.Debug() << endl;
VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
unsigned size = outWrap.size();
unsigned threads = std::min((unsigned)MAX_THREADS, size);
unsigned blocks = (size / threads) + ((size % threads == 0) ? 0 : 1);
gMean<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, sentenceLengthsWrap);
HANDLE_ERROR(cudaGetLastError());
}
__global__ void gWeightedMean(TensorWrapper<float> out,
const TensorWrapper<float> weights,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> mapping
)
{
int numHypos = weights.dim(0);
int states = in.dim(1);
int srcLen = weights.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < numHypos * states) {
int hypoInd = id / states;
int batchInd = mapping[hypoInd];
int stateInd = id % states;
//printf("hypoInd=%d batchInd=%d stateInd=%d \n", hypoInd, batchInd, stateInd);
float sum = 0.0f;
for (unsigned i = 0; i < srcLen; ++i) {
sum += weights(hypoInd, i) * in(i, stateInd, 0, batchInd);
}
out[id] = sum;
}
}
void WeightedMean(Tensor& Out,const Tensor& Weights, const Tensor& In, const mblas::Vector<unsigned>& mapping)
{
int numHypos = Weights.dim(0);
int states = In.dim(1);
Out.NewSize(numHypos, states);
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> weightsWrap(Weights);
TensorWrapper<float> inWrap(In);
VectorWrapper<unsigned> mappingWrap(mapping);
unsigned size = Out.size();
unsigned nThreads = std::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
gWeightedMean<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, weightsWrap, inWrap, mappingWrap);
HANDLE_ERROR(cudaGetLastError());
/*
cerr << "nBlocks=" << nBlocks << endl;
cerr << "Out=" << outWrap.Debug() << endl;
cerr << "Weights=" << weightsWrap.Debug() << endl;
cerr << "In=" << inWrap.Debug() << endl;
cerr << "mapping=" << mapping.size() << endl;
for (unsigned i = 0; i < mapping.size(); ++i) {
cerr << mapping[i] << " ";
}
cerr << endl << endl;
*/
}
Tensor& Transpose(Tensor& Out, const Tensor& In) {
unsigned m = In.dim(0);
unsigned n = In.dim(1);
Out.NewSize(n, m);
float alpha = 1.0;
float beta = 0.0;
HANDLE_ERROR_CUBLAS(cublasSgeam(CublasHandler::GetHandle(), CUBLAS_OP_T, CUBLAS_OP_T, m, n, &alpha, In.data(), n,
&beta, In.data(), n, Out.data(), m));
return Out;
}
Tensor& Transpose(Tensor& Out) {
thread_local Tensor Temp;
Transpose(Temp, Out);
Swap(Out, Temp);
return Out;
}
Tensor& Concat(Tensor& Out, const Tensor& In) {
unsigned oldSize = Out.size();
Out.Resize(Out.dim(0) + In.dim(0), Out.dim(1));
mblas::copy(In.data(), In.size(), Out.data() + oldSize, cudaMemcpyDeviceToDevice);
return Out;
}
Tensor& Copy(Tensor& Out, const Tensor& In) {
Out.NewSize(In.dim(0), In.dim(1), In.dim(2), In.dim(3));
mblas::copy(In.data(), In.size(), Out.data(), cudaMemcpyDeviceToDevice);
return Out;
}
__global__ void gPasteRows(TensorWrapper<float> out,
const TensorWrapper<float> in,
int rowNo, int colNo)
{
int inRows = in.dim(0);
int inCols = in.dim(1);
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < inRows * inCols) {
int outCols = out.dim(1);
int inRow = id / inCols;
int inCol = id % inCols;
//out[outID] = in[id];
out(rowNo, inCol + colNo, 0, inRow) = in(inRow, inCol);
}
}
void PasteRows(Tensor& Out, const Tensor& In, const unsigned rowNo, unsigned colNo)
{
TensorWrapper<float> outWrap(Out);
TensorWrapper<float> inWrap(In);
unsigned size = In.size();
unsigned nThreads = std::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
gPasteRows<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, rowNo, colNo);
HANDLE_ERROR(cudaGetLastError());
}
Tensor& PasteRow(Tensor& Out,
const Tensor& In,
const unsigned r, const unsigned c)
{
unsigned start = r * Out.dim(1) + c;
mblas::copy(In.data(), In.size(), Out.data() + start, cudaMemcpyDeviceToDevice);
return Out;
}
Tensor& CopyRow(Tensor& Out,
const Tensor& In,
const unsigned r, const unsigned c) {
unsigned length = In.dim(1) - c;
Out.NewSize(1, length);
unsigned start = r * In.dim(1) + c;
//unsigned end = start + length;
//mblas::copy(In.begin() + start, In.begin() + end, Out.begin());
mblas::copy(In.data() + start, length , Out.data(), cudaMemcpyDeviceToDevice);
return Out;
}
__global__ void gCopyRows(TensorWrapper<float> out,
const TensorWrapper<float> in,
const VectorWrapper<unsigned> indicesWrap)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < out.size()) {
unsigned dim[SHAPE_SIZE];
out.id2Indices(id, dim);
unsigned indicesInd = dim[0];
unsigned inRow =indicesWrap[indicesInd];
out(indicesInd, dim[1]) = in(inRow, dim[1]);
}
}
Tensor& CopyRows(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned>& indices)
{
assert(In.dim(1) == Out.dim(1));
assert(Out.dim(0) == indices.size());
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
assert(Out.dim(2) == 1);
assert(Out.dim(3) == 1);
/*
cerr << "Out=" << Out.Debug(0) << endl;
cerr << "In=" << In.Debug(0) << endl;
cerr << "indices=" << Debug(indices, 2) << endl;
cerr << endl;
*/
unsigned size = Out.size();
unsigned numPairs = indices.size();
TensorWrapper<float> outWrap(Out);
const TensorWrapper<float> inWrap(In);
const VectorWrapper<unsigned> indicesWrap(indices);
//cerr << "size=" << size << endl;
unsigned threads = std::min((unsigned) MAX_THREADS, (unsigned)size);
unsigned blocks = size / threads + ((size % threads == 0) ? 0 : 1);
gCopyRows<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, indicesWrap);
HANDLE_ERROR(cudaGetLastError());
return Out;
}
Tensor& Assemble(Tensor& Out,
const Tensor& In,
const mblas::Vector<unsigned>& indices) {
Out.NewSize(indices.size(), In.dim(1));
//cerr << "Assemble=" << Out.Debug() << " " << In.Debug() << indices.size() << endl;
CopyRows(Out, In, indices);
return Out;
}
__global__ void gSlice(TensorWrapper<float> out,
const TensorWrapper<float> in,
unsigned n, unsigned dim)
{
unsigned row = blockIdx.x;
unsigned inCol = threadIdx.x + dim * n;
unsigned outCol = threadIdx.x;
while (outCol < out.dim(1)) {
out(row, outCol) = in(row, inCol);
inCol += blockDim.x;
outCol += blockDim.x;
}
}
Tensor& Slice(Tensor& Out,
const Tensor& In,
unsigned n, unsigned dim)
{
assert(In.dim(2) == 1);
assert(In.dim(3) == 1);
Out.NewSize(In.dim(0), dim);
TensorWrapper<float> outWrap(Out);
const TensorWrapper<float> inWrap(In);
/*
cerr << "outWrap=" << outWrap.Debug() << endl;
cerr << "inWrap=" << inWrap.Debug() << endl;
cerr << "n=" << n << endl;
cerr << "dim=" << dim << endl;
cerr << endl;
*/
unsigned threads = std::min((unsigned)MAX_THREADS, (unsigned)dim);
unsigned blocks = In.dim(0);
gSlice<<<blocks, threads, 0, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, n, dim);
HANDLE_ERROR(cudaGetLastError());
return Out;
}
Tensor& Prod(cublasHandle_t handle, Tensor& C, const Tensor& A, const Tensor& B, bool transB)
{
BEGIN_TIMER("Prod");
assert((A.dim(2) == 1 && A.dim(3) == 1) || (B.dim(2) == 1 && B.dim(3) == 1)); // at least one operand must have trivial 3rd/4th dims
Tensor::value_type alpha = 1.0;
Tensor::value_type beta = 0.0;
unsigned m = A.dim(0) * A.dim(2) * A.dim(3);
unsigned k = A.dim(1);
unsigned mOut = A.dim(0);
unsigned kOut = A.dim(1);
/*
if(transA) {
std::swap(m, k);
std::swap(mOut, kOut);
}
*/
unsigned l = B.dim(0) * B.dim(2) * B.dim(3);
unsigned n = B.dim(1);
unsigned lOut = B.dim(0);
unsigned nOut = B.dim(1);
if(transB) {
std::swap(l, n);
std::swap(lOut, nOut);
}
assert(k == l);
unsigned lda = A.dim(1);
unsigned ldb = B.dim(1);
unsigned ldc = transB ? B.dim(0) * B.dim(2) * B.dim(3) : B.dim(1);
unsigned dim2 = A.dim(2);
if (transB) {
// for GetAlignedSourceContext()
assert(A.dim(2) == 1 && A.dim(3) == 1);
C.NewSize(B.dim(2), nOut, 1, 1);
}
else {
C.NewSize(mOut, nOut, A.dim(2) * B.dim(2), A.dim(3) * B.dim(3));
}
/*
cerr << "C=" << C.Debug(0) << endl;
cerr << "A=" << A.Debug(0) << endl;
cerr << "B=" << B.Debug(0) << endl;
cerr << "transB=" << transB << endl;
cerr << m << " " << n << " " << k << endl;
cerr << lda << " " << ldb << " " << ldc << endl;
cerr << endl;
*/
bool transA = false;
cublasOperation_t opA = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t opB = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
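// cuBLAS works in column-major order; swapping the operand order (B first, A second) makes the
// routine compute C^T in column-major layout, which is exactly the row-major product C = A * B.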
HANDLE_ERROR_CUBLAS(cublasSgemm(handle, opB, opA,
n, m, k,
&alpha,
B.data(), ldb,
A.data(), lda,
&beta,
C.data(), ldc));
PAUSE_TIMER("Prod");
return C;
}
Tensor& Prod(Tensor& C, const Tensor& A, const Tensor& B,
bool transB) {
//std::cerr << "1C=" << C.Debug() << std::endl;
//std::cerr << "1A=" << A.Debug() << std::endl;
//std::cerr << "1B=" << B.Debug() << std::endl;
Tensor &ret = Prod(CublasHandler::GetHandle(), C, A, B, transB);
//std::cerr << "2C=" << C.Debug() << std::endl;
return ret;
}
__global__ void gSoftMax(TensorWrapper<float> out,
const VectorWrapper<unsigned> batchIdsWrap,
const VectorWrapper<unsigned> sentenceLengthsWrap)
{
extern __shared__ float _share[];
unsigned numHypos = out.dim(0);
unsigned maxLength = out.dim(1);
int hypoInd = blockIdx.x;
int origSrcPos = threadIdx.x;
while (hypoInd < numHypos) {
unsigned batch = batchIdsWrap[hypoInd];
unsigned length = sentenceLengthsWrap[batch];
VectorWrapper<float> _max(_share, blockDim.x);
if (origSrcPos < length) {
_max[origSrcPos] = out(hypoInd, origSrcPos);
}
else {
_max[origSrcPos] = LOWEST_FLOAT;
}
for (int tid = 0; tid < length; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < length) {
float value = out(hypoInd, srcPos);
int batch = batchIdsWrap[hypoInd];
value *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0;
if (value > _max[origSrcPos]) {
_max[origSrcPos] = value;
}
}
}
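// Tree reduction over shared memory: halve the number of active threads each step to obtain the row maximum in _max[0].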
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
if(_max[origSrcPos + skip] > _max[origSrcPos])
_max[origSrcPos] = _max[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, blockDim.x);
_sum[origSrcPos] = 0.0f;
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos) = __expf(out(hypoInd, srcPos) - max);
out(hypoInd, srcPos) *= srcPos < sentenceLengthsWrap[batch] ? 1 : 0; // sentencesMappingWrap(srcPos, batch, 0, 0);
_sum[origSrcPos] += out(hypoInd, srcPos);
}
}
__syncthreads();
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (origSrcPos < (len >> 1)) {
_sum[origSrcPos] += _sum[origSrcPos + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < maxLength; tid += blockDim.x) {
int srcPos = tid + origSrcPos;
if (srcPos < maxLength) {
out(hypoInd, srcPos) /= _sum[0];
}
}
__syncthreads();
hypoInd += gridDim.x;
}
}
Tensor& Softmax(Tensor& Out,
const mblas::Vector<unsigned>& batchIds,
const mblas::Vector<unsigned> &sentenceLengths)
{
unsigned numHypos = Out.dim(0);
unsigned maxLength = Out.dim(1);
TensorWrapper<float> outWrap(Out);
const VectorWrapper<unsigned> batchIdsWrap(batchIds);
const VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
int blocks = std::min(MAX_BLOCKS, (int)numHypos);
int threads = std::min(MAX_THREADS, (int)maxLength);
int shared = sizeof(float) * threads;
/*
std::cerr << "Out=" << Out.Debug(2) << std::endl;
std::cerr << "batchIds=" << batchIds.Debug(1) << std::endl;
std::cerr << "sentenceLengths=" << sentenceLengths.Debug(1) << std::endl;
std::cerr << "blocks=" << blocks << std::endl;
std::cerr << "threads=" << threads << std::endl;
*/
gSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(outWrap, batchIdsWrap, sentenceLengthsWrap);
HANDLE_ERROR(cudaGetLastError());
/*
std::cerr << "Out=" << Out.Debug(2) << std::endl;
std::cerr << std::endl;
*/
return Out;
}
__global__ void gLogSoftMax(TensorWrapper<float> out, unsigned shareSize)
{
extern __shared__ float _share[];
unsigned rows = out.dim(0);
unsigned cols = out.dim(1);
int rowIdx = blockIdx.x;
while (rowIdx < rows) {
//float* _max = _share;
VectorWrapper<float> _max(_share, shareSize);
_max[threadIdx.x] = out(rowIdx, threadIdx.x);
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
const float &val = out(rowIdx, id);
if (val > _max[threadIdx.x]) {
_max[threadIdx.x] = val;
}
}
}
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x])
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
//float* _sum = _share;// + blockDim.x;
VectorWrapper<float> _sum(_share, shareSize);
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = exp(row[id] - max);
float &val = out(rowIdx, id);
val = __expf(val - max);
_sum[threadIdx.x] += val;
}
}
len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
//row[id] = log(row[id]/_sum[0]);
float &val = out(rowIdx, id);
val = __logf(val /_sum[0]);
}
}
__syncthreads();
rowIdx += gridDim.x;
}
}
Tensor& LogSoftmax(Tensor& Out)
{
TensorWrapper<float> outWrap(Out);
int blocks = std::min(MAX_BLOCKS, (int)Out.dim(0));
int threads = std::min(MAX_THREADS, (int)Out.dim(1));
int shared = sizeof(float) * threads;
gLogSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(Out, threads);
HANDLE_ERROR(cudaGetLastError());
return Out;
}
__global__ void gSetColumn(TensorWrapper<float> in, int noColumn, float value) {
int n_rows = in.dim(0);
int rowNumber = threadIdx.x + blockDim.x * blockIdx.x;
if (rowNumber < n_rows) {
in(rowNumber, noColumn) = value;
}
}
void SetColumn(Tensor& In, int noColumn, float value) {
int nRows = In.dim(0);
int nBlocks = nRows / MAX_THREADS + ((nRows % MAX_THREADS == 0) ? 0 : 1);
int nThreads = std::min(MAX_THREADS, nRows);
TensorWrapper<float> inWrap(In);
gSetColumn<<<nBlocks, nThreads, 0, mblas::CudaStreamHandler::GetStream()>>>
(inWrap, noColumn, value);
HANDLE_ERROR(cudaGetLastError());
}
__global__ void gFill(TensorWrapper<float> in, float val) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < in.size()) {
in[index] = val;
}
}
void Fill(Tensor& In, float value) {
unsigned size = In.size();
if (value) {
int nThreads = std::min(MAX_THREADS, (int)size);
int nBlocks = (size / nThreads) + ((size % nThreads == 0) ? 0 : 1);
TensorWrapper<float> inWrap(In);
gFill<<<nBlocks, nThreads, 0, CudaStreamHandler::GetStream()>>>
(inWrap, value);
HANDLE_ERROR(cudaGetLastError());
}
else {
HANDLE_ERROR(cudaMemsetAsync(In.data(), 0, size * sizeof(float), CudaStreamHandler::GetStream()));
}
}
__global__
void gMapMatrix(TensorWrapper<float> in,
const VectorWrapper<unsigned> sentenceLengthsWrap,
int i)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < in.size()) {
int numCols = in.dim(1);
int batchIdx = tid / numCols;
int col = tid % numCols;
//in[tid] *= mappingWrap(i, batchIdx, 0, 0);
in(batchIdx, col) *= (i < sentenceLengthsWrap[batchIdx] ? 1 : 0);
}
}
void MapMatrix(Tensor& state,
const mblas::Vector<unsigned> &sentenceLengths,
unsigned i)
{
// blank out rows in the state matrix where the word position i does not exist
// mapping is a concatenated array of 1s & 0s, one per word position per sentence in the batch, saying whether that word exists or not.
int batchSize = state.dim(0);
int stateLength = state.dim(1);
int numThreads = std::min((int)state.size(), MAX_THREADS);
int numBlocks = (state.size() / numThreads) + ((state.size() % numThreads == 0) ? 0 : 1);
TensorWrapper<float> stateWrap(state);
VectorWrapper<unsigned> sentenceLengthsWrap(sentenceLengths);
gMapMatrix<<<numBlocks, numThreads, 0, CudaStreamHandler::GetStream()>>>
(stateWrap, sentenceLengthsWrap, i);
HANDLE_ERROR(cudaGetLastError());
/*
cerr << "nBlocks=" << numBlocks << endl;
cerr << "nThreads=" << numThreads << endl;
cerr << "stateWrap=" << stateWrap.Debug() << endl;
cerr << "mapping=" << Debug(mapping, 2) << endl;
cerr << "i=" << i << endl;
cerr << std::endl;
HANDLE_ERROR(cudaDeviceSynchronize());
*/
}
__device__ unsigned getIndex(const dim3 &dim, const dim3 &val)
{
unsigned ret = dim.x * val.x + dim.y * val.y + dim.z * val.z;
return ret;
}
__global__ void gLNormalization(TensorWrapper<float> out,
const TensorWrapper<float> in,
const TensorWrapper<float> alphaWrap,
const TensorWrapper<float> betaWrap,
float eps=0.00001)
{
extern __shared__ float _share[];
//printf("blockDim.x=%d gridDim.x=%d \n", blockDim.x, gridDim.x);
// blockDim.x=512 gridDim.x=1
int cols = in.dim(1);
assert(blockIdx.x < in.dim(0));
assert(blockIdx.y < in.dim(2));
assert(blockIdx.z < in.dim(3));
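// Per-thread partial sums live in the upper half of the shared buffer; the same region is reused further down for the squared-deviation sums.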
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if (id < cols) {
_sum[threadIdx.x] += in(blockIdx.x, id, blockIdx.y, blockIdx.z);
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = in(blockIdx.x, id, blockIdx.y, blockIdx.z) - mean;
out(blockIdx.x, id, blockIdx.y, blockIdx.z) = ex;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for (int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float &val = out(blockIdx.x, id, blockIdx.y, blockIdx.z);
if (betaWrap.size()) {
val = alphaWrap[id] * (val / sigma) + betaWrap[id];
} else {
val = alphaWrap[id] * (val / sigma);
}
}
}
}
void Normalization(Tensor &out,
const Tensor &in,
const Tensor &alpha,
const Tensor *beta,
float eps)
{
assert(in.dim(0) < MAX_BLOCKS);
assert(in.dim(2) < MAX_BLOCKS);
assert(in.dim(3) < MAX_BLOCKS);
//out.Reshape(in.dim(0), in.dim(1), in.dim(2), in.dim(3));
int numThreads = std::min((unsigned) in.dim(1), (unsigned) MAX_THREADS);
dim3 numBlocks(in.dim(0), in.dim(2), in.dim(3));
int shared = numThreads * sizeof(float) * 2;
TensorWrapper<float> outWrap(out);
const TensorWrapper<float> inWrap(in);
const TensorWrapper<float> alphaWrap(alpha);
TensorWrapper<float> *betaWrap = beta ? new TensorWrapper<float>(*beta) : new TensorWrapper<float>();
gLNormalization<<<numBlocks, numThreads, shared, CudaStreamHandler::GetStream()>>>
(outWrap, inWrap, alphaWrap, *betaWrap, eps);
HANDLE_ERROR(cudaGetLastError());
/*
//std::cerr << "nBlocks=" << numBlocks << std::endl;
std::cerr << "nThreads=" << numThreads << std::endl;
std::cerr << "outWrap=" << outWrap.Debug() << std::endl;
std::cerr << "inWrap=" << inWrap.Debug() << std::endl;
std::cerr << "alphaWrap=" << alphaWrap.Debug() << std::endl;
std::cerr << "betaWrap=" << betaWrap->Debug() << std::endl;
std::cerr << std::endl;
HANDLE_ERROR(cudaDeviceSynchronize());
*/
delete betaWrap;
}
void Normalization(Tensor& out, const Tensor& in, const Tensor& alpha, const Tensor& beta,
float eps)
{
Normalization(out, in, alpha, &beta, eps);
}
void Normalization(Tensor& out, const Tensor& in, const Tensor& alpha, float eps)
{
Normalization(out, in, alpha, nullptr, eps);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
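// gBeamSizeInit runs single-threaded and builds three lookup tables used by the fused
// softmax/n-best kernels below:
//   hypo2BeamSize[h]  - beam size of the batch entry that hypothesis (row) h belongs to
//   hypo2Candidate[h] - offset of hypothesis h's block of beamSize slots in the flat
//                       candidate array (nBestCandidates)
//   batch2Hypo[b]     - for the b-th non-empty batch entry: its batch index on the first
//                       step, or the index of its first hypothesis on later steps
// Batch entries whose beam size is 0 are skipped entirely.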
__global__
void gBeamSizeInit(VectorWrapper<unsigned> hypo2BeamSizeWrap,
VectorWrapper<unsigned> batch2HypoWrap,
VectorWrapper<unsigned> hypo2CandidateWrap,
bool isFirst,
unsigned beamSizeSum,
const VectorWrapper<unsigned> beamSizesWrap)
{
unsigned hypoInd = 0;
unsigned candidateInd = 0;
unsigned a = 0, b = 0;
//printf("beamSizesWrap.size()=%u \n", beamSizesWrap.size());
for (unsigned batchInd = 0; batchInd < beamSizesWrap.size(); ++batchInd) {
unsigned beamSize = beamSizesWrap[batchInd];
/*
printf("batchInd=%u ", batchInd);
printf("beamSize=%u ", beamSize);
printf("a=%u ", a);
printf("b=%u \n", b);
*/
if (beamSize) {
if (isFirst) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = batchInd;
++b;
candidateInd += beamSize;
}
else {
for (unsigned j = 0; j < beamSize; ++j) {
assert(a < hypo2BeamSizeWrap.size());
assert(a < hypo2CandidateWrap.size());
hypo2BeamSizeWrap[a] = beamSize;
hypo2CandidateWrap[a] = candidateInd;
++a;
candidateInd += beamSize;
}
assert(b < batch2HypoWrap.size());
batch2HypoWrap[b] = hypoInd;
++b;
}
hypoInd += beamSize;
}
}
}
__device__
float GetMaxScore(const TensorWrapper<NthOutBatch> &nBestMatrix)
{
float ret = LOWEST_FLOAT;
for (unsigned i = 0; i < nBestMatrix.dim(1); ++i) {
const NthOutBatch &curr = nBestMatrix[i];
if (curr.score > ret) {
ret = curr.score;
}
}
return ret;
}
__device__
void AddElement(float &minScore,
unsigned &i,
VectorWrapper<NthOutBatch> &vec,
bool forbidUNK,
unsigned vocabInd,
const NthOutBatch &ele)
{
const float score = ele.score;
if (forbidUNK && vocabInd == UNK_ID) {
vec[i].score = LOWEST_FLOAT;
minScore = LOWEST_FLOAT;
}
else {
vec[i] = ele;
if (score < minScore) {
minScore = score;
}
++i;
}
}
__device__
void MergeElement(float &minScore,
VectorWrapper<NthOutBatch> &vec,
unsigned arrSize,
const NthOutBatch &ele)
{
float newMinScore = HIGHEST_FLOAT;
bool found = false;
for (unsigned i = 0; i < arrSize; ++i) {
NthOutBatch &currEle = vec[i];
if (!found && minScore == currEle.score) {
currEle = ele;
found = true;
}
// update min score
if (currEle.score < newMinScore) {
newMinScore = currEle.score;
}
}
minScore = newMinScore;
}
__device__
void MergeElement(float &minScore,
VectorWrapper<NthOutBatch> &vec,
unsigned arrSize,
const NthOutBatch &ele,
bool forbidUNK,
unsigned vocabInd)
{
if (forbidUNK && vocabInd == UNK_ID) {
// do nothing
}
else if (ele.score > minScore) {
// replace element with min score
MergeElement(minScore, vec, arrSize, ele);
/*
printf("arrInd=%d ind=%d vocabId=%d \n",
arrInd,
_max[threadIdx.x].ind,
_max[threadIdx.x].vocabId);
*/
}
}
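// AddElement and MergeElement together maintain a fixed-size n-best list: minScore tracks
// the smallest score currently stored in vec, and a new candidate only enters the list
// (replacing the current minimum) when its score exceeds minScore.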
__device__
void MaxAndSum(float &max, float &tot, const float &val)
{
if (val > max) {
float delta = max - val; // oldMax - newMax: rescales the running sum below. TODO see LogSoftmaxFn
tot *= __expf(delta);
max = val;
tot += 1; // exp(val - max) = exp(0) = 1
}
else {
tot += __expf(val - max);
}
}
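// MaxAndSum maintains a running softmax normalizer: after each call the invariant
//   tot == sum over all values seen so far of exp(value - max)
// holds, with max being the running maximum. When a new maximum arrives, the old total is
// rescaled by exp(oldMax - newMax) before the new term (exp(0) == 1) is added.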
__device__
void NBestAndMaxAndSum(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
TensorWrapper<NthOutBatch> &nBestMatrix,
VectorWrapper<float> &max,
VectorWrapper<float> &sum,
const TensorWrapper<float> &in,
const TensorWrapper<float> &b4Wrap,
const unsigned hypoInd,
const unsigned maxBeamSize,
const bool forbidUNK,
const VectorWrapper<unsigned> &hypo2BeamSizeWrap,
const VectorWrapper<unsigned> &hypo2CandidateWrap,
bool requireProb)
{
assert(max.size() == blockDim.x);
assert(sum.size() == blockDim.x);
VectorWrapper<NthOutBatch> row = nBestMatrix.Row(threadIdx.x);
unsigned vocabSize = in.dim(1);
assert(hypoInd < hypo2BeamSizeWrap.size());
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
float minScore = HIGHEST_FLOAT;
// init
unsigned vocabInd = threadIdx.x;
max[threadIdx.x] = LOWEST_FLOAT;
sum[threadIdx.x] = 0.0f;
unsigned i = 0;
while (vocabInd < vocabSize && i < beamSize) {
const float score = in(hypoInd, vocabInd) + b4Wrap(0, vocabInd);
unsigned arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
AddElement(minScore, i, row, forbidUNK, vocabInd, ele);
// max & sum
if (requireProb) {
MaxAndSum(max[threadIdx.x], sum[threadIdx.x], score);
}
vocabInd += blockDim.x;
}
// MAIN LOOP
while (vocabInd < vocabSize) {
const float score = in(hypoInd, vocabInd) + b4Wrap(0, vocabInd);
unsigned arrInd = hypoInd * vocabSize + vocabInd;
NthOutBatch ele(arrInd, score, hypoInd, vocabInd);
MergeElement(minScore, row, beamSize, ele, forbidUNK, vocabInd);
// max & sum
MaxAndSum(max[threadIdx.x], sum[threadIdx.x], score);
vocabInd += blockDim.x;
} // while (vocabInd < vocabSize) {
// merge nbest from different threads
int len = blockDim.x;
while (len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if (threadIdx.x < (len >> 1)) {
for (unsigned i = 0; i < beamSize; ++i) {
const NthOutBatch &ele = nBestMatrix(threadIdx.x + skip, i);
if (ele.score > minScore) {
MergeElement(minScore, row, beamSize, ele);
}
}
}
len = (len + 1) >> 1;
}
__syncthreads();
if (threadIdx.x == 0) {
// copy to output array
assert(hypoInd < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
const NthOutBatch &curr = nBestMatrix(0, i);
//printf("vocabInd=%u \n", best.vocabInd);
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestCandidatesWrap[candidateInd + i] = curr;
}
}
// top score and sum
if (requireProb) {
unsigned size = max.size();
unsigned len = (size + 1) >> 1;
//printf("size=%i %i \n", size, len);
unsigned ind = threadIdx.x;
float &max0 = max[ind];
float &sum0 = sum[ind];
while (len) {
__syncthreads();
//printf("size=%i %i \n", size, len);
unsigned otherInd = ind + len;
if (otherInd < size) {
const float &maxOther = max[otherInd];
const float &sumOther = sum[otherInd];
if (max0 > maxOther) {
float delta = maxOther - max0;
sum0 = sum0 + __expf(delta) * sumOther;
}
else {
float delta = max0 - maxOther;
sum0 = __expf(delta) * sum0 + sumOther;
max0 = maxOther;
}
}
size = len;
len = (len > 1) ? (len + 1) >> 1 : 0;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__device__
void dLogSoftMax(VectorWrapper<NthOutBatch> &nBestCandidatesWrap,
const TensorWrapper<float> &in,
const TensorWrapper<float> &b4Wrap,
const unsigned hypoInd,
const unsigned maxBeamSize,
const float topScore,
const float sumExp,
const VectorWrapper<unsigned> &hypo2BeamSizeWrap,
const VectorWrapper<unsigned> &hypo2CandidateWrap)
{
unsigned vocabSize = in.dim(1);
// apply partition and log to top
if (threadIdx.x == 0) {
//printf("sum=%f \n", sum[0]);
//printf("val=%f %f \n", in(rowIdx, ele.vocabId, 0, 0), val);
// nbest
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
unsigned startPos = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
NthOutBatch &ele = nBestCandidatesWrap[startPos + i];
float &val = ele.score;
val = __expf(val - topScore);
val = __logf(val /sumExp);
}
}
}
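// Note: the exp followed by log above is algebraically the standard stable log-softmax,
//   score = (score - topScore) - log(sumExp),
// where sumExp was accumulated as sum_j exp(score_j - topScore) in NBestAndMaxAndSum.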
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gLogSoftMax(VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const TensorWrapper<float> in,
const TensorWrapper<float> b4Wrap,
unsigned maxBeamSize,
bool forbidUNK,
const VectorWrapper<unsigned> hypo2BeamSizeWrap,
const VectorWrapper<unsigned> hypo2CandidateWrap,
bool requireProb)
{
extern __shared__ char _sharePtr[];
void *ptrOffset = _sharePtr;
VectorWrapper<float> max((float*)ptrOffset, blockDim.x);
ptrOffset = _sharePtr + sizeof(float) * blockDim.x;
VectorWrapper<float> sum((float*)ptrOffset, blockDim.x);
ptrOffset = _sharePtr + 2 * sizeof(float) * blockDim.x;
TensorWrapper<NthOutBatch> nBestMatrix((NthOutBatch*)ptrOffset, blockDim.x, maxBeamSize, 1, 1);
unsigned hypos = in.dim(0);
unsigned vocabSize = in.dim(1);
unsigned hypoInd = blockIdx.x; // index of previous hypo
while (hypoInd < hypos) {
NBestAndMaxAndSum(nBestCandidatesWrap,
nBestMatrix,
max,
sum,
in,
b4Wrap,
hypoInd,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap,
requireProb);
__syncthreads();
if (requireProb) {
const float topScore = max[0];
const float sumExp = sum[0];
dLogSoftMax(nBestCandidatesWrap,
in,
b4Wrap,
hypoInd,
maxBeamSize,
topScore,
sumExp,
hypo2BeamSizeWrap,
hypo2CandidateWrap);
}
__syncthreads();
hypoInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void gNBestPerBatch(VectorWrapper<NthOutBatch> nBestWrap,
VectorWrapper<NthOutBatch> nBestCandidatesWrap,
const TensorWrapper<float> in,
const VectorWrapper<float> costsWrap,
unsigned maxBeamSize,
bool forbidUNK,
bool isFirst,
const VectorWrapper<unsigned> hypo2BeamSizeWrap,
const VectorWrapper<unsigned> batch2HypoWrap,
const VectorWrapper<unsigned> hypo2CandidateWrap)
{
//unsigned rows = in.dim(0);
unsigned batchSize = batch2HypoWrap.size();
unsigned batchInd = blockIdx.x;
while (batchInd < batchSize) {
assert(batchInd < batch2HypoWrap.size());
assert(batchInd < hypo2BeamSizeWrap.size());
assert(batchInd < nBestWrap.size());
unsigned hypoInd = batch2HypoWrap[batchInd];
unsigned beamSize = hypo2BeamSizeWrap[hypoInd];
assert(beamSize);
unsigned nextHypoInd;
if (isFirst) {
nextHypoInd = batchInd * beamSize;
}
else {
nextHypoInd = hypoInd;
}
// candidates from the 1st hypo
float minScore = HIGHEST_FLOAT;
assert(hypoInd < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd];
for (unsigned i = 0; i < beamSize; ++i) {
float prevCost;
if (isFirst) {
assert(batchInd < costsWrap.size());
prevCost = costsWrap[batchInd];
}
else {
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert(hypoInd < costsWrap.size());
prevCost = costsWrap[hypoInd];
}
assert((nextHypoInd + i) < nBestWrap.size());
assert(candidateInd + i < nBestCandidatesWrap.size());
nBestWrap[nextHypoInd + i] = nBestCandidatesWrap[candidateInd + i];
float &score = nBestWrap[nextHypoInd + i].score;
score += prevCost;
if (score < minScore) {
minScore = score;
}
}
// candidates from other previous hypos
if (!isFirst) {
assert(nextHypoInd < nBestWrap.size());
VectorWrapper<NthOutBatch> offset = nBestWrap.Offset(nextHypoInd);
for (unsigned hypoOffset = 1; hypoOffset < beamSize; ++hypoOffset) {
//printf("hypoInd=%d \n", (hypoInd + hypoOffset));
//printf("prevHypoInd=%, candidateInd=%d \n", prevHypoInd, candidateInd);
assert((hypoInd + hypoOffset) < costsWrap.size());
float prevCost = costsWrap[hypoInd + hypoOffset];
assert((hypoInd + hypoOffset) < hypo2CandidateWrap.size());
unsigned candidateInd = hypo2CandidateWrap[hypoInd + hypoOffset];
for (unsigned candidateOffset = 0; candidateOffset < beamSize; ++candidateOffset) {
assert((candidateInd + candidateOffset) < nBestCandidatesWrap.size());
NthOutBatch &candidate = nBestCandidatesWrap[candidateInd + candidateOffset];
candidate.score += prevCost;
assert(nextHypoInd < nBestWrap.size());
NthOutBatch *arr = &nBestWrap[nextHypoInd];
if (candidate.score > minScore) {
MergeElement(minScore, offset, beamSize, candidate);
}
}
}
}
batchInd += gridDim.x;
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
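// LogSoftmaxAndNBest fuses the output-layer softmax with beam-search n-best extraction:
// 1. gBeamSizeInit builds the hypothesis/batch/candidate index maps on the device.
// 2. gLogSoftMax finds, per hypothesis, the top-beamSize candidates of (in + b4) and, when
//    requireProb is set, the running max/sum needed to turn those scores into log-probabilities.
// 3. gNBestPerBatch adds each hypothesis's accumulated cost and merges the candidates of all
//    hypotheses belonging to the same batch entry, keeping the best beamSize in nBest.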
void LogSoftmaxAndNBest(mblas::Vector<NthOutBatch> &nBest,
const Tensor& in,
const Tensor& b4,
const mblas::Vector<float> &costs,
bool forbidUNK,
unsigned maxBeamSize,
const std::vector<unsigned>& beamSizes,
unsigned beamSizeSum,
bool isFirst,
bool requireProb)
{
//BEGIN_TIMER("LogSoftmax excl kernels");
bool safe = (maxBeamSize * MAX_THREADS) < in.dim(1);
if (!safe) {
cerr << "The target vocab size looks too small for the fused softmax function. If you experience a crash, add '--use-fused-softmax false' when running amun" << endl;
}
//cerr << "in=" << in.Debug(0) << endl;
//cerr << "beamSizes=" << beamSizes.size() << endl;
// create beam size vectors on GPU but exclude empty beams
unsigned batchSize = 0;
unsigned candidateInd = 0;
for (unsigned batchInd = 0; batchInd < beamSizes.size(); ++batchInd) {
unsigned beamSize = beamSizes[batchInd];
//cerr << "(" << beamSize << "," << hypoInd << ") ";
if (beamSize) {
if (isFirst) {
candidateInd += beamSize;
}
else {
candidateInd += beamSize * beamSize;
}
++batchSize;
}
}
mblas::Vector<unsigned> d_beamSizes(beamSizes);
mblas::Vector<unsigned> hypo2BeamSize(in.dim(0));
mblas::Vector<unsigned> hypo2Candidate(in.dim(0));
mblas::Vector<unsigned> batch2Hypo(batchSize);
mblas::Vector<NthOutBatch> nBestCandidates(candidateInd);
/*
cerr << "in=" << in.Debug(1) << endl;
cerr << "beamSizes=" << beamSizes.size() << endl;
cerr << "beamSizeSum=" << beamSizeSum << endl;
cerr << "batchSize=" << batchSize << endl;
cerr << "candidateInd=" << candidateInd << endl;
cerr << "hypo2BeamSize=" << hypo2BeamSize.Debug(0) << endl;
cerr << "hypo2Candidate=" << hypo2Candidate.Debug(0) << endl;
cerr << "batch2Hypo=" << batch2Hypo.Debug(0) << endl;
cerr << "nBest=" << nBest.Debug(0) << endl;
cerr << "nBestCandidates=" << nBestCandidates.Debug(0) << endl;
cerr << endl;
*/
TensorWrapper<float> inWrap(in);
TensorWrapper<float> b4Wrap(b4);
VectorWrapper<unsigned> hypo2BeamSizeWrap(hypo2BeamSize);
VectorWrapper<unsigned> hypo2CandidateWrap(hypo2Candidate);
VectorWrapper<unsigned> batch2HypoWrap(batch2Hypo);
VectorWrapper<NthOutBatch> nBestWrap(nBest);
VectorWrapper<NthOutBatch> nBestCandidatesWrap(nBestCandidates);
VectorWrapper<float> costsWrap(costs);
VectorWrapper<unsigned> beamSizesWrap(d_beamSizes);
//PAUSE_TIMER("LogSoftmax excl kernels");
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//BEGIN_TIMER("gBeamSizeInit");
gBeamSizeInit<<<1, 1, 0, CudaStreamHandler::GetStream()>>>
(hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap,
isFirst,
beamSizeSum,
beamSizesWrap
);
HANDLE_ERROR(cudaGetLastError());
//PAUSE_TIMER("gBeamSizeInit");
/*
cerr << "hypo2BeamSize=" << Debug(hypo2BeamSize, 2) << endl;
cerr << "hypo2Candidate=" << Debug(hypo2Candidate, 2) << endl;
cerr << "batch2Hypo=" << Debug(batch2Hypo, 2) << endl;
cerr << endl;
*/
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
unsigned blocks = std::min((unsigned) MAX_BLOCKS, in.dim(0));
unsigned threads = std::min((unsigned)MAX_THREADS, in.dim(1));
unsigned shared = sizeof(NthOutBatch) * threads * maxBeamSize
+ 2 * sizeof(float) * threads;
//BEGIN_TIMER("gLogSoftMax");
gLogSoftMax<<<blocks, threads, shared, CudaStreamHandler::GetStream()>>>
(nBestCandidatesWrap,
inWrap,
b4Wrap,
maxBeamSize,
forbidUNK,
hypo2BeamSizeWrap,
hypo2CandidateWrap,
requireProb);
HANDLE_ERROR(cudaGetLastError());
//PAUSE_TIMER("gLogSoftMax");
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
blocks = std::min((unsigned) MAX_BLOCKS, batchSize);
//BEGIN_TIMER("gNBestPerBatch");
gNBestPerBatch<<<blocks, 1, 0, CudaStreamHandler::GetStream()>>>
(nBestWrap,
nBestCandidatesWrap,
inWrap,
costsWrap,
maxBeamSize,
forbidUNK,
isFirst,
hypo2BeamSizeWrap,
batch2HypoWrap,
hypo2CandidateWrap);
HANDLE_ERROR(cudaGetLastError());
//PAUSE_TIMER("gNBestPerBatch");
//HANDLE_ERROR( cudaStreamSynchronize(mblas::CudaStreamHandler::GetStream()));
//cerr << "3costs=" << Debug(costs, 0) << endl;
}
void TestMemCpy()
{
using namespace std;
cerr << "Starting" << endl;
unsigned NUM = 10;
vector<float> h_vec1(NUM);
for (unsigned i = 0; i < NUM; ++i) {
h_vec1[i] = i * 3;
}
TestMemCpy(NUM, h_vec1.data());
cerr << "Finished" << endl;
}
} // namespace mblas
} // namespace GPU
} // namespace amunmt
|
ec878350e1c49cb8f52eaad3c56e88b9decde3cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <random>
#include <cuq.h>
#define THREADS_PER_BLOCK 64
using namespace std;
__global__
void vectorAdd(float * a, float * b, float * c, int iterations) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int j = 0; j < iterations; j++)
c[i] = a[i] + b[i];
}
int done[8];
int SIZE = 16384;
//Define a GPU task by inheriting from GPUTask.
//The task holds the data for the computation; the computation itself is implemented in the doWork() method.
class VectorAddTask: public GPUTask {
public:
//constructor can be arbitrary
VectorAddTask(float * _h_a, float * _h_b, float * _h_c, int _iterations, int _id) {
id = _id;
iterations = _iterations;
h_a = _h_a;
h_b = _h_b;
h_c = _h_c;
}
//All GPU calculations should be done in this method
void doWork() {
int device;
hipGetDevice(&device);
cout << "Device: " << device << ", running task: " << id << ", iterations: " << iterations << endl;
hipMalloc(&d_a, SIZE * sizeof(float));
hipMemcpy(d_a, h_a, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&d_b, SIZE * sizeof(float));
hipMemcpy(d_b, h_b, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&d_c, SIZE * sizeof(float));
int blocksCount = (int)ceil((float)SIZE / THREADS_PER_BLOCK);
for (int i = 0; i < 1024; i++) {
hipLaunchKernelGGL(( vectorAdd), dim3(blocksCount),dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, iterations);
}
hipMemcpy(h_c, d_c, SIZE * sizeof(float), hipMemcpyDeviceToHost);
cout << "Device: " << device << ", task: " << id << " finished" << ", iterations: " << iterations << endl;
//increase number of finished tasks
done[device] += 1;
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
//Destructor is empty in this case
~VectorAddTask() {
}
private:
int iterations;
int id;
float * h_a;
float * h_b;
float * h_c;
float * d_a;
float * d_b;
float * d_c;
};
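// Illustrative note (not part of the original demo): the hip* runtime calls in doWork()
// above are not error-checked. A minimal check macro, assuming only the standard HIP
// runtime API (hipError_t, hipSuccess, hipGetErrorString), could look like:
//
//   #define HIP_CHECK(call) do { \
//     hipError_t err_ = (call); \
//     if (err_ != hipSuccess) { \
//       std::cerr << "HIP error: " << hipGetErrorString(err_) << std::endl; \
//       std::exit(EXIT_FAILURE); \
//     } } while (0)
//
//   Usage: HIP_CHECK(hipMalloc(&d_a, SIZE * sizeof(float)));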
int pow(int a, int b) {
int res = 1;
for (int i = 0; i < b; i++)
res *= a;
return res;
}
int main(int argc, char *argv[]) {
int devicesCount;
if (argc <= 1)
devicesCount = 1;
else
devicesCount = std::stoi(argv[1]);
cout << "cuq demo on " << devicesCount << " devices..." << endl;
int tasksCount = 4096;
float * h_a = new float[SIZE];
float * h_b = new float[SIZE];
float * h_c = new float[SIZE];
for (int i = 0; i < SIZE; i++) {
h_a[i] = i;
h_b[i] = i + 100500;
}
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_int_distribution<int> dist (0,12);
GPUTask ** tasks = new GPUTask *[tasksCount];
for (int i = 0; i < tasksCount; i++) {
//randomize the number of iterations for each task
int randSize = SIZE / pow(2, dist(mt));
tasks[i] = new VectorAddTask(h_a, h_b, h_c, randSize, i);
}
for (int i = 0; i < 8; i++) {
done[i] = 0;
}
processTasks(tasks, tasksCount, devicesCount, /*resetDeviceAfterFinish =*/ true, /*deleteTasksAutomatically =*/ true);
//number of finished tasks per device should be more or less equal
for (int i = 0; i < devicesCount; i++) {
cout << "Device: " << i << ", done: " << done[i] << endl;
}
delete[] tasks;
delete[] h_a;
delete[] h_b;
delete[] h_c;
return 0;
}
| ec878350e1c49cb8f52eaad3c56e88b9decde3cf.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <random>
#include <cuq.h>
#define THREADS_PER_BLOCK 64
using namespace std;
__global__
void vectorAdd(float * a, float * b, float * c, int iterations) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (int j = 0; j < iterations; j++)
c[i] = a[i] + b[i];
}
int done[8];
int SIZE = 16384;
//Define a GPU task by inheriting from GPUTask.
//The task holds the data for the computation; the computation itself is implemented in the doWork() method.
class VectorAddTask: public GPUTask {
public:
//constructor can be arbitrary
VectorAddTask(float * _h_a, float * _h_b, float * _h_c, int _iterations, int _id) {
id = _id;
iterations = _iterations;
h_a = _h_a;
h_b = _h_b;
h_c = _h_c;
}
//All GPU calculations should be done in this method
void doWork() {
int device;
cudaGetDevice(&device);
cout << "Device: " << device << ", running task: " << id << ", iterations: " << iterations << endl;
cudaMalloc(&d_a, SIZE * sizeof(float));
cudaMemcpy(d_a, h_a, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&d_b, SIZE * sizeof(float));
cudaMemcpy(d_b, h_b, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&d_c, SIZE * sizeof(float));
int blocksCount = (int)ceil((float)SIZE / THREADS_PER_BLOCK);
for (int i = 0; i < 1024; i++) {
vectorAdd<<<blocksCount,THREADS_PER_BLOCK>>>(d_a, d_b, d_c, iterations);
}
cudaMemcpy(h_c, d_c, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
cout << "Device: " << device << ", task: " << id << " finished" << ", iterations: " << iterations << endl;
//increase number of finished tasks
done[device] += 1;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
//Destructor is empty in this case
~VectorAddTask() {
}
private:
int iterations;
int id;
float * h_a;
float * h_b;
float * h_c;
float * d_a;
float * d_b;
float * d_c;
};
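// Illustrative note (not part of the original demo): the cuda* runtime calls in doWork()
// above are not error-checked. A minimal check macro, assuming only the standard CUDA
// runtime API (cudaError_t, cudaSuccess, cudaGetErrorString), could look like:
//
//   #define CUDA_CHECK(call) do { \
//     cudaError_t err_ = (call); \
//     if (err_ != cudaSuccess) { \
//       std::cerr << "CUDA error: " << cudaGetErrorString(err_) << std::endl; \
//       std::exit(EXIT_FAILURE); \
//     } } while (0)
//
//   Usage: CUDA_CHECK(cudaMalloc(&d_a, SIZE * sizeof(float)));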
int pow(int a, int b) {
int res = 1;
for (int i = 0; i < b; i++)
res *= a;
return res;
}
int main(int argc, char *argv[]) {
int devicesCount;
if (argc <= 1)
devicesCount = 1;
else
devicesCount = std::stoi(argv[1]);
cout << "cuq demo on " << devicesCount << " devices..." << endl;
int tasksCount = 4096;
float * h_a = new float[SIZE];
float * h_b = new float[SIZE];
float * h_c = new float[SIZE];
for (int i = 0; i < SIZE; i++) {
h_a[i] = i;
h_b[i] = i + 100500;
}
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_int_distribution<int> dist (0,12);
GPUTask ** tasks = new GPUTask *[tasksCount];
for (int i = 0; i < tasksCount; i++) {
//randomize the number of iterations for each task
int randSize = SIZE / pow(2, dist(mt));
tasks[i] = new VectorAddTask(h_a, h_b, h_c, randSize, i);
}
for (int i = 0; i < 8; i++) {
done[i] = 0;
}
processTasks(tasks, tasksCount, devicesCount, /*resetDeviceAfterFinish =*/ true, /*deleteTasksAutomatically =*/ true);
//number of finished tasks per device should be more or less equal
for (int i = 0; i < devicesCount; i++) {
cout << "Device: " << i << ", done: " << done[i] << endl;
}
delete[] tasks;
delete[] h_a;
delete[] h_b;
delete[] h_c;
return 0;
}
|
d93e55fd5b10b081baf93f5018424050c19e9c58.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <vector>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "cuPrintf_hip.cuh"
#include "cuPrintf.hip"
float k1,k2,k3,k4,k5;
#define LULESH_SHOW_PROGRESS 1
//#define DP_PROFILING
//#define DP_PROFILING_KERNEL1
//#define DP_PROFILING_KERNEL2
//#define DP_PROFILING_KERNEL3
//#define DP_PROFILING_KERNEL4
//#define DP_PROFILING_KERNEL5
/*
#define AFTER_KERNEL1
#define AFTER_KERNEL2
#define AFTER_KERNEL3
#define AFTER_KERNEL4
#define AFTER_KERNEL5_2
*/
#ifdef DP_PROFILING
/*timestep to solution*/
int its = 0;
#endif
//#ifdef AFTER_KERNEL1
texture<int,1,hipReadModeElementType> tex_nodeElemCount;
texture<int,1,hipReadModeElementType> tex_nodeElemCornerList;
texture<float,1,hipReadModeElementType> tex_fx_elem;
texture<float,1,hipReadModeElementType> tex_fy_elem;
texture<float,1,hipReadModeElementType> tex_fz_elem;
//#endif
#ifdef AFTER_KERNEL3
texture<float,1,hipReadModeElementType> tex_determ;
texture<float,1,hipReadModeElementType> tex_x8n;
texture<float,1,hipReadModeElementType> tex_y8n;
texture<float,1,hipReadModeElementType> tex_z8n;
texture<float,1,hipReadModeElementType> tex_dvdx;
texture<float,1,hipReadModeElementType> tex_dvdy;
texture<float,1,hipReadModeElementType> tex_dvdz;
texture<int,1,hipReadModeElementType> tex3_nodelist;
texture<float,1,hipReadModeElementType> tex_ss;
texture<float,1,hipReadModeElementType> tex_elemMass;
texture<float,1,hipReadModeElementType> tex_xd;
texture<float,1,hipReadModeElementType> tex_yd;
texture<float,1,hipReadModeElementType> tex_zd;
#endif
#ifdef AFTER_KERNEL4
texture<float,1,hipReadModeElementType> tex_x;
texture<float,1,hipReadModeElementType> tex_y;
texture<float,1,hipReadModeElementType> tex_z;
texture<float,1,hipReadModeElementType> tex_volo;
texture<float,1,hipReadModeElementType> tex_v;
texture<int,1,hipReadModeElementType> tex_nodelist;
#endif
#ifdef AFTER_KERNEL5
texture<int,1,hipReadModeElementType> tex_matElemlist;
texture<int,1,hipReadModeElementType> tex_elemBC;
texture<int,1,hipReadModeElementType> tex_lxim;
texture<int,1,hipReadModeElementType> tex_lxip;
texture<int,1,hipReadModeElementType> tex_letam;
texture<int,1,hipReadModeElementType> tex_letap;
texture<int,1,hipReadModeElementType> tex_lzetam;
texture<int,1,hipReadModeElementType> tex_lzetap;
#endif
#ifdef AFTER_KERNEL5_2
texture<float,1,hipReadModeElementType> tex_delv_xi;
texture<float,1,hipReadModeElementType> tex_delv_eta;
texture<float,1,hipReadModeElementType> tex_delv_zeta;
#endif
enum { VolumeError = -1, QStopError = -2 } ;
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
/* Could also support fixed point and interval arithmetic types */
typedef float real4 ;
typedef double real8 ;
typedef long double real10 ; /* 10 bytes on x86 */
typedef int Index_t ; /* array subscript and loop index */
typedef real4 Real_t ; /* floating point representation */
typedef int Int_t ; /* integer representation */
__host__ __device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__host__ __device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__host__ inline real10 SQRT(real10 arg) { return sqrtl(arg) ; }
__host__ __device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__host__ __device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__host__ inline real10 CBRT(real10 arg) { return cbrtl(arg) ; }
__host__ __device__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__host__ __device__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__host__ inline real10 FABS(real10 arg) { return fabsl(arg) ; }
__host__ __device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__host__ __device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
__host__ inline real10 FMAX(real10 arg1,real10 arg2) { return fmaxl(arg1,arg2) ; }
#define CUDA_SAFE_CALL( call) do { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA(call) CUDA_SAFE_CALL(call)
#ifdef CUDA_SYNC_ALL
#define CUDA_DEBUGSYNC CUDA(hipDeviceSynchronize())
#else
#define CUDA_DEBUGSYNC
#endif
#define BLOCKSIZE 256
/* Given a number of bytes, nbytes, and a byte alignment, align (e.g., 2,
* 4, 8, or 16), return the smallest multiple of align that is greater
* than or equal to nbytes.
*/
#define PAD_DIV(nbytes, align) (((nbytes) + (align) - 1) / (align))
#define PAD(nbytes, align) (PAD_DIV((nbytes),(align)) * (align))
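/* For example, PAD_DIV(13, 8) == 2 and PAD(13, 8) == 16, while PAD(16, 8) == 16. */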
/* More general version of reduceInPlacePOT (this works for arbitrary
* numThreadsPerBlock <= 1024). Again, conditionals on
* numThreadsPerBlock are evaluated at compile time.
*/
template <class T, int numThreadsPerBlock>
__device__ void
reduceSum(T *sresult, const int threadID)
{
/* If number of threads is not a power of two, first add the ones
after the last power of two into the beginning. At most one of
these conditionals will be true for a given NPOT block size. */
if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024)
{
__syncthreads();
if (threadID < numThreadsPerBlock-512)
sresult[threadID] += sresult[threadID + 512];
}
if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512)
{
__syncthreads();
if (threadID < numThreadsPerBlock-256)
sresult[threadID] += sresult[threadID + 256];
}
if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256)
{
__syncthreads();
if (threadID < numThreadsPerBlock-128)
sresult[threadID] += sresult[threadID + 128];
}
if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128)
{
__syncthreads();
if (threadID < numThreadsPerBlock-64)
sresult[threadID] += sresult[threadID + 64];
}
if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64)
{
__syncthreads();
if (threadID < numThreadsPerBlock-32)
sresult[threadID] += sresult[threadID + 32];
}
if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32)
{
__syncthreads();
if (threadID < numThreadsPerBlock-16)
sresult[threadID] += sresult[threadID + 16];
}
if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16)
{
__syncthreads();
if (threadID < numThreadsPerBlock-8)
sresult[threadID] += sresult[threadID + 8];
}
if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8)
{
__syncthreads();
if (threadID < numThreadsPerBlock-4)
sresult[threadID] += sresult[threadID + 4];
}
if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4)
{
__syncthreads();
if (threadID < numThreadsPerBlock-2)
sresult[threadID] += sresult[threadID + 2];
}
if (numThreadsPerBlock >= 512) {
__syncthreads();
if (threadID < 256)
sresult[threadID] += sresult[threadID + 256];
}
if (numThreadsPerBlock >= 256) {
__syncthreads();
if (threadID < 128)
sresult[threadID] += sresult[threadID + 128];
}
if (numThreadsPerBlock >= 128) {
__syncthreads();
if (threadID < 64)
sresult[threadID] += sresult[threadID + 64];
}
__syncthreads();
#ifdef _DEVICEEMU
if (numThreadsPerBlock >= 64) {
__syncthreads();
if (threadID < 32)
sresult[threadID] += sresult[threadID + 32];
}
if (numThreadsPerBlock >= 32) {
__syncthreads();
if (threadID < 16)
sresult[threadID] += sresult[threadID + 16];
}
if (numThreadsPerBlock >= 16) {
__syncthreads();
if (threadID < 8)
sresult[threadID] += sresult[threadID + 8];
}
if (numThreadsPerBlock >= 8) {
__syncthreads();
if (threadID < 4)
sresult[threadID] += sresult[threadID + 4];
}
if (numThreadsPerBlock >= 4) {
__syncthreads();
if (threadID < 2)
sresult[threadID] += sresult[threadID + 2];
}
if (numThreadsPerBlock >= 2) {
__syncthreads();
if (threadID < 1)
sresult[threadID] += sresult[threadID + 1];
}
#else
if (threadID < 32) {
volatile T *vol = sresult;
if (numThreadsPerBlock >= 64) vol[threadID] += vol[threadID + 32];
if (numThreadsPerBlock >= 32) vol[threadID] += vol[threadID + 16];
if (numThreadsPerBlock >= 16) vol[threadID] += vol[threadID + 8];
if (numThreadsPerBlock >= 8) vol[threadID] += vol[threadID + 4];
if (numThreadsPerBlock >= 4) vol[threadID] += vol[threadID + 2];
if (numThreadsPerBlock >= 2) vol[threadID] += vol[threadID + 1];
}
#endif
__syncthreads();
}
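/* Illustrative usage sketch (an assumption, not code from the original source): a kernel
* stores one partial value per thread in shared memory and then calls reduceSum; after
* the call, sresult[0] holds the block-wide total.
*
*   __global__ void blockSum_example(const Real_t *in, Real_t *out, int n) {
*     __shared__ Real_t s[BLOCKSIZE];
*     int i = blockIdx.x * blockDim.x + threadIdx.x;
*     s[threadIdx.x] = (i < n) ? in[i] : Real_t(0.);
*     __syncthreads();
*     reduceSum<Real_t, BLOCKSIZE>(s, threadIdx.x);
*     if (threadIdx.x == 0) out[blockIdx.x] = s[0];
*   }
*/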
#define MINEQ(a,b) (a)=(((a)<(b))?(a):(b))
template <class T, int numThreadsPerBlock>
__device__ void
reduceMin(T *sresult, const int threadID)
{
/* If number of threads is not a power of two, first add the ones
after the last power of two into the beginning. At most one of
these conditionals will be true for a given NPOT block size. */
if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024)
{
__syncthreads();
if (threadID < numThreadsPerBlock-512)
MINEQ(sresult[threadID],sresult[threadID + 512]);
}
if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512)
{
__syncthreads();
if (threadID < numThreadsPerBlock-256)
MINEQ(sresult[threadID],sresult[threadID + 256]);
}
if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256)
{
__syncthreads();
if (threadID < numThreadsPerBlock-128)
MINEQ(sresult[threadID],sresult[threadID + 128]);
}
if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128)
{
__syncthreads();
if (threadID < numThreadsPerBlock-64)
MINEQ(sresult[threadID],sresult[threadID + 64]);
}
if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64)
{
__syncthreads();
if (threadID < numThreadsPerBlock-32)
MINEQ(sresult[threadID],sresult[threadID + 32]);
}
if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32)
{
__syncthreads();
if (threadID < numThreadsPerBlock-16)
MINEQ(sresult[threadID],sresult[threadID + 16]);
}
if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16)
{
__syncthreads();
if (threadID < numThreadsPerBlock-8)
MINEQ(sresult[threadID],sresult[threadID + 8]);
}
if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8)
{
__syncthreads();
if (threadID < numThreadsPerBlock-4)
MINEQ(sresult[threadID],sresult[threadID + 4]);
}
if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4)
{
__syncthreads();
if (threadID < numThreadsPerBlock-2)
MINEQ(sresult[threadID],sresult[threadID + 2]);
}
if (numThreadsPerBlock >= 512) {
__syncthreads();
if (threadID < 256)
MINEQ(sresult[threadID],sresult[threadID + 256]);
}
if (numThreadsPerBlock >= 256) {
__syncthreads();
if (threadID < 128)
MINEQ(sresult[threadID],sresult[threadID + 128]);
}
if (numThreadsPerBlock >= 128) {
__syncthreads();
if (threadID < 64)
MINEQ(sresult[threadID],sresult[threadID + 64]);
}
__syncthreads();
#ifdef _DEVICEEMU
if (numThreadsPerBlock >= 64) {
__syncthreads();
if (threadID < 32)
MINEQ(sresult[threadID],sresult[threadID + 32]);
}
if (numThreadsPerBlock >= 32) {
__syncthreads();
if (threadID < 16)
MINEQ(sresult[threadID],sresult[threadID + 16]);
}
if (numThreadsPerBlock >= 16) {
__syncthreads();
if (threadID < 8)
MINEQ(sresult[threadID],sresult[threadID + 8]);
}
if (numThreadsPerBlock >= 8) {
__syncthreads();
if (threadID < 4)
MINEQ(sresult[threadID],sresult[threadID + 4]);
}
if (numThreadsPerBlock >= 4) {
__syncthreads();
if (threadID < 2)
MINEQ(sresult[threadID],sresult[threadID + 2]);
}
if (numThreadsPerBlock >= 2) {
__syncthreads();
if (threadID < 1)
MINEQ(sresult[threadID],sresult[threadID + 1]);
}
#else
if (threadID < 32) {
volatile T *vol = sresult;
if (numThreadsPerBlock >= 64) MINEQ(vol[threadID],vol[threadID + 32]);
if (numThreadsPerBlock >= 32) MINEQ(vol[threadID],vol[threadID + 16]);
if (numThreadsPerBlock >= 16) MINEQ(vol[threadID],vol[threadID + 8]);
if (numThreadsPerBlock >= 8) MINEQ(vol[threadID],vol[threadID + 4]);
if (numThreadsPerBlock >= 4) MINEQ(vol[threadID],vol[threadID + 2]);
if (numThreadsPerBlock >= 2) MINEQ(vol[threadID],vol[threadID + 1]);
}
#endif
__syncthreads();
}
void cuda_init()
{
int deviceCount, dev;
hipDeviceProp_t cuda_deviceProp;
char *s;
CUDA( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
if (s=getenv("CUDA_DEVICE")) dev=atoi(s);
else dev=0;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
CUDA( hipGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 1) {
fprintf(stderr, "cuda_init(): device %d does not support CUDA.\n", dev);
exit(1);
}
fprintf(stderr, "setting CUDA device %d\n",dev);
CUDA( hipSetDevice(dev) );
}
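/* The device is chosen via the CUDA_DEVICE environment variable, e.g. (illustrative
* invocation) CUDA_DEVICE=1 ./lulesh; when it is unset, device 0 is used. */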
/************************************************************/
/* Allow for flexible data layout experiments by separating */
/* array interface from underlying implementation. */
/************************************************************/
struct Mesh {
/* This first implementation allows for runnable code */
/* and is not meant to be optimal. Final implementation */
/* should separate declaration and allocation phases */
/* so that allocation can be scheduled in a cache conscious */
/* manner. */
friend struct MeshGPU;
public:
/**************/
/* Allocation */
/**************/
void AllocateNodalPersistent(size_t size)
{
m_x.resize(size) ;
m_y.resize(size) ;
m_z.resize(size) ;
m_xd.resize(size, Real_t(0.)) ;
m_yd.resize(size, Real_t(0.)) ;
m_zd.resize(size, Real_t(0.)) ;
m_xdd.resize(size, Real_t(0.)) ;
m_ydd.resize(size, Real_t(0.)) ;
m_zdd.resize(size, Real_t(0.)) ;
m_fx.resize(size) ;
m_fy.resize(size) ;
m_fz.resize(size) ;
m_nodalMass.resize(size, Real_t(0.)) ;
}
void AllocateElemPersistent(size_t size)
{
m_matElemlist.resize(size) ;
m_nodelist.resize(8*size) ;
m_lxim.resize(size) ;
m_lxip.resize(size) ;
m_letam.resize(size) ;
m_letap.resize(size) ;
m_lzetam.resize(size) ;
m_lzetap.resize(size) ;
m_elemBC.resize(size) ;
m_e.resize(size, Real_t(0.)) ;
m_p.resize(size, Real_t(0.)) ;
m_q.resize(size) ;
m_ql.resize(size) ;
m_qq.resize(size) ;
m_v.resize(size, 1.0) ;
m_volo.resize(size) ;
m_delv.resize(size) ;
m_vdov.resize(size) ;
m_arealg.resize(size) ;
m_ss.resize(size) ;
m_elemMass.resize(size) ;
}
/* Temporaries should not be initialized in bulk but */
/* this is a runnable placeholder for now */
void AllocateElemTemporary(size_t size)
{
m_dxx.resize(size) ;
m_dyy.resize(size) ;
m_dzz.resize(size) ;
m_delv_xi.resize(size) ;
m_delv_eta.resize(size) ;
m_delv_zeta.resize(size) ;
m_delx_xi.resize(size) ;
m_delx_eta.resize(size) ;
m_delx_zeta.resize(size) ;
m_vnew.resize(size) ;
}
void AllocateNodesets(size_t size)
{
m_symmX.resize(size) ;
m_symmY.resize(size) ;
m_symmZ.resize(size) ;
}
void AllocateNodeElemIndexes()
{
Index_t i,j,nidx;
/* set up node-centered indexing of elements */
m_nodeElemCount.resize(m_numNode);
for (i=0;i<m_numNode;i++) m_nodeElemCount[i]=0;
m_nodeElemCornerList.resize(m_numNode*8);
for (i=0;i<m_numElem;i++) {
for (j=0;j<8;j++) {
nidx=nodelist(i,j);
m_nodeElemCornerList[nidx+m_numNode*m_nodeElemCount[nidx]++] = i+m_numElem*j;
if (m_nodeElemCount[nidx]>8) {
fprintf(stderr, "Node degree is higher than 8!\n");
exit(1);
}
}
}
}
/**********/
/* Access */
/**********/
/* Node-centered */
Real_t& x(Index_t idx) { return m_x[idx] ; }
Real_t& y(Index_t idx) { return m_y[idx] ; }
Real_t& z(Index_t idx) { return m_z[idx] ; }
Real_t& xd(Index_t idx) { return m_xd[idx] ; }
Real_t& yd(Index_t idx) { return m_yd[idx] ; }
Real_t& zd(Index_t idx) { return m_zd[idx] ; }
Real_t& xdd(Index_t idx) { return m_xdd[idx] ; }
Real_t& ydd(Index_t idx) { return m_ydd[idx] ; }
Real_t& zdd(Index_t idx) { return m_zdd[idx] ; }
Real_t& fx(Index_t idx) { return m_fx[idx] ; }
Real_t& fy(Index_t idx) { return m_fy[idx] ; }
Real_t& fz(Index_t idx) { return m_fz[idx] ; }
Real_t& nodalMass(Index_t idx) { return m_nodalMass[idx] ; }
Index_t& symmX(Index_t idx) { return m_symmX[idx] ; }
Index_t& symmY(Index_t idx) { return m_symmY[idx] ; }
Index_t& symmZ(Index_t idx) { return m_symmZ[idx] ; }
/* Element-centered */
Index_t& matElemlist(Index_t idx) { return m_matElemlist[idx] ; }
Index_t& nodelist(Index_t idx,Index_t nidx) { return m_nodelist[idx+nidx*m_numElem] ; }
Index_t& lxim(Index_t idx) { return m_lxim[idx] ; }
Index_t& lxip(Index_t idx) { return m_lxip[idx] ; }
Index_t& letam(Index_t idx) { return m_letam[idx] ; }
Index_t& letap(Index_t idx) { return m_letap[idx] ; }
Index_t& lzetam(Index_t idx) { return m_lzetam[idx] ; }
Index_t& lzetap(Index_t idx) { return m_lzetap[idx] ; }
Int_t& elemBC(Index_t idx) { return m_elemBC[idx] ; }
Real_t& dxx(Index_t idx) { return m_dxx[idx] ; }
Real_t& dyy(Index_t idx) { return m_dyy[idx] ; }
Real_t& dzz(Index_t idx) { return m_dzz[idx] ; }
Real_t& delv_xi(Index_t idx) { return m_delv_xi[idx] ; }
Real_t& delv_eta(Index_t idx) { return m_delv_eta[idx] ; }
Real_t& delv_zeta(Index_t idx) { return m_delv_zeta[idx] ; }
Real_t& delx_xi(Index_t idx) { return m_delx_xi[idx] ; }
Real_t& delx_eta(Index_t idx) { return m_delx_eta[idx] ; }
Real_t& delx_zeta(Index_t idx) { return m_delx_zeta[idx] ; }
Real_t& e(Index_t idx) { return m_e[idx] ; }
Real_t& p(Index_t idx) { return m_p[idx] ; }
Real_t& q(Index_t idx) { return m_q[idx] ; }
Real_t& ql(Index_t idx) { return m_ql[idx] ; }
Real_t& qq(Index_t idx) { return m_qq[idx] ; }
Real_t& v(Index_t idx) { return m_v[idx] ; }
Real_t& volo(Index_t idx) { return m_volo[idx] ; }
Real_t& vnew(Index_t idx) { return m_vnew[idx] ; }
Real_t& delv(Index_t idx) { return m_delv[idx] ; }
Real_t& vdov(Index_t idx) { return m_vdov[idx] ; }
Real_t& arealg(Index_t idx) { return m_arealg[idx] ; }
Real_t& ss(Index_t idx) { return m_ss[idx] ; }
Real_t& elemMass(Index_t idx) { return m_elemMass[idx] ; }
/* Params */
Real_t& dtfixed() { return m_dtfixed ; }
Real_t& time() { return m_time ; }
Real_t& deltatime() { return m_deltatime ; }
Real_t& deltatimemultlb() { return m_deltatimemultlb ; }
Real_t& deltatimemultub() { return m_deltatimemultub ; }
Real_t& stoptime() { return m_stoptime ; }
Real_t& u_cut() { return m_u_cut ; }
Real_t& hgcoef() { return m_hgcoef ; }
Real_t& qstop() { return m_qstop ; }
Real_t& monoq_max_slope() { return m_monoq_max_slope ; }
Real_t& monoq_limiter_mult() { return m_monoq_limiter_mult ; }
Real_t& e_cut() { return m_e_cut ; }
Real_t& p_cut() { return m_p_cut ; }
Real_t& ss4o3() { return m_ss4o3 ; }
Real_t& q_cut() { return m_q_cut ; }
Real_t& v_cut() { return m_v_cut ; }
Real_t& qlc_monoq() { return m_qlc_monoq ; }
Real_t& qqc_monoq() { return m_qqc_monoq ; }
Real_t& qqc() { return m_qqc ; }
Real_t& eosvmax() { return m_eosvmax ; }
Real_t& eosvmin() { return m_eosvmin ; }
Real_t& pmin() { return m_pmin ; }
Real_t& emin() { return m_emin ; }
Real_t& dvovmax() { return m_dvovmax ; }
Real_t& refdens() { return m_refdens ; }
Real_t& dtcourant() { return m_dtcourant ; }
Real_t& dthydro() { return m_dthydro ; }
Real_t& dtmax() { return m_dtmax ; }
Int_t& cycle() { return m_cycle ; }
Index_t& sizeX() { return m_sizeX ; }
Index_t& sizeY() { return m_sizeY ; }
Index_t& sizeZ() { return m_sizeZ ; }
Index_t& numElem() { return m_numElem ; }
Index_t& numNode() { return m_numNode ; }
//private:
/******************/
/* Implementation */
/******************/
/* Node-centered */
std::vector<Real_t> m_x ; /* coordinates */
std::vector<Real_t> m_y ;
std::vector<Real_t> m_z ;
std::vector<Real_t> m_xd ; /* velocities */
std::vector<Real_t> m_yd ;
std::vector<Real_t> m_zd ;
std::vector<Real_t> m_xdd ; /* accelerations */
std::vector<Real_t> m_ydd ;
std::vector<Real_t> m_zdd ;
std::vector<Real_t> m_fx ; /* forces */
std::vector<Real_t> m_fy ;
std::vector<Real_t> m_fz ;
std::vector<Real_t> m_nodalMass ; /* mass */
std::vector<Index_t> m_symmX ; /* symmetry plane nodesets */
std::vector<Index_t> m_symmY ;
std::vector<Index_t> m_symmZ ;
std::vector<Int_t> m_nodeElemCount ;
std::vector<Index_t> m_nodeElemCornerList ;
/* Element-centered */
std::vector<Index_t> m_matElemlist ; /* material indexset */
std::vector<Index_t> m_nodelist ; /* elemToNode connectivity */
std::vector<Index_t> m_lxim ; /* element connectivity across each face */
std::vector<Index_t> m_lxip ;
std::vector<Index_t> m_letam ;
std::vector<Index_t> m_letap ;
std::vector<Index_t> m_lzetam ;
std::vector<Index_t> m_lzetap ;
std::vector<Int_t> m_elemBC ; /* symmetry/free-surface flags for each elem face */
std::vector<Real_t> m_dxx ; /* principal strains -- temporary */
std::vector<Real_t> m_dyy ;
std::vector<Real_t> m_dzz ;
std::vector<Real_t> m_delv_xi ; /* velocity gradient -- temporary */
std::vector<Real_t> m_delv_eta ;
std::vector<Real_t> m_delv_zeta ;
std::vector<Real_t> m_delx_xi ; /* coordinate gradient -- temporary */
std::vector<Real_t> m_delx_eta ;
std::vector<Real_t> m_delx_zeta ;
std::vector<Real_t> m_e ; /* energy */
std::vector<Real_t> m_p ; /* pressure */
std::vector<Real_t> m_q ; /* q */
std::vector<Real_t> m_ql ; /* linear term for q */
std::vector<Real_t> m_qq ; /* quadratic term for q */
std::vector<Real_t> m_v ; /* relative volume */
std::vector<Real_t> m_volo ; /* reference volume */
std::vector<Real_t> m_vnew ; /* new relative volume -- temporary */
std::vector<Real_t> m_delv ; /* m_vnew - m_v */
std::vector<Real_t> m_vdov ; /* volume derivative over volume */
std::vector<Real_t> m_arealg ; /* characteristic length of an element */
std::vector<Real_t> m_ss ; /* "sound speed" */
std::vector<Real_t> m_elemMass ; /* mass */
/* Parameters */
Real_t m_dtfixed ; /* fixed time increment */
Real_t m_time ; /* current time */
Real_t m_deltatime ; /* variable time increment */
Real_t m_deltatimemultlb ;
Real_t m_deltatimemultub ;
Real_t m_stoptime ; /* end time for simulation */
Real_t m_u_cut ; /* velocity tolerance */
Real_t m_hgcoef ; /* hourglass control */
Real_t m_qstop ; /* excessive q indicator */
Real_t m_monoq_max_slope ;
Real_t m_monoq_limiter_mult ;
Real_t m_e_cut ; /* energy tolerance */
Real_t m_p_cut ; /* pressure tolerance */
Real_t m_ss4o3 ;
Real_t m_q_cut ; /* q tolerance */
Real_t m_v_cut ; /* relative volume tolerance */
Real_t m_qlc_monoq ; /* linear term coef for q */
Real_t m_qqc_monoq ; /* quadratic term coef for q */
Real_t m_qqc ;
Real_t m_eosvmax ;
Real_t m_eosvmin ;
Real_t m_pmin ; /* pressure floor */
Real_t m_emin ; /* energy floor */
Real_t m_dvovmax ; /* maximum allowable volume change */
Real_t m_refdens ; /* reference density */
Real_t m_dtcourant ; /* courant constraint */
Real_t m_dthydro ; /* volume change constraint */
Real_t m_dtmax ; /* maximum allowable time increment */
Int_t m_cycle ; /* iteration count for simulation */
Index_t m_sizeX ; /* X,Y,Z extent of this block */
Index_t m_sizeY ;
Index_t m_sizeZ ;
Index_t m_numElem ; /* Elements/Nodes in this domain */
Index_t m_numNode ;
} mesh ;
template <typename T>
T *Allocate(size_t size)
{
return static_cast<T *>(malloc(sizeof(T)*size)) ;
}
template <typename T>
void Release(T **ptr)
{
if (*ptr != NULL) {
free(*ptr) ;
*ptr = NULL ;
}
}
#define GPU_STALE 0
#define CPU_STALE 1
#define ALL_FRESH 2
template<typename T>
void freshenGPU(std::vector<T>&cpu,T **gpu,int& stale) {
if (stale!=GPU_STALE) return;
if (!(*gpu)) {CUDA( hipMalloc(gpu,sizeof(T)*cpu.size()) );}
CUDA( hipMemcpy(*gpu,&cpu[0],sizeof(T)*cpu.size(),hipMemcpyHostToDevice) );
stale=ALL_FRESH;
}
template<typename T>
void freshenCPU(std::vector<T>&cpu,T *gpu,int& stale) {
if (stale!=CPU_STALE) return;
if (!gpu) {fprintf(stderr,"freshenCPU(): NULL GPU data!\n");exit(1);}
CUDA( hipMemcpy(&cpu[0],gpu,sizeof(T)*cpu.size(),hipMemcpyDeviceToHost) );
stale=ALL_FRESH;
}
// freshen helpers
#define FC(var) freshenCPU(mesh.m_ ## var , meshGPU.m_ ## var ,meshGPU.m_ ## var ## _stale ); // freshen CPU
#define FG(var) freshenGPU(mesh.m_ ## var , &meshGPU.m_ ## var ,meshGPU.m_ ## var ## _stale ); // freshen GPU
// stale helpers
#define SC(var) meshGPU.m_ ## var ## _stale = CPU_STALE; // stale CPU
#define SG(var) meshGPU.m_ ## var ## _stale = GPU_STALE; // stale GPU
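// Usage pattern (illustrative; "SomeKernel" is a hypothetical name): freshen a field on the
// side that is about to read it, and mark the other side stale after a write, e.g.
//   FG(p); FG(q);                      // copy p and q to the GPU if the GPU copies are stale
//   SomeKernel<<<grid,block>>>(...);   // kernel reads p,q and writes e
//   SC(e);                             // CPU copy of e is now stale; the next FC(e) copies it back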
struct MeshGPU {
Mesh *m_mesh;
/******************/
/* Implementation */
/******************/
/* Node-centered */
Real_t *m_x ; /* coordinates */
Real_t *m_y ;
Real_t *m_z ;
Real_t *m_xd ; /* velocities */
Real_t *m_yd ;
Real_t *m_zd ;
Real_t *m_xdd ; /* accelerations */
Real_t *m_ydd ;
Real_t *m_zdd ;
Real_t *m_fx ; /* forces */
Real_t *m_fy ;
Real_t *m_fz ;
Real_t *m_nodalMass ; /* mass */
Index_t *m_symmX ; /* symmetry plane nodesets */
Index_t *m_symmY ;
Index_t *m_symmZ ;
Int_t *m_nodeElemCount ;
Index_t *m_nodeElemCornerList ;
/* Element-centered */
Index_t * m_matElemlist ; /* material indexset */
Index_t * m_nodelist ; /* elemToNode connectivity */
Index_t * m_lxim ; /* element connectivity across each face */
Index_t * m_lxip ;
Index_t * m_letam ;
Index_t * m_letap ;
Index_t * m_lzetam ;
Index_t * m_lzetap ;
Int_t * m_elemBC ; /* symmetry/free-surface flags for each elem face */
Real_t *m_dxx ; /* principal strains -- temporary */
Real_t *m_dyy ;
Real_t *m_dzz ;
Real_t *m_delv_xi ; /* velocity gradient -- temporary */
Real_t *m_delv_eta ;
Real_t *m_delv_zeta ;
Real_t *m_delx_xi ; /* coordinate gradient -- temporary */
Real_t *m_delx_eta ;
Real_t *m_delx_zeta ;
Real_t *m_e ; /* energy */
Real_t *m_p ; /* pressure */
Real_t *m_q ; /* q */
Real_t *m_ql ; /* linear term for q */
Real_t *m_qq ; /* quadratic term for q */
Real_t *m_v ; /* relative volume */
Real_t *m_volo ; /* reference volume */
Real_t *m_vnew ; /* new relative volume -- temporary */
Real_t *m_delv ; /* m_vnew - m_v */
Real_t *m_vdov ; /* volume derivative over volume */
Real_t *m_arealg ; /* characteristic length of an element */
Real_t *m_ss ; /* "sound speed" */
Real_t *m_elemMass ; /* mass */
/* Stale flags */
int m_x_stale,m_y_stale,m_z_stale;
int m_xd_stale,m_yd_stale,m_zd_stale;
int m_xdd_stale,m_ydd_stale,m_zdd_stale;
int m_fx_stale,m_fy_stale,m_fz_stale;
int m_nodalMass_stale;
int m_symmX_stale,m_symmY_stale,m_symmZ_stale;
int m_nodeElemCount_stale,m_nodeElemCornerList_stale;
int m_matElemlist_stale,m_nodelist_stale;
int m_lxim_stale,m_lxip_stale,m_letam_stale,m_letap_stale,m_lzetam_stale,m_lzetap_stale;
int m_elemBC_stale;
int m_dxx_stale,m_dyy_stale,m_dzz_stale;
int m_delv_xi_stale,m_delv_eta_stale,m_delv_zeta_stale;
int m_delx_xi_stale,m_delx_eta_stale,m_delx_zeta_stale;
int m_e_stale;
int m_p_stale,m_q_stale,m_ql_stale,m_qq_stale;
int m_v_stale,m_volo_stale,m_vnew_stale,m_delv_stale,m_vdov_stale;
int m_arealg_stale;
int m_ss_stale;
int m_elemMass_stale;
void init(Mesh *mesh) {
m_mesh=mesh;
m_x=m_y=m_z=NULL;
m_xd=m_yd=m_zd=NULL;
m_xdd=m_ydd=m_zdd=NULL;
m_fx=m_fy=m_fz=NULL;
m_nodalMass=NULL;
m_symmX=m_symmY=m_symmZ=NULL;
m_nodeElemCount=m_nodeElemCornerList=NULL;
m_matElemlist=m_nodelist=NULL;
m_lxim=m_lxip=m_letam=m_letap=m_lzetam=m_lzetap=NULL;
m_elemBC=NULL;
m_dxx=m_dyy=m_dzz=NULL;
m_delv_xi=m_delv_eta=m_delv_zeta=NULL;
m_delx_xi=m_delx_eta=m_delx_zeta=NULL;
m_e=NULL;
m_p=m_q=m_ql=m_qq=NULL;
m_v=m_volo=m_vnew=m_delv=m_vdov=NULL;
m_arealg=NULL;
m_ss=NULL;
m_elemMass=NULL;
m_x_stale=m_y_stale=m_z_stale=
m_xd_stale=m_yd_stale=m_zd_stale=
m_xdd_stale=m_ydd_stale=m_zdd_stale=
m_fx_stale=m_fy_stale=m_fz_stale=
m_nodalMass_stale=
m_symmX_stale=m_symmY_stale=m_symmZ_stale=
m_nodeElemCount_stale=m_nodeElemCornerList_stale=
m_matElemlist_stale=m_nodelist_stale=
m_lxim_stale=m_lxip_stale=m_letam_stale=m_letap_stale=m_lzetam_stale=m_lzetap_stale=
m_elemBC_stale=
m_dxx_stale=m_dyy_stale=m_dzz_stale=
m_delv_xi_stale=m_delv_eta_stale=m_delv_zeta_stale=
m_delx_xi_stale=m_delx_eta_stale=m_delx_zeta_stale=
m_e_stale=
m_p_stale=m_q_stale=m_ql_stale=m_qq_stale=
m_v_stale=m_volo_stale=m_vnew_stale=m_delv_stale=m_vdov_stale=
m_arealg_stale=
m_ss_stale=
m_elemMass_stale=
GPU_STALE;
}
void freshenGPU() {
#define F(var) ::freshenGPU(m_mesh->m_ ## var , &m_ ## var ,m_ ## var ## _stale);
F(x); F(y); F(z);
F(xd); F(yd); F(zd);
F(xdd); F(ydd); F(zdd);
F(fx); F(fy); F(fz);
F(nodalMass);
F(symmX); F(symmY); F(symmZ);
F(nodeElemCount); F(nodeElemCornerList);
F(matElemlist); F(nodelist);
F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap);
F(elemBC);
F(dxx); F(dyy); F(dzz);
F(delv_xi); F(delv_eta); F(delv_zeta);
F(delx_xi); F(delx_eta); F(delx_zeta);
F(e);
F(p); F(q); F(ql); F(qq);
F(v); F(volo); F(vnew); F(delv); F(vdov);
F(arealg);
F(ss);
F(elemMass);
#undef F
}
void freshenCPU() {
#define F(var) ::freshenCPU(m_mesh->m_ ## var , m_ ## var ,m_ ## var ## _stale);
F(x); F(y); F(z);
F(xd); F(yd); F(zd);
F(xdd); F(ydd); F(zdd);
F(fx); F(fy); F(fz);
F(nodalMass);
F(symmX); F(symmY); F(symmZ);
F(nodeElemCount); F(nodeElemCornerList);
F(matElemlist); F(nodelist);
F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap);
F(elemBC);
F(dxx); F(dyy); F(dzz);
F(delv_xi); F(delv_eta); F(delv_zeta);
F(delx_xi); F(delx_eta); F(delx_zeta);
F(e);
F(p); F(q); F(ql); F(qq);
F(v); F(volo); F(vnew); F(delv); F(vdov);
F(arealg);
F(ss);
F(elemMass);
#undef F
}
} meshGPU;
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x003
#define XI_M_SYMM 0x001
#define XI_M_FREE 0x002
#define XI_P 0x00c
#define XI_P_SYMM 0x004
#define XI_P_FREE 0x008
#define ETA_M 0x030
#define ETA_M_SYMM 0x010
#define ETA_M_FREE 0x020
#define ETA_P 0x0c0
#define ETA_P_SYMM 0x040
#define ETA_P_FREE 0x080
#define ZETA_M 0x300
#define ZETA_M_SYMM 0x100
#define ZETA_M_FREE 0x200
#define ZETA_P 0xc00
#define ZETA_P_SYMM 0x400
#define ZETA_P_FREE 0x800
static inline
void TimeIncrement()
{
Real_t targetdt = mesh.stoptime() - mesh.time() ;
if ((mesh.dtfixed() <= Real_t(0.0)) && (mesh.cycle() != Int_t(0))) {
Real_t ratio ;
Real_t olddt = mesh.deltatime() ;
/* This will require a reduction in parallel */
Real_t newdt = Real_t(1.0e+20) ;
if (mesh.dtcourant() < newdt) {
newdt = mesh.dtcourant() / Real_t(2.0) ;
}
if (mesh.dthydro() < newdt) {
newdt = mesh.dthydro() * Real_t(2.0) / Real_t(3.0) ;
}
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < mesh.deltatimemultlb()) {
newdt = olddt ;
}
else if (ratio > mesh.deltatimemultub()) {
newdt = olddt*mesh.deltatimemultub() ;
}
}
if (newdt > mesh.dtmax()) {
newdt = mesh.dtmax() ;
}
mesh.deltatime() = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > mesh.deltatime()) &&
(targetdt < (Real_t(4.0) * mesh.deltatime() / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * mesh.deltatime() / Real_t(3.0) ;
}
if (targetdt < mesh.deltatime()) {
mesh.deltatime() = targetdt ;
}
mesh.time() += mesh.deltatime() ;
++mesh.cycle() ;
}
__global__
void InitStressTermsForElems_kernel(
int numElem,Real_t *sigxx, Real_t *sigyy, Real_t *sigzz, Real_t *p, Real_t *q)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem)
sigxx[i] = sigyy[i] = sigzz[i] = - p[i] - q[i] ;
}
static inline
void InitStressTermsForElems_gpu(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(numElem,dimBlock.x),1,1);
hipFuncSetCacheConfig(InitStressTermsForElems_kernel,hipFuncCachePreferL1); // set as default for all kernels after this one
hipLaunchKernelGGL(( InitStressTermsForElems_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
numElem,sigxx,sigyy,sigzz,meshGPU.m_p,meshGPU.m_q);
CUDA_DEBUGSYNC;
}
static inline
void InitStressTermsForElems_cpu(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz)
{
//
// pull in the stresses appropriate to the hydro integration
//
for (Index_t i = 0 ; i < numElem ; ++i){
sigxx[i] = sigyy[i] = sigzz[i] = - mesh.p(i) - mesh.q(i) ;
}
}
static inline
void InitStressTermsForElems(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
int useCPU)
{
if (useCPU) {
FC(p); FC(q);
InitStressTermsForElems_cpu(numElem,sigxx,sigyy,sigzz);
}
else {
FG(p); FG(q);
InitStressTermsForElems_gpu(numElem,sigxx,sigyy,sigzz);
}
}
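// Jacobian/shape-function helper: b[0][*], b[1][*], b[2][*] receive the x-, y-
// and z-derivative (cofactor) terms for the eight hex nodes -- only nodes 0-3
// are computed directly, 4-7 follow by symmetry -- and *volume gets 8*det(J).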
__host__ __device__
static inline
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
__host__ __device__
static inline
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
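// Accumulate area-weighted node normals: each quad face contributes one quarter
// of the cross product of its two bisecting vectors to each of its four corners
// (SumElemFaceNormal above), summed over the six faces of the hex.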
__host__ __device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
__host__ __device__
static inline
void SumElemStressesToNodeForces( const Real_t B[][8],
const Real_t stress_xx,
const Real_t stress_yy,
const Real_t stress_zz,
Real_t* const fx,
Real_t* const fy,
Real_t* const fz,
int stride)
{
Real_t pfx0 = B[0][0] ; Real_t pfx1 = B[0][1] ;
Real_t pfx2 = B[0][2] ; Real_t pfx3 = B[0][3] ;
Real_t pfx4 = B[0][4] ; Real_t pfx5 = B[0][5] ;
Real_t pfx6 = B[0][6] ; Real_t pfx7 = B[0][7] ;
Real_t pfy0 = B[1][0] ; Real_t pfy1 = B[1][1] ;
Real_t pfy2 = B[1][2] ; Real_t pfy3 = B[1][3] ;
Real_t pfy4 = B[1][4] ; Real_t pfy5 = B[1][5] ;
Real_t pfy6 = B[1][6] ; Real_t pfy7 = B[1][7] ;
Real_t pfz0 = B[2][0] ; Real_t pfz1 = B[2][1] ;
Real_t pfz2 = B[2][2] ; Real_t pfz3 = B[2][3] ;
Real_t pfz4 = B[2][4] ; Real_t pfz5 = B[2][5] ;
Real_t pfz6 = B[2][6] ; Real_t pfz7 = B[2][7] ;
fx[0*stride] = -( stress_xx * pfx0 );
fx[1*stride] = -( stress_xx * pfx1 );
fx[2*stride] = -( stress_xx * pfx2 );
fx[3*stride] = -( stress_xx * pfx3 );
fx[4*stride] = -( stress_xx * pfx4 );
fx[5*stride] = -( stress_xx * pfx5 );
fx[6*stride] = -( stress_xx * pfx6 );
fx[7*stride] = -( stress_xx * pfx7 );
fy[0*stride] = -( stress_yy * pfy0 );
fy[1*stride] = -( stress_yy * pfy1 );
fy[2*stride] = -( stress_yy * pfy2 );
fy[3*stride] = -( stress_yy * pfy3 );
fy[4*stride] = -( stress_yy * pfy4 );
fy[5*stride] = -( stress_yy * pfy5 );
fy[6*stride] = -( stress_yy * pfy6 );
fy[7*stride] = -( stress_yy * pfy7 );
fz[0*stride] = -( stress_zz * pfz0 );
fz[1*stride] = -( stress_zz * pfz1 );
fz[2*stride] = -( stress_zz * pfz2 );
fz[3*stride] = -( stress_zz * pfz3 );
fz[4*stride] = -( stress_zz * pfz4 );
fz[5*stride] = -( stress_zz * pfz5 );
fz[6*stride] = -( stress_zz * pfz6 );
fz[7*stride] = -( stress_zz * pfz7 );
}
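// The stride argument lets the same routine serve both layouts: the CPU path
// writes 8 contiguous per-node forces (stride 1), while the GPU kernel below
// passes stride = numElem so corner forces land in a structure-of-arrays layout
// fx_elem[k + lnode*numElem].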
__global__
void IntegrateStressForElems_kernel( Index_t numElem, Index_t *nodelist,
Real_t *x, Real_t *y, Real_t *z,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ)
{
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
x_local[lnode] = x[gnode];
y_local[lnode] = y[gnode];
z_local[lnode] = z[gnode];
}
/* Volume calculation involves extra work for numerical consistency. */
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
&fx_elem[k], &fy_elem[k], &fz_elem[k], numElem ) ;
}
}
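// Gather phase: one thread per node walks nodeElemCornerList (nodeElemCount[i]
// entries per node) and sums the per-corner forces written by the element
// kernel above, so no atomics are needed when assembling nodal forces.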
__global__
#ifdef DP_PROFILING_KERNEL1
void AddNodeForcesFromElems_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
volatile Real_t *fx_elem, volatile Real_t *fy_elem,volatile Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node, int its)
#else
void AddNodeForcesFromElems_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node)
#endif
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
//#ifdef AFTER_KERNEL1
// Int_t count=tex1Dfetch(tex_nodeElemCount,i);
//#else
Int_t count=nodeElemCount[i];
//#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCount
//number of array ele is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
//cuPrintf("sizeof(int)=%d, sizeof(real8)=%d\n", sizeof(int), sizeof(real8));
//4 bytes for int and 8 bytes for real8
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++) {
//#ifdef AFTER_KERNEL1
// Index_t elem=tex1Dfetch(tex_nodeElemCornerList,i+numNode*j);
//#else
Index_t elem=nodeElemCornerList[i+numNode*j];
//#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCornerList: the num of array elem is m_numNode*8
// which is edgeNodes^3*8=(45+1)^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i+numNode*j);
}
#endif
#ifdef AFTER_KERNEL1
fx+=tex1Dfetch(tex_fx_elem,elem); fy+=tex1Dfetch(tex_fy_elem,elem); fz+=tex1Dfetch(tex_fz_elem,elem);
#else
fx+=fx_elem[elem]; fy+=fy_elem[elem]; fz+=fz_elem[elem];
#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem, fy_elem, fz_elem
//number of array elem is numElem*8=edgeElems^3*8=45^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
}
fx_node[i]=fx; fy_node[i]=fy; fz_node[i]=fz;
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_node, fy_node, fz_node
//number of array elem is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
__global__
#ifdef DP_PROFILING_KERNEL2
void AddNodeForcesFromElems2_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node, int its)
#else
void AddNodeForcesFromElems2_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node)
#endif
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
//#ifdef AFTER_KERNEL1
// Int_t count=tex1Dfetch(tex_nodeElemCount,i);
//#else
Int_t count=nodeElemCount[i];
//#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCount
//number of array ele is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
//cuPrintf("sizeof(int)=%d, sizeof(real8)=%d\n", sizeof(int), sizeof(real8));
//4 bytes for int and 8 bytes for real8
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++) {
//#ifdef AFTER_KERNEL2
// Index_t elem=tex1Dfetch(tex_nodeElemCornerList,i+numNode*j);
//#else
Index_t elem=nodeElemCornerList[i+numNode*j];
//#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCornerList: the num of array elem is m_numNode*8
// which is edgeNodes^3*8=(45+1)^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i+numNode*j);
}
#endif
#ifdef AFTER_KERNEL2
fx+=tex1Dfetch(tex_fx_elem,elem); fy+=tex1Dfetch(tex_fy_elem,elem); fz+=tex1Dfetch(tex_fz_elem,elem);
#else
fx+=fx_elem[elem]; fy+=fy_elem[elem]; fz+=fz_elem[elem];
#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem, fy_elem, fz_elem
//number of array elem is numElem*8=edgeElems^3*8=45^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
}
fx_node[i]+=fx; fy_node[i]+=fy; fz_node[i]+=fz;
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_node, fy_node, fz_node
//number of array elem is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
static inline
void IntegrateStressForElems_gpu( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol)
{
Real_t *fx_elem,*fy_elem,*fz_elem;
CUDA( hipMalloc(&fx_elem,numElem*8*sizeof(Real_t)) );
CUDA( hipMalloc(&fy_elem,numElem*8*sizeof(Real_t)) );
CUDA( hipMalloc(&fz_elem,numElem*8*sizeof(Real_t)) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
hipLaunchKernelGGL(( IntegrateStressForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z,
fx_elem, fy_elem, fz_elem, sigxx, sigyy, sigzz, determ);
CUDA_DEBUGSYNC;
dimGrid=dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
#ifdef AFTER_KERNEL1
hipBindTexture(0,tex_nodeElemCount,meshGPU.m_nodeElemCount,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_nodeElemCornerList,meshGPU.m_nodeElemCornerList,8*mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_fx_elem,fx_elem,8*mesh.numElem()*sizeof(float));
hipBindTexture(0,tex_fy_elem,fy_elem,8*mesh.numElem()*sizeof(float));
hipBindTexture(0,tex_fz_elem,fz_elem,8*mesh.numElem()*sizeof(float));
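// NOTE: the byte counts above assume Real_t is a 4-byte float; if Real_t is
// compiled as double, these bindings (and the float texture fetches) would
// need to use sizeof(Real_t) instead.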
#endif
#ifdef DP_PROFILING_KERNEL1
printf("Kernel1: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel1: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
printf("dimension: 1 1 1 1 1 1 1 1\n");
printf("sizeof %d %d %d %d %d %d %d %d\n",sizeof(Int_t),sizeof(Index_t)
,sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t));
int yy_size = numElem*8*sizeof(Real_t);
printf("array_size %d %d %d %d %d %d %d %d\n",mesh.numNode()*sizeof(Int_t),8*mesh.numNode()*sizeof(Index_t),yy_size,yy_size,yy_size,
mesh.numNode()*sizeof(Int_t),mesh.numNode()*sizeof(Int_t),mesh.numNode()*sizeof(Int_t));
hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz, its);
//cudaPrintfDisplay(stdout, true);
cudaPrintfDisplay(stdout, false);
#else
hipEvent_t k1_start, k1_stop;
hipEventCreate(&k1_start);
hipEventCreate(&k1_stop);
hipEventRecord(k1_start,0);
hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz);
hipDeviceSynchronize();
hipEventRecord(k1_stop, 0);
hipEventSynchronize(k1_stop);
float k1_time=0.0;
hipEventElapsedTime(&k1_time, k1_start, k1_stop);
printf("Kernel 1 time = %f\n",k1_time);
k1+=k1_time;
#endif
CUDA_DEBUGSYNC;
CUDA( hipFree(fx_elem) );
CUDA( hipFree(fy_elem) );
CUDA( hipFree(fz_elem) );
// JDC -- need a reduction step to check for non-positive element volumes
badvol=0;
}
static inline
void IntegrateStressForElems_cpu( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol)
{
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t fx_local[8] ;
Real_t fy_local[8] ;
Real_t fz_local[8] ;
// loop over all elements
for( Index_t k=0 ; k<numElem ; ++k )
{
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
/* Volume calculation involves extra work for numerical consistency. */
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
fx_local, fy_local, fz_local, 1 ) ;
// copy nodal force contributions to the global force array.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
mesh.fx(gnode) += fx_local[lnode];
mesh.fy(gnode) += fy_local[lnode];
mesh.fz(gnode) += fz_local[lnode];
}
}
badvol=0;
for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
badvol=1;
}
}
}
static inline
void IntegrateStressForElems( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol, int useCPU)
{
if (useCPU) {
FC(nodelist); FC(x); FC(y); FC(z);
IntegrateStressForElems_cpu(numElem,sigxx,sigyy,sigzz,determ,badvol);
SG(fx); SG(fy); SG(fz);
}
else {
FG(nodelist); FG(nodeElemCount); FG(nodeElemCornerList);
FG(x); FG(y); FG(z);
IntegrateStressForElems_gpu(numElem,sigxx,sigyy,sigzz,determ,badvol);
SC(fx); SC(fy); SC(fz);
}
}
static inline
void CollectDomainNodesToElemNodes(const Index_t elemNum,
Real_t elemX[8],
Real_t elemY[8],
Real_t elemZ[8])
{
Index_t nd0i = mesh.nodelist(elemNum,0) ;
Index_t nd1i = mesh.nodelist(elemNum,1) ;
Index_t nd2i = mesh.nodelist(elemNum,2) ;
Index_t nd3i = mesh.nodelist(elemNum,3) ;
Index_t nd4i = mesh.nodelist(elemNum,4) ;
Index_t nd5i = mesh.nodelist(elemNum,5) ;
Index_t nd6i = mesh.nodelist(elemNum,6) ;
Index_t nd7i = mesh.nodelist(elemNum,7) ;
elemX[0] = mesh.x(nd0i);
elemX[1] = mesh.x(nd1i);
elemX[2] = mesh.x(nd2i);
elemX[3] = mesh.x(nd3i);
elemX[4] = mesh.x(nd4i);
elemX[5] = mesh.x(nd5i);
elemX[6] = mesh.x(nd6i);
elemX[7] = mesh.x(nd7i);
elemY[0] = mesh.y(nd0i);
elemY[1] = mesh.y(nd1i);
elemY[2] = mesh.y(nd2i);
elemY[3] = mesh.y(nd3i);
elemY[4] = mesh.y(nd4i);
elemY[5] = mesh.y(nd5i);
elemY[6] = mesh.y(nd6i);
elemY[7] = mesh.y(nd7i);
elemZ[0] = mesh.z(nd0i);
elemZ[1] = mesh.z(nd1i);
elemZ[2] = mesh.z(nd2i);
elemZ[3] = mesh.z(nd3i);
elemZ[4] = mesh.z(nd4i);
elemZ[5] = mesh.z(nd5i);
elemZ[6] = mesh.z(nd6i);
elemZ[7] = mesh.z(nd7i);
}
__host__
static inline
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
#if 0
__device__
static inline
void VOLUDER(const Real_t a0, const Real_t a1, const Real_t a2,
const Real_t a3, const Real_t a4, const Real_t a5,
const Real_t b0, const Real_t b1, const Real_t b2,
const Real_t b3, const Real_t b4, const Real_t b5,
Real_t& dvdc)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
dvdc=
(a1 + a2) * (b0 + b1) - (a0 + a1) * (b1 + b2) +
(a0 + a4) * (b3 + b4) - (a3 + a4) * (b0 + b4) -
(a2 + a5) * (b3 + b5) + (a3 + a5) * (b2 + b5);
dvdc *= twelfth;
}
#else
// Even though the above version is inlined, it seems to prohibit some kind of compiler optimization.
// This macro version uses many fewer registers and avoids spill-over into local memory.
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
#endif
__host__
static inline
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
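// Per-node device variant: a 256-thread block covers 32 elements x 8 nodes.
// Node coordinates are staged in the shared arrays (indexed node*32 + elem) so
// each thread can read the six neighbour nodes its VOLUDER stencil needs.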
__device__
static inline
void CalcElemVolumeDerivative(Real_t& dvdx,
Real_t& dvdy,
Real_t& dvdz,
const Real_t x,
const Real_t y,
const Real_t z,
unsigned int node)
{
__shared__ Real_t array1[256],array2[256];
volatile Real_t *va1;
volatile Real_t *va2;
unsigned int idx,elem;
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
default:
{ind0=ind1=ind2=ind3=ind4=ind5=0xFFFFFFFF;
break;}
}
idx=threadIdx.x;
elem=idx /*& 0x1F*/ - node*32;
va1=&array1[0];
va2=&array2[0];
// load y and z
__syncthreads();
va1[idx]=y; va2[idx]=z;
__syncthreads();
VOLUDER(va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
dvdx);
// load x
__syncthreads();
va1[idx]=x;
__syncthreads();
VOLUDER(va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
dvdy);
__syncthreads();
// load y
__syncthreads();
va2[idx]=y;
__syncthreads();
VOLUDER(va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
dvdz);
__syncthreads();
}
__host__
static inline
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
__shared__ Real_t shm_array[32*8];
__device__
static inline
Real_t SumOverNodes(Real_t val) {
// Sum up 8 node values for each element
// Assumes 256 threads: 32 elements, 8 nodes per element.
// NOTE: we could probably avoid some of the __syncthreads() if we map 8 nodes
// of an element to the same warp.
unsigned int tid=threadIdx.x;
#if 1
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#elif 1
unsigned int node=tid/32;
unsigned int elem=tid-(node*32);
#else
unsigned int elem=tid & 0x1F;
#endif
__syncthreads();
shm_array[tid]=val;
__syncthreads();
if (tid<128) shm_array[tid]+=shm_array[tid+128];
__syncthreads();
if (tid<64) shm_array[tid]+=shm_array[tid+64];
__syncthreads();
if (tid<32) shm_array[tid]+=shm_array[tid+32];
__syncthreads();
Real_t ret=shm_array[elem];
__syncthreads();
return ret;
#else
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#else
unsigned int node=tid/32;
unsigned int elem=tid-(node*32);
#endif
unsigned int idx=elem*8+node;
__syncthreads();
shm_array[idx]=val;
__syncthreads();
if (node<4) shm_array[idx]+=shm_array[idx+4];
if (node<2) shm_array[idx]+=shm_array[idx+2];
if (node<1) shm_array[idx]+=shm_array[idx+1];
__syncthreads();
return shm_array[elem*8];
#endif
}
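// Per-node device variant of CalcElemFBHourglassForce: for each of the four
// hourglass modes, h = sum over the element's 8 nodes of hourgam[i]*velocity
// (via SumOverNodes), and the node's force component is
// coefficient * sum_i hourgam[i]*h.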
__device__
static inline
void CalcElemFBHourglassForce(Real_t xd,Real_t yd,Real_t zd,
Real_t *hourgam,Real_t coefficient,
Real_t &hgfx, Real_t &hgfy, Real_t &hgfz)
{
hgfx=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xd;
h=SumOverNodes(h);
hgfx+=hourgam[i]*h;
}
hgfx *= coefficient;
hgfy=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*yd;
h=SumOverNodes(h);
hgfy+=hourgam[i]*h;
}
hgfy *= coefficient;
hgfz=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zd;
h=SumOverNodes(h);
hgfz+=hourgam[i]*h;
}
hgfz *= coefficient;
}
#ifdef DP_PROFILING_KERNEL3
__global__
void CalcFBHourglassForceForElems_kernel(
Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg,
Index_t numElem, Index_t *nodelist,
Real_t *ss, Real_t *elemMass,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem, int its)
#else
__global__
void CalcFBHourglassForceForElems_kernel(
Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg,
Index_t numElem, Index_t *nodelist,
Real_t *ss, Real_t *elemMass,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem)
#endif
{
/*************************************************
*
* FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
* force.
*
*************************************************/
Real_t hgfx, hgfy, hgfz;
Real_t coefficient;
Real_t hourgam[4];
Real_t xd1, yd1, zd1;
/*************************************************/
/* compute the hourglass modes */
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
// Assume we will launch 256 threads, which we map to 32 elements, each
// with 8 per-node threads. Organize so each warp of 32 consecutive
// threads operates on the same node of different elements.
// THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!!
// (Most likely because the disabled bid<<5 + ... variants parse as
// bid << (5 + ...): '+' binds tighter than '<<', so only the multiply/mask
// forms compute the intended element index.)
unsigned int tid=threadIdx.x;
unsigned int bid=blockIdx.x;
#if 0
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid - (node<<5));
#elif 1
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid-node*32);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid & 0x1F);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid<<5 + (tid & 0x1F);
#elif 0
unsigned int node=tid>>5;
unsigned int elem=bid*32 + (tid & 0x1F);
#else
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid & 0x1F);
#endif
if (elem>=numElem) elem=numElem-1; // don't return -- need thread to participate in sync operations
//if (elem<0) elem=0; // debugging test
#ifdef AFTER_KERNEL3
Real_t volinv=Real_t(1.0)/tex1Dfetch(tex_determ,elem);
#else
Real_t volinv=Real_t(1.0)/determ[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
Real_t ss1, mass1, volume13 ;
Real_t xn,yn,zn,dvdxn,dvdyn,dvdzn;
Real_t hourmodx, hourmody, hourmodz;
#if 1
//#ifdef AFTER_KERNEL3
//xn=tex1Dfetch(tex_x8n,elem+numElem*node); yn=tex1Dfetch(tex_y8n,elem+numElem*node); zn=tex1Dfetch(tex_z8n,elem+numElem*node);
// dvdxn=tex1Dfetch(tex_dvdx,elem+numElem*node); dvdyn=tex1Dfetch(tex_dvdy,elem+numElem*node); dvdzn=tex1Dfetch(tex_dvdz,elem+numElem*node);
xn=x8n[elem+numElem*node]; yn=y8n[elem+numElem*node]; zn=z8n[elem+numElem*node];
dvdxn=dvdx[elem+numElem*node]; dvdyn=dvdy[elem+numElem*node]; dvdzn=dvdz[elem+numElem*node];
//#else
// xn=x8n[elem+numElem*node]; yn=y8n[elem+numElem*node]; zn=z8n[elem+numElem*node];
// dvdxn=dvdx[elem+numElem*node]; dvdyn=dvdy[elem+numElem*node]; dvdzn=dvdz[elem+numElem*node];
//#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//x8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t;
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//y8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//z8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t;
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdx
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdy
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdz
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("6 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif //if KERNEL3
#else
xn=yn=zn=posf; dvdxn=dvdyn=dvdzn=negf;
#endif
#if 1
#ifdef DP_PROFILING_KERNEL3
//Dong: no need to work on SumOverNodes: the data is in shared memory, which
//already seems optimal.
#endif
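// The sign pattern of each of the four Flanagan-Belytschko hourglass base
// vectors is encoded by the node-membership tests below; they match the
// gamma[4][8] table used in the CPU version further down.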
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/* compute forces */
/* store forces into h arrays (force arrays) */
#ifdef AFTER_KERNEL3
ss1=tex1Dfetch(tex_ss,elem);
#else
ss1=ss[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//ss
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("7 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#ifdef AFTER_KERNEL3
mass1=tex1Dfetch(tex_elemMass,elem);
// mass1 = elemMass[elem];
#else
mass1=elemMass[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//elemMass
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("8 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#ifdef AFTER_KERNEL3
volume13=CBRT(tex1Dfetch(tex_determ,elem));
#else
volume13=CBRT(determ[elem]);
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("0 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
//#ifdef AFTER_KERNEL3
// Index_t ni = tex1Dfetch(tex3_nodelist,elem+numElem*node);
//#else
Index_t ni = nodelist[elem+numElem*node];
//#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodelist
//The number of nodelist ele is 8*mesh.numElem=8*(edgeElems^3)=8*45^3 (the 8 is nodes per element, not a data-type size)
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("9 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif
#ifdef AFTER_KERNEL3
xd1=tex1Dfetch(tex_xd,ni); yd1=tex1Dfetch(tex_yd,ni); zd1=tex1Dfetch(tex_zd,ni);
//xd1=xd[ni]; yd1=yd[ni]; zd1=zd[ni];
#else
xd1=xd[ni]; yd1=yd[ni]; zd1=zd[ni];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//xd
//number of xd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("10 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//yd
//number of yd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//zd
//number of zd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
}
#endif
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,hourgam,coefficient,hgfx,hgfy,hgfz);
#ifdef DP_PROFILING_KERNEL3
//Dong: no need to work on the above call: the data is in shared memory, which
//already seems optimal.
#endif
#else
hgfx=xn+dvdxn; hgfy=yn+dvdyn; hgfz=zn+dvdzn;
#endif
#if 1
fx_elem[elem+numElem*node]=hgfx; fy_elem[elem+numElem*node]=hgfy; fz_elem[elem+numElem*node]=hgfz;
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem
//number of fx_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("13 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//fy_elem
//number of fy_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("14 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//fz_elem
//number of fz_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("15 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif
#else
fx_elem[0]=hgfx; fy_elem[0]=hgfy; fz_elem[0]=hgfz;
#endif
}
static inline
void CalcFBHourglassForceForElems_cpu(Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg)
{
/*************************************************
*
* FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
* force.
*
*************************************************/
Index_t numElem = mesh.numElem() ;
Real_t hgfx[8], hgfy[8], hgfz[8] ;
Real_t coefficient;
Real_t gamma[4][8];
Real_t hourgam0[4], hourgam1[4], hourgam2[4], hourgam3[4] ;
Real_t hourgam4[4], hourgam5[4], hourgam6[4], hourgam7[4];
Real_t xd1[8], yd1[8], zd1[8] ;
gamma[0][0] = Real_t( 1.);
gamma[0][1] = Real_t( 1.);
gamma[0][2] = Real_t(-1.);
gamma[0][3] = Real_t(-1.);
gamma[0][4] = Real_t(-1.);
gamma[0][5] = Real_t(-1.);
gamma[0][6] = Real_t( 1.);
gamma[0][7] = Real_t( 1.);
gamma[1][0] = Real_t( 1.);
gamma[1][1] = Real_t(-1.);
gamma[1][2] = Real_t(-1.);
gamma[1][3] = Real_t( 1.);
gamma[1][4] = Real_t(-1.);
gamma[1][5] = Real_t( 1.);
gamma[1][6] = Real_t( 1.);
gamma[1][7] = Real_t(-1.);
gamma[2][0] = Real_t( 1.);
gamma[2][1] = Real_t(-1.);
gamma[2][2] = Real_t( 1.);
gamma[2][3] = Real_t(-1.);
gamma[2][4] = Real_t( 1.);
gamma[2][5] = Real_t(-1.);
gamma[2][6] = Real_t( 1.);
gamma[2][7] = Real_t(-1.);
gamma[3][0] = Real_t(-1.);
gamma[3][1] = Real_t( 1.);
gamma[3][2] = Real_t(-1.);
gamma[3][3] = Real_t( 1.);
gamma[3][4] = Real_t( 1.);
gamma[3][5] = Real_t(-1.);
gamma[3][6] = Real_t( 1.);
gamma[3][7] = Real_t(-1.);
/*************************************************/
/* compute the hourglass modes */
for(Index_t i2=0;i2<numElem;++i2){
Index_t i3=8*i2;
Real_t volinv=Real_t(1.0)/determ[i2];
Real_t ss1, mass1, volume13 ;
for(Index_t i1=0;i1<4;++i1){
Real_t hourmodx =
x8n[i3] * gamma[i1][0] + x8n[i3+1] * gamma[i1][1] +
x8n[i3+2] * gamma[i1][2] + x8n[i3+3] * gamma[i1][3] +
x8n[i3+4] * gamma[i1][4] + x8n[i3+5] * gamma[i1][5] +
x8n[i3+6] * gamma[i1][6] + x8n[i3+7] * gamma[i1][7];
Real_t hourmody =
y8n[i3] * gamma[i1][0] + y8n[i3+1] * gamma[i1][1] +
y8n[i3+2] * gamma[i1][2] + y8n[i3+3] * gamma[i1][3] +
y8n[i3+4] * gamma[i1][4] + y8n[i3+5] * gamma[i1][5] +
y8n[i3+6] * gamma[i1][6] + y8n[i3+7] * gamma[i1][7];
Real_t hourmodz =
z8n[i3] * gamma[i1][0] + z8n[i3+1] * gamma[i1][1] +
z8n[i3+2] * gamma[i1][2] + z8n[i3+3] * gamma[i1][3] +
z8n[i3+4] * gamma[i1][4] + z8n[i3+5] * gamma[i1][5] +
z8n[i3+6] * gamma[i1][6] + z8n[i3+7] * gamma[i1][7];
hourgam0[i1] = gamma[i1][0] - volinv*(dvdx[i3 ] * hourmodx +
dvdy[i3 ] * hourmody +
dvdz[i3 ] * hourmodz );
hourgam1[i1] = gamma[i1][1] - volinv*(dvdx[i3+1] * hourmodx +
dvdy[i3+1] * hourmody +
dvdz[i3+1] * hourmodz );
hourgam2[i1] = gamma[i1][2] - volinv*(dvdx[i3+2] * hourmodx +
dvdy[i3+2] * hourmody +
dvdz[i3+2] * hourmodz );
hourgam3[i1] = gamma[i1][3] - volinv*(dvdx[i3+3] * hourmodx +
dvdy[i3+3] * hourmody +
dvdz[i3+3] * hourmodz );
hourgam4[i1] = gamma[i1][4] - volinv*(dvdx[i3+4] * hourmodx +
dvdy[i3+4] * hourmody +
dvdz[i3+4] * hourmodz );
hourgam5[i1] = gamma[i1][5] - volinv*(dvdx[i3+5] * hourmodx +
dvdy[i3+5] * hourmody +
dvdz[i3+5] * hourmodz );
hourgam6[i1] = gamma[i1][6] - volinv*(dvdx[i3+6] * hourmodx +
dvdy[i3+6] * hourmody +
dvdz[i3+6] * hourmodz );
hourgam7[i1] = gamma[i1][7] - volinv*(dvdx[i3+7] * hourmodx +
dvdy[i3+7] * hourmody +
dvdz[i3+7] * hourmodz );
}
/* compute forces */
/* store forces into h arrays (force arrays) */
ss1=mesh.ss(i2);
mass1=mesh.elemMass(i2);
volume13=CBRT(determ[i2]);
Index_t n0si2 = mesh.nodelist(i2,0);
Index_t n1si2 = mesh.nodelist(i2,1);
Index_t n2si2 = mesh.nodelist(i2,2);
Index_t n3si2 = mesh.nodelist(i2,3);
Index_t n4si2 = mesh.nodelist(i2,4);
Index_t n5si2 = mesh.nodelist(i2,5);
Index_t n6si2 = mesh.nodelist(i2,6);
Index_t n7si2 = mesh.nodelist(i2,7);
xd1[0] = mesh.xd(n0si2);
xd1[1] = mesh.xd(n1si2);
xd1[2] = mesh.xd(n2si2);
xd1[3] = mesh.xd(n3si2);
xd1[4] = mesh.xd(n4si2);
xd1[5] = mesh.xd(n5si2);
xd1[6] = mesh.xd(n6si2);
xd1[7] = mesh.xd(n7si2);
yd1[0] = mesh.yd(n0si2);
yd1[1] = mesh.yd(n1si2);
yd1[2] = mesh.yd(n2si2);
yd1[3] = mesh.yd(n3si2);
yd1[4] = mesh.yd(n4si2);
yd1[5] = mesh.yd(n5si2);
yd1[6] = mesh.yd(n6si2);
yd1[7] = mesh.yd(n7si2);
zd1[0] = mesh.zd(n0si2);
zd1[1] = mesh.zd(n1si2);
zd1[2] = mesh.zd(n2si2);
zd1[3] = mesh.zd(n3si2);
zd1[4] = mesh.zd(n4si2);
zd1[5] = mesh.zd(n5si2);
zd1[6] = mesh.zd(n6si2);
zd1[7] = mesh.zd(n7si2);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,
hourgam0,hourgam1,hourgam2,hourgam3,
hourgam4,hourgam5,hourgam6,hourgam7,
coefficient, hgfx, hgfy, hgfz);
mesh.fx(n0si2) += hgfx[0];
mesh.fy(n0si2) += hgfy[0];
mesh.fz(n0si2) += hgfz[0];
mesh.fx(n1si2) += hgfx[1];
mesh.fy(n1si2) += hgfy[1];
mesh.fz(n1si2) += hgfz[1];
mesh.fx(n2si2) += hgfx[2];
mesh.fy(n2si2) += hgfy[2];
mesh.fz(n2si2) += hgfz[2];
mesh.fx(n3si2) += hgfx[3];
mesh.fy(n3si2) += hgfy[3];
mesh.fz(n3si2) += hgfz[3];
mesh.fx(n4si2) += hgfx[4];
mesh.fy(n4si2) += hgfy[4];
mesh.fz(n4si2) += hgfz[4];
mesh.fx(n5si2) += hgfx[5];
mesh.fy(n5si2) += hgfy[5];
mesh.fz(n5si2) += hgfz[5];
mesh.fx(n6si2) += hgfx[6];
mesh.fy(n6si2) += hgfy[6];
mesh.fz(n6si2) += hgfz[6];
mesh.fx(n7si2) += hgfx[7];
mesh.fy(n7si2) += hgfy[7];
mesh.fz(n7si2) += hgfz[7];
}
}
static inline
void CalcFBHourglassForceForElems_gpu(Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg)
{
Index_t numElem = mesh.numElem();
Real_t *fx_elem,*fy_elem,*fz_elem;
CUDA( hipMalloc(&fx_elem,numElem*8*sizeof(Real_t)) );
CUDA( hipMalloc(&fy_elem,numElem*8*sizeof(Real_t)) );
CUDA( hipMalloc(&fz_elem,numElem*8*sizeof(Real_t)) );
dim3 dimBlock=dim3(256,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem*8,dimBlock.x),1,1);
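// 256 threads per block = 32 elements x 8 node-threads, the layout assumed
// by SumOverNodes and the per-node CalcElemFBHourglassForce above.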
#ifdef AFTER_KERNEL3
hipBindTexture(0,tex_determ,determ,numElem*sizeof(Real_t));
hipBindTexture(0,tex_x8n,x8n,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex_y8n,y8n,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex_z8n,z8n,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex_dvdx,dvdx,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex_dvdy,dvdy,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex_dvdz,dvdz,8*numElem*sizeof(Real_t));
hipBindTexture(0,tex3_nodelist,meshGPU.m_nodelist,8*mesh.numNode()*sizeof(int));
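// NOTE: nodelist holds 8*numElem entries (see the profiling comments), so
// 8*mesh.numElem()*sizeof(int) looks like the intended size for this binding.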
hipBindTexture(0,tex_ss,meshGPU.m_ss,mesh.numElem()*sizeof(Real_t));
hipBindTexture(0,tex_elemMass,meshGPU.m_elemMass,mesh.numElem()*sizeof(Real_t));
hipBindTexture(0,tex_xd,meshGPU.m_xd,mesh.numNode()*sizeof(Real_t));
hipBindTexture(0,tex_yd,meshGPU.m_yd,mesh.numNode()*sizeof(Real_t));
hipBindTexture(0,tex_zd,meshGPU.m_zd,mesh.numNode()*sizeof(Real_t));
#endif
#ifdef DP_PROFILING_KERNEL3
printf("Kernel3: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel3: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
hipLaunchKernelGGL(( CalcFBHourglassForceForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hourg,
numElem,meshGPU.m_nodelist,
meshGPU.m_ss,meshGPU.m_elemMass,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
fx_elem,fy_elem,fz_elem, its);
cudaPrintfDisplay(stdout, false);
#else
hipEvent_t k3_start, k3_stop;
hipEventCreate(&k3_start);
hipEventCreate(&k3_stop);
hipEventRecord(k3_start,0);
hipLaunchKernelGGL(( CalcFBHourglassForceForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hourg,
numElem,meshGPU.m_nodelist,
meshGPU.m_ss,meshGPU.m_elemMass,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
fx_elem,fy_elem,fz_elem);
hipDeviceSynchronize();
hipEventRecord(k3_stop, 0);
hipEventSynchronize(k3_stop);
float k3_time=0.0;
hipEventElapsedTime(&k3_time, k3_start, k3_stop);
printf("Kernel 3 time = %f\n",k3_time);
k3+=k3_time;
#endif
CUDA_DEBUGSYNC;
dimGrid=dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
#ifdef AFTER_KERNEL2
hipBindTexture(0,tex_nodeElemCount,meshGPU.m_nodeElemCount,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_nodeElemCornerList,meshGPU.m_nodeElemCornerList,8*mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_fx_elem,fx_elem,8*mesh.numElem()*sizeof(float));
hipBindTexture(0,tex_fy_elem,fy_elem,8*mesh.numElem()*sizeof(float));
hipBindTexture(0,tex_fz_elem,fz_elem,8*mesh.numElem()*sizeof(float));
#endif
#ifdef DP_PROFILING_KERNEL2
printf("Kernel2: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel2: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
hipLaunchKernelGGL(( AddNodeForcesFromElems2_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz, its);
cudaPrintfDisplay(stdout, false);
#else
hipEvent_t k2_start, k2_stop;
hipEventCreate(&k2_start);
hipEventCreate(&k2_stop);
hipEventRecord(k2_start,0);
hipLaunchKernelGGL(( AddNodeForcesFromElems2_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz);
hipDeviceSynchronize();
hipEventRecord(k2_stop, 0);
hipEventSynchronize(k2_stop);
float k2_time=0.0;
hipEventElapsedTime(&k2_time, k2_start, k2_stop);
printf("Kernel 2 time = %f\n",k2_time);
k2+=k2_time;
#endif
CUDA_DEBUGSYNC;
CUDA( hipFree(fx_elem) );
CUDA( hipFree(fy_elem) );
CUDA( hipFree(fz_elem) );
}
#ifdef DP_PROFILING_KERNEL4
__global__
void CalcHourglassControlForElems_kernel(Int_t numElem,Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,
Real_t *determ,Real_t *volo,Real_t *v,
Real_t *dvdx,Real_t *dvdy,Real_t *dvdz,
Real_t *x8n,Real_t *y8n,Real_t *z8n, int its)
#else
__global__
void CalcHourglassControlForElems_kernel(Int_t numElem,Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,
Real_t *determ,Real_t *volo,Real_t *v,
Real_t *dvdx,Real_t *dvdy,Real_t *dvdz,
Real_t *x8n,Real_t *y8n,Real_t *z8n)
#endif
{
Real_t x1,y1,z1;
Real_t pfx,pfy,pfz;
// THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!!
// (Same operator-precedence caveat as in CalcFBHourglassForceForElems_kernel:
// bid<<5 + ... parses as bid << (5 + ...).)
unsigned int tid=threadIdx.x;
unsigned int bid=blockIdx.x;
#if 0
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid - (node<<5));
#elif 1
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid-node*32);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid & 0x1F);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid<<5 + (tid & 0x1F);
#elif 0
unsigned int node=tid>>5;
unsigned int elem=bid*32 + (tid & 0x1F);
#else
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid & 0x1F);
#endif
if (elem>=numElem) elem=numElem-1; // don't return -- need thread to participate in sync operations
Index_t idx=elem+numElem*node;
#ifdef AFTER_KERNEL4
Index_t ni = tex1Dfetch(tex_nodelist,idx);
#else
Index_t ni = nodelist[idx];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodelist
//number of nodelist ele is mesh.numElem()*8=(edgeElems^3)*8=45^3*8
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
}
#endif
#ifdef AFTER_KERNEL4
x1=tex1Dfetch(tex_x,ni); y1=tex1Dfetch(tex_y,ni); z1=tex1Dfetch(tex_z,ni);
#else
x1=x[ni]; y1=y[ni]; z1=z[ni];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//x
//number of nodelist ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//y
//number of nodelist ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//z
//number of nodelist ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
}
#endif
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1, node);
#ifdef DP_PROFILING_KERNEL4
//Dong: don't optimize the above; the major data is already in shared mem
#endif
/* load into temporary storage for FB Hour Glass control */
dvdx[idx] = pfx;
dvdy[idx] = pfy;
dvdz[idx] = pfz;
x8n[idx] = x1;
y8n[idx] = y1;
z8n[idx] = z1;
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//dvdx
//number of dvdx ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("4 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//dvdy
//number of dvdy ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//dvdz
//number of dvdz ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//x8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//y8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("8 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//z8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("9 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
}
#endif
#ifdef AFTER_KERNEL4
determ[elem] = tex1Dfetch(tex_volo,elem) * tex1Dfetch(tex_v,elem);
#else
//if (node==0)
determ[elem] = volo[elem] * v[elem];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("10 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
//volo
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
//v
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#if 0 // JDC
/* Do a check for negative volumes */
if ( mesh.v(i) <= Real_t(0.0) ) {
exit(VolumeError) ;
}
#endif
}
static inline
void CalcHourglassControlForElems_gpu(Real_t determ[], Real_t hgcoef)
{
Index_t numElem = mesh.numElem() ;
Index_t numElem8 = numElem * 8 ;
Real_t *dvdx,*dvdy,*dvdz;
Real_t *x8n,*y8n,*z8n;
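// Scratch arrays of size numElem*8: one entry per (element, local-node) pair,
// filled by the kernel below and consumed by the FB hourglass force computation.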
CUDA( hipMalloc(&dvdx,sizeof(Real_t)*numElem8) );
CUDA( hipMalloc(&dvdy,sizeof(Real_t)*numElem8) );
CUDA( hipMalloc(&dvdz,sizeof(Real_t)*numElem8) );
CUDA( hipMalloc(&x8n,sizeof(Real_t)*numElem8) );
CUDA( hipMalloc(&y8n,sizeof(Real_t)*numElem8) );
CUDA( hipMalloc(&z8n,sizeof(Real_t)*numElem8) );
dim3 dimBlock=dim3(256,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem*8,dimBlock.x),1,1);
#ifdef AFTER_KERNEL4
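// Note: these bindings assume Real_t is single precision (float); the byte counts
// are expressed as node/element counts times sizeof(float) or sizeof(int).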
hipBindTexture(0,tex_nodelist,meshGPU.m_nodelist,8*mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_x,meshGPU.m_x,mesh.numNode()*sizeof(float));
hipBindTexture(0,tex_y,meshGPU.m_y,mesh.numNode()*sizeof(float));
hipBindTexture(0,tex_z,meshGPU.m_z,mesh.numNode()*sizeof(float));
hipBindTexture(0,tex_volo,meshGPU.m_volo,mesh.numElem()*sizeof(float));
hipBindTexture(0,tex_v,meshGPU.m_v,mesh.numElem()*sizeof(float));
#endif
#ifdef DP_PROFILING_KERNEL4
printf("Kernel4: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel4: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
hipLaunchKernelGGL(( CalcHourglassControlForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem, meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,
determ,meshGPU.m_volo,meshGPU.m_v,
dvdx,dvdy,dvdz,x8n,y8n,z8n, its);
cudaPrintfDisplay(stdout, false);
#else
hipEvent_t k4_start, k4_stop;
hipEventCreate(&k4_start);
hipEventCreate(&k4_stop);
hipEventRecord(k4_start,0);
hipLaunchKernelGGL(( CalcHourglassControlForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem, meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,
determ,meshGPU.m_volo,meshGPU.m_v,
dvdx,dvdy,dvdz,x8n,y8n,z8n);
hipDeviceSynchronize();
hipEventRecord(k4_stop, 0);
hipEventSynchronize(k4_stop);
float k4_time=0.0;
hipEventElapsedTime(&k4_time, k4_start, k4_stop);
printf("Kernel 4 time = %f\n",k4_time);
k4+=k4_time;
#endif
CUDA_DEBUGSYNC;
// JDC -- need a reduction to check for negative volumes
if ( hgcoef > Real_t(0.) ) {
CalcFBHourglassForceForElems_gpu(determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hgcoef) ;
}
CUDA( hipFree(dvdx) );
CUDA( hipFree(dvdy) );
CUDA( hipFree(dvdz) );
CUDA( hipFree(x8n) );
CUDA( hipFree(y8n) );
CUDA( hipFree(z8n) );
return ;
}
static inline
void CalcHourglassControlForElems_cpu(Real_t determ[], Real_t hgcoef)
{
Index_t i, ii, jj ;
Real_t x1[8], y1[8], z1[8] ;
Real_t pfx[8], pfy[8], pfz[8] ;
Index_t numElem = mesh.numElem() ;
Index_t numElem8 = numElem * 8 ;
Real_t *dvdx = Allocate<Real_t>(numElem8) ;
Real_t *dvdy = Allocate<Real_t>(numElem8) ;
Real_t *dvdz = Allocate<Real_t>(numElem8) ;
Real_t *x8n = Allocate<Real_t>(numElem8) ;
Real_t *y8n = Allocate<Real_t>(numElem8) ;
Real_t *z8n = Allocate<Real_t>(numElem8) ;
/* start loop over elements */
for (i=0 ; i<numElem ; ++i){
CollectDomainNodesToElemNodes(i, x1, y1, z1);
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);
/* load into temporary storage for FB Hour Glass control */
for(ii=0;ii<8;++ii){
jj=8*i+ii;
dvdx[jj] = pfx[ii];
dvdy[jj] = pfy[ii];
dvdz[jj] = pfz[ii];
x8n[jj] = x1[ii];
y8n[jj] = y1[ii];
z8n[jj] = z1[ii];
}
determ[i] = mesh.volo(i) * mesh.v(i);
/* Do a check for negative volumes */
if ( mesh.v(i) <= Real_t(0.0) ) {
exit(VolumeError) ;
}
}
if ( hgcoef > Real_t(0.) ) {
CalcFBHourglassForceForElems_cpu(determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hgcoef) ;
}
Release(&z8n) ;
Release(&y8n) ;
Release(&x8n) ;
Release(&dvdz) ;
Release(&dvdy) ;
Release(&dvdx) ;
return ;
}
static inline
void CalcHourglassControlForElems(Real_t determ[], Real_t hgcoef, int useCPU)
{
if (useCPU) {
FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
FC(nodelist); FC(ss); FC(elemMass);
FC(fx); FC(fy); FC(fz);
CalcHourglassControlForElems_cpu(determ,hgcoef);
SG(fx); SG(fy); SG(fz);
}
else {
FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
FG(nodelist); FG(ss); FG(elemMass);
FG(fx); FG(fy); FG(fz);
CalcHourglassControlForElems_gpu(determ,hgcoef);
SC(fx); SC(fy); SC(fz);
}
}
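// Volume force = nodal forces from the stress integration plus (when hgcoef > 0)
// the FB hourglass control forces.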
static inline
void CalcVolumeForceForElems_gpu()
{
Index_t numElem = mesh.numElem() ;
if (numElem != 0) {
Real_t hgcoef = mesh.hgcoef() ;
Real_t *sigxx, *sigyy, *sigzz, *determ;
int badvol;
CUDA( hipMalloc(&sigxx,numElem*sizeof(Real_t)) );
CUDA( hipMalloc(&sigyy,numElem*sizeof(Real_t)) );
CUDA( hipMalloc(&sigzz,numElem*sizeof(Real_t)) );
CUDA( hipMalloc(&determ,numElem*sizeof(Real_t)) );
/* Sum contributions to total stress tensor */
InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 0);
// call elemlib stress integration loop to produce nodal forces from
// material stresses.
IntegrateStressForElems( numElem, sigxx, sigyy, sigzz, determ, badvol, 0) ;
CUDA( hipFree(sigxx) );
CUDA( hipFree(sigyy) );
CUDA( hipFree(sigzz) );
// check for negative element volume
if (badvol) exit(VolumeError) ;
CalcHourglassControlForElems(determ, hgcoef, 0) ;
CUDA( hipFree(determ) );
}
}
static inline
void CalcVolumeForceForElems_cpu()
{
Index_t numElem = mesh.numElem() ;
if (numElem != 0) {
Real_t hgcoef = mesh.hgcoef() ;
Real_t *sigxx = Allocate<Real_t>(numElem) ;
Real_t *sigyy = Allocate<Real_t>(numElem) ;
Real_t *sigzz = Allocate<Real_t>(numElem) ;
Real_t *determ = Allocate<Real_t>(numElem) ;
int badvol;
/* Sum contributions to total stress tensor */
InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 1);
// call elemlib stress integration loop to produce nodal forces from
// material stresses.
IntegrateStressForElems( numElem, sigxx, sigyy, sigzz, determ, badvol, 1) ;
Release(&sigzz) ;
Release(&sigyy) ;
Release(&sigxx) ;
// check for negative element volume
if (badvol) exit(VolumeError);
#if 0
for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
exit(VolumeError) ;
}
}
#endif
CalcHourglassControlForElems(determ, hgcoef, 1) ;
Release(&determ) ;
}
}
static inline void CalcForceForNodes_gpu()
{
/* Calcforce calls partial, force, hourq */
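// Note: unlike the CPU path below, the nodal forces are not explicitly zeroed here;
// the GPU stress integration is assumed to (re)initialize fx/fy/fz.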
CalcVolumeForceForElems_gpu() ;
/* Calculate Nodal Forces at domain boundaries */
/* problem->commSBN->Transfer(CommSBN::forces); */
}
static inline void CalcForceForNodes_cpu()
{
Index_t numNode = mesh.numNode() ;
for (Index_t i=0; i<numNode; ++i) {
mesh.fx(i) = Real_t(0.0) ;
mesh.fy(i) = Real_t(0.0) ;
mesh.fz(i) = Real_t(0.0) ;
}
/* Calcforce calls partial, force, hourq */
CalcVolumeForceForElems_cpu() ;
/* Calculate Nodal Forces at domain boundaries */
/* problem->commSBN->Transfer(CommSBN::forces); */
}
static inline void CalcForceForNodes(int useCPU)
{
if (useCPU) {
CalcForceForNodes_cpu();
}
else {
CalcForceForNodes_gpu();
}
}
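// One thread per node: acceleration = force / nodalMass in each coordinate direction.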
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
xdd[i]=fx[i]/nodalMass[i];
ydd[i]=fy[i]/nodalMass[i];
zdd[i]=fz[i]/nodalMass[i];
}
}
static inline
void CalcAccelerationForNodes_gpu()
{
dim3 dimBlock = dim3(BLOCKSIZE,1,1);
dim3 dimGrid = dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcAccelerationForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
mesh.numNode(),
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd,
meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz,
meshGPU.m_nodalMass);
CUDA_DEBUGSYNC;
}
static inline
void CalcAccelerationForNodes_cpu()
{
Index_t numNode = mesh.numNode() ;
for (Index_t i = 0; i < numNode; ++i) {
mesh.xdd(i) = mesh.fx(i) / mesh.nodalMass(i);
mesh.ydd(i) = mesh.fy(i) / mesh.nodalMass(i);
mesh.zdd(i) = mesh.fz(i) / mesh.nodalMass(i);
}
}
static inline
void CalcAccelerationForNodes(int useCPU)
{
if (useCPU) {
FC(fx); FC(fy); FC(fz); FC(nodalMass);
CalcAccelerationForNodes_cpu();
SG(xdd); SG(ydd); SG(zdd);
}
else {
FG(fx); FG(fy); FG(fz); FG(nodalMass);
CalcAccelerationForNodes_gpu();
SC(xdd); SC(ydd); SC(zdd);
}
}
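// Zero the acceleration component normal to each symmetry plane; symmX/symmY/symmZ
// hold the node indices lying on the x, y and z boundary planes.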
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xdd, Real_t *ydd, Real_t *zdd,
Index_t *symmX, Index_t *symmY, Index_t *symmZ)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNodeBC) {
xdd[symmX[i]] = Real_t(0.0) ;
ydd[symmY[i]] = Real_t(0.0) ;
zdd[symmZ[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes_gpu()
{
Index_t numNodeBC = (mesh.sizeX()+1)*(mesh.sizeX()+1) ;
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(numNodeBC,dimBlock.x),1,1);
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
numNodeBC,
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd,
meshGPU.m_symmX,meshGPU.m_symmY,meshGPU.m_symmZ);
CUDA_DEBUGSYNC;
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes_cpu()
{
Index_t numNodeBC = (mesh.sizeX()+1)*(mesh.sizeX()+1) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.xdd(mesh.symmX(i)) = Real_t(0.0) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.ydd(mesh.symmY(i)) = Real_t(0.0) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.zdd(mesh.symmZ(i)) = Real_t(0.0) ;
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(int useCPU)
{
if (useCPU) {
FC(xdd); FC(ydd); FC(zdd); FC(symmX); FC(symmY); FC(symmZ);
ApplyAccelerationBoundaryConditionsForNodes_cpu();
SG(xdd); SG(ydd); SG(zdd);
}
else {
FG(xdd); FG(ydd); FG(zdd); FG(symmX); FG(symmY); FG(symmZ);
ApplyAccelerationBoundaryConditionsForNodes_gpu();
SC(xdd); SC(ydd); SC(zdd);
}
}
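// Integrate nodal velocities: v += a*dt, then zero any component whose magnitude
// falls below the u_cut cutoff.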
__global__
void CalcVelocityForNodes_kernel(int numNode, const Real_t dt, const Real_t u_cut,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *xdd, Real_t *ydd, Real_t *zdd)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = xd[i] + xdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
xd[i] = xdtmp ;
ydtmp = yd[i] + ydd[i] * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
yd[i] = ydtmp ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
zd[i] = zdtmp ;
}
}
static inline
void CalcVelocityForNodes_gpu(const Real_t dt, const Real_t u_cut)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcVelocityForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
mesh.numNode(),dt,u_cut,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd);
CUDA_DEBUGSYNC;
}
static inline
void CalcVelocityForNodes_cpu(const Real_t dt, const Real_t u_cut)
{
Index_t numNode = mesh.numNode() ;
for ( Index_t i = 0 ; i < numNode ; ++i )
{
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = mesh.xd(i) + mesh.xdd(i) * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
mesh.xd(i) = xdtmp ;
ydtmp = mesh.yd(i) + mesh.ydd(i) * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
mesh.yd(i) = ydtmp ;
zdtmp = mesh.zd(i) + mesh.zdd(i) * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
mesh.zd(i) = zdtmp ;
}
}
static inline
void CalcVelocityForNodes(const Real_t dt, const Real_t u_cut, int useCPU)
{
if (useCPU) {
FC(xd); FC(yd); FC(zd); FC(xdd); FC(ydd); FC(zdd);
CalcVelocityForNodes_cpu(dt,u_cut);
SG(xd); SG(yd); SG(zd);
}
else {
FG(xd); FG(yd); FG(zd); FG(xdd); FG(ydd); FG(zdd);
CalcVelocityForNodes_gpu(dt,u_cut);
SC(xd); SC(yd); SC(zd);
}
}
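// Advance nodal positions: x += v*dt.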
__global__
void CalcPositionForNodes_kernel(int numNode, Real_t dt,
Real_t *x, Real_t *y, Real_t *z,
Real_t *xd, Real_t *yd, Real_t *zd)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
x[i] += xd[i] * dt;
y[i] += yd[i] * dt;
z[i] += zd[i] * dt;
}
}
static inline
void CalcPositionForNodes_gpu(const Real_t dt)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcPositionForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
mesh.numNode(),dt,meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd);
CUDA_DEBUGSYNC;
}
static inline
void CalcPositionForNodes_cpu(const Real_t dt)
{
Index_t numNode = mesh.numNode() ;
for ( Index_t i = 0 ; i < numNode ; ++i )
{
mesh.x(i) += mesh.xd(i) * dt ;
mesh.y(i) += mesh.yd(i) * dt ;
mesh.z(i) += mesh.zd(i) * dt ;
}
}
static inline
void CalcPositionForNodes(const Real_t dt,int useCPU)
{
if (useCPU) {
FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
CalcPositionForNodes_cpu(dt);
SG(x); SG(y); SG(z);
}
else {
FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
CalcPositionForNodes_gpu(dt);
SC(x); SC(y); SC(z);
}
}
static inline
void LagrangeNodal(int useCPU)
{
const Real_t delt = mesh.deltatime() ;
Real_t u_cut = mesh.u_cut() ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(/*0*/useCPU);
CalcAccelerationForNodes(useCPU);
ApplyAccelerationBoundaryConditionsForNodes(useCPU);
CalcVelocityForNodes( delt, u_cut, useCPU ) ;
CalcPositionForNodes( delt, useCPU );
return;
}
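// Volume of a hexahedral element from its eight corner coordinates, evaluated as a
// sum of three scalar triple products of edge/diagonal difference vectors, scaled by 1/12.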
__host__ __device__
static inline
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static inline
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
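// Returns a squared area measure of a quadrilateral face, |f|^2*|g|^2 - (f.g)^2,
// built from the face diagonal difference (f) and sum (g) vectors; the square root
// is taken by the caller.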
__host__ __device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
(fx * gx + fy * gy + fz * gz) *
(fx * gx + fy * gy + fz * gz);
return area ;
}
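// Characteristic element length: 4 * volume / sqrt(largest face area measure),
// scanning all six faces of the hexahedron.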
__host__ __device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
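// Velocity gradient from the shape-function derivatives b[][] and Jacobian
// determinant: d[0..2] are the diagonal strain-rate components, d[3..5] the
// symmetric off-diagonal (shear) components.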
__host__ __device__
static inline
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
d[0] = inv_detJ * ( pfx[0] * (xvel[0]-xvel[6])
+ pfx[1] * (xvel[1]-xvel[7])
+ pfx[2] * (xvel[2]-xvel[4])
+ pfx[3] * (xvel[3]-xvel[5]) );
d[1] = inv_detJ * ( pfy[0] * (yvel[0]-yvel[6])
+ pfy[1] * (yvel[1]-yvel[7])
+ pfy[2] * (yvel[2]-yvel[4])
+ pfy[3] * (yvel[3]-yvel[5]) );
d[2] = inv_detJ * ( pfz[0] * (zvel[0]-zvel[6])
+ pfz[1] * (zvel[1]-zvel[7])
+ pfz[2] * (zvel[2]-zvel[4])
+ pfz[3] * (zvel[3]-zvel[5]) );
dyddx = inv_detJ * ( pfx[0] * (yvel[0]-yvel[6])
+ pfx[1] * (yvel[1]-yvel[7])
+ pfx[2] * (yvel[2]-yvel[4])
+ pfx[3] * (yvel[3]-yvel[5]) );
dxddy = inv_detJ * ( pfy[0] * (xvel[0]-xvel[6])
+ pfy[1] * (xvel[1]-xvel[7])
+ pfy[2] * (xvel[2]-xvel[4])
+ pfy[3] * (xvel[3]-xvel[5]) );
dzddx = inv_detJ * ( pfx[0] * (zvel[0]-zvel[6])
+ pfx[1] * (zvel[1]-zvel[7])
+ pfx[2] * (zvel[2]-zvel[4])
+ pfx[3] * (zvel[3]-zvel[5]) );
dxddz = inv_detJ * ( pfz[0] * (xvel[0]-xvel[6])
+ pfz[1] * (xvel[1]-xvel[7])
+ pfz[2] * (xvel[2]-xvel[4])
+ pfz[3] * (xvel[3]-xvel[5]) );
dzddy = inv_detJ * ( pfy[0] * (zvel[0]-zvel[6])
+ pfy[1] * (zvel[1]-zvel[7])
+ pfy[2] * (zvel[2]-zvel[4])
+ pfy[3] * (zvel[3]-zvel[5]) );
dyddz = inv_detJ * ( pfz[0] * (yvel[0]-yvel[6])
+ pfz[1] * (yvel[1]-yvel[7])
+ pfz[2] * (yvel[2]-yvel[4])
+ pfz[3] * (yvel[3]-yvel[5]) );
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
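// One thread per element: gather nodal coordinates and velocities, compute the new
// relative volume (vnew), its increment (delv) and characteristic length (arealg),
// then evaluate the velocity gradient at positions backed up by a half time step.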
__global__
void CalcKinematicsForElems_kernel(
Index_t numElem, Real_t dt,
Index_t *nodelist,Real_t *volo,Real_t *v,
Real_t *x,Real_t *y,Real_t *z,Real_t *xd,Real_t *yd,Real_t *zd,
Real_t *vnew,Real_t *delv,Real_t *arealg,Real_t *dxx,Real_t *dyy,Real_t *dzz
)
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
x_local[lnode] = x[gnode];
y_local[lnode] = y[gnode];
z_local[lnode] = z[gnode];
}
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// put velocity gradient quantities into their global arrays.
dxx[k] = D[0];
dyy[k] = D[1];
dzz[k] = D[2];
}
}
static inline
void CalcKinematicsForElems_gpu( Index_t numElem, Real_t dt )
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcKinematicsForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem,dt,meshGPU.m_nodelist,meshGPU.m_volo,meshGPU.m_v,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_vnew,meshGPU.m_delv,meshGPU.m_arealg,meshGPU.m_dxx,meshGPU.m_dyy,meshGPU.m_dzz);
CUDA_DEBUGSYNC;
}
static inline
void CalcKinematicsForElems_cpu( Index_t numElem, Real_t dt )
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
// loop over all elements
for( Index_t k=0 ; k<numElem ; ++k )
{
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / mesh.volo(k) ;
mesh.vnew(k) = relativeVolume ;
mesh.delv(k) = relativeVolume - mesh.v(k) ;
// set characteristic length
mesh.arealg(k) = CalcElemCharacteristicLength(x_local,
y_local,
z_local,
volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
xd_local[lnode] = mesh.xd(gnode);
yd_local[lnode] = mesh.yd(gnode);
zd_local[lnode] = mesh.zd(gnode);
}
Real_t dt2 = Real_t(0.5) * dt;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives( x_local,
y_local,
z_local,
B, &detJ );
CalcElemVelocityGradient( xd_local,
yd_local,
zd_local,
B, detJ, D );
// put velocity gradient quantities into their global arrays.
mesh.dxx(k) = D[0];
mesh.dyy(k) = D[1];
mesh.dzz(k) = D[2];
}
}
static inline
void CalcKinematicsForElems( Index_t numElem, Real_t dt, int useCPU )
{
if (useCPU) {
FC(nodelist); FC(volo); FC(v); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
CalcKinematicsForElems_cpu(numElem,dt);
SG(vnew); SG(delv); SG(arealg); SG(dxx); SG(dyy); SG(dzz);
}
else {
FG(nodelist); FG(volo); FG(v); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
CalcKinematicsForElems_gpu(numElem,dt);
SC(vnew); SC(delv); SC(arealg); SC(dxx); SC(dyy); SC(dzz);
}
}
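// vdov is the trace of the velocity gradient (relative volume change rate); removing
// a third of it from dxx/dyy/dzz makes the strain-rate tensor deviatoric. Note that
// the negative-volume check is only performed on the CPU path.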
__global__
void CalcLagrangeElementsPart2_kernel(
Index_t numElem,
Real_t *dxx,Real_t *dyy, Real_t *dzz,
Real_t *vdov
)
{
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = dxx[k] + dyy[k] + dzz[k] ;
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] -= vdovthird ;
dyy[k] -= vdovthird ;
dzz[k] -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
//if (mesh.vnew(k) <= Real_t(0.0))
//{
// exit(VolumeError) ;
//}
}
}
static inline
void CalcLagrangeElementsPart2_gpu()
{
Index_t numElem = mesh.numElem();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcLagrangeElementsPart2_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem,
meshGPU.m_dxx,meshGPU.m_dyy,meshGPU.m_dzz,
meshGPU.m_vdov);
CUDA_DEBUGSYNC;
}
static inline
void CalcLagrangeElementsPart2_cpu()
{
Index_t numElem = mesh.numElem() ;
// element loop to do some stuff not included in the elemlib function.
for ( Index_t k=0 ; k<numElem ; ++k )
{
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdov = mesh.dxx(k) + mesh.dyy(k) + mesh.dzz(k) ;
Real_t vdovthird = vdov/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
mesh.vdov(k) = vdov ;
mesh.dxx(k) -= vdovthird ;
mesh.dyy(k) -= vdovthird ;
mesh.dzz(k) -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
if (mesh.vnew(k) <= Real_t(0.0))
{
exit(VolumeError) ;
}
}
}
static inline
void CalcLagrangeElementsPart2(int useCPU)
{
if (useCPU) {
FC(dxx); FC(dyy); FC(dzz);
CalcLagrangeElementsPart2_cpu();
SG(vdov); SG(dxx); SG(dyy); SG(dzz);
}
else {
FG(dxx); FG(dyy); FG(dzz);
CalcLagrangeElementsPart2_gpu();
SC(vdov); SC(dxx); SC(dyy); SC(dzz);
}
}
static inline
void CalcLagrangeElements(Real_t deltatime, int useCPU)
{
Index_t numElem = mesh.numElem() ;
if (numElem > 0) {
CalcKinematicsForElems(numElem, deltatime, useCPU);
CalcLagrangeElementsPart2(useCPU);
}
}
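// Per-element monotonic Q gradients: characteristic lengths (delx_*) and velocity
// differences (delv_*) along the xi, eta and zeta element directions, consumed by
// the monotonic artificial-viscosity limiter.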
__global__
void CalcMonotonicQGradientsForElems_kernel(
Index_t numElem,
Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,Real_t *xd,Real_t *yd,Real_t *zd,
Real_t *volo,Real_t *vnew,
Real_t *delx_zeta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delv_xi,
Real_t *delx_eta,Real_t *delv_eta
)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem) {
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Index_t n0 = nodelist[i+0*numElem] ;
Index_t n1 = nodelist[i+1*numElem] ;
Index_t n2 = nodelist[i+2*numElem] ;
Index_t n3 = nodelist[i+3*numElem] ;
Index_t n4 = nodelist[i+4*numElem] ;
Index_t n5 = nodelist[i+5*numElem] ;
Index_t n6 = nodelist[i+6*numElem] ;
Index_t n7 = nodelist[i+7*numElem] ;
Real_t x0 = x[n0] ;
Real_t x1 = x[n1] ;
Real_t x2 = x[n2] ;
Real_t x3 = x[n3] ;
Real_t x4 = x[n4] ;
Real_t x5 = x[n5] ;
Real_t x6 = x[n6] ;
Real_t x7 = x[n7] ;
Real_t y0 = y[n0] ;
Real_t y1 = y[n1] ;
Real_t y2 = y[n2] ;
Real_t y3 = y[n3] ;
Real_t y4 = y[n4] ;
Real_t y5 = y[n5] ;
Real_t y6 = y[n6] ;
Real_t y7 = y[n7] ;
Real_t z0 = z[n0] ;
Real_t z1 = z[n1] ;
Real_t z2 = z[n2] ;
Real_t z3 = z[n3] ;
Real_t z4 = z[n4] ;
Real_t z5 = z[n5] ;
Real_t z6 = z[n6] ;
Real_t z7 = z[n7] ;
Real_t xv0 = xd[n0] ;
Real_t xv1 = xd[n1] ;
Real_t xv2 = xd[n2] ;
Real_t xv3 = xd[n3] ;
Real_t xv4 = xd[n4] ;
Real_t xv5 = xd[n5] ;
Real_t xv6 = xd[n6] ;
Real_t xv7 = xd[n7] ;
Real_t yv0 = yd[n0] ;
Real_t yv1 = yd[n1] ;
Real_t yv2 = yd[n2] ;
Real_t yv3 = yd[n3] ;
Real_t yv4 = yd[n4] ;
Real_t yv5 = yd[n5] ;
Real_t yv6 = yd[n6] ;
Real_t yv7 = yd[n7] ;
Real_t zv0 = zd[n0] ;
Real_t zv1 = zd[n1] ;
Real_t zv2 = zd[n2] ;
Real_t zv3 = zd[n3] ;
Real_t zv4 = zd[n4] ;
Real_t zv5 = zd[n5] ;
Real_t zv6 = zd[n6] ;
Real_t zv7 = zd[n7] ;
Real_t vol = volo[i]*vnew[i] ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x0,x1,x5,x4) - SUM4(x3,x2,x6,x7)) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y0,y1,y5,y4) - SUM4(y3,y2,y6,y7)) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z0,z1,z5,z4) - SUM4(z3,z2,z6,z7)) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x1,x2,x6,x5) - SUM4(x0,x3,x7,x4)) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y1,y2,y6,y5) - SUM4(y0,y3,y7,y4)) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z1,z2,z6,z5) - SUM4(z0,z3,z7,z4)) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x4,x5,x6,x7) - SUM4(x0,x1,x2,x3)) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y4,y5,y6,y7) - SUM4(y0,y1,y2,y3)) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z4,z5,z6,z7) - SUM4(z0,z1,z2,z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
delx_zeta[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv4,xv5,xv6,xv7) - SUM4(xv0,xv1,xv2,xv3)) ;
dyv = Real_t(0.25)*(SUM4(yv4,yv5,yv6,yv7) - SUM4(yv0,yv1,yv2,yv3)) ;
dzv = Real_t(0.25)*(SUM4(zv4,zv5,zv6,zv7) - SUM4(zv0,zv1,zv2,zv3)) ;
delv_zeta[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
delx_xi[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv1,xv2,xv6,xv5) - SUM4(xv0,xv3,xv7,xv4)) ;
dyv = Real_t(0.25)*(SUM4(yv1,yv2,yv6,yv5) - SUM4(yv0,yv3,yv7,yv4)) ;
dzv = Real_t(0.25)*(SUM4(zv1,zv2,zv6,zv5) - SUM4(zv0,zv3,zv7,zv4)) ;
delv_xi[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
delx_eta[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv0,xv1,xv5,xv4) - SUM4(xv3,xv2,xv6,xv7)) ;
dyv = Real_t(-0.25)*(SUM4(yv0,yv1,yv5,yv4) - SUM4(yv3,yv2,yv6,yv7)) ;
dzv = Real_t(-0.25)*(SUM4(zv0,zv1,zv5,zv4) - SUM4(zv3,zv2,zv6,zv7)) ;
delv_eta[i] = ax*dxv + ay*dyv + az*dzv ;
}
#undef SUM4
}
static inline
void CalcMonotonicQGradientsForElems_gpu()
{
Index_t numElem = mesh.numElem();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcMonotonicQGradientsForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem,
meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_delx_zeta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delv_xi,
meshGPU.m_delx_eta,meshGPU.m_delv_eta);
CUDA_DEBUGSYNC;
}
static inline
void CalcMonotonicQGradientsForElems_cpu()
{
#define SUM4(a,b,c,d) (a + b + c + d)
Index_t numElem = mesh.numElem() ;
const Real_t ptiny = Real_t(1.e-36) ;
for (Index_t i = 0 ; i < numElem ; ++i ) {
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Index_t n0 = mesh.nodelist(i,0) ;
Index_t n1 = mesh.nodelist(i,1) ;
Index_t n2 = mesh.nodelist(i,2) ;
Index_t n3 = mesh.nodelist(i,3) ;
Index_t n4 = mesh.nodelist(i,4) ;
Index_t n5 = mesh.nodelist(i,5) ;
Index_t n6 = mesh.nodelist(i,6) ;
Index_t n7 = mesh.nodelist(i,7) ;
Real_t x0 = mesh.x(n0) ;
Real_t x1 = mesh.x(n1) ;
Real_t x2 = mesh.x(n2) ;
Real_t x3 = mesh.x(n3) ;
Real_t x4 = mesh.x(n4) ;
Real_t x5 = mesh.x(n5) ;
Real_t x6 = mesh.x(n6) ;
Real_t x7 = mesh.x(n7) ;
Real_t y0 = mesh.y(n0) ;
Real_t y1 = mesh.y(n1) ;
Real_t y2 = mesh.y(n2) ;
Real_t y3 = mesh.y(n3) ;
Real_t y4 = mesh.y(n4) ;
Real_t y5 = mesh.y(n5) ;
Real_t y6 = mesh.y(n6) ;
Real_t y7 = mesh.y(n7) ;
Real_t z0 = mesh.z(n0) ;
Real_t z1 = mesh.z(n1) ;
Real_t z2 = mesh.z(n2) ;
Real_t z3 = mesh.z(n3) ;
Real_t z4 = mesh.z(n4) ;
Real_t z5 = mesh.z(n5) ;
Real_t z6 = mesh.z(n6) ;
Real_t z7 = mesh.z(n7) ;
Real_t xv0 = mesh.xd(n0) ;
Real_t xv1 = mesh.xd(n1) ;
Real_t xv2 = mesh.xd(n2) ;
Real_t xv3 = mesh.xd(n3) ;
Real_t xv4 = mesh.xd(n4) ;
Real_t xv5 = mesh.xd(n5) ;
Real_t xv6 = mesh.xd(n6) ;
Real_t xv7 = mesh.xd(n7) ;
Real_t yv0 = mesh.yd(n0) ;
Real_t yv1 = mesh.yd(n1) ;
Real_t yv2 = mesh.yd(n2) ;
Real_t yv3 = mesh.yd(n3) ;
Real_t yv4 = mesh.yd(n4) ;
Real_t yv5 = mesh.yd(n5) ;
Real_t yv6 = mesh.yd(n6) ;
Real_t yv7 = mesh.yd(n7) ;
Real_t zv0 = mesh.zd(n0) ;
Real_t zv1 = mesh.zd(n1) ;
Real_t zv2 = mesh.zd(n2) ;
Real_t zv3 = mesh.zd(n3) ;
Real_t zv4 = mesh.zd(n4) ;
Real_t zv5 = mesh.zd(n5) ;
Real_t zv6 = mesh.zd(n6) ;
Real_t zv7 = mesh.zd(n7) ;
Real_t vol = mesh.volo(i)*mesh.vnew(i) ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x0,x1,x5,x4) - SUM4(x3,x2,x6,x7)) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y0,y1,y5,y4) - SUM4(y3,y2,y6,y7)) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z0,z1,z5,z4) - SUM4(z3,z2,z6,z7)) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x1,x2,x6,x5) - SUM4(x0,x3,x7,x4)) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y1,y2,y6,y5) - SUM4(y0,y3,y7,y4)) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z1,z2,z6,z5) - SUM4(z0,z3,z7,z4)) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x4,x5,x6,x7) - SUM4(x0,x1,x2,x3)) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y4,y5,y6,y7) - SUM4(y0,y1,y2,y3)) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z4,z5,z6,z7) - SUM4(z0,z1,z2,z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
mesh.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv4,xv5,xv6,xv7) - SUM4(xv0,xv1,xv2,xv3)) ;
dyv = Real_t(0.25)*(SUM4(yv4,yv5,yv6,yv7) - SUM4(yv0,yv1,yv2,yv3)) ;
dzv = Real_t(0.25)*(SUM4(zv4,zv5,zv6,zv7) - SUM4(zv0,zv1,zv2,zv3)) ;
mesh.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
mesh.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv1,xv2,xv6,xv5) - SUM4(xv0,xv3,xv7,xv4)) ;
dyv = Real_t(0.25)*(SUM4(yv1,yv2,yv6,yv5) - SUM4(yv0,yv3,yv7,yv4)) ;
dzv = Real_t(0.25)*(SUM4(zv1,zv2,zv6,zv5) - SUM4(zv0,zv3,zv7,zv4)) ;
mesh.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
mesh.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv0,xv1,xv5,xv4) - SUM4(xv3,xv2,xv6,xv7)) ;
dyv = Real_t(-0.25)*(SUM4(yv0,yv1,yv5,yv4) - SUM4(yv3,yv2,yv6,yv7)) ;
dzv = Real_t(-0.25)*(SUM4(zv0,zv1,zv5,zv4) - SUM4(zv3,zv2,zv6,zv7)) ;
mesh.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;
}
#undef SUM4
}
static inline
void CalcMonotonicQGradientsForElems(int useCPU)
{
if (useCPU) {
FC(nodelist); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(volo); FC(vnew);
CalcMonotonicQGradientsForElems_cpu();
SG(delx_zeta); SG(delv_zeta); SG(delx_xi); SG(delv_xi); SG(delx_eta); SG(delv_eta);
}
else {
FG(nodelist); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(volo); FG(vnew);
CalcMonotonicQGradientsForElems_gpu();
SC(delx_zeta); SC(delv_zeta); SC(delx_xi); SC(delv_xi); SC(delx_eta); SC(delv_eta);
}
}
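// Monotonic artificial viscosity: for each element, limit the velocity-difference
// slopes (phixi/phieta/phizeta) against the neighbouring elements, then form the
// linear (ql) and quadratic (qq) q terms for elements under compression (vdov <= 0).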
#ifdef DP_PROFILING_KERNEL5
__global__
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t *matElemlist,Index_t *elemBC,
Index_t *lxim,Index_t *lxip,
Index_t *letam,Index_t *letap,
Index_t *lzetam,Index_t *lzetap,
Real_t *delv_xi,Real_t *delv_eta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delx_eta,Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq,Real_t *ql,
int its
)
#else
__global__
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t *matElemlist,Index_t *elemBC,
Index_t *lxim,Index_t *lxip,
Index_t *letam,Index_t *letap,
Index_t *lzetam,Index_t *lzetap,
Real_t *delv_xi,Real_t *delv_eta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delx_eta,Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq,Real_t *ql
)
#endif
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
#ifdef AFTER_KERNEL5
Index_t i = tex1Dfetch(tex_matElemlist,ielem);
Int_t bcMask = tex1Dfetch(tex_elemBC,i) ;
#else
Index_t i = matElemlist[ielem];
Int_t bcMask = elemBC[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//matElemlist
//number of array ele is edgeElems^3 = 45^3=91125
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ielem);
//elemBC
//number of array ele is edgeElems^3 = 45^3 = 91125
//The data type is Int_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, matElemlist[ielem]);
}
#endif
Real_t delvm, delvp ;
/* phixi */
#ifdef AFTER_KERNEL5_2
Real_t norm = Real_t(1.) / ( tex1Dfetch(tex_delv_xi,i) + ptiny ) ;
#else
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3 = 45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, matElemlist[ielem]);
}
#endif
switch (bcMask & XI_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_xi[tex1Dfetch(tex_lxim,i)];
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_xi,lxim[i]);
#else
case 0: delvm = delv_xi[lxim[i]];
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lxim[i]);
//lxim
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case XI_M_SYMM: delvm = tex1Dfetch(tex_delv_xi,i);
#else
case XI_M_SYMM: delvm = delv_xi[i];
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_xi[tex1Dfetch(tex_lxip,i)] ;
#else
case 0: delvp = delv_xi[lxip[i]] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lxip[i]);
//lxip
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case XI_P_SYMM: delvp = tex1Dfetch(tex_delv_xi,i) ;
#else
case XI_P_SYMM: delvp = delv_xi[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
#ifdef AFTER_KERNEL5_2
norm = Real_t(1.) / ( tex1Dfetch(tex_delv_eta,i) + ptiny ) ;
#else
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
switch (bcMask & ETA_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_eta[tex1Dfetch(tex_letam,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_eta,letam[i]) ;
#else
case 0: delvm = delv_eta[letam[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, letam[i]);
//letam
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e, int)
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("6 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ETA_M_SYMM: delvm = tex1Dfetch(tex_delv_eta,i) ;
#else
case ETA_M_SYMM: delvm = delv_eta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_eta[tex1Dfetch(tex_letap,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvp = tex1Dfetch(tex_delv_eta,letap[i]) ;
#else
case 0: delvp = delv_eta[letap[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, letap[i]);
//letap
//number of array ele is edgeElems^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("7 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ETA_P_SYMM: delvp = tex1Dfetch(tex_delv_eta,i) ;
#else
case ETA_P_SYMM: delvp = delv_eta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
#ifdef AFTER_KERNEL5_2
norm = Real_t(1.) / ( tex1Dfetch(tex_delv_zeta,i) + ptiny ) ;
#else
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
switch (bcMask & ZETA_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_zeta[tex1Dfetch(tex_lzetam,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_zeta,lzetam[i]) ;
#else
case 0: delvm = delv_zeta[lzetam[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lzetam[i]);
//lzetam
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("9 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ZETA_M_SYMM: delvm = tex1Dfetch(tex_delv_zeta,i) ;
#else
case ZETA_M_SYMM: delvm = delv_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_zeta[tex1Dfetch(tex_lzetap,i)];
#else
#ifdef AFTER_KERNEL5_2
case 0: delvp =tex1Dfetch(tex_delv_zeta,lzetap[i]);
#else
case 0: delvp = delv_zeta[lzetap[i]];
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lzetap[i]);
//lzetap
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("10 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ZETA_P_SYMM: delvp = tex1Dfetch(tex_delv_zeta,i) ;
#else
case ZETA_P_SYMM: delvp = delv_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//vdov
//number of array ele is edgeElems^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
#ifdef AFTER_KERNEL5_2
Real_t delvxxi = tex1Dfetch(tex_delv_xi,i) * delx_xi[i] ;
Real_t delvxeta = tex1Dfetch(tex_delv_eta,i) *delx_eta[i] ;
Real_t delvxzeta = tex1Dfetch(tex_delv_zeta,i)*delx_zeta[i] ;
#else
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
//Dong: since this is within a control flow, I am not sure if we should
//profile delv_xi/eta/zeta here. Also, the same memory access patterns
//to these three arrays have been seen before.
#endif
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
#ifdef DP_PROFILING_KERNEL5
//Dong: we should profile volo, vnew and elemMass here even if it is
//in a control flow, because they are referenced only once
if(blockIdx.x==0 && blockIdx.y==0)
{
//elemMass
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//volo
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("13 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//vnew
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("14 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//qq
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("15 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//ql
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("16 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
static inline
void CalcMonotonicQRegionForElems_gpu(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength )
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(elength,dimBlock.x),1,1);
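// Note: the texture binds below assume Real_t is float; the sizes are given as
// numNode()/numElem() element counts even though several of these arrays are
// indexed per element in the kernel.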
#ifdef AFTER_KERNEL5
hipBindTexture(0,tex_matElemlist,meshGPU.m_matElemlist,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_elemBC,meshGPU.m_elemBC,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_lxim,meshGPU.m_lxim,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_lxip,meshGPU.m_lxip,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_letam,meshGPU.m_letam,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_letap,meshGPU.m_letap,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_lzetam,meshGPU.m_lzetam,mesh.numNode()*sizeof(int));
hipBindTexture(0,tex_lzetap,meshGPU.m_lzetap,mesh.numNode()*sizeof(int));
#endif
#ifdef AFTER_KERNEL5_2
hipBindTexture(0,tex_delv_xi,meshGPU.m_delv_xi,mesh.numElem()*sizeof(int));
hipBindTexture(0,tex_delv_eta,meshGPU.m_delv_eta,mesh.numElem()*sizeof(int));
hipBindTexture(0,tex_delv_zeta,meshGPU.m_delv_zeta,mesh.numElem()*sizeof(int));
#endif
#ifdef DP_PROFILING_KERNEL5
printf("Kernel5: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel5: numNode=%d, numElem=%d\n", mesh.numNode(), mesh.numElem());
hipLaunchKernelGGL(( CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
meshGPU.m_matElemlist,meshGPU.m_elemBC,
meshGPU.m_lxim,meshGPU.m_lxip,
meshGPU.m_letam,meshGPU.m_letap,
meshGPU.m_lzetam,meshGPU.m_lzetap,
meshGPU.m_delv_xi,meshGPU.m_delv_eta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delx_eta,meshGPU.m_delx_zeta,
meshGPU.m_vdov,meshGPU.m_elemMass,meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_qq,meshGPU.m_ql, its);
cudaPrintfDisplay(stdout, false);
#else
hipEvent_t k5_start, k5_stop;
hipEventCreate(&k5_start);
hipEventCreate(&k5_stop);
hipEventRecord(k5_start,0);
hipLaunchKernelGGL(( CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
meshGPU.m_matElemlist,meshGPU.m_elemBC,
meshGPU.m_lxim,meshGPU.m_lxip,
meshGPU.m_letam,meshGPU.m_letap,
meshGPU.m_lzetam,meshGPU.m_lzetap,
meshGPU.m_delv_xi,meshGPU.m_delv_eta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delx_eta,meshGPU.m_delx_zeta,
meshGPU.m_vdov,meshGPU.m_elemMass,meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_qq,meshGPU.m_ql);
hipDeviceSynchronize();
hipEventRecord(k5_stop, 0);
hipEventSynchronize(k5_stop);
float k5_time=0.0;
hipEventElapsedTime(&k5_time, k5_start, k5_stop);
printf("Kernel 5 time = %f\n",k5_time);
k5+=k5_time;
#endif
CUDA_DEBUGSYNC;
}
static inline
void CalcMonotonicQRegionForElems_cpu(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength )
{
for ( Index_t ielem = 0 ; ielem < elength; ++ielem ) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = mesh.matElemlist(ielem);
Int_t bcMask = mesh.elemBC(i) ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( mesh.delv_xi(i) + ptiny ) ;
switch (bcMask & XI_M) {
case 0: delvm = mesh.delv_xi(mesh.lxim(i)) ; break ;
case XI_M_SYMM: delvm = mesh.delv_xi(i) ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case 0: delvp = mesh.delv_xi(mesh.lxip(i)) ; break ;
case XI_P_SYMM: delvp = mesh.delv_xi(i) ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( mesh.delv_eta(i) + ptiny ) ;
switch (bcMask & ETA_M) {
case 0: delvm = mesh.delv_eta(mesh.letam(i)) ; break ;
case ETA_M_SYMM: delvm = mesh.delv_eta(i) ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case 0: delvp = mesh.delv_eta(mesh.letap(i)) ; break ;
case ETA_P_SYMM: delvp = mesh.delv_eta(i) ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( mesh.delv_zeta(i) + ptiny ) ;
switch (bcMask & ZETA_M) {
case 0: delvm = mesh.delv_zeta(mesh.lzetam(i)) ; break ;
case ZETA_M_SYMM: delvm = mesh.delv_zeta(i) ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case 0: delvp = mesh.delv_zeta(mesh.lzetap(i)) ; break ;
case ZETA_P_SYMM: delvp = mesh.delv_zeta(i) ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( mesh.vdov(i) > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = mesh.delv_xi(i) * mesh.delx_xi(i) ;
Real_t delvxeta = mesh.delv_eta(i) * mesh.delx_eta(i) ;
Real_t delvxzeta = mesh.delv_zeta(i) * mesh.delx_zeta(i) ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = mesh.elemMass(i) / (mesh.volo(i) * mesh.vnew(i)) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
mesh.qq(i) = qquad ;
mesh.ql(i) = qlin ;
}
}
static inline
void CalcMonotonicQRegionForElems(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(elemBC); FC(lxim); FC(lxip); FC(letam); FC(letap); FC(lzetam); FC(lzetap);
FC(delv_xi); FC(delv_eta); FC(delv_zeta); FC(delx_xi); FC(delx_eta); FC(delx_zeta);
FC(vdov); FC(elemMass); FC(volo); FC(vnew);
CalcMonotonicQRegionForElems_cpu(qlc_monoq,qqc_monoq,
monoq_limiter_mult,monoq_max_slope,ptiny,
elength);
SG(qq); SG(ql);
}
else {
FG(matElemlist); FG(elemBC); FG(lxim); FG(lxip); FG(letam); FG(letap); FG(lzetam); FG(lzetap);
FG(delv_xi); FG(delv_eta); FG(delv_zeta); FG(delx_xi); FG(delx_eta); FG(delx_zeta);
FG(vdov); FG(elemMass); FG(volo); FG(vnew);
CalcMonotonicQRegionForElems_gpu(qlc_monoq,qqc_monoq,
monoq_limiter_mult,monoq_max_slope,ptiny,
elength);
SC(qq); SC(ql);
}
}
static inline
void CalcMonotonicQForElems(int useCPU)
{
//
// initialize parameters
//
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = mesh.monoq_max_slope() ;
Real_t monoq_limiter_mult = mesh.monoq_limiter_mult() ;
//
// calculate the monotonic q for pure regions
//
Index_t elength = mesh.numElem() ;
if (elength > 0) {
Real_t qlc_monoq = mesh.qlc_monoq();
Real_t qqc_monoq = mesh.qqc_monoq();
CalcMonotonicQRegionForElems(// parameters
qlc_monoq,
qqc_monoq,
monoq_limiter_mult,
monoq_max_slope,
ptiny,
// the elemset length
elength,
useCPU);
}
}
static inline
void CalcQForElems(int useCPU)
{
Real_t qstop = mesh.qstop() ;
Index_t numElem = mesh.numElem() ;
//
// MONOTONIC Q option
//
/* Calculate velocity gradients */
CalcMonotonicQGradientsForElems(useCPU) ;
/* Transfer velocity gradients in the first order elements */
/* problem->commElements->Transfer(CommElements::monoQ) ; */
CalcMonotonicQForElems(useCPU) ;
/* Don't allow excessive artificial viscosity */
/*
if (numElem != 0) {
Index_t idx = -1;
for (Index_t i=0; i<numElem; ++i) {
if ( mesh.q(i) > qstop ) {
idx = i ;
break ;
}
}
if(idx >= 0) {
exit(QStopError) ;
}
}
*/
}
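/* Pressure update: one thread per element computes the EOS coefficients
 * bvc = c1s*(compression+1) and pbvc = c1s (reused later in the energy and
 * sound-speed updates), sets p_new = bvc*e_old, then applies the p_cut
 * tolerance, the eosvmax clamp and the pmin floor. */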
__global__
void CalcPressureForElems_kernel(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length, Real_t c1s)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[i] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}
}
static inline
void CalcPressureForElems_gpu(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcPressureForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
p_new,bvc,pbvc,e_old,compression,vnewc,pmin,p_cut,eosvmax,length,c1s);
CUDA_DEBUGSYNC;
}
static inline
void CalcPressureForElems_cpu(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
for (Index_t i = 0; i < length ; ++i) {
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}
for (Index_t i = 0 ; i < length ; ++i){
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[i] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}
}
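/* The GPU energy update is split into four kernels (Part1-Part4) so that
 * CalcPressureForElems_gpu can run between the stages; together they mirror
 * the per-element loops of CalcEnergyForElems_cpu below. */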
__global__
void CalcEnergyForElemsPart1_kernel(
Index_t length,Real_t emin,
Real_t *e_old,Real_t *delvc,Real_t *p_old,Real_t *q_old,Real_t *work,
Real_t *e_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart2_kernel(
Index_t length,Real_t rho0,Real_t e_cut,Real_t emin,
Real_t *compHalfStep,Real_t *delvc,Real_t *pbvc,Real_t *bvc,
Real_t *pHalfStep,Real_t *ql,Real_t *qq,Real_t *p_old,Real_t *q_old,Real_t *work,
Real_t *e_new,
Real_t *q_new
)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc =Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart3_kernel(
Index_t length,Real_t rho0,Real_t sixth,Real_t e_cut,Real_t emin,
Real_t *pbvc,Real_t *vnewc,Real_t *bvc,Real_t *p_new,Real_t *ql,Real_t *qq,
Real_t *p_old,Real_t *q_old,Real_t *pHalfStep,Real_t *q_new,Real_t *delvc,
Real_t *e_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart4_kernel(
Index_t length,Real_t rho0,Real_t q_cut,
Real_t *delvc,Real_t *pbvc,Real_t *e_new,Real_t *vnewc,Real_t *bvc,
Real_t *p_new,Real_t *ql,Real_t *qq,
Real_t *q_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}
}
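/* GPU driver for the energy update: allocates a device scratch array for the
 * half-step pressure, alternates the Part1-Part4 kernels with pressure
 * solves, and frees the scratch array when done. */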
static inline
void CalcEnergyForElems_gpu(Real_t* p_new, Real_t* e_new, Real_t* q_new,
Real_t* bvc, Real_t* pbvc,
Real_t* p_old, Real_t* e_old, Real_t* q_old,
Real_t* compression, Real_t* compHalfStep,
Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t* qq, Real_t* ql,
Real_t rho0,
Real_t eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t *pHalfStep;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
CUDA( hipMalloc(&pHalfStep,sizeof(Real_t)*length) );
hipLaunchKernelGGL(( CalcEnergyForElemsPart1_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,emin,e_old,delvc,p_old,q_old,work,e_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length);
hipLaunchKernelGGL(( CalcEnergyForElemsPart2_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,rho0,e_cut,emin,
compHalfStep,delvc,pbvc,bvc,pHalfStep,ql,qq,p_old,q_old,work,
e_new,
q_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
hipLaunchKernelGGL(( CalcEnergyForElemsPart3_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,rho0,sixth,e_cut,emin,
pbvc,vnewc,bvc,p_new,ql,qq,
p_old,q_old,pHalfStep,q_new,delvc,
e_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
hipLaunchKernelGGL(( CalcEnergyForElemsPart4_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,rho0,q_cut,
delvc,pbvc,e_new,vnewc,bvc,
p_new,ql,qq,
q_new);
CUDA_DEBUGSYNC;
CUDA( hipFree(pHalfStep) );
return ;
}
static inline
void CalcEnergyForElems_cpu(Real_t* p_new, Real_t* e_new, Real_t* q_new,
Real_t* bvc, Real_t* pbvc,
Real_t* p_old, Real_t* e_old, Real_t* q_old,
Real_t* compression, Real_t* compHalfStep,
Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t* qq, Real_t* ql,
Real_t rho0,
Real_t eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t *pHalfStep = Allocate<Real_t>(length) ;
for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc =Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}
for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i){
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i){
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}
Release(&pHalfStep) ;
return ;
}
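/* Sound speed per material element: compute (pbvc*e + v*v*bvc*p)/rho0, floor
 * it at 1.111111e-36, and store its square root in mesh.ss through the
 * matElemlist indirection. */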
__global__
void CalcSoundSpeedForElems_kernel(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz,Index_t *matElemlist,
Real_t *ss)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<nz) {
Index_t iz = matElemlist[i];
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(1.111111e-36)) {
ssTmp = Real_t(1.111111e-36);
}
ss[iz] = SQRT(ssTmp);
}
}
static inline
void CalcSoundSpeedForElems_gpu(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz)
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(nz,dimBlock.x),1,1);
hipLaunchKernelGGL(( CalcSoundSpeedForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
vnewc,rho0,enewc,pnewc,pbvc,bvc,ss4o3,nz,meshGPU.m_matElemlist,meshGPU.m_ss);
CUDA_DEBUGSYNC;
}
static inline
void CalcSoundSpeedForElems_cpu(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz)
{
for (Index_t i = 0; i < nz ; ++i) {
Index_t iz = mesh.matElemlist(i);
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(1.111111e-36)) {
ssTmp = Real_t(1.111111e-36);
}
mesh.ss(iz) = SQRT(ssTmp);
}
}
__global__
void EvalEOSForElemsPart1_kernel(
Index_t length,Real_t eosvmin,Real_t eosvmax,
Index_t *matElemlist,
Real_t *e,Real_t *delv,Real_t *p,Real_t *q,Real_t *qq,Real_t *ql,
Real_t *vnewc,
Real_t *e_old,Real_t *delvc,Real_t *p_old,Real_t *q_old,
Real_t *compression,Real_t *compHalfStep,
Real_t *qq_old,Real_t *ql_old,Real_t *work)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = matElemlist[i];
e_old[i] = e[zidx];
delvc[i] = delv[zidx];
p_old[i] = p[zidx];
q_old[i] = q[zidx];
Real_t vchalf ;
compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
vchalf = vnewc[i] - delvc[i] * Real_t(.5);
compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
compHalfStep[i] = compression[i] ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */
p_old[i] = Real_t(0.) ;
compression[i] = Real_t(0.) ;
compHalfStep[i] = Real_t(0.) ;
}
}
qq_old[i] = qq[zidx] ;
ql_old[i] = ql[zidx] ;
work[i] = Real_t(0.) ;
}
}
__global__
void EvalEOSForElemsPart2_kernel(
Index_t length,
Index_t *matElemlist,Real_t *p_new,Real_t *e_new,Real_t *q_new,
Real_t *p,Real_t *e,Real_t *q)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = matElemlist[i] ;
p[zidx] = p_new[i];
e[zidx] = e_new[i];
q[zidx] = q_new[i];
}
}
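/* EvalEOSForElems_gpu: Part1 gathers e, delv, p, q, qq and ql for the
 * material element set into compact scratch arrays (indexed via matElemlist)
 * and computes the compression terms; CalcEnergyForElems_gpu then updates
 * energy, pressure and q; Part2 scatters p, e and q back to the mesh, and
 * the element sound speeds are updated last. */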
static inline
void EvalEOSForElems_gpu(Real_t *vnewc, Index_t length)
{
Real_t e_cut = mesh.e_cut();
Real_t p_cut = mesh.p_cut();
Real_t ss4o3 = mesh.ss4o3();
Real_t q_cut = mesh.q_cut();
Real_t eosvmax = mesh.eosvmax() ;
Real_t eosvmin = mesh.eosvmin() ;
Real_t pmin = mesh.pmin() ;
Real_t emin = mesh.emin() ;
Real_t rho0 = mesh.refdens() ;
Real_t *e_old,*delvc,*p_old,*q_old;
Real_t *compression,*compHalfStep;
Real_t *qq,*ql,*work,*p_new,*e_new,*q_new,*bvc,*pbvc;
CUDA( hipMalloc(&e_old,sizeof(Real_t)*length) );
CUDA( hipMalloc(&delvc,sizeof(Real_t)*length) );
CUDA( hipMalloc(&p_old,sizeof(Real_t)*length) );
CUDA( hipMalloc(&q_old,sizeof(Real_t)*length) );
CUDA( hipMalloc(&compression,sizeof(Real_t)*length) );
CUDA( hipMalloc(&compHalfStep,sizeof(Real_t)*length) );
CUDA( hipMalloc(&qq,sizeof(Real_t)*length) );
CUDA( hipMalloc(&ql,sizeof(Real_t)*length) );
CUDA( hipMalloc(&work,sizeof(Real_t)*length) );
CUDA( hipMalloc(&p_new,sizeof(Real_t)*length) );
CUDA( hipMalloc(&e_new,sizeof(Real_t)*length) );
CUDA( hipMalloc(&q_new,sizeof(Real_t)*length) );
CUDA( hipMalloc(&bvc,sizeof(Real_t)*length) );
CUDA( hipMalloc(&pbvc,sizeof(Real_t)*length) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
hipLaunchKernelGGL(( EvalEOSForElemsPart1_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,eosvmin,eosvmax,
meshGPU.m_matElemlist,
meshGPU.m_e,meshGPU.m_delv,meshGPU.m_p,meshGPU.m_q,meshGPU.m_qq,meshGPU.m_ql,
vnewc,
e_old,delvc,p_old,q_old,
compression,compHalfStep,qq,ql,work);
CUDA_DEBUGSYNC;
CalcEnergyForElems_gpu(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq, ql, rho0, eosvmax, length);
hipLaunchKernelGGL(( EvalEOSForElemsPart2_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,
meshGPU.m_matElemlist,p_new,e_new,q_new,
meshGPU.m_p,meshGPU.m_e,meshGPU.m_q);
CUDA_DEBUGSYNC;
CalcSoundSpeedForElems_gpu(vnewc, rho0, e_new, p_new,
pbvc, bvc, ss4o3, length) ;
CUDA( hipFree(pbvc) );
CUDA( hipFree(bvc) );
CUDA( hipFree(q_new) );
CUDA( hipFree(e_new) );
CUDA( hipFree(p_new) );
CUDA( hipFree(work) );
CUDA( hipFree(ql) );
CUDA( hipFree(qq) );
CUDA( hipFree(compHalfStep) );
CUDA( hipFree(compression) );
CUDA( hipFree(q_old) );
CUDA( hipFree(p_old) );
CUDA( hipFree(delvc) );
CUDA( hipFree(e_old) );
}
static inline
void EvalEOSForElems_cpu(Real_t *vnewc, Index_t length)
{
Real_t e_cut = mesh.e_cut();
Real_t p_cut = mesh.p_cut();
Real_t ss4o3 = mesh.ss4o3();
Real_t q_cut = mesh.q_cut();
Real_t eosvmax = mesh.eosvmax() ;
Real_t eosvmin = mesh.eosvmin() ;
Real_t pmin = mesh.pmin() ;
Real_t emin = mesh.emin() ;
Real_t rho0 = mesh.refdens() ;
Real_t *e_old = Allocate<Real_t>(length) ;
Real_t *delvc = Allocate<Real_t>(length) ;
Real_t *p_old = Allocate<Real_t>(length) ;
Real_t *q_old = Allocate<Real_t>(length) ;
Real_t *compression = Allocate<Real_t>(length) ;
Real_t *compHalfStep = Allocate<Real_t>(length) ;
Real_t *qq = Allocate<Real_t>(length) ;
Real_t *ql = Allocate<Real_t>(length) ;
Real_t *work = Allocate<Real_t>(length) ;
Real_t *p_new = Allocate<Real_t>(length) ;
Real_t *e_new = Allocate<Real_t>(length) ;
Real_t *q_new = Allocate<Real_t>(length) ;
Real_t *bvc = Allocate<Real_t>(length) ;
Real_t *pbvc = Allocate<Real_t>(length) ;
/* compress data, minimal set */
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
e_old[i] = mesh.e(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
delvc[i] = mesh.delv(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
p_old[i] = mesh.p(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
q_old[i] = mesh.q(zidx) ;
}
for (Index_t i = 0; i < length ; ++i) {
Real_t vchalf ;
compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
vchalf = vnewc[i] - delvc[i] * Real_t(.5);
compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);
}
/* Check for v > eosvmax or v < eosvmin */
if ( eosvmin != Real_t(0.) ) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
compHalfStep[i] = compression[i] ;
}
}
}
if ( eosvmax != Real_t(0.) ) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */
p_old[i] = Real_t(0.) ;
compression[i] = Real_t(0.) ;
compHalfStep[i] = Real_t(0.) ;
}
}
}
for (Index_t i = 0 ; i < length ; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
qq[i] = mesh.qq(zidx) ;
ql[i] = mesh.ql(zidx) ;
work[i] = Real_t(0.) ;
}
CalcEnergyForElems_cpu(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq, ql, rho0, eosvmax, length);
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.p(zidx) = p_new[i] ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.e(zidx) = e_new[i] ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.q(zidx) = q_new[i] ;
}
CalcSoundSpeedForElems_cpu(vnewc, rho0, e_new, p_new,
pbvc, bvc, ss4o3, length) ;
Release(&pbvc) ;
Release(&bvc) ;
Release(&q_new) ;
Release(&e_new) ;
Release(&p_new) ;
Release(&work) ;
Release(&ql) ;
Release(&qq) ;
Release(&compHalfStep) ;
Release(&compression) ;
Release(&q_old) ;
Release(&p_old) ;
Release(&delvc) ;
Release(&e_old) ;
}
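/* ApplyMaterialProperties: gather vnew for the material element set and clamp
 * it to [eosvmin, eosvmax] (when those limits are non-zero) before handing
 * the compacted volumes to EvalEOSForElems. */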
__global__
void ApplyMaterialPropertiesForElemsPart1_kernel(
Index_t length,Real_t eosvmin,Real_t eosvmax,
Index_t *matElemlist,Real_t *vnew,
Real_t *vnewc)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zn = matElemlist[i] ;
vnewc[i] = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc[i] < eosvmin)
vnewc[i] = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc[i] > eosvmax)
vnewc[i] = eosvmax ;
}
}
}
static inline
void ApplyMaterialPropertiesForElems_gpu()
{
Index_t length = mesh.numElem() ;
if (length != 0) {
/* Expose all of the variables needed for material evaluation */
Real_t eosvmin = mesh.eosvmin() ;
Real_t eosvmax = mesh.eosvmax() ;
Real_t *vnewc;
CUDA( hipMalloc(&vnewc,sizeof(Real_t)*length) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
hipLaunchKernelGGL(( ApplyMaterialPropertiesForElemsPart1_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,eosvmin,eosvmax,
meshGPU.m_matElemlist,meshGPU.m_vnew,
vnewc);
CUDA_DEBUGSYNC;
/*
for (Index_t i=0; i<length; ++i) {
Index_t zn = mesh.matElemlist(i) ;
Real_t vc = mesh.v(zn) ;
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
exit(VolumeError) ;
}
}
*/
EvalEOSForElems_gpu(vnewc, length);
CUDA( hipFree(vnewc) );
}
}
static inline
void ApplyMaterialPropertiesForElems_cpu()
{
Index_t length = mesh.numElem() ;
if (length != 0) {
/* Expose all of the variables needed for material evaluation */
Real_t eosvmin = mesh.eosvmin() ;
Real_t eosvmax = mesh.eosvmax() ;
Real_t *vnewc = Allocate<Real_t>(length) ;
for (Index_t i=0 ; i<length ; ++i) {
Index_t zn = mesh.matElemlist(i) ;
vnewc[i] = mesh.vnew(zn) ;
}
if (eosvmin != Real_t(0.)) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] < eosvmin)
vnewc[i] = eosvmin ;
}
}
if (eosvmax != Real_t(0.)) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] > eosvmax)
vnewc[i] = eosvmax ;
}
}
for (Index_t i=0; i<length; ++i) {
Index_t zn = mesh.matElemlist(i) ;
Real_t vc = mesh.v(zn) ;
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
exit(VolumeError) ;
}
}
EvalEOSForElems_cpu(vnewc, length);
Release(&vnewc) ;
}
}
static inline
void ApplyMaterialPropertiesForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(vnew); FC(v); FC(e); FC(delv); FC(p); FC(q); FC(qq); FC(ql);
ApplyMaterialPropertiesForElems_cpu();
SG(p); SG(e); SG(q); SG(ss);
}
else {
FG(matElemlist); FG(vnew); FG(v); FG(e); FG(delv); FG(p); FG(q); FG(qq); FG(ql);
ApplyMaterialPropertiesForElems_gpu();
SC(p); SC(e); SC(q); SC(ss);
}
}
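/* Volume update: copy vnew into v, snapping any value within v_cut of 1.0
 * back to exactly 1.0. */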
__global__
void UpdateVolumesForElems_kernel(Index_t numElem,Real_t v_cut,
Real_t *vnew,
Real_t *v)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem) {
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
}
static inline
void UpdateVolumesForElems_gpu()
{
Index_t numElem = mesh.numElem();
if (numElem != 0) {
Real_t v_cut = mesh.v_cut();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
hipLaunchKernelGGL(( UpdateVolumesForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
numElem,v_cut,meshGPU.m_vnew,meshGPU.m_v);
}
}
static inline
void UpdateVolumesForElems_cpu()
{
Index_t numElem = mesh.numElem();
if (numElem != 0) {
Real_t v_cut = mesh.v_cut();
for(Index_t i=0 ; i<numElem ; ++i) {
Real_t tmpV ;
tmpV = mesh.vnew(i) ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
mesh.v(i) = tmpV ;
}
}
return ;
}
static inline
void UpdateVolumesForElems(int useCPU)
{
if (useCPU) {
FC(vnew);
UpdateVolumesForElems_cpu();
SG(v);
}
else {
FG(vnew);
UpdateVolumesForElems_gpu();
SC(v);
}
}
static inline
void LagrangeElements(int useCPU)
{
const Real_t deltatime = mesh.deltatime() ;
CalcLagrangeElements(deltatime, useCPU) ;
/* Calculate Q. (Monotonic q option requires communication) */
CalcQForElems(useCPU) ;
ApplyMaterialPropertiesForElems(useCPU) ;
UpdateVolumesForElems(useCPU) ;
}
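/* Time-constraint kernels: each block computes a partial minimum in shared
 * memory with reduceMin and writes it to a per-block array; the host finishes
 * the minimum over the per-block results after copying them back. */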
__global__
void CalcCourantConstraintForElems_kernel(
Index_t length,Real_t qqc2,
Index_t *matElemlist,Real_t *ss,Real_t *vdov,Real_t *arealg,
Real_t *mindtcourant)
{
__shared__ Real_t minArray[BLOCKSIZE];
int i=blockDim.x*blockIdx.x + threadIdx.x;
Real_t dtcourant = Real_t(1.0e+20) ;
if (i<length) {
Index_t indx = matElemlist[i] ;
Real_t dtf = ss[indx] * ss[indx] ;
if ( vdov[indx] < Real_t(0.) ) {
dtf = dtf
+ qqc2 * arealg[indx] * arealg[indx]
* vdov[indx] * vdov[indx] ;
}
dtf = SQRT(dtf) ;
dtf = arealg[indx] / dtf ;
/* determine minimum timestep with its corresponding elem */
if (vdov[indx] != Real_t(0.)) {
if ( dtf < dtcourant ) {
dtcourant = dtf ;
}
}
}
minArray[threadIdx.x]=dtcourant;
reduceMin<Real_t,BLOCKSIZE>(minArray,threadIdx.x);
if (threadIdx.x==0)
mindtcourant[blockIdx.x]=minArray[0];
}
static inline
void CalcCourantConstraintForElems_gpu()
{
Real_t qqc = mesh.qqc();
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Index_t length = mesh.numElem() ;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
Real_t *dev_mindtcourant;
CUDA( hipMalloc(&dev_mindtcourant,sizeof(Real_t)*dimGrid.x) );
hipLaunchKernelGGL(( CalcCourantConstraintForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,qqc2,
meshGPU.m_matElemlist,meshGPU.m_ss,meshGPU.m_vdov,meshGPU.m_arealg,
dev_mindtcourant);
CUDA_DEBUGSYNC;
Real_t *mindtcourant = (Real_t*)malloc(sizeof(Real_t)*dimGrid.x);
CUDA( hipMemcpy(mindtcourant,dev_mindtcourant,sizeof(Real_t)*dimGrid.x,hipMemcpyDeviceToHost) );
CUDA( hipFree(dev_mindtcourant) );
// finish the MIN computation over the thread blocks
Real_t dtcourant;
dtcourant=mindtcourant[0];
for (int i=1; i<dimGrid.x; i++) {
MINEQ(dtcourant,mindtcourant[i]);
}
free(mindtcourant);
if (dtcourant < Real_t(1.0e+20))
mesh.dtcourant() = dtcourant ;
}
static inline
void CalcCourantConstraintForElems_cpu()
{
Real_t dtcourant = Real_t(1.0e+20) ;
Index_t courant_elem = -1 ;
Real_t qqc = mesh.qqc() ;
Index_t length = mesh.numElem() ;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
for (Index_t i = 0 ; i < length ; ++i) {
Index_t indx = mesh.matElemlist(i) ;
Real_t dtf = mesh.ss(indx) * mesh.ss(indx) ;
if ( mesh.vdov(indx) < Real_t(0.) ) {
dtf = dtf
+ qqc2 * mesh.arealg(indx) * mesh.arealg(indx)
* mesh.vdov(indx) * mesh.vdov(indx) ;
}
dtf = SQRT(dtf) ;
dtf = mesh.arealg(indx) / dtf ;
/* determine minimum timestep with its corresponding elem */
if (mesh.vdov(indx) != Real_t(0.)) {
if ( dtf < dtcourant ) {
dtcourant = dtf ;
courant_elem = indx ;
}
}
}
/* Don't try to register a time constraint if none of the elements
* were active */
if (courant_elem != -1) {
mesh.dtcourant() = dtcourant ;
}
return ;
}
static inline
void CalcCourantConstraintForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(ss); FC(vdov); FC(arealg);
CalcCourantConstraintForElems_cpu();
}
else {
FG(matElemlist); FG(ss); FG(vdov); FG(arealg);
CalcCourantConstraintForElems_gpu();
}
}
__global__
void CalcHydroConstraintForElems_kernel(
Index_t length,Real_t dvovmax,
Index_t *matElemlist,Real_t *vdov,
Real_t *mindthydro)
{
__shared__ Real_t minArray[BLOCKSIZE];
int i=blockDim.x*blockIdx.x + threadIdx.x;
Real_t dthydro = Real_t(1.0e+20) ;
if (i<length) {
Index_t indx = matElemlist[i] ;
if (vdov[indx] != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov[indx])+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
}
minArray[threadIdx.x]=dthydro;
reduceMin<Real_t,BLOCKSIZE>(minArray,threadIdx.x);
if (threadIdx.x==0)
mindthydro[blockIdx.x]=minArray[0];
}
static inline
void CalcHydroConstraintForElems_gpu()
{
Real_t dvovmax = mesh.dvovmax() ;
Index_t length = mesh.numElem() ;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
Real_t *dev_mindthydro;
CUDA( hipMalloc(&dev_mindthydro,sizeof(Real_t)*dimGrid.x) );
hipLaunchKernelGGL(( CalcHydroConstraintForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,dvovmax,
meshGPU.m_matElemlist,meshGPU.m_vdov,
dev_mindthydro);
CUDA_DEBUGSYNC;
Real_t *mindthydro = (Real_t*)malloc(sizeof(Real_t)*dimGrid.x);
CUDA( hipMemcpy(mindthydro,dev_mindthydro,sizeof(Real_t)*dimGrid.x,hipMemcpyDeviceToHost) );
CUDA( hipFree(dev_mindthydro) );
// finish the MIN computation over the thread blocks
Real_t dthydro=mindthydro[0];
for (int i=1; i<dimGrid.x; i++) {
MINEQ(dthydro,mindthydro[i]);
}
free(mindthydro);
if (dthydro < Real_t(1.0e+20))
mesh.dthydro() = dthydro ;
}
static inline
void CalcHydroConstraintForElems_cpu()
{
Real_t dthydro = Real_t(1.0e+20) ;
Index_t hydro_elem = -1 ;
Real_t dvovmax = mesh.dvovmax() ;
Index_t length = mesh.numElem() ;
for (Index_t i = 0 ; i < length ; ++i) {
Index_t indx = mesh.matElemlist(i) ;
if (mesh.vdov(indx) != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(mesh.vdov(indx))+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
hydro_elem = indx ;
}
}
}
if (hydro_elem != -1) {
mesh.dthydro() = dthydro ;
}
return ;
}
static inline
void CalcHydroConstraintForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(vdov);
CalcHydroConstraintForElems_cpu();
}
else {
FG(matElemlist); FG(vdov);
CalcHydroConstraintForElems_gpu();
}
}
static inline
void CalcTimeConstraintsForElems(int useCPU) {
/* evaluate time constraint */
CalcCourantConstraintForElems(useCPU) ;
/* check hydro constraint */
CalcHydroConstraintForElems(useCPU) ;
}
static inline
void LagrangeLeapFrog(int useCPU)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(useCPU);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(useCPU);
CalcTimeConstraintsForElems(useCPU);
// LagrangeRelease() ; Creation/destruction of temps may be important to capture
}
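/* Driver: build the uniform Sedov box mesh (edgeElems^3 elements), set the
 * material parameters and the initial energy deposit in element 0, copy the
 * mesh to the GPU, then advance LagrangeLeapFrog on the GPU path (useCPU==0)
 * until mesh.time() reaches stoptime (or for a single iteration when
 * DP_PROFILING is defined). */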
int main(int argc, char *argv[])
{ struct timespec start, stop;
clock_gettime(CLOCK_MONOTONIC,&start);
Index_t edgeElems = atoi(argv[1]) ; //orig
Index_t edgeNodes = edgeElems+1 ;
// Real_t ds = Real_t(1.125)/Real_t(edgeElems) ; /* may accumulate roundoff */
Real_t tx, ty, tz ;
Index_t nidx, zidx ;
Index_t meshElems ;
/* get run options to measure various metrics */
/* ... */
cuda_init();
#ifdef DP_PROFILING
cudaPrintfInit(4096*4096); //enabling 16MB buffer for printf
#endif
/****************************/
/* Initialize Sedov Mesh */
/****************************/
/* construct a uniform box for this processor */
mesh.sizeX() = edgeElems ;
mesh.sizeY() = edgeElems ;
mesh.sizeZ() = edgeElems ;
mesh.numElem() = edgeElems*edgeElems*edgeElems ;
mesh.numNode() = edgeNodes*edgeNodes*edgeNodes ;
meshElems = mesh.numElem() ;
/* allocate field memory */
mesh.AllocateElemPersistent(mesh.numElem()) ;
mesh.AllocateElemTemporary (mesh.numElem()) ;
mesh.AllocateNodalPersistent(mesh.numNode()) ;
mesh.AllocateNodesets(edgeNodes*edgeNodes) ;
/* initialize nodal coordinates */
nidx = 0 ;
tz = Real_t(0.) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
ty = Real_t(0.) ;
for (Index_t row=0; row<edgeNodes; ++row) {
tx = Real_t(0.) ;
for (Index_t col=0; col<edgeNodes; ++col) {
mesh.x(nidx) = tx ;
mesh.y(nidx) = ty ;
mesh.z(nidx) = tz ;
++nidx ;
// tx += ds ; /* may accumulate roundoff... */
tx = Real_t(1.125)*Real_t(col+1)/Real_t(edgeElems) ;
}
// ty += ds ; /* may accumulate roundoff... */
ty = Real_t(1.125)*Real_t(row+1)/Real_t(edgeElems) ;
}
// tz += ds ; /* may accumulate roundoff... */
tz = Real_t(1.125)*Real_t(plane+1)/Real_t(edgeElems) ;
}
/* embed hexahedral elements in nodal point lattice */
nidx = 0 ;
zidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
mesh.nodelist(zidx,0) = nidx ;
mesh.nodelist(zidx,1) = nidx + 1 ;
mesh.nodelist(zidx,2) = nidx + edgeNodes + 1 ;
mesh.nodelist(zidx,3) = nidx + edgeNodes ;
mesh.nodelist(zidx,4) = nidx + edgeNodes*edgeNodes ;
mesh.nodelist(zidx,5) = nidx + edgeNodes*edgeNodes + 1 ;
mesh.nodelist(zidx,6) = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
mesh.nodelist(zidx,7) = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
/* Create a material IndexSet (entire mesh same material for now) */
for (Index_t i=0; i<meshElems; ++i) {
mesh.matElemlist(i) = i ;
}
/* initialize material parameters */
mesh.dtfixed() = Real_t(-1.0e-7) ;
mesh.deltatime() = Real_t(1.0e-7) ;
mesh.deltatimemultlb() = Real_t(1.1) ;
mesh.deltatimemultub() = Real_t(1.2) ;
mesh.stoptime() = Real_t(1.0e-2) ;
mesh.dtcourant() = Real_t(1.0e+20) ;
mesh.dthydro() = Real_t(1.0e+20) ;
mesh.dtmax() = Real_t(1.0e-2) ;
mesh.time() = Real_t(0.) ;
mesh.cycle() = 0 ;
mesh.e_cut() = Real_t(1.0e-7) ;
mesh.p_cut() = Real_t(1.0e-7) ;
mesh.q_cut() = Real_t(1.0e-7) ;
mesh.u_cut() = Real_t(1.0e-7) ;
mesh.v_cut() = Real_t(1.0e-10) ;
mesh.hgcoef() = Real_t(3.0) ;
mesh.ss4o3() = Real_t(4.0)/Real_t(3.0) ;
mesh.qstop() = Real_t(1.0e+12) ;
mesh.monoq_max_slope() = Real_t(1.0) ;
mesh.monoq_limiter_mult() = Real_t(2.0) ;
mesh.qlc_monoq() = Real_t(0.5) ;
mesh.qqc_monoq() = Real_t(2.0)/Real_t(3.0) ;
mesh.qqc() = Real_t(2.0) ;
mesh.pmin() = Real_t(0.) ;
mesh.emin() = Real_t(-1.0e+15) ;
mesh.dvovmax() = Real_t(0.1) ;
mesh.eosvmax() = Real_t(1.0e+9) ;
mesh.eosvmin() = Real_t(1.0e-9) ;
mesh.refdens() = Real_t(1.0) ;
/* initialize field data */
for (Index_t i=0; i<meshElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(i,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
mesh.volo(i) = volume ;
mesh.elemMass(i) = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t idx = mesh.nodelist(i,j);
mesh.nodalMass(idx) += volume / Real_t(8.0) ;
}
}
/* deposit energy */
mesh.e(0) = Real_t(3.948746e+7) ;
/* set up symmetry nodesets */
nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
mesh.symmX(nidx) = planeInc + j*edgeNodes ;
mesh.symmY(nidx) = planeInc + j ;
mesh.symmZ(nidx) = rowInc + j ;
++nidx ;
}
}
/* set up element connectivity information */
mesh.lxim(0) = 0 ;
for (Index_t i=1; i<meshElems; ++i) {
mesh.lxim(i) = i-1 ;
mesh.lxip(i-1) = i ;
}
mesh.lxip(meshElems-1) = meshElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
mesh.letam(i) = i ;
mesh.letap(meshElems-edgeElems+i) = meshElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<meshElems; ++i) {
mesh.letam(i) = i-edgeElems ;
mesh.letap(i-edgeElems) = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
mesh.lzetam(i) = i ;
mesh.lzetap(meshElems-edgeElems*edgeElems+i) = meshElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<meshElems; ++i) {
mesh.lzetam(i) = i - edgeElems*edgeElems ;
mesh.lzetap(i-edgeElems*edgeElems) = i ;
}
/* set up boundary condition information */
for (Index_t i=0; i<meshElems; ++i) {
mesh.elemBC(i) = 0 ; /* clear BCs by default */
}
/* faces on "external" boundaries will be */
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
mesh.elemBC(planeInc+j*edgeElems) |= XI_M_SYMM ;
mesh.elemBC(planeInc+j*edgeElems+edgeElems-1) |= XI_P_FREE ;
mesh.elemBC(planeInc+j) |= ETA_M_SYMM ;
mesh.elemBC(planeInc+j+edgeElems*edgeElems-edgeElems) |= ETA_P_FREE ;
mesh.elemBC(rowInc+j) |= ZETA_M_SYMM ;
mesh.elemBC(rowInc+j+meshElems-edgeElems*edgeElems) |= ZETA_P_FREE ;
}
}
mesh.AllocateNodeElemIndexes();
/* initialize meshGPU */
meshGPU.init(&mesh);
meshGPU.freshenGPU();
#ifndef DP_PROFILING
/* timestep to solution */
int its=0;
#endif
#ifdef DP_PROFILING
while (its<1)
{ //only profiling the first iteration
#else
while(mesh.time() < mesh.stoptime() )
{
#endif
TimeIncrement() ;
LagrangeLeapFrog(0) ;
its++;
/* problem->commNodes->Transfer(CommNodes::syncposvel) ; */
#if LULESH_SHOW_PROGRESS
printf("time = %e, dt=%e\n",
double(mesh.time()), double(mesh.deltatime()) ) ;
#endif
printf("iterations: %d\n",its);
}
#ifdef DP_PROFILING
cudaPrintfEnd();
#endif
// FC(x);
// FILE *fp = fopen("x.asc","wb");
// for (Index_t i=0; i<mesh.numElem(); i++)
// fprintf(fp,"%.6f\n",mesh.x(i));
// fclose(fp);
clock_gettime(CLOCK_MONOTONIC,&stop);
float time=(stop.tv_sec-start.tv_sec)*1000+(stop.tv_nsec-start.tv_nsec)/1000000;
printf("total time= %f\n",time);
printf("k1= %f,k2= %f,k3= %f,k4 =%f,k5= %f\n",k1,k2,k3,k4,k5);
return 0;
}
| d93e55fd5b10b081baf93f5018424050c19e9c58.cu | /*
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*/
#include <vector>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "cuPrintf.cuh"
#include "cuPrintf.cu"
float k1,k2,k3,k4,k5;
#define LULESH_SHOW_PROGRESS 1
//#define DP_PROFILING
//#define DP_PROFILING_KERNEL1
//#define DP_PROFILING_KERNEL2
//#define DP_PROFILING_KERNEL3
//#define DP_PROFILING_KERNEL4
//#define DP_PROFILING_KERNEL5
/*
#define AFTER_KERNEL1
#define AFTER_KERNEL2
#define AFTER_KERNEL3
#define AFTER_KERNEL4
#define AFTER_KERNEL5_2
*/
#ifdef DP_PROFILING
/*timestep to solution*/
int its = 0;
#endif
//#ifdef AFTER_KERNEL1
texture<int,1,cudaReadModeElementType> tex_nodeElemCount;
texture<int,1,cudaReadModeElementType> tex_nodeElemCornerList;
texture<float,1,cudaReadModeElementType> tex_fx_elem;
texture<float,1,cudaReadModeElementType> tex_fy_elem;
texture<float,1,cudaReadModeElementType> tex_fz_elem;
//#endif
#ifdef AFTER_KERNEL3
texture<float,1,cudaReadModeElementType> tex_determ;
texture<float,1,cudaReadModeElementType> tex_x8n;
texture<float,1,cudaReadModeElementType> tex_y8n;
texture<float,1,cudaReadModeElementType> tex_z8n;
texture<float,1,cudaReadModeElementType> tex_dvdx;
texture<float,1,cudaReadModeElementType> tex_dvdy;
texture<float,1,cudaReadModeElementType> tex_dvdz;
texture<int,1,cudaReadModeElementType> tex3_nodelist;
texture<float,1,cudaReadModeElementType> tex_ss;
texture<float,1,cudaReadModeElementType> tex_elemMass;
texture<float,1,cudaReadModeElementType> tex_xd;
texture<float,1,cudaReadModeElementType> tex_yd;
texture<float,1,cudaReadModeElementType> tex_zd;
#endif
#ifdef AFTER_KERNEL4
texture<float,1,cudaReadModeElementType> tex_x;
texture<float,1,cudaReadModeElementType> tex_y;
texture<float,1,cudaReadModeElementType> tex_z;
texture<float,1,cudaReadModeElementType> tex_volo;
texture<float,1,cudaReadModeElementType> tex_v;
texture<int,1,cudaReadModeElementType> tex_nodelist;
#endif
#ifdef AFTER_KERNEL5
texture<int,1,cudaReadModeElementType> tex_matElemlist;
texture<int,1,cudaReadModeElementType> tex_elemBC;
texture<int,1,cudaReadModeElementType> tex_lxim;
texture<int,1,cudaReadModeElementType> tex_lxip;
texture<int,1,cudaReadModeElementType> tex_letam;
texture<int,1,cudaReadModeElementType> tex_letap;
texture<int,1,cudaReadModeElementType> tex_lzetam;
texture<int,1,cudaReadModeElementType> tex_lzetap;
#endif
#ifdef AFTER_KERNEL5_2
texture<float,1,cudaReadModeElementType> tex_delv_xi;
texture<float,1,cudaReadModeElementType> tex_delv_eta;
texture<float,1,cudaReadModeElementType> tex_delv_zeta;
#endif
enum { VolumeError = -1, QStopError = -2 } ;
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
/* Could also support fixed point and interval arithmetic types */
typedef float real4 ;
typedef double real8 ;
typedef long double real10 ; /* 10 bytes on x86 */
typedef int Index_t ; /* array subscript and loop index */
typedef real4 Real_t ; /* floating point representation */
typedef int Int_t ; /* integer representation */
__host__ __device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__host__ __device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__host__ inline real10 SQRT(real10 arg) { return sqrtl(arg) ; }
__host__ __device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__host__ __device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__host__ inline real10 CBRT(real10 arg) { return cbrtl(arg) ; }
__host__ __device__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__host__ __device__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__host__ inline real10 FABS(real10 arg) { return fabsl(arg) ; }
__host__ __device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__host__ __device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
__host__ inline real10 FMAX(real10 arg1,real10 arg2) { return fmaxl(arg1,arg2) ; }
#define CUDA_SAFE_CALL( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA(call) CUDA_SAFE_CALL(call)
#ifdef CUDA_SYNC_ALL
#define CUDA_DEBUGSYNC CUDA(cudaThreadSynchronize())
#else
#define CUDA_DEBUGSYNC
#endif
#define BLOCKSIZE 256
/* Given a number of bytes, nbytes, and a byte alignment, align, (e.g., 2,
 * 4, 8, or 16), PAD returns the smallest multiple of align that is greater
 * than or equal to nbytes; PAD_DIV returns that value divided by align.
 */
#define PAD_DIV(nbytes, align) (((nbytes) + (align) - 1) / (align))
#define PAD(nbytes, align) (PAD_DIV((nbytes),(align)) * (align))
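/* Example: with BLOCKSIZE == 256 and length == 1000, PAD_DIV(1000,256) is
 * (1000+255)/256 == 4, so dimGrid=PAD_DIV(length,dimBlock.x) launches 4
 * blocks and the i<length guard in each kernel skips the excess threads. */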
/* More general version of reduceInPlacePOT (this works for arbitrary
* numThreadsPerBlock <= 1024). Again, conditionals on
* numThreadsPerBlock are evaluated at compile time.
*/
template <class T, int numThreadsPerBlock>
__device__ void
reduceSum(T *sresult, const int threadID)
{
/* If number of threads is not a power of two, first add the ones
after the last power of two into the beginning. At most one of
these conditionals will be true for a given NPOT block size. */
if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024)
{
__syncthreads();
if (threadID < numThreadsPerBlock-512)
sresult[threadID] += sresult[threadID + 512];
}
if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512)
{
__syncthreads();
if (threadID < numThreadsPerBlock-256)
sresult[threadID] += sresult[threadID + 256];
}
if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256)
{
__syncthreads();
if (threadID < numThreadsPerBlock-128)
sresult[threadID] += sresult[threadID + 128];
}
if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128)
{
__syncthreads();
if (threadID < numThreadsPerBlock-64)
sresult[threadID] += sresult[threadID + 64];
}
if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64)
{
__syncthreads();
if (threadID < numThreadsPerBlock-32)
sresult[threadID] += sresult[threadID + 32];
}
if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32)
{
__syncthreads();
if (threadID < numThreadsPerBlock-16)
sresult[threadID] += sresult[threadID + 16];
}
if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16)
{
__syncthreads();
if (threadID < numThreadsPerBlock-8)
sresult[threadID] += sresult[threadID + 8];
}
if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8)
{
__syncthreads();
if (threadID < numThreadsPerBlock-4)
sresult[threadID] += sresult[threadID + 4];
}
if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4)
{
__syncthreads();
if (threadID < numThreadsPerBlock-2)
sresult[threadID] += sresult[threadID + 2];
}
if (numThreadsPerBlock >= 512) {
__syncthreads();
if (threadID < 256)
sresult[threadID] += sresult[threadID + 256];
}
if (numThreadsPerBlock >= 256) {
__syncthreads();
if (threadID < 128)
sresult[threadID] += sresult[threadID + 128];
}
if (numThreadsPerBlock >= 128) {
__syncthreads();
if (threadID < 64)
sresult[threadID] += sresult[threadID + 64];
}
__syncthreads();
#ifdef _DEVICEEMU
if (numThreadsPerBlock >= 64) {
__syncthreads();
if (threadID < 32)
sresult[threadID] += sresult[threadID + 32];
}
if (numThreadsPerBlock >= 32) {
__syncthreads();
if (threadID < 16)
sresult[threadID] += sresult[threadID + 16];
}
if (numThreadsPerBlock >= 16) {
__syncthreads();
if (threadID < 8)
sresult[threadID] += sresult[threadID + 8];
}
if (numThreadsPerBlock >= 8) {
__syncthreads();
if (threadID < 4)
sresult[threadID] += sresult[threadID + 4];
}
if (numThreadsPerBlock >= 4) {
__syncthreads();
if (threadID < 2)
sresult[threadID] += sresult[threadID + 2];
}
if (numThreadsPerBlock >= 2) {
__syncthreads();
if (threadID < 1)
sresult[threadID] += sresult[threadID + 1];
}
#else
if (threadID < 32) {
volatile T *vol = sresult;
if (numThreadsPerBlock >= 64) vol[threadID] += vol[threadID + 32];
if (numThreadsPerBlock >= 32) vol[threadID] += vol[threadID + 16];
if (numThreadsPerBlock >= 16) vol[threadID] += vol[threadID + 8];
if (numThreadsPerBlock >= 8) vol[threadID] += vol[threadID + 4];
if (numThreadsPerBlock >= 4) vol[threadID] += vol[threadID + 2];
if (numThreadsPerBlock >= 2) vol[threadID] += vol[threadID + 1];
}
#endif
__syncthreads();
}
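/* reduceMin: same block-level tree reduction as reduceSum above, but entries
 * are combined with MINEQ so sresult[0] ends up holding the block minimum. */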
#define MINEQ(a,b) (a)=(((a)<(b))?(a):(b))
template <class T, int numThreadsPerBlock>
__device__ void
reduceMin(T *sresult, const int threadID)
{
/* If number of threads is not a power of two, first add the ones
after the last power of two into the beginning. At most one of
these conditionals will be true for a given NPOT block size. */
if (numThreadsPerBlock > 512 && numThreadsPerBlock <= 1024)
{
__syncthreads();
if (threadID < numThreadsPerBlock-512)
MINEQ(sresult[threadID],sresult[threadID + 512]);
}
if (numThreadsPerBlock > 256 && numThreadsPerBlock < 512)
{
__syncthreads();
if (threadID < numThreadsPerBlock-256)
MINEQ(sresult[threadID],sresult[threadID + 256]);
}
if (numThreadsPerBlock > 128 && numThreadsPerBlock < 256)
{
__syncthreads();
if (threadID < numThreadsPerBlock-128)
MINEQ(sresult[threadID],sresult[threadID + 128]);
}
if (numThreadsPerBlock > 64 && numThreadsPerBlock < 128)
{
__syncthreads();
if (threadID < numThreadsPerBlock-64)
MINEQ(sresult[threadID],sresult[threadID + 64]);
}
if (numThreadsPerBlock > 32 && numThreadsPerBlock < 64)
{
__syncthreads();
if (threadID < numThreadsPerBlock-32)
MINEQ(sresult[threadID],sresult[threadID + 32]);
}
if (numThreadsPerBlock > 16 && numThreadsPerBlock < 32)
{
__syncthreads();
if (threadID < numThreadsPerBlock-16)
MINEQ(sresult[threadID],sresult[threadID + 16]);
}
if (numThreadsPerBlock > 8 && numThreadsPerBlock < 16)
{
__syncthreads();
if (threadID < numThreadsPerBlock-8)
MINEQ(sresult[threadID],sresult[threadID + 8]);
}
if (numThreadsPerBlock > 4 && numThreadsPerBlock < 8)
{
__syncthreads();
if (threadID < numThreadsPerBlock-4)
MINEQ(sresult[threadID],sresult[threadID + 4]);
}
if (numThreadsPerBlock > 2 && numThreadsPerBlock < 4)
{
__syncthreads();
if (threadID < numThreadsPerBlock-2)
MINEQ(sresult[threadID],sresult[threadID + 2]);
}
if (numThreadsPerBlock >= 512) {
__syncthreads();
if (threadID < 256)
MINEQ(sresult[threadID],sresult[threadID + 256]);
}
if (numThreadsPerBlock >= 256) {
__syncthreads();
if (threadID < 128)
MINEQ(sresult[threadID],sresult[threadID + 128]);
}
if (numThreadsPerBlock >= 128) {
__syncthreads();
if (threadID < 64)
MINEQ(sresult[threadID],sresult[threadID + 64]);
}
__syncthreads();
#ifdef _DEVICEEMU
if (numThreadsPerBlock >= 64) {
__syncthreads();
if (threadID < 32)
MINEQ(sresult[threadID],sresult[threadID + 32]);
}
if (numThreadsPerBlock >= 32) {
__syncthreads();
if (threadID < 16)
MINEQ(sresult[threadID],sresult[threadID + 16]);
}
if (numThreadsPerBlock >= 16) {
__syncthreads();
if (threadID < 8)
MINEQ(sresult[threadID],sresult[threadID + 8]);
}
if (numThreadsPerBlock >= 8) {
__syncthreads();
if (threadID < 4)
MINEQ(sresult[threadID],sresult[threadID + 4]);
}
if (numThreadsPerBlock >= 4) {
__syncthreads();
if (threadID < 2)
MINEQ(sresult[threadID],sresult[threadID + 2]);
}
if (numThreadsPerBlock >= 2) {
__syncthreads();
if (threadID < 1)
MINEQ(sresult[threadID],sresult[threadID + 1]);
}
#else
if (threadID < 32) {
volatile T *vol = sresult;
if (numThreadsPerBlock >= 64) MINEQ(vol[threadID],vol[threadID + 32]);
if (numThreadsPerBlock >= 32) MINEQ(vol[threadID],vol[threadID + 16]);
if (numThreadsPerBlock >= 16) MINEQ(vol[threadID],vol[threadID + 8]);
if (numThreadsPerBlock >= 8) MINEQ(vol[threadID],vol[threadID + 4]);
if (numThreadsPerBlock >= 4) MINEQ(vol[threadID],vol[threadID + 2]);
if (numThreadsPerBlock >= 2) MINEQ(vol[threadID],vol[threadID + 1]);
}
#endif
__syncthreads();
}
void cuda_init()
{
int deviceCount, dev;
cudaDeviceProp cuda_deviceProp;
char *s;
CUDA( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
if (s=getenv("CUDA_DEVICE")) dev=atoi(s);
else dev=0;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
CUDA( cudaGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 1) {
fprintf(stderr, "cuda_init(): device %d does not support CUDA.\n", dev);
exit(1);
}
fprintf(stderr, "setting CUDA device %d\n",dev);
CUDA( cudaSetDevice(dev) );
}
/************************************************************/
/* Allow for flexible data layout experiments by separating */
/* array interface from underlying implementation. */
/************************************************************/
struct Mesh {
/* This first implementation allows for runnable code */
/* and is not meant to be optimal. Final implementation */
/* should separate declaration and allocation phases */
/* so that allocation can be scheduled in a cache conscious */
/* manner. */
friend struct MeshGPU;
public:
/**************/
/* Allocation */
/**************/
void AllocateNodalPersistent(size_t size)
{
m_x.resize(size) ;
m_y.resize(size) ;
m_z.resize(size) ;
m_xd.resize(size, Real_t(0.)) ;
m_yd.resize(size, Real_t(0.)) ;
m_zd.resize(size, Real_t(0.)) ;
m_xdd.resize(size, Real_t(0.)) ;
m_ydd.resize(size, Real_t(0.)) ;
m_zdd.resize(size, Real_t(0.)) ;
m_fx.resize(size) ;
m_fy.resize(size) ;
m_fz.resize(size) ;
m_nodalMass.resize(size, Real_t(0.)) ;
}
void AllocateElemPersistent(size_t size)
{
m_matElemlist.resize(size) ;
m_nodelist.resize(8*size) ;
m_lxim.resize(size) ;
m_lxip.resize(size) ;
m_letam.resize(size) ;
m_letap.resize(size) ;
m_lzetam.resize(size) ;
m_lzetap.resize(size) ;
m_elemBC.resize(size) ;
m_e.resize(size, Real_t(0.)) ;
m_p.resize(size, Real_t(0.)) ;
m_q.resize(size) ;
m_ql.resize(size) ;
m_qq.resize(size) ;
m_v.resize(size, 1.0) ;
m_volo.resize(size) ;
m_delv.resize(size) ;
m_vdov.resize(size) ;
m_arealg.resize(size) ;
m_ss.resize(size) ;
m_elemMass.resize(size) ;
}
/* Temporaries should not be initialized in bulk but */
/* this is a runnable placeholder for now */
void AllocateElemTemporary(size_t size)
{
m_dxx.resize(size) ;
m_dyy.resize(size) ;
m_dzz.resize(size) ;
m_delv_xi.resize(size) ;
m_delv_eta.resize(size) ;
m_delv_zeta.resize(size) ;
m_delx_xi.resize(size) ;
m_delx_eta.resize(size) ;
m_delx_zeta.resize(size) ;
m_vnew.resize(size) ;
}
void AllocateNodesets(size_t size)
{
m_symmX.resize(size) ;
m_symmY.resize(size) ;
m_symmZ.resize(size) ;
}
void AllocateNodeElemIndexes()
{
Index_t i,j,nidx;
/* set up node-centered indexing of elements */
m_nodeElemCount.resize(m_numNode);
for (i=0;i<m_numNode;i++) m_nodeElemCount[i]=0;
m_nodeElemCornerList.resize(m_numNode*8);
for (i=0;i<m_numElem;i++) {
for (j=0;j<8;j++) {
nidx=nodelist(i,j);
m_nodeElemCornerList[nidx+m_numNode*m_nodeElemCount[nidx]++] = i+m_numElem*j;
if (m_nodeElemCount[nidx]>8) {
fprintf(stderr, "Node degree is higher than 8!\n");
exit(1);
}
}
}
}
/**********/
/* Access */
/**********/
/* Node-centered */
Real_t& x(Index_t idx) { return m_x[idx] ; }
Real_t& y(Index_t idx) { return m_y[idx] ; }
Real_t& z(Index_t idx) { return m_z[idx] ; }
Real_t& xd(Index_t idx) { return m_xd[idx] ; }
Real_t& yd(Index_t idx) { return m_yd[idx] ; }
Real_t& zd(Index_t idx) { return m_zd[idx] ; }
Real_t& xdd(Index_t idx) { return m_xdd[idx] ; }
Real_t& ydd(Index_t idx) { return m_ydd[idx] ; }
Real_t& zdd(Index_t idx) { return m_zdd[idx] ; }
Real_t& fx(Index_t idx) { return m_fx[idx] ; }
Real_t& fy(Index_t idx) { return m_fy[idx] ; }
Real_t& fz(Index_t idx) { return m_fz[idx] ; }
Real_t& nodalMass(Index_t idx) { return m_nodalMass[idx] ; }
Index_t& symmX(Index_t idx) { return m_symmX[idx] ; }
Index_t& symmY(Index_t idx) { return m_symmY[idx] ; }
Index_t& symmZ(Index_t idx) { return m_symmZ[idx] ; }
/* Element-centered */
Index_t& matElemlist(Index_t idx) { return m_matElemlist[idx] ; }
Index_t& nodelist(Index_t idx,Index_t nidx) { return m_nodelist[idx+nidx*m_numElem] ; }
Index_t& lxim(Index_t idx) { return m_lxim[idx] ; }
Index_t& lxip(Index_t idx) { return m_lxip[idx] ; }
Index_t& letam(Index_t idx) { return m_letam[idx] ; }
Index_t& letap(Index_t idx) { return m_letap[idx] ; }
Index_t& lzetam(Index_t idx) { return m_lzetam[idx] ; }
Index_t& lzetap(Index_t idx) { return m_lzetap[idx] ; }
Int_t& elemBC(Index_t idx) { return m_elemBC[idx] ; }
Real_t& dxx(Index_t idx) { return m_dxx[idx] ; }
Real_t& dyy(Index_t idx) { return m_dyy[idx] ; }
Real_t& dzz(Index_t idx) { return m_dzz[idx] ; }
Real_t& delv_xi(Index_t idx) { return m_delv_xi[idx] ; }
Real_t& delv_eta(Index_t idx) { return m_delv_eta[idx] ; }
Real_t& delv_zeta(Index_t idx) { return m_delv_zeta[idx] ; }
Real_t& delx_xi(Index_t idx) { return m_delx_xi[idx] ; }
Real_t& delx_eta(Index_t idx) { return m_delx_eta[idx] ; }
Real_t& delx_zeta(Index_t idx) { return m_delx_zeta[idx] ; }
Real_t& e(Index_t idx) { return m_e[idx] ; }
Real_t& p(Index_t idx) { return m_p[idx] ; }
Real_t& q(Index_t idx) { return m_q[idx] ; }
Real_t& ql(Index_t idx) { return m_ql[idx] ; }
Real_t& qq(Index_t idx) { return m_qq[idx] ; }
Real_t& v(Index_t idx) { return m_v[idx] ; }
Real_t& volo(Index_t idx) { return m_volo[idx] ; }
Real_t& vnew(Index_t idx) { return m_vnew[idx] ; }
Real_t& delv(Index_t idx) { return m_delv[idx] ; }
Real_t& vdov(Index_t idx) { return m_vdov[idx] ; }
Real_t& arealg(Index_t idx) { return m_arealg[idx] ; }
Real_t& ss(Index_t idx) { return m_ss[idx] ; }
Real_t& elemMass(Index_t idx) { return m_elemMass[idx] ; }
/* Params */
Real_t& dtfixed() { return m_dtfixed ; }
Real_t& time() { return m_time ; }
Real_t& deltatime() { return m_deltatime ; }
Real_t& deltatimemultlb() { return m_deltatimemultlb ; }
Real_t& deltatimemultub() { return m_deltatimemultub ; }
Real_t& stoptime() { return m_stoptime ; }
Real_t& u_cut() { return m_u_cut ; }
Real_t& hgcoef() { return m_hgcoef ; }
Real_t& qstop() { return m_qstop ; }
Real_t& monoq_max_slope() { return m_monoq_max_slope ; }
Real_t& monoq_limiter_mult() { return m_monoq_limiter_mult ; }
Real_t& e_cut() { return m_e_cut ; }
Real_t& p_cut() { return m_p_cut ; }
Real_t& ss4o3() { return m_ss4o3 ; }
Real_t& q_cut() { return m_q_cut ; }
Real_t& v_cut() { return m_v_cut ; }
Real_t& qlc_monoq() { return m_qlc_monoq ; }
Real_t& qqc_monoq() { return m_qqc_monoq ; }
Real_t& qqc() { return m_qqc ; }
Real_t& eosvmax() { return m_eosvmax ; }
Real_t& eosvmin() { return m_eosvmin ; }
Real_t& pmin() { return m_pmin ; }
Real_t& emin() { return m_emin ; }
Real_t& dvovmax() { return m_dvovmax ; }
Real_t& refdens() { return m_refdens ; }
Real_t& dtcourant() { return m_dtcourant ; }
Real_t& dthydro() { return m_dthydro ; }
Real_t& dtmax() { return m_dtmax ; }
Int_t& cycle() { return m_cycle ; }
Index_t& sizeX() { return m_sizeX ; }
Index_t& sizeY() { return m_sizeY ; }
Index_t& sizeZ() { return m_sizeZ ; }
Index_t& numElem() { return m_numElem ; }
Index_t& numNode() { return m_numNode ; }
//private:
/******************/
/* Implementation */
/******************/
/* Node-centered */
std::vector<Real_t> m_x ; /* coordinates */
std::vector<Real_t> m_y ;
std::vector<Real_t> m_z ;
std::vector<Real_t> m_xd ; /* velocities */
std::vector<Real_t> m_yd ;
std::vector<Real_t> m_zd ;
std::vector<Real_t> m_xdd ; /* accelerations */
std::vector<Real_t> m_ydd ;
std::vector<Real_t> m_zdd ;
std::vector<Real_t> m_fx ; /* forces */
std::vector<Real_t> m_fy ;
std::vector<Real_t> m_fz ;
std::vector<Real_t> m_nodalMass ; /* mass */
std::vector<Index_t> m_symmX ; /* symmetry plane nodesets */
std::vector<Index_t> m_symmY ;
std::vector<Index_t> m_symmZ ;
std::vector<Int_t> m_nodeElemCount ;
std::vector<Index_t> m_nodeElemCornerList ;
/* Element-centered */
std::vector<Index_t> m_matElemlist ; /* material indexset */
std::vector<Index_t> m_nodelist ; /* elemToNode connectivity */
std::vector<Index_t> m_lxim ; /* element connectivity across each face */
std::vector<Index_t> m_lxip ;
std::vector<Index_t> m_letam ;
std::vector<Index_t> m_letap ;
std::vector<Index_t> m_lzetam ;
std::vector<Index_t> m_lzetap ;
std::vector<Int_t> m_elemBC ; /* symmetry/free-surface flags for each elem face */
std::vector<Real_t> m_dxx ; /* principal strains -- temporary */
std::vector<Real_t> m_dyy ;
std::vector<Real_t> m_dzz ;
std::vector<Real_t> m_delv_xi ; /* velocity gradient -- temporary */
std::vector<Real_t> m_delv_eta ;
std::vector<Real_t> m_delv_zeta ;
std::vector<Real_t> m_delx_xi ; /* coordinate gradient -- temporary */
std::vector<Real_t> m_delx_eta ;
std::vector<Real_t> m_delx_zeta ;
std::vector<Real_t> m_e ; /* energy */
std::vector<Real_t> m_p ; /* pressure */
std::vector<Real_t> m_q ; /* q */
std::vector<Real_t> m_ql ; /* linear term for q */
std::vector<Real_t> m_qq ; /* quadratic term for q */
std::vector<Real_t> m_v ; /* relative volume */
std::vector<Real_t> m_volo ; /* reference volume */
std::vector<Real_t> m_vnew ; /* new relative volume -- temporary */
std::vector<Real_t> m_delv ; /* m_vnew - m_v */
std::vector<Real_t> m_vdov ; /* volume derivative over volume */
std::vector<Real_t> m_arealg ; /* characteristic length of an element */
std::vector<Real_t> m_ss ; /* "sound speed" */
std::vector<Real_t> m_elemMass ; /* mass */
/* Parameters */
Real_t m_dtfixed ; /* fixed time increment */
Real_t m_time ; /* current time */
Real_t m_deltatime ; /* variable time increment */
Real_t m_deltatimemultlb ;
Real_t m_deltatimemultub ;
Real_t m_stoptime ; /* end time for simulation */
Real_t m_u_cut ; /* velocity tolerance */
Real_t m_hgcoef ; /* hourglass control */
Real_t m_qstop ; /* excessive q indicator */
Real_t m_monoq_max_slope ;
Real_t m_monoq_limiter_mult ;
Real_t m_e_cut ; /* energy tolerance */
Real_t m_p_cut ; /* pressure tolerance */
Real_t m_ss4o3 ;
Real_t m_q_cut ; /* q tolerance */
Real_t m_v_cut ; /* relative volume tolerance */
Real_t m_qlc_monoq ; /* linear term coef for q */
Real_t m_qqc_monoq ; /* quadratic term coef for q */
Real_t m_qqc ;
Real_t m_eosvmax ;
Real_t m_eosvmin ;
Real_t m_pmin ; /* pressure floor */
Real_t m_emin ; /* energy floor */
Real_t m_dvovmax ; /* maximum allowable volume change */
Real_t m_refdens ; /* reference density */
Real_t m_dtcourant ; /* courant constraint */
Real_t m_dthydro ; /* volume change constraint */
Real_t m_dtmax ; /* maximum allowable time increment */
Int_t m_cycle ; /* iteration count for simulation */
Index_t m_sizeX ; /* X,Y,Z extent of this block */
Index_t m_sizeY ;
Index_t m_sizeZ ;
Index_t m_numElem ; /* Elements/Nodes in this domain */
Index_t m_numNode ;
} mesh ;
template <typename T>
T *Allocate(size_t size)
{
return static_cast<T *>(malloc(sizeof(T)*size)) ;
}
template <typename T>
void Release(T **ptr)
{
if (*ptr != NULL) {
free(*ptr) ;
*ptr = NULL ;
}
}
#define GPU_STALE 0
#define CPU_STALE 1
#define ALL_FRESH 2
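/*
 * Lazy host/device coherence: each mirrored array carries a three-state flag.
 * GPU_STALE -- the CPU copy is authoritative; copy host->device before GPU use.
 * CPU_STALE -- the GPU copy is authoritative; copy device->host before CPU use.
 * ALL_FRESH -- both copies agree; no transfer is needed.
 * freshenGPU()/freshenCPU() below perform the transfer only when required, and
 * freshenGPU() also allocates the device buffer on first use.
 */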
template<typename T>
void freshenGPU(std::vector<T>&cpu,T **gpu,int& stale) {
if (stale!=GPU_STALE) return;
if (!(*gpu)) {CUDA( cudaMalloc(gpu,sizeof(T)*cpu.size()) );}
CUDA( cudaMemcpy(*gpu,&cpu[0],sizeof(T)*cpu.size(),cudaMemcpyHostToDevice) );
stale=ALL_FRESH;
}
template<typename T>
void freshenCPU(std::vector<T>&cpu,T *gpu,int& stale) {
if (stale!=CPU_STALE) return;
if (!gpu) {fprintf(stderr,"freshenCPU(): NULL GPU data!\n");exit(1);}
CUDA( cudaMemcpy(&cpu[0],gpu,sizeof(T)*cpu.size(),cudaMemcpyDeviceToHost) );
stale=ALL_FRESH;
}
// freshen helpers
#define FC(var) freshenCPU(mesh.m_ ## var , meshGPU.m_ ## var ,meshGPU.m_ ## var ## _stale ); // freshen CPU
#define FG(var) freshenGPU(mesh.m_ ## var , &meshGPU.m_ ## var ,meshGPU.m_ ## var ## _stale ); // freshen GPU
// stale helpers
#define SC(var) meshGPU.m_ ## var ## _stale = CPU_STALE; // stale CPU
#define SG(var) meshGPU.m_ ## var ## _stale = GPU_STALE; // stale GPU
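// Typical usage (sketch): a host wrapper freshens the arrays it reads on the chosen
// device, runs the _cpu or _gpu variant, then marks the arrays it wrote as stale on
// the other side, e.g.
// FG(p); FG(q); // make p,q current on the GPU
// SomeRoutine_gpu(...); // placeholder for a kernel that writes meshGPU.m_e
// SC(e); // the CPU copy of e is now out of date
// The real wrappers below (InitStressTermsForElems, IntegrateStressForElems, ...)
// follow this pattern.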
struct MeshGPU {
Mesh *m_mesh;
/******************/
/* Implementation */
/******************/
/* Node-centered */
Real_t *m_x ; /* coordinates */
Real_t *m_y ;
Real_t *m_z ;
Real_t *m_xd ; /* velocities */
Real_t *m_yd ;
Real_t *m_zd ;
Real_t *m_xdd ; /* accelerations */
Real_t *m_ydd ;
Real_t *m_zdd ;
Real_t *m_fx ; /* forces */
Real_t *m_fy ;
Real_t *m_fz ;
Real_t *m_nodalMass ; /* mass */
Index_t *m_symmX ; /* symmetry plane nodesets */
Index_t *m_symmY ;
Index_t *m_symmZ ;
Int_t *m_nodeElemCount ;
Index_t *m_nodeElemCornerList ;
/* Element-centered */
Index_t * m_matElemlist ; /* material indexset */
Index_t * m_nodelist ; /* elemToNode connectivity */
Index_t * m_lxim ; /* element connectivity across each face */
Index_t * m_lxip ;
Index_t * m_letam ;
Index_t * m_letap ;
Index_t * m_lzetam ;
Index_t * m_lzetap ;
Int_t * m_elemBC ; /* symmetry/free-surface flags for each elem face */
Real_t *m_dxx ; /* principal strains -- temporary */
Real_t *m_dyy ;
Real_t *m_dzz ;
Real_t *m_delv_xi ; /* velocity gradient -- temporary */
Real_t *m_delv_eta ;
Real_t *m_delv_zeta ;
Real_t *m_delx_xi ; /* coordinate gradient -- temporary */
Real_t *m_delx_eta ;
Real_t *m_delx_zeta ;
Real_t *m_e ; /* energy */
Real_t *m_p ; /* pressure */
Real_t *m_q ; /* q */
Real_t *m_ql ; /* linear term for q */
Real_t *m_qq ; /* quadratic term for q */
Real_t *m_v ; /* relative volume */
Real_t *m_volo ; /* reference volume */
Real_t *m_vnew ; /* new relative volume -- temporary */
Real_t *m_delv ; /* m_vnew - m_v */
Real_t *m_vdov ; /* volume derivative over volume */
Real_t *m_arealg ; /* characteristic length of an element */
Real_t *m_ss ; /* "sound speed" */
Real_t *m_elemMass ; /* mass */
/* Stale flags */
int m_x_stale,m_y_stale,m_z_stale;
int m_xd_stale,m_yd_stale,m_zd_stale;
int m_xdd_stale,m_ydd_stale,m_zdd_stale;
int m_fx_stale,m_fy_stale,m_fz_stale;
int m_nodalMass_stale;
int m_symmX_stale,m_symmY_stale,m_symmZ_stale;
int m_nodeElemCount_stale,m_nodeElemCornerList_stale;
int m_matElemlist_stale,m_nodelist_stale;
int m_lxim_stale,m_lxip_stale,m_letam_stale,m_letap_stale,m_lzetam_stale,m_lzetap_stale;
int m_elemBC_stale;
int m_dxx_stale,m_dyy_stale,m_dzz_stale;
int m_delv_xi_stale,m_delv_eta_stale,m_delv_zeta_stale;
int m_delx_xi_stale,m_delx_eta_stale,m_delx_zeta_stale;
int m_e_stale;
int m_p_stale,m_q_stale,m_ql_stale,m_qq_stale;
int m_v_stale,m_volo_stale,m_vnew_stale,m_delv_stale,m_vdov_stale;
int m_arealg_stale;
int m_ss_stale;
int m_elemMass_stale;
void init(Mesh *mesh) {
m_mesh=mesh;
m_x=m_y=m_z=NULL;
m_xd=m_yd=m_zd=NULL;
m_xdd=m_ydd=m_zdd=NULL;
m_fx=m_fy=m_fz=NULL;
m_nodalMass=NULL;
m_symmX=m_symmY=m_symmZ=NULL;
m_nodeElemCount=m_nodeElemCornerList=NULL;
m_matElemlist=m_nodelist=NULL;
m_lxim=m_lxip=m_letam=m_letap=m_lzetam=m_lzetap=NULL;
m_elemBC=NULL;
m_dxx=m_dyy=m_dzz=NULL;
m_delv_xi=m_delv_eta=m_delv_zeta=NULL;
m_delx_xi=m_delx_eta=m_delx_zeta=NULL;
m_e=NULL;
m_p=m_q=m_ql=m_qq=NULL;
m_v=m_volo=m_vnew=m_delv=m_vdov=NULL;
m_arealg=NULL;
m_ss=NULL;
m_elemMass=NULL;
m_x_stale=m_y_stale=m_z_stale=
m_xd_stale=m_yd_stale=m_zd_stale=
m_xdd_stale=m_ydd_stale=m_zdd_stale=
m_fx_stale=m_fy_stale=m_fz_stale=
m_nodalMass_stale=
m_symmX_stale=m_symmY_stale=m_symmZ_stale=
m_nodeElemCount_stale=m_nodeElemCornerList_stale=
m_matElemlist_stale=m_nodelist_stale=
m_lxim_stale=m_lxip_stale=m_letam_stale=m_letap_stale=m_lzetam_stale=m_lzetap_stale=
m_elemBC_stale=
m_dxx_stale=m_dyy_stale=m_dzz_stale=
m_delv_xi_stale=m_delv_eta_stale=m_delv_zeta_stale=
m_delx_xi_stale=m_delx_eta_stale=m_delx_zeta_stale=
m_e_stale=
m_p_stale=m_q_stale=m_ql_stale=m_qq_stale=
m_v_stale=m_volo_stale=m_vnew_stale=m_delv_stale=m_vdov_stale=
m_arealg_stale=
m_ss_stale=
m_elemMass_stale=
GPU_STALE;
}
void freshenGPU() {
#define F(var) ::freshenGPU(m_mesh->m_ ## var , &m_ ## var ,m_ ## var ## _stale);
F(x); F(y); F(z);
F(xd); F(yd); F(zd);
F(xdd); F(ydd); F(zdd);
F(fx); F(fy); F(fz);
F(nodalMass);
F(symmX); F(symmY); F(symmZ);
F(nodeElemCount); F(nodeElemCornerList);
F(matElemlist); F(nodelist);
F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap);
F(elemBC);
F(dxx); F(dyy); F(dzz);
F(delv_xi); F(delv_eta); F(delv_zeta);
F(delx_xi); F(delx_eta); F(delx_zeta);
F(e);
F(p); F(q); F(ql); F(qq);
F(v); F(volo); F(vnew); F(delv); F(vdov);
F(arealg);
F(ss);
F(elemMass);
#undef F
}
void freshenCPU() {
#define F(var) ::freshenCPU(m_mesh->m_ ## var , m_ ## var ,m_ ## var ## _stale);
F(x); F(y); F(z);
F(xd); F(yd); F(zd);
F(xdd); F(ydd); F(zdd);
F(fx); F(fy); F(fz);
F(nodalMass);
F(symmX); F(symmY); F(symmZ);
F(nodeElemCount); F(nodeElemCornerList);
F(matElemlist); F(nodelist);
F(lxim); F(lxip); F(letam); F(letap); F(lzetam); F(lzetap);
F(elemBC);
F(dxx); F(dyy); F(dzz);
F(delv_xi); F(delv_eta); F(delv_zeta);
F(delx_xi); F(delx_eta); F(delx_zeta);
F(e);
F(p); F(q); F(ql); F(qq);
F(v); F(volo); F(vnew); F(delv); F(vdov);
F(arealg);
F(ss);
F(elemMass);
#undef F
}
} meshGPU;
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x003
#define XI_M_SYMM 0x001
#define XI_M_FREE 0x002
#define XI_P 0x00c
#define XI_P_SYMM 0x004
#define XI_P_FREE 0x008
#define ETA_M 0x030
#define ETA_M_SYMM 0x010
#define ETA_M_FREE 0x020
#define ETA_P 0x0c0
#define ETA_P_SYMM 0x040
#define ETA_P_FREE 0x080
#define ZETA_M 0x300
#define ZETA_M_SYMM 0x100
#define ZETA_M_FREE 0x200
#define ZETA_P 0xc00
#define ZETA_P_SYMM 0x400
#define ZETA_P_FREE 0x800
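// Decoding the per-element mask (sketch): each face owns two bits, so e.g.
// Int_t bc = mesh.elemBC(k);
// if (bc & XI_M_SYMM) { /* -xi face lies on a symmetry plane */ }
// if ((bc & ZETA_P) == ZETA_P_FREE) { /* +zeta face is a free surface */ }
// A face whose two bits are both zero is an interior face shared with a neighbour element.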
static inline
void TimeIncrement()
{
Real_t targetdt = mesh.stoptime() - mesh.time() ;
if ((mesh.dtfixed() <= Real_t(0.0)) && (mesh.cycle() != Int_t(0))) {
Real_t ratio ;
Real_t olddt = mesh.deltatime() ;
/* This will require a reduction in parallel */
Real_t newdt = Real_t(1.0e+20) ;
if (mesh.dtcourant() < newdt) {
newdt = mesh.dtcourant() / Real_t(2.0) ;
}
if (mesh.dthydro() < newdt) {
newdt = mesh.dthydro() * Real_t(2.0) / Real_t(3.0) ;
}
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < mesh.deltatimemultlb()) {
newdt = olddt ;
}
else if (ratio > mesh.deltatimemultub()) {
newdt = olddt*mesh.deltatimemultub() ;
}
}
if (newdt > mesh.dtmax()) {
newdt = mesh.dtmax() ;
}
mesh.deltatime() = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > mesh.deltatime()) &&
(targetdt < (Real_t(4.0) * mesh.deltatime() / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * mesh.deltatime() / Real_t(3.0) ;
}
if (targetdt < mesh.deltatime()) {
mesh.deltatime() = targetdt ;
}
mesh.time() += mesh.deltatime() ;
++mesh.cycle() ;
}
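// Worked example for the dt update above (illustrative numbers only): with
// olddt = 1.0e-3, dtcourant() = 1.6e-3 and dthydro() = 9.0e-3, the candidate is
// newdt = min(1.6e-3/2, 2*9.0e-3/3) = 8.0e-4; ratio = 0.8 < 1, so neither clamp
// applies and deltatime() becomes 8.0e-4 (still subject to the dtmax() cap and to
// the targetdt adjustment that avoids a tiny final step).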
__global__
void InitStressTermsForElems_kernel(
int numElem,Real_t *sigxx, Real_t *sigyy, Real_t *sigzz, Real_t *p, Real_t *q)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem)
sigxx[i] = sigyy[i] = sigzz[i] = - p[i] - q[i] ;
}
static inline
void InitStressTermsForElems_gpu(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(numElem,dimBlock.x),1,1);
    cudaFuncSetCacheConfig(InitStressTermsForElems_kernel,cudaFuncCachePreferL1); // prefer L1 for this kernel; in practice later kernels that state no preference tend to keep this config (cudaFuncSetCacheConfig itself is per-function)
InitStressTermsForElems_kernel<<<dimGrid, dimBlock>>>
(numElem,sigxx,sigyy,sigzz,meshGPU.m_p,meshGPU.m_q);
CUDA_DEBUGSYNC;
}
static inline
void InitStressTermsForElems_cpu(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz)
{
//
// pull in the stresses appropriate to the hydro integration
//
for (Index_t i = 0 ; i < numElem ; ++i){
sigxx[i] = sigyy[i] = sigzz[i] = - mesh.p(i) - mesh.q(i) ;
}
}
static inline
void InitStressTermsForElems(Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
int useCPU)
{
if (useCPU) {
FC(p); FC(q);
InitStressTermsForElems_cpu(numElem,sigxx,sigyy,sigzz);
}
else {
FG(p); FG(q);
InitStressTermsForElems_gpu(numElem,sigxx,sigyy,sigzz);
}
}
__host__ __device__
static inline
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
   /* calculate partials:
      this need only be done for l = 0,1,2,3 since, by symmetry,
      (6,7,4,5) = - (0,1,2,3).
   */
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}
__host__ __device__
static inline
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}
__host__ __device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}
__host__ __device__
static inline
void SumElemStressesToNodeForces( const Real_t B[][8],
const Real_t stress_xx,
const Real_t stress_yy,
const Real_t stress_zz,
Real_t* const fx,
Real_t* const fy,
Real_t* const fz,
int stride)
{
Real_t pfx0 = B[0][0] ; Real_t pfx1 = B[0][1] ;
Real_t pfx2 = B[0][2] ; Real_t pfx3 = B[0][3] ;
Real_t pfx4 = B[0][4] ; Real_t pfx5 = B[0][5] ;
Real_t pfx6 = B[0][6] ; Real_t pfx7 = B[0][7] ;
Real_t pfy0 = B[1][0] ; Real_t pfy1 = B[1][1] ;
Real_t pfy2 = B[1][2] ; Real_t pfy3 = B[1][3] ;
Real_t pfy4 = B[1][4] ; Real_t pfy5 = B[1][5] ;
Real_t pfy6 = B[1][6] ; Real_t pfy7 = B[1][7] ;
Real_t pfz0 = B[2][0] ; Real_t pfz1 = B[2][1] ;
Real_t pfz2 = B[2][2] ; Real_t pfz3 = B[2][3] ;
Real_t pfz4 = B[2][4] ; Real_t pfz5 = B[2][5] ;
Real_t pfz6 = B[2][6] ; Real_t pfz7 = B[2][7] ;
fx[0*stride] = -( stress_xx * pfx0 );
fx[1*stride] = -( stress_xx * pfx1 );
fx[2*stride] = -( stress_xx * pfx2 );
fx[3*stride] = -( stress_xx * pfx3 );
fx[4*stride] = -( stress_xx * pfx4 );
fx[5*stride] = -( stress_xx * pfx5 );
fx[6*stride] = -( stress_xx * pfx6 );
fx[7*stride] = -( stress_xx * pfx7 );
fy[0*stride] = -( stress_yy * pfy0 );
fy[1*stride] = -( stress_yy * pfy1 );
fy[2*stride] = -( stress_yy * pfy2 );
fy[3*stride] = -( stress_yy * pfy3 );
fy[4*stride] = -( stress_yy * pfy4 );
fy[5*stride] = -( stress_yy * pfy5 );
fy[6*stride] = -( stress_yy * pfy6 );
fy[7*stride] = -( stress_yy * pfy7 );
fz[0*stride] = -( stress_zz * pfz0 );
fz[1*stride] = -( stress_zz * pfz1 );
fz[2*stride] = -( stress_zz * pfz2 );
fz[3*stride] = -( stress_zz * pfz3 );
fz[4*stride] = -( stress_zz * pfz4 );
fz[5*stride] = -( stress_zz * pfz5 );
fz[6*stride] = -( stress_zz * pfz6 );
fz[7*stride] = -( stress_zz * pfz7 );
}
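// Note on the stride argument: the CPU path calls this with stride=1 and per-element
// scratch arrays, while the GPU kernel passes stride=numElem so the corner forces land
// in a node-major (structure-of-arrays) layout, entry fx[elem + numElem*node].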
__global__
void IntegrateStressForElems_kernel( Index_t numElem, Index_t *nodelist,
Real_t *x, Real_t *y, Real_t *z,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ)
{
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
x_local[lnode] = x[gnode];
y_local[lnode] = y[gnode];
z_local[lnode] = z[gnode];
}
/* Volume calculation involves extra work for numerical consistency. */
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
&fx_elem[k], &fy_elem[k], &fz_elem[k], numElem ) ;
}
}
__global__
#ifdef DP_PROFILING_KERNEL1
void AddNodeForcesFromElems_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
volatile Real_t *fx_elem, volatile Real_t *fy_elem,volatile Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node, int its)
#else
void AddNodeForcesFromElems_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node)
#endif
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
//#ifdef AFTER_KERNEL1
// Int_t count=tex1Dfetch(tex_nodeElemCount,i);
//#else
Int_t count=nodeElemCount[i];
//#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCount
//number of array ele is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
//cuPrintf("sizeof(int)=%d, sizeof(real8)=%d\n", sizeof(int), sizeof(real8));
        //4 bytes for int and 8 bytes for real8
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++) {
//#ifdef AFTER_KERNEL1
// Index_t elem=tex1Dfetch(tex_nodeElemCornerList,i+numNode*j);
//#else
Index_t elem=nodeElemCornerList[i+numNode*j];
//#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCornerList: the num of array elem is m_numNode*8
// which is edgeNodes^3*8=(45+1)^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i+numNode*j);
}
#endif
#ifdef AFTER_KERNEL1
fx+=tex1Dfetch(tex_fx_elem,elem); fy+=tex1Dfetch(tex_fy_elem,elem); fz+=tex1Dfetch(tex_fz_elem,elem);
#else
fx+=fx_elem[elem]; fy+=fy_elem[elem]; fz+=fz_elem[elem];
#endif
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem, fy_elem, fz_elem
//number of array elem is numElem*8=edgeElems^3*8=45^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
}
fx_node[i]=fx; fy_node[i]=fy; fz_node[i]=fz;
#ifdef DP_PROFILING_KERNEL1
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_node, fy_node, fz_node
//number of array elem is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
__global__
#ifdef DP_PROFILING_KERNEL2
void AddNodeForcesFromElems2_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node, int its)
#else
void AddNodeForcesFromElems2_kernel( Index_t numNode,
Int_t *nodeElemCount, Index_t *nodeElemCornerList,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem,
Real_t *fx_node, Real_t *fy_node, Real_t *fz_node)
#endif
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
//#ifdef AFTER_KERNEL1
// Int_t count=tex1Dfetch(tex_nodeElemCount,i);
//#else
Int_t count=nodeElemCount[i];
//#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCount
//number of array ele is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
//cuPrintf("sizeof(int)=%d, sizeof(real8)=%d\n", sizeof(int), sizeof(real8));
        //4 bytes for int and 8 bytes for real8
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++) {
//#ifdef AFTER_KERNEL2
// Index_t elem=tex1Dfetch(tex_nodeElemCornerList,i+numNode*j);
//#else
Index_t elem=nodeElemCornerList[i+numNode*j];
//#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodeElemCornerList: the num of array elem is m_numNode*8
// which is edgeNodes^3*8=(45+1)^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i+numNode*j);
}
#endif
#ifdef AFTER_KERNEL2
fx+=tex1Dfetch(tex_fx_elem,elem); fy+=tex1Dfetch(tex_fy_elem,elem); fz+=tex1Dfetch(tex_fz_elem,elem);
#else
fx+=fx_elem[elem]; fy+=fy_elem[elem]; fz+=fz_elem[elem];
#endif
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem, fy_elem, fz_elem
//number of array elem is numElem*8=edgeElems^3*8=45^3*8
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
}
fx_node[i]+=fx; fy_node[i]+=fy; fz_node[i]+=fz;
#ifdef DP_PROFILING_KERNEL2
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_node, fy_node, fz_node
//number of array elem is numNode=edgeNodes^3=(45+1)^3
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
static inline
void IntegrateStressForElems_gpu( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol)
{
Real_t *fx_elem,*fy_elem,*fz_elem;
CUDA( cudaMalloc(&fx_elem,numElem*8*sizeof(Real_t)) );
CUDA( cudaMalloc(&fy_elem,numElem*8*sizeof(Real_t)) );
CUDA( cudaMalloc(&fz_elem,numElem*8*sizeof(Real_t)) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
IntegrateStressForElems_kernel<<<dimGrid,dimBlock>>>
(numElem, meshGPU.m_nodelist, meshGPU.m_x, meshGPU.m_y, meshGPU.m_z,
fx_elem, fy_elem, fz_elem, sigxx, sigyy, sigzz, determ);
CUDA_DEBUGSYNC;
dimGrid=dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
#ifdef AFTER_KERNEL1
cudaBindTexture(0,tex_nodeElemCount,meshGPU.m_nodeElemCount,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_nodeElemCornerList,meshGPU.m_nodeElemCornerList,8*mesh.numNode()*sizeof(int));
    cudaBindTexture(0,tex_fx_elem,fx_elem,8*mesh.numElem()*sizeof(Real_t));
    cudaBindTexture(0,tex_fy_elem,fy_elem,8*mesh.numElem()*sizeof(Real_t));
    cudaBindTexture(0,tex_fz_elem,fz_elem,8*mesh.numElem()*sizeof(Real_t)); // bind sizeof(Real_t) per entry so the full arrays are covered even when Real_t is not float
#endif
#ifdef DP_PROFILING_KERNEL1
printf("Kernel1: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel1: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
printf("dimension: 1 1 1 1 1 1 1 1\n");
printf("sizeof %d %d %d %d %d %d %d %d\n",sizeof(Int_t),sizeof(Index_t)
,sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t),sizeof(Real_t));
int yy_size = numElem*8*sizeof(Real_t);
printf("array_size %d %d %d %d %d %d %d %d\n",mesh.numNode()*sizeof(Int_t),8*mesh.numNode()*sizeof(Index_t),yy_size,yy_size,yy_size,
mesh.numNode()*sizeof(Int_t),mesh.numNode()*sizeof(Int_t),mesh.numNode()*sizeof(Int_t));
AddNodeForcesFromElems_kernel<<<dimGrid,dimBlock>>>
(mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz, its);
//cudaPrintfDisplay(stdout, true);
cudaPrintfDisplay(stdout, false);
#else
cudaEvent_t k1_start, k1_stop;
cudaEventCreate(&k1_start);
cudaEventCreate(&k1_stop);
cudaEventRecord(k1_start,0);
AddNodeForcesFromElems_kernel<<<dimGrid,dimBlock>>>
(mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz);
cudaDeviceSynchronize();
cudaEventRecord(k1_stop, 0);
cudaEventSynchronize(k1_stop);
float k1_time=0.0;
cudaEventElapsedTime(&k1_time, k1_start, k1_stop);
printf("Kernel 1 time = %f\n",k1_time);
k1+=k1_time;
#endif
CUDA_DEBUGSYNC;
CUDA( cudaFree(fx_elem) );
CUDA( cudaFree(fy_elem) );
CUDA( cudaFree(fz_elem) );
// JDC -- need a reduction step to check for non-positive element volumes
badvol=0;
}
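// Note: unlike the CPU path below, the GPU path currently reports badvol=0
// unconditionally (see the JDC comment above); matching the CPU check would require a
// device-side reduction over determ (e.g. an atomic flag set whenever determ[k] <= 0).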
static inline
void IntegrateStressForElems_cpu( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol)
{
Real_t B[3][8] ;// shape function derivatives
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t fx_local[8] ;
Real_t fy_local[8] ;
Real_t fz_local[8] ;
// loop over all elements
for( Index_t k=0 ; k<numElem ; ++k )
{
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
/* Volume calculation involves extra work for numerical consistency. */
CalcElemShapeFunctionDerivatives(x_local, y_local, z_local,
B, &determ[k]);
CalcElemNodeNormals( B[0] , B[1], B[2],
x_local, y_local, z_local );
SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k],
fx_local, fy_local, fz_local, 1 ) ;
      // copy nodal force contributions to global force array.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
mesh.fx(gnode) += fx_local[lnode];
mesh.fy(gnode) += fy_local[lnode];
mesh.fz(gnode) += fz_local[lnode];
}
}
badvol=0;
for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
badvol=1;
}
}
}
static inline
void IntegrateStressForElems( Index_t numElem,
Real_t *sigxx, Real_t *sigyy, Real_t *sigzz,
Real_t *determ, int& badvol, int useCPU)
{
if (useCPU) {
FC(nodelist); FC(x); FC(y); FC(z);
IntegrateStressForElems_cpu(numElem,sigxx,sigyy,sigzz,determ,badvol);
SG(fx); SG(fy); SG(fz);
}
else {
FG(nodelist); FG(nodeElemCount); FG(nodeElemCornerList);
FG(x); FG(y); FG(z);
IntegrateStressForElems_gpu(numElem,sigxx,sigyy,sigzz,determ,badvol);
SC(fx); SC(fy); SC(fz);
}
}
static inline
void CollectDomainNodesToElemNodes(const Index_t elemNum,
Real_t elemX[8],
Real_t elemY[8],
Real_t elemZ[8])
{
Index_t nd0i = mesh.nodelist(elemNum,0) ;
Index_t nd1i = mesh.nodelist(elemNum,1) ;
Index_t nd2i = mesh.nodelist(elemNum,2) ;
Index_t nd3i = mesh.nodelist(elemNum,3) ;
Index_t nd4i = mesh.nodelist(elemNum,4) ;
Index_t nd5i = mesh.nodelist(elemNum,5) ;
Index_t nd6i = mesh.nodelist(elemNum,6) ;
Index_t nd7i = mesh.nodelist(elemNum,7) ;
elemX[0] = mesh.x(nd0i);
elemX[1] = mesh.x(nd1i);
elemX[2] = mesh.x(nd2i);
elemX[3] = mesh.x(nd3i);
elemX[4] = mesh.x(nd4i);
elemX[5] = mesh.x(nd5i);
elemX[6] = mesh.x(nd6i);
elemX[7] = mesh.x(nd7i);
elemY[0] = mesh.y(nd0i);
elemY[1] = mesh.y(nd1i);
elemY[2] = mesh.y(nd2i);
elemY[3] = mesh.y(nd3i);
elemY[4] = mesh.y(nd4i);
elemY[5] = mesh.y(nd5i);
elemY[6] = mesh.y(nd6i);
elemY[7] = mesh.y(nd7i);
elemZ[0] = mesh.z(nd0i);
elemZ[1] = mesh.z(nd1i);
elemZ[2] = mesh.z(nd2i);
elemZ[3] = mesh.z(nd3i);
elemZ[4] = mesh.z(nd4i);
elemZ[5] = mesh.z(nd5i);
elemZ[6] = mesh.z(nd6i);
elemZ[7] = mesh.z(nd7i);
}
__host__
static inline
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}
#if 0
__device__
static inline
void VOLUDER(const Real_t a0, const Real_t a1, const Real_t a2,
const Real_t a3, const Real_t a4, const Real_t a5,
const Real_t b0, const Real_t b1, const Real_t b2,
const Real_t b3, const Real_t b4, const Real_t b5,
Real_t& dvdc)
{
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
dvdc=
(a1 + a2) * (b0 + b1) - (a0 + a1) * (b1 + b2) +
(a0 + a4) * (b3 + b4) - (a3 + a4) * (b0 + b4) -
(a2 + a5) * (b3 + b5) + (a3 + a5) * (b2 + b5);
dvdc *= twelfth;
}
#else
// Even though the above version is inlined, it seems to prohibit some kind of compiler optimization.
// This macro version uses many fewer registers and avoids spill-over into local memory.
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
#endif
__host__
static inline
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}
__device__
static inline
void CalcElemVolumeDerivative(Real_t& dvdx,
Real_t& dvdy,
Real_t& dvdz,
const Real_t x,
const Real_t y,
const Real_t z,
unsigned int node)
{
__shared__ Real_t array1[256],array2[256];
volatile Real_t *va1;
volatile Real_t *va2;
unsigned int idx,elem;
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
default:
{ind0=ind1=ind2=ind3=ind4=ind5=0xFFFFFFFF;
break;}
}
idx=threadIdx.x;
elem=idx /*& 0x1F*/ - node*32;
va1=&array1[0];
va2=&array2[0];
// load y and z
__syncthreads();
va1[idx]=y; va2[idx]=z;
__syncthreads();
VOLUDER(va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
dvdx);
// load x
__syncthreads();
va1[idx]=x;
__syncthreads();
VOLUDER(va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
dvdy);
__syncthreads();
// load y
__syncthreads();
va2[idx]=y;
__syncthreads();
VOLUDER(va1[ind0*32+elem],va1[ind1*32+elem],va1[ind2*32+elem],
va1[ind3*32+elem],va1[ind4*32+elem],va1[ind5*32+elem],
va2[ind0*32+elem],va2[ind1*32+elem],va2[ind2*32+elem],
va2[ind3*32+elem],va2[ind4*32+elem],va2[ind5*32+elem],
dvdz);
__syncthreads();
}
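// The device variant above stages one coordinate component at a time through shared
// memory: threads are laid out as tid = node*32 + elem, so va1/va2 hold that component
// for all 8 nodes of 32 elements, and VOLUDER gathers the six neighbouring nodes
// (ind0..ind5) of the thread's element without another trip to global memory.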
__host__
static inline
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] = coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] = coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] = coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] = coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] = coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] = coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] = coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] = coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}
__shared__ Real_t shm_array[32*8];
__device__
static inline
Real_t SumOverNodes(Real_t val) {
// Sum up 8 node values for each element
// Assumes 256 threads: 32 elements, 8 nodes per element.
// NOTE: we could probably avoid some of the __syncthreads() if we map 8 nodes
// of an element to the same warp.
unsigned int tid=threadIdx.x;
#if 1
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#elif 1
unsigned int node=tid/32;
unsigned int elem=tid-(node*32);
#else
unsigned int elem=tid & 0x1F;
#endif
__syncthreads();
shm_array[tid]=val;
__syncthreads();
if (tid<128) shm_array[tid]+=shm_array[tid+128];
__syncthreads();
if (tid<64) shm_array[tid]+=shm_array[tid+64];
__syncthreads();
if (tid<32) shm_array[tid]+=shm_array[tid+32];
__syncthreads();
Real_t ret=shm_array[elem];
__syncthreads();
return ret;
#else
#if 0
unsigned int node=tid>>5;
unsigned int elem=tid-(node<<5);
#else
unsigned int node=tid/32;
unsigned int elem=tid-(node*32);
#endif
unsigned int idx=elem*8+node;
__syncthreads();
shm_array[idx]=val;
__syncthreads();
if (node<4) shm_array[idx]+=shm_array[idx+4];
if (node<2) shm_array[idx]+=shm_array[idx+2];
if (node<1) shm_array[idx]+=shm_array[idx+1];
__syncthreads();
return shm_array[elem*8];
#endif
}
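// After the reduction above, shm_array[0..31] holds the per-element totals, so every
// thread (whichever node it handles) receives the sum of the 8 per-node values
// contributed for its element.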
__device__
static inline
void CalcElemFBHourglassForce(Real_t xd,Real_t yd,Real_t zd,
Real_t *hourgam,Real_t coefficient,
Real_t &hgfx, Real_t &hgfy, Real_t &hgfz)
{
hgfx=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xd;
h=SumOverNodes(h);
hgfx+=hourgam[i]*h;
}
hgfx *= coefficient;
hgfy=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*yd;
h=SumOverNodes(h);
hgfy+=hourgam[i]*h;
}
hgfy *= coefficient;
hgfz=0;
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zd;
h=SumOverNodes(h);
hgfz+=hourgam[i]*h;
}
hgfz *= coefficient;
}
#ifdef DP_PROFILING_KERNEL3
__global__
void CalcFBHourglassForceForElems_kernel(
Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg,
Index_t numElem, Index_t *nodelist,
Real_t *ss, Real_t *elemMass,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem, int its)
#else
__global__
void CalcFBHourglassForceForElems_kernel(
Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg,
Index_t numElem, Index_t *nodelist,
Real_t *ss, Real_t *elemMass,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *fx_elem, Real_t *fy_elem, Real_t *fz_elem)
#endif
{
/*************************************************
*
* FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
* force.
*
*************************************************/
Real_t hgfx, hgfy, hgfz;
Real_t coefficient;
Real_t hourgam[4];
Real_t xd1, yd1, zd1;
/*************************************************/
/* compute the hourglass modes */
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
// Assume we will launch 256 threads, which we map to 32 elements, each
// with 8 per-node threads. Organize so each warp of 32 consecutive
// threads operates on the same node of different elements.
// THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!!
unsigned int tid=threadIdx.x;
unsigned int bid=blockIdx.x;
#if 0
unsigned int node=tid>>5;
    unsigned int elem=(bid<<5) + (tid - (node<<5)); // parenthesize the shift: '<<' binds more loosely than '+'
#elif 1
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid-node*32);
#elif 0
    unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid & 0x1F);
#elif 0
unsigned int node=tid/32;
    unsigned int elem=(bid<<5) + (tid & 0x1F); // parenthesize the shift: '<<' binds more loosely than '+'
#elif 0
unsigned int node=tid>>5;
unsigned int elem=bid*32 + (tid & 0x1F);
#else
unsigned int node=tid>>5;
    unsigned int elem=(bid<<5) + (tid & 0x1F); // parenthesize the shift: '<<' binds more loosely than '+'
#endif
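// (The bid<<5 variants above originally differed from the active branch because '<<'
//  binds more loosely than '+'; with the parentheses added they all compute the same
//  (element,node) mapping.)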
if (elem>=numElem) elem=numElem-1; // don't return -- need thread to participate in sync operations
//if (elem<0) elem=0; // debugging test
#ifdef AFTER_KERNEL3
Real_t volinv=Real_t(1.0)/tex1Dfetch(tex_determ,elem);
#else
Real_t volinv=Real_t(1.0)/determ[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
Real_t ss1, mass1, volume13 ;
Real_t xn,yn,zn,dvdxn,dvdyn,dvdzn;
Real_t hourmodx, hourmody, hourmodz;
#if 1
//#ifdef AFTER_KERNEL3
//xn=tex1Dfetch(tex_x8n,elem+numElem*node); yn=tex1Dfetch(tex_y8n,elem+numElem*node); zn=tex1Dfetch(tex_z8n,elem+numElem*node);
// dvdxn=tex1Dfetch(tex_dvdx,elem+numElem*node); dvdyn=tex1Dfetch(tex_dvdy,elem+numElem*node); dvdzn=tex1Dfetch(tex_dvdz,elem+numElem*node);
xn=x8n[elem+numElem*node]; yn=y8n[elem+numElem*node]; zn=z8n[elem+numElem*node];
dvdxn=dvdx[elem+numElem*node]; dvdyn=dvdy[elem+numElem*node]; dvdzn=dvdz[elem+numElem*node];
//#else
// xn=x8n[elem+numElem*node]; yn=y8n[elem+numElem*node]; zn=z8n[elem+numElem*node];
// dvdxn=dvdx[elem+numElem*node]; dvdyn=dvdy[elem+numElem*node]; dvdzn=dvdz[elem+numElem*node];
//#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//x8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t;
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//y8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//z8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t;
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdx
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdy
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("5 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//dvdz
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("6 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif //if KERNEL3
#else
xn=yn=zn=posf; dvdxn=dvdyn=dvdzn=negf;
#endif
#if 1
#ifdef DP_PROFILING_KERNEL3
    //Dong: no profiling instrumentation for SumOverNodes -- its data already lives in
    //      shared memory, which appears to be optimal.
#endif
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
hourmodx = SumOverNodes(hourmodx);
hourmody = SumOverNodes(hourmody);
hourmodz = SumOverNodes(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/* compute forces */
/* store forces into h arrays (force arrays) */
#ifdef AFTER_KERNEL3
ss1=tex1Dfetch(tex_ss,elem);
#else
ss1=ss[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//ss
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("7 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#ifdef AFTER_KERNEL3
mass1=tex1Dfetch(tex_elemMass,elem);
// mass1 = elemMass[elem];
#else
mass1=elemMass[elem];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//elemMass
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("8 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#ifdef AFTER_KERNEL3
volume13=CBRT(tex1Dfetch(tex_determ,elem));
#else
volume13=CBRT(determ[elem]);
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("0 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
//#ifdef AFTER_KERNEL3
// Index_t ni = tex1Dfetch(tex3_nodelist,elem+numElem*node);
//#else
Index_t ni = nodelist[elem+numElem*node];
//#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodelist
//The number of nodelist ele is 8*mesh.numElem=8*(numElem^3)=8*45^3 (note: 8 is not data type)
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("9 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif
#ifdef AFTER_KERNEL3
xd1=tex1Dfetch(tex_xd,ni); yd1=tex1Dfetch(tex_yd,ni); zd1=tex1Dfetch(tex_zd,ni);
//xd1=xd[ni]; yd1=yd[ni]; zd1=zd[ni];
#else
xd1=xd[ni]; yd1=yd[ni]; zd1=zd[ni];
#endif
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//xd
//number of xd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("10 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//yd
//number of yd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//zd
//number of zd ele is mesh.numNode=edgeNodes*edgeNodes*edgeNodes=46^3;
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
}
#endif
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,hourgam,coefficient,hgfx,hgfy,hgfz);
#ifdef DP_PROFILING_KERNEL3
    //Dong: no profiling instrumentation for the call above -- its data already lives in
    //      shared memory, which appears to be optimal.
#endif
#else
hgfx=xn+dvdxn; hgfy=yn+dvdyn; hgfz=zn+dvdzn;
#endif
#if 1
fx_elem[elem+numElem*node]=hgfx; fy_elem[elem+numElem*node]=hgfy; fz_elem[elem+numElem*node]=hgfz;
#ifdef DP_PROFILING_KERNEL3
if(blockIdx.x==0 && blockIdx.y==0)
{
//fx_elem
//number of fx_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("13 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//fy_elem
//number of fy_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("14 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
//fz_elem
//number of fz_elem ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("15 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem+numElem*node);
}
#endif
#else
fx_elem[0]=hgfx; fy_elem[0]=hgfy; fz_elem[0]=hgfz;
#endif
}
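// Thread-mapping recap for the kernel above: each 256-thread block covers 32 elements
// with one thread per (element,node) pair (tid = node*32 + elem); the per-corner
// hourglass forces are written node-major as fx_elem[elem + numElem*node], ready for
// an AddNodeForcesFromElems-style gather through nodeElemCornerList.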
static inline
void CalcFBHourglassForceForElems_cpu(Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg)
{
/*************************************************
*
* FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
* force.
*
*************************************************/
Index_t numElem = mesh.numElem() ;
Real_t hgfx[8], hgfy[8], hgfz[8] ;
Real_t coefficient;
Real_t gamma[4][8];
Real_t hourgam0[4], hourgam1[4], hourgam2[4], hourgam3[4] ;
Real_t hourgam4[4], hourgam5[4], hourgam6[4], hourgam7[4];
Real_t xd1[8], yd1[8], zd1[8] ;
gamma[0][0] = Real_t( 1.);
gamma[0][1] = Real_t( 1.);
gamma[0][2] = Real_t(-1.);
gamma[0][3] = Real_t(-1.);
gamma[0][4] = Real_t(-1.);
gamma[0][5] = Real_t(-1.);
gamma[0][6] = Real_t( 1.);
gamma[0][7] = Real_t( 1.);
gamma[1][0] = Real_t( 1.);
gamma[1][1] = Real_t(-1.);
gamma[1][2] = Real_t(-1.);
gamma[1][3] = Real_t( 1.);
gamma[1][4] = Real_t(-1.);
gamma[1][5] = Real_t( 1.);
gamma[1][6] = Real_t( 1.);
gamma[1][7] = Real_t(-1.);
gamma[2][0] = Real_t( 1.);
gamma[2][1] = Real_t(-1.);
gamma[2][2] = Real_t( 1.);
gamma[2][3] = Real_t(-1.);
gamma[2][4] = Real_t( 1.);
gamma[2][5] = Real_t(-1.);
gamma[2][6] = Real_t( 1.);
gamma[2][7] = Real_t(-1.);
gamma[3][0] = Real_t(-1.);
gamma[3][1] = Real_t( 1.);
gamma[3][2] = Real_t(-1.);
gamma[3][3] = Real_t( 1.);
gamma[3][4] = Real_t( 1.);
gamma[3][5] = Real_t(-1.);
gamma[3][6] = Real_t( 1.);
gamma[3][7] = Real_t(-1.);
/*************************************************/
/* compute the hourglass modes */
for(Index_t i2=0;i2<numElem;++i2){
Index_t i3=8*i2;
Real_t volinv=Real_t(1.0)/determ[i2];
Real_t ss1, mass1, volume13 ;
for(Index_t i1=0;i1<4;++i1){
Real_t hourmodx =
x8n[i3] * gamma[i1][0] + x8n[i3+1] * gamma[i1][1] +
x8n[i3+2] * gamma[i1][2] + x8n[i3+3] * gamma[i1][3] +
x8n[i3+4] * gamma[i1][4] + x8n[i3+5] * gamma[i1][5] +
x8n[i3+6] * gamma[i1][6] + x8n[i3+7] * gamma[i1][7];
Real_t hourmody =
y8n[i3] * gamma[i1][0] + y8n[i3+1] * gamma[i1][1] +
y8n[i3+2] * gamma[i1][2] + y8n[i3+3] * gamma[i1][3] +
y8n[i3+4] * gamma[i1][4] + y8n[i3+5] * gamma[i1][5] +
y8n[i3+6] * gamma[i1][6] + y8n[i3+7] * gamma[i1][7];
Real_t hourmodz =
z8n[i3] * gamma[i1][0] + z8n[i3+1] * gamma[i1][1] +
z8n[i3+2] * gamma[i1][2] + z8n[i3+3] * gamma[i1][3] +
z8n[i3+4] * gamma[i1][4] + z8n[i3+5] * gamma[i1][5] +
z8n[i3+6] * gamma[i1][6] + z8n[i3+7] * gamma[i1][7];
hourgam0[i1] = gamma[i1][0] - volinv*(dvdx[i3 ] * hourmodx +
dvdy[i3 ] * hourmody +
dvdz[i3 ] * hourmodz );
hourgam1[i1] = gamma[i1][1] - volinv*(dvdx[i3+1] * hourmodx +
dvdy[i3+1] * hourmody +
dvdz[i3+1] * hourmodz );
hourgam2[i1] = gamma[i1][2] - volinv*(dvdx[i3+2] * hourmodx +
dvdy[i3+2] * hourmody +
dvdz[i3+2] * hourmodz );
hourgam3[i1] = gamma[i1][3] - volinv*(dvdx[i3+3] * hourmodx +
dvdy[i3+3] * hourmody +
dvdz[i3+3] * hourmodz );
hourgam4[i1] = gamma[i1][4] - volinv*(dvdx[i3+4] * hourmodx +
dvdy[i3+4] * hourmody +
dvdz[i3+4] * hourmodz );
hourgam5[i1] = gamma[i1][5] - volinv*(dvdx[i3+5] * hourmodx +
dvdy[i3+5] * hourmody +
dvdz[i3+5] * hourmodz );
hourgam6[i1] = gamma[i1][6] - volinv*(dvdx[i3+6] * hourmodx +
dvdy[i3+6] * hourmody +
dvdz[i3+6] * hourmodz );
hourgam7[i1] = gamma[i1][7] - volinv*(dvdx[i3+7] * hourmodx +
dvdy[i3+7] * hourmody +
dvdz[i3+7] * hourmodz );
}
/* compute forces */
/* store forces into h arrays (force arrays) */
ss1=mesh.ss(i2);
mass1=mesh.elemMass(i2);
volume13=CBRT(determ[i2]);
Index_t n0si2 = mesh.nodelist(i2,0);
Index_t n1si2 = mesh.nodelist(i2,1);
Index_t n2si2 = mesh.nodelist(i2,2);
Index_t n3si2 = mesh.nodelist(i2,3);
Index_t n4si2 = mesh.nodelist(i2,4);
Index_t n5si2 = mesh.nodelist(i2,5);
Index_t n6si2 = mesh.nodelist(i2,6);
Index_t n7si2 = mesh.nodelist(i2,7);
xd1[0] = mesh.xd(n0si2);
xd1[1] = mesh.xd(n1si2);
xd1[2] = mesh.xd(n2si2);
xd1[3] = mesh.xd(n3si2);
xd1[4] = mesh.xd(n4si2);
xd1[5] = mesh.xd(n5si2);
xd1[6] = mesh.xd(n6si2);
xd1[7] = mesh.xd(n7si2);
yd1[0] = mesh.yd(n0si2);
yd1[1] = mesh.yd(n1si2);
yd1[2] = mesh.yd(n2si2);
yd1[3] = mesh.yd(n3si2);
yd1[4] = mesh.yd(n4si2);
yd1[5] = mesh.yd(n5si2);
yd1[6] = mesh.yd(n6si2);
yd1[7] = mesh.yd(n7si2);
zd1[0] = mesh.zd(n0si2);
zd1[1] = mesh.zd(n1si2);
zd1[2] = mesh.zd(n2si2);
zd1[3] = mesh.zd(n3si2);
zd1[4] = mesh.zd(n4si2);
zd1[5] = mesh.zd(n5si2);
zd1[6] = mesh.zd(n6si2);
zd1[7] = mesh.zd(n7si2);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
CalcElemFBHourglassForce(xd1,yd1,zd1,
hourgam0,hourgam1,hourgam2,hourgam3,
hourgam4,hourgam5,hourgam6,hourgam7,
coefficient, hgfx, hgfy, hgfz);
mesh.fx(n0si2) += hgfx[0];
mesh.fy(n0si2) += hgfy[0];
mesh.fz(n0si2) += hgfz[0];
mesh.fx(n1si2) += hgfx[1];
mesh.fy(n1si2) += hgfy[1];
mesh.fz(n1si2) += hgfz[1];
mesh.fx(n2si2) += hgfx[2];
mesh.fy(n2si2) += hgfy[2];
mesh.fz(n2si2) += hgfz[2];
mesh.fx(n3si2) += hgfx[3];
mesh.fy(n3si2) += hgfy[3];
mesh.fz(n3si2) += hgfz[3];
mesh.fx(n4si2) += hgfx[4];
mesh.fy(n4si2) += hgfy[4];
mesh.fz(n4si2) += hgfz[4];
mesh.fx(n5si2) += hgfx[5];
mesh.fy(n5si2) += hgfy[5];
mesh.fz(n5si2) += hgfz[5];
mesh.fx(n6si2) += hgfx[6];
mesh.fy(n6si2) += hgfy[6];
mesh.fz(n6si2) += hgfz[6];
mesh.fx(n7si2) += hgfx[7];
mesh.fy(n7si2) += hgfy[7];
mesh.fz(n7si2) += hgfz[7];
}
}
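// GPU variant of the routine above: kernel 3 computes the per-element, per-corner
// hourglass force contributions into fx_elem/fy_elem/fz_elem (numElem*8 entries each),
// and kernel 2 then gathers those contributions into the nodal force arrays.
// The AFTER_KERNEL* paths additionally bind the read-only inputs to textures.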
static inline
void CalcFBHourglassForceForElems_gpu(Real_t *determ,
Real_t *x8n, Real_t *y8n, Real_t *z8n,
Real_t *dvdx, Real_t *dvdy, Real_t *dvdz,
Real_t hourg)
{
Index_t numElem = mesh.numElem();
Real_t *fx_elem,*fy_elem,*fz_elem;
CUDA( cudaMalloc(&fx_elem,numElem*8*sizeof(Real_t)) );
CUDA( cudaMalloc(&fy_elem,numElem*8*sizeof(Real_t)) );
CUDA( cudaMalloc(&fz_elem,numElem*8*sizeof(Real_t)) );
dim3 dimBlock=dim3(256,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem*8,dimBlock.x),1,1);
#ifdef AFTER_KERNEL3
cudaBindTexture(0,tex_determ,determ,numElem*sizeof(Real_t));
cudaBindTexture(0,tex_x8n,x8n,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex_y8n,y8n,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex_z8n,z8n,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex_dvdx,dvdx,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex_dvdy,dvdy,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex_dvdz,dvdz,8*numElem*sizeof(Real_t));
cudaBindTexture(0,tex3_nodelist,meshGPU.m_nodelist,8*mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_ss,meshGPU.m_ss,mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_elemMass,meshGPU.m_elemMass,mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_xd,meshGPU.m_xd,mesh.numNode()*sizeof(Real_t));
cudaBindTexture(0,tex_yd,meshGPU.m_yd,mesh.numNode()*sizeof(Real_t));
cudaBindTexture(0,tex_zd,meshGPU.m_zd,mesh.numNode()*sizeof(Real_t));
#endif
#ifdef DP_PROFILING_KERNEL3
printf("Kernel3: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel3: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
CalcFBHourglassForceForElems_kernel<<<dimGrid,dimBlock>>>(
determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hourg,
numElem,meshGPU.m_nodelist,
meshGPU.m_ss,meshGPU.m_elemMass,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
fx_elem,fy_elem,fz_elem, its);
cudaPrintfDisplay(stdout, false);
#else
cudaEvent_t k3_start, k3_stop;
cudaEventCreate(&k3_start);
cudaEventCreate(&k3_stop);
cudaEventRecord(k3_start,0);
CalcFBHourglassForceForElems_kernel<<<dimGrid,dimBlock>>>(
determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hourg,
numElem,meshGPU.m_nodelist,
meshGPU.m_ss,meshGPU.m_elemMass,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
fx_elem,fy_elem,fz_elem);
cudaDeviceSynchronize();
cudaEventRecord(k3_stop, 0);
cudaEventSynchronize(k3_stop);
float k3_time=0.0;
cudaEventElapsedTime(&k3_time, k3_start, k3_stop);
printf("Kernel 3 time = %f\n",k3_time);
k3+=k3_time;
#endif
CUDA_DEBUGSYNC;
dimGrid=dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
#ifdef AFTER_KERNEL2
cudaBindTexture(0,tex_nodeElemCount,meshGPU.m_nodeElemCount,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_nodeElemCornerList,meshGPU.m_nodeElemCornerList,8*mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_fx_elem,fx_elem,8*mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_fy_elem,fy_elem,8*mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_fz_elem,fz_elem,8*mesh.numElem()*sizeof(Real_t));
#endif
#ifdef DP_PROFILING_KERNEL2
printf("Kernel2: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel2: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
AddNodeForcesFromElems2_kernel<<<dimGrid,dimBlock>>>
(mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz, its);
cudaPrintfDisplay(stdout, false);
#else
cudaEvent_t k2_start, k2_stop;
cudaEventCreate(&k2_start);
cudaEventCreate(&k2_stop);
cudaEventRecord(k2_start,0);
AddNodeForcesFromElems2_kernel<<<dimGrid,dimBlock>>>
(mesh.numNode(),meshGPU.m_nodeElemCount,meshGPU.m_nodeElemCornerList,
fx_elem,fy_elem,fz_elem,meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz);
cudaDeviceSynchronize();
cudaEventRecord(k2_stop, 0);
cudaEventSynchronize(k2_stop);
float k2_time=0.0;
cudaEventElapsedTime(&k2_time, k2_start, k2_stop);
printf("Kernel 2 time = %f\n",k2_time);
k2+=k2_time;
#endif
CUDA_DEBUGSYNC;
CUDA( cudaFree(fx_elem) );
CUDA( cudaFree(fy_elem) );
CUDA( cudaFree(fz_elem) );
}
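// Kernel 4: one thread per (element, corner) pair -- 8 threads per element,
// 32 elements per 256-thread block. Each thread gathers its corner's coordinates,
// computes that corner's volume-derivative contribution, and stores both into the
// numElem*8 scratch arrays; determ[elem] = volo*v is written redundantly by all 8 threads.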
#ifdef DP_PROFILING_KERNEL4
__global__
void CalcHourglassControlForElems_kernel(Int_t numElem,Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,
Real_t *determ,Real_t *volo,Real_t *v,
Real_t *dvdx,Real_t *dvdy,Real_t *dvdz,
Real_t *x8n,Real_t *y8n,Real_t *z8n, int its)
#else
__global__
void CalcHourglassControlForElems_kernel(Int_t numElem,Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,
Real_t *determ,Real_t *volo,Real_t *v,
Real_t *dvdx,Real_t *dvdy,Real_t *dvdz,
Real_t *x8n,Real_t *y8n,Real_t *z8n)
#endif
{
Real_t x1,y1,z1;
Real_t pfx,pfy,pfz;
// THESE ARE ALL GIVING ME DIFFERENT ANSWERS IN CUDA 4.0 !!?!!?!!
unsigned int tid=threadIdx.x;
unsigned int bid=blockIdx.x;
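// Note on the disabled variants below: in the branches written as "bid<<5 + x",
// '+' binds tighter than '<<', so the expression parses as bid << (5 + x) rather
// than (bid<<5) + x -- which would explain the differing answers noted above.
// The enabled branch uses plain multiplication and is unaffected.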
#if 0
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid - (node<<5));
#elif 1
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid-node*32);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid*32 + (tid & 0x1F);
#elif 0
unsigned int node=tid/32;
unsigned int elem=bid<<5 + (tid & 0x1F);
#elif 0
unsigned int node=tid>>5;
unsigned int elem=bid*32 + (tid & 0x1F);
#else
unsigned int node=tid>>5;
unsigned int elem=bid<<5 + (tid & 0x1F);
#endif
if (elem>=numElem) elem=numElem-1; // don't return -- need thread to participate in sync operations
Index_t idx=elem+numElem*node;
#ifdef AFTER_KERNEL4
Index_t ni = tex1Dfetch(tex_nodelist,idx);
#else
Index_t ni = nodelist[idx];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//nodelist
//number of nodelist ele is numElem*8=edgeElems^3*8=45^3*8
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
}
#endif
#ifdef AFTER_KERNEL4
x1=tex1Dfetch(tex_x,ni); y1=tex1Dfetch(tex_y,ni); z1=tex1Dfetch(tex_z,ni);
#else
x1=x[ni]; y1=y[ni]; z1=z[ni];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//x
//number of x ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//y
//number of y ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
//z
//number of z ele is edgeNodes^3=46^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ni);
}
#endif
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1, node);
#ifdef DP_PROFILING_KERNEL4
//Dong: don't optimize the above; the major data is already in shared mem
#endif
/* load into temporary storage for FB Hour Glass control */
dvdx[idx] = pfx;
dvdy[idx] = pfy;
dvdz[idx] = pfz;
x8n[idx] = x1;
y8n[idx] = y1;
z8n[idx] = z1;
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//dvdx
//number of dvdx ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("4 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//dvdy
//number of dvdy ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//dvdz
//number of dvdz ele is numElem8=numElem*8=edgeElems^3*8=45^3*8
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("6 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//x8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("7 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//y8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
//z8n
//number of array ele is numElem8=numElem*8=edgeElems^3*8=45^3*8;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("9 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, idx);
}
#endif
#ifdef AFTER_KERNEL4
determ[elem] = tex1Dfetch(tex_volo,elem) * tex1Dfetch(tex_v,elem);
#else
//if (node==0)
determ[elem] = volo[elem] * v[elem];
#endif
#ifdef DP_PROFILING_KERNEL4
if(blockIdx.x==0 && blockIdx.y==0)
{
//determ
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("10 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
//volo
//number of array ele is numElem=edgeElems^3=45^3;
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
//v
//number of array ele is numElem=edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, elem);
}
#endif
#if 0 // JDC
/* Do a check for negative volumes */
if ( mesh.v(i) <= Real_t(0.0) ) {
exit(VolumeError) ;
}
#endif
}
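// GPU driver: allocates numElem*8 scratch arrays for the volume derivatives
// (dvdx/dvdy/dvdz) and the gathered nodal coordinates (x8n/y8n/z8n), runs
// kernel 4 above to fill them, then hands them to the FB hourglass force stage
// when hgcoef > 0.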
static inline
void CalcHourglassControlForElems_gpu(Real_t determ[], Real_t hgcoef)
{
Index_t numElem = mesh.numElem() ;
Index_t numElem8 = numElem * 8 ;
Real_t *dvdx,*dvdy,*dvdz;
Real_t *x8n,*y8n,*z8n;
CUDA( cudaMalloc(&dvdx,sizeof(Real_t)*numElem8) );
CUDA( cudaMalloc(&dvdy,sizeof(Real_t)*numElem8) );
CUDA( cudaMalloc(&dvdz,sizeof(Real_t)*numElem8) );
CUDA( cudaMalloc(&x8n,sizeof(Real_t)*numElem8) );
CUDA( cudaMalloc(&y8n,sizeof(Real_t)*numElem8) );
CUDA( cudaMalloc(&z8n,sizeof(Real_t)*numElem8) );
dim3 dimBlock=dim3(256,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem*8,dimBlock.x),1,1);
#ifdef AFTER_KERNEL4
cudaBindTexture(0,tex_nodelist,meshGPU.m_nodelist,8*mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_x,meshGPU.m_x,mesh.numNode()*sizeof(Real_t));
cudaBindTexture(0,tex_y,meshGPU.m_y,mesh.numNode()*sizeof(Real_t));
cudaBindTexture(0,tex_z,meshGPU.m_z,mesh.numNode()*sizeof(Real_t));
cudaBindTexture(0,tex_volo,meshGPU.m_volo,mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_v,meshGPU.m_v,mesh.numElem()*sizeof(Real_t));
#endif
#ifdef DP_PROFILING_KERNEL4
printf("Kernel4: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel4: numNode=%d, numElem=%d\n", mesh.numNode(), numElem);
CalcHourglassControlForElems_kernel<<<dimGrid,dimBlock>>>
(numElem, meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,
determ,meshGPU.m_volo,meshGPU.m_v,
dvdx,dvdy,dvdz,x8n,y8n,z8n, its);
cudaPrintfDisplay(stdout, false);
#else
cudaEvent_t k4_start, k4_stop;
cudaEventCreate(&k4_start);
cudaEventCreate(&k4_stop);
cudaEventRecord(k4_start,0);
CalcHourglassControlForElems_kernel<<<dimGrid,dimBlock>>>
(numElem, meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,
determ,meshGPU.m_volo,meshGPU.m_v,
dvdx,dvdy,dvdz,x8n,y8n,z8n);
cudaDeviceSynchronize();
cudaEventRecord(k4_stop, 0);
cudaEventSynchronize(k4_stop);
float k4_time=0.0;
cudaEventElapsedTime(&k4_time, k4_start, k4_stop);
printf("Kernel 4 time = %f\n",k4_time);
k4+=k4_time;
#endif
CUDA_DEBUGSYNC;
// JDC -- need a reduction to check for negative volumes
if ( hgcoef > Real_t(0.) ) {
CalcFBHourglassForceForElems_gpu(determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hgcoef) ;
}
CUDA( cudaFree(dvdx) );
CUDA( cudaFree(dvdy) );
CUDA( cudaFree(dvdz) );
CUDA( cudaFree(x8n) );
CUDA( cudaFree(y8n) );
CUDA( cudaFree(z8n) );
return ;
}
static inline
void CalcHourglassControlForElems_cpu(Real_t determ[], Real_t hgcoef)
{
Index_t i, ii, jj ;
Real_t x1[8], y1[8], z1[8] ;
Real_t pfx[8], pfy[8], pfz[8] ;
Index_t numElem = mesh.numElem() ;
Index_t numElem8 = numElem * 8 ;
Real_t *dvdx = Allocate<Real_t>(numElem8) ;
Real_t *dvdy = Allocate<Real_t>(numElem8) ;
Real_t *dvdz = Allocate<Real_t>(numElem8) ;
Real_t *x8n = Allocate<Real_t>(numElem8) ;
Real_t *y8n = Allocate<Real_t>(numElem8) ;
Real_t *z8n = Allocate<Real_t>(numElem8) ;
/* start loop over elements */
for (i=0 ; i<numElem ; ++i){
CollectDomainNodesToElemNodes(i, x1, y1, z1);
CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);
/* load into temporary storage for FB Hour Glass control */
for(ii=0;ii<8;++ii){
jj=8*i+ii;
dvdx[jj] = pfx[ii];
dvdy[jj] = pfy[ii];
dvdz[jj] = pfz[ii];
x8n[jj] = x1[ii];
y8n[jj] = y1[ii];
z8n[jj] = z1[ii];
}
determ[i] = mesh.volo(i) * mesh.v(i);
/* Do a check for negative volumes */
if ( mesh.v(i) <= Real_t(0.0) ) {
exit(VolumeError) ;
}
}
if ( hgcoef > Real_t(0.) ) {
CalcFBHourglassForceForElems_cpu(determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hgcoef) ;
}
Release(&z8n) ;
Release(&y8n) ;
Release(&x8n) ;
Release(&dvdz) ;
Release(&dvdy) ;
Release(&dvdx) ;
return ;
}
static inline
void CalcHourglassControlForElems(Real_t determ[], Real_t hgcoef, int useCPU)
{
if (useCPU) {
FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
FC(nodelist); FC(ss); FC(elemMass);
FC(xd); FC(yd); FC(zd);
FC(fx); FC(fy); FC(fz);
CalcHourglassControlForElems_cpu(determ,hgcoef);
SG(fx); SG(fy); SG(fz);
}
else {
FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
FG(nodelist); FG(ss); FG(elemMass);
FG(xd); FG(yd); FG(zd);
FG(fx); FG(fy); FG(fz);
CalcHourglassControlForElems_gpu(determ,hgcoef);
SC(fx); SC(fy); SC(fz);
}
}
static inline
void CalcVolumeForceForElems_gpu()
{
Index_t numElem = mesh.numElem() ;
if (numElem != 0) {
Real_t hgcoef = mesh.hgcoef() ;
Real_t *sigxx, *sigyy, *sigzz, *determ;
int badvol;
CUDA( cudaMalloc(&sigxx,numElem*sizeof(Real_t)) );
CUDA( cudaMalloc(&sigyy,numElem*sizeof(Real_t)) );
CUDA( cudaMalloc(&sigzz,numElem*sizeof(Real_t)) );
CUDA( cudaMalloc(&determ,numElem*sizeof(Real_t)) );
/* Sum contributions to total stress tensor */
InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 0);
// call elemlib stress integration loop to produce nodal forces from
// material stresses.
IntegrateStressForElems( numElem, sigxx, sigyy, sigzz, determ, badvol, 0) ;
CUDA( cudaFree(sigxx) );
CUDA( cudaFree(sigyy) );
CUDA( cudaFree(sigzz) );
// check for negative element volume
if (badvol) exit(VolumeError) ;
CalcHourglassControlForElems(determ, hgcoef, 0) ;
CUDA( cudaFree(determ) );
}
}
static inline
void CalcVolumeForceForElems_cpu()
{
Index_t numElem = mesh.numElem() ;
if (numElem != 0) {
Real_t hgcoef = mesh.hgcoef() ;
Real_t *sigxx = Allocate<Real_t>(numElem) ;
Real_t *sigyy = Allocate<Real_t>(numElem) ;
Real_t *sigzz = Allocate<Real_t>(numElem) ;
Real_t *determ = Allocate<Real_t>(numElem) ;
int badvol;
/* Sum contributions to total stress tensor */
InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, 1);
// call elemlib stress integration loop to produce nodal forces from
// material stresses.
IntegrateStressForElems( numElem, sigxx, sigyy, sigzz, determ, badvol, 1) ;
Release(&sigzz) ;
Release(&sigyy) ;
Release(&sigxx) ;
// check for negative element volume
if (badvol) exit(VolumeError);
#if 0
for ( Index_t k=0 ; k<numElem ; ++k ) {
if (determ[k] <= Real_t(0.0)) {
exit(VolumeError) ;
}
}
#endif
CalcHourglassControlForElems(determ, hgcoef, 1) ;
Release(&determ) ;
}
}
static inline void CalcForceForNodes_gpu()
{
/* Calcforce calls partial, force, hourq */
CalcVolumeForceForElems_gpu() ;
/* Calculate Nodal Forces at domain boundaries */
/* problem->commSBN->Transfer(CommSBN::forces); */
}
static inline void CalcForceForNodes_cpu()
{
Index_t numNode = mesh.numNode() ;
for (Index_t i=0; i<numNode; ++i) {
mesh.fx(i) = Real_t(0.0) ;
mesh.fy(i) = Real_t(0.0) ;
mesh.fz(i) = Real_t(0.0) ;
}
/* Calcforce calls partial, force, hourq */
CalcVolumeForceForElems_cpu() ;
/* Calculate Nodal Forces at domain boundaries */
/* problem->commSBN->Transfer(CommSBN::forces); */
}
static inline void CalcForceForNodes(int useCPU)
{
if (useCPU) {
CalcForceForNodes_cpu();
}
else {
CalcForceForNodes_gpu();
}
}
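// One thread per node: acceleration = force / nodal mass in each direction.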
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
xdd[i]=fx[i]/nodalMass[i];
ydd[i]=fy[i]/nodalMass[i];
zdd[i]=fz[i]/nodalMass[i];
}
}
static inline
void CalcAccelerationForNodes_gpu()
{
dim3 dimBlock = dim3(BLOCKSIZE,1,1);
dim3 dimGrid = dim3(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
CalcAccelerationForNodes_kernel<<<dimGrid, dimBlock>>>
(mesh.numNode(),
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd,
meshGPU.m_fx,meshGPU.m_fy,meshGPU.m_fz,
meshGPU.m_nodalMass);
CUDA_DEBUGSYNC;
}
static inline
void CalcAccelerationForNodes_cpu()
{
Index_t numNode = mesh.numNode() ;
for (Index_t i = 0; i < numNode; ++i) {
mesh.xdd(i) = mesh.fx(i) / mesh.nodalMass(i);
mesh.ydd(i) = mesh.fy(i) / mesh.nodalMass(i);
mesh.zdd(i) = mesh.fz(i) / mesh.nodalMass(i);
}
}
static inline
void CalcAccelerationForNodes(int useCPU)
{
if (useCPU) {
FC(fx); FC(fy); FC(fz); FC(nodalMass);
CalcAccelerationForNodes_cpu();
SG(xdd); SG(ydd); SG(zdd);
}
else {
FG(fx); FG(fy); FG(fz); FG(nodalMass);
CalcAccelerationForNodes_gpu();
SC(xdd); SC(ydd); SC(zdd);
}
}
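// One thread per boundary node: zero the acceleration component normal to each
// symmetry plane (node indices come from the symmX/symmY/symmZ lists).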
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xdd, Real_t *ydd, Real_t *zdd,
Index_t *symmX, Index_t *symmY, Index_t *symmZ)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNodeBC) {
xdd[symmX[i]] = Real_t(0.0) ;
ydd[symmY[i]] = Real_t(0.0) ;
zdd[symmZ[i]] = Real_t(0.0) ;
}
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes_gpu()
{
Index_t numNodeBC = (mesh.sizeX()+1)*(mesh.sizeX()+1) ;
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(numNodeBC,dimBlock.x),1,1);
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(numNodeBC,
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd,
meshGPU.m_symmX,meshGPU.m_symmY,meshGPU.m_symmZ);
CUDA_DEBUGSYNC;
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes_cpu()
{
Index_t numNodeBC = (mesh.sizeX()+1)*(mesh.sizeX()+1) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.xdd(mesh.symmX(i)) = Real_t(0.0) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.ydd(mesh.symmY(i)) = Real_t(0.0) ;
for(Index_t i=0 ; i<numNodeBC ; ++i)
mesh.zdd(mesh.symmZ(i)) = Real_t(0.0) ;
}
static inline
void ApplyAccelerationBoundaryConditionsForNodes(int useCPU)
{
if (useCPU) {
FC(xdd); FC(ydd); FC(zdd); FC(symmX); FC(symmY); FC(symmZ);
ApplyAccelerationBoundaryConditionsForNodes_cpu();
SG(xdd); SG(ydd); SG(zdd);
}
else {
FG(xdd); FG(ydd); FG(zdd); FG(symmX); FG(symmY); FG(symmZ);
ApplyAccelerationBoundaryConditionsForNodes_gpu();
SC(xdd); SC(ydd); SC(zdd);
}
}
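// One thread per node: advance velocity by acceleration*dt and snap components
// smaller than u_cut to zero.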
__global__
void CalcVelocityForNodes_kernel(int numNode, const Real_t dt, const Real_t u_cut,
Real_t *xd, Real_t *yd, Real_t *zd,
Real_t *xdd, Real_t *ydd, Real_t *zdd)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = xd[i] + xdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
xd[i] = xdtmp ;
ydtmp = yd[i] + ydd[i] * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
yd[i] = ydtmp ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
zd[i] = zdtmp ;
}
}
static inline
void CalcVelocityForNodes_gpu(const Real_t dt, const Real_t u_cut)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
CalcVelocityForNodes_kernel<<<dimGrid, dimBlock>>>
(mesh.numNode(),dt,u_cut,
meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_xdd,meshGPU.m_ydd,meshGPU.m_zdd);
CUDA_DEBUGSYNC;
}
static inline
void CalcVelocityForNodes_cpu(const Real_t dt, const Real_t u_cut)
{
Index_t numNode = mesh.numNode() ;
for ( Index_t i = 0 ; i < numNode ; ++i )
{
Real_t xdtmp, ydtmp, zdtmp ;
xdtmp = mesh.xd(i) + mesh.xdd(i) * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = Real_t(0.0);
mesh.xd(i) = xdtmp ;
ydtmp = mesh.yd(i) + mesh.ydd(i) * dt ;
if( FABS(ydtmp) < u_cut ) ydtmp = Real_t(0.0);
mesh.yd(i) = ydtmp ;
zdtmp = mesh.zd(i) + mesh.zdd(i) * dt ;
if( FABS(zdtmp) < u_cut ) zdtmp = Real_t(0.0);
mesh.zd(i) = zdtmp ;
}
}
static inline
void CalcVelocityForNodes(const Real_t dt, const Real_t u_cut, int useCPU)
{
if (useCPU) {
FC(xd); FC(yd); FC(zd); FC(xdd); FC(ydd); FC(zdd);
CalcVelocityForNodes_cpu(dt,u_cut);
SG(xd); SG(yd); SG(zd);
}
else {
FG(xd); FG(yd); FG(zd); FG(xdd); FG(ydd); FG(zdd);
CalcVelocityForNodes_gpu(dt,u_cut);
SC(xd); SC(yd); SC(zd);
}
}
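// One thread per node: advance position by velocity*dt.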
__global__
void CalcPositionForNodes_kernel(int numNode, Real_t dt,
Real_t *x, Real_t *y, Real_t *z,
Real_t *xd, Real_t *yd, Real_t *zd)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i<numNode) {
x[i] += xd[i] * dt;
y[i] += yd[i] * dt;
z[i] += zd[i] * dt;
}
}
static inline
void CalcPositionForNodes_gpu(const Real_t dt)
{
dim3 dimBlock(BLOCKSIZE,1,1);
dim3 dimGrid(PAD_DIV(mesh.numNode(),dimBlock.x),1,1);
CalcPositionForNodes_kernel<<<dimGrid, dimBlock>>>
(mesh.numNode(),dt,meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd);
CUDA_DEBUGSYNC;
}
static inline
void CalcPositionForNodes_cpu(const Real_t dt)
{
Index_t numNode = mesh.numNode() ;
for ( Index_t i = 0 ; i < numNode ; ++i )
{
mesh.x(i) += mesh.xd(i) * dt ;
mesh.y(i) += mesh.yd(i) * dt ;
mesh.z(i) += mesh.zd(i) * dt ;
}
}
static inline
void CalcPositionForNodes(const Real_t dt,int useCPU)
{
if (useCPU) {
FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
CalcPositionForNodes_cpu(dt);
SG(x); SG(y); SG(z);
}
else {
FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
CalcPositionForNodes_gpu(dt);
SC(x); SC(y); SC(z);
}
}
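// Nodal phase of the Lagrange step: forces -> accelerations -> acceleration
// boundary conditions -> velocities -> positions; each stage is dispatched to
// the CPU or GPU path.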
static inline
void LagrangeNodal(int useCPU)
{
const Real_t delt = mesh.deltatime() ;
Real_t u_cut = mesh.u_cut() ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(/*0*/useCPU);
CalcAccelerationForNodes(useCPU);
ApplyAccelerationBoundaryConditionsForNodes(useCPU);
CalcVelocityForNodes( delt, u_cut, useCPU ) ;
CalcPositionForNodes( delt, useCPU );
return;
}
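// Volume of a hexahedral element evaluated as a sum of three scalar triple
// products of coordinate differences, scaled by 1/12.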
__host__ __device__
static inline
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}
__host__ __device__
static inline
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}
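// Face metric for a quadrilateral: with f and g built from the two diagonals,
// |f|^2*|g|^2 - (f.g)^2 equals |f x g|^2, i.e. a squared-area quantity rather
// than the area itself. The caller only compares faces and applies SQRT afterwards.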
__host__ __device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
(fx * gx + fy * gy + fz * gz) *
(fx * gx + fy * gy + fz * gz);
return area ;
}
__host__ __device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}
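// Velocity gradient at the element center: d[0..2] are the normal strain rates,
// d[3..5] the symmetrized shear terms (0.5*(dv_i/dx_j + dv_j/dx_i)), computed
// from the shape-function derivatives in b[][] and the Jacobian determinant.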
__host__ __device__
static inline
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
d[0] = inv_detJ * ( pfx[0] * (xvel[0]-xvel[6])
+ pfx[1] * (xvel[1]-xvel[7])
+ pfx[2] * (xvel[2]-xvel[4])
+ pfx[3] * (xvel[3]-xvel[5]) );
d[1] = inv_detJ * ( pfy[0] * (yvel[0]-yvel[6])
+ pfy[1] * (yvel[1]-yvel[7])
+ pfy[2] * (yvel[2]-yvel[4])
+ pfy[3] * (yvel[3]-yvel[5]) );
d[2] = inv_detJ * ( pfz[0] * (zvel[0]-zvel[6])
+ pfz[1] * (zvel[1]-zvel[7])
+ pfz[2] * (zvel[2]-zvel[4])
+ pfz[3] * (zvel[3]-zvel[5]) );
dyddx = inv_detJ * ( pfx[0] * (yvel[0]-yvel[6])
+ pfx[1] * (yvel[1]-yvel[7])
+ pfx[2] * (yvel[2]-yvel[4])
+ pfx[3] * (yvel[3]-yvel[5]) );
dxddy = inv_detJ * ( pfy[0] * (xvel[0]-xvel[6])
+ pfy[1] * (xvel[1]-xvel[7])
+ pfy[2] * (xvel[2]-xvel[4])
+ pfy[3] * (xvel[3]-xvel[5]) );
dzddx = inv_detJ * ( pfx[0] * (zvel[0]-zvel[6])
+ pfx[1] * (zvel[1]-zvel[7])
+ pfx[2] * (zvel[2]-zvel[4])
+ pfx[3] * (zvel[3]-zvel[5]) );
dxddz = inv_detJ * ( pfz[0] * (xvel[0]-xvel[6])
+ pfz[1] * (xvel[1]-xvel[7])
+ pfz[2] * (xvel[2]-xvel[4])
+ pfz[3] * (xvel[3]-xvel[5]) );
dzddy = inv_detJ * ( pfy[0] * (zvel[0]-zvel[6])
+ pfy[1] * (zvel[1]-zvel[7])
+ pfy[2] * (zvel[2]-zvel[4])
+ pfy[3] * (zvel[3]-zvel[5]) );
dyddz = inv_detJ * ( pfz[0] * (yvel[0]-yvel[6])
+ pfz[1] * (yvel[1]-yvel[7])
+ pfz[2] * (yvel[2]-yvel[4])
+ pfz[3] * (yvel[3]-yvel[5]) );
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}
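// One thread per element: recompute the element volume and relative volume
// (vnew, delv) and the characteristic length, then evaluate the strain rate.
// Nodal coordinates are stepped back by dt/2 so the gradient is taken at the
// mid-step configuration.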
__global__
void CalcKinematicsForElems_kernel(
Index_t numElem, Real_t dt,
Index_t *nodelist,Real_t *volo,Real_t *v,
Real_t *x,Real_t *y,Real_t *z,Real_t *xd,Real_t *yd,Real_t *zd,
Real_t *vnew,Real_t *delv,Real_t *arealg,Real_t *dxx,Real_t *dyy,Real_t *dzz
)
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
x_local[lnode] = x[gnode];
y_local[lnode] = y[gnode];
z_local[lnode] = z[gnode];
}
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*numElem];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// put velocity gradient quantities into their global arrays.
dxx[k] = D[0];
dyy[k] = D[1];
dzz[k] = D[2];
}
}
static inline
void CalcKinematicsForElems_gpu( Index_t numElem, Real_t dt )
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
CalcKinematicsForElems_kernel<<<dimGrid,dimBlock>>>
(numElem,dt,meshGPU.m_nodelist,meshGPU.m_volo,meshGPU.m_v,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_vnew,meshGPU.m_delv,meshGPU.m_arealg,meshGPU.m_dxx,meshGPU.m_dyy,meshGPU.m_dzz);
CUDA_DEBUGSYNC;
}
static inline
void CalcKinematicsForElems_cpu( Index_t numElem, Real_t dt )
{
Real_t B[3][8] ; /** shape function derivatives */
Real_t D[6] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t detJ = Real_t(0.0) ;
// loop over all elements
for( Index_t k=0 ; k<numElem ; ++k )
{
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / mesh.volo(k) ;
mesh.vnew(k) = relativeVolume ;
mesh.delv(k) = relativeVolume - mesh.v(k) ;
// set characteristic length
mesh.arealg(k) = CalcElemCharacteristicLength(x_local,
y_local,
z_local,
volume);
// get nodal velocities from global array and copy into local arrays.
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(k,lnode);
xd_local[lnode] = mesh.xd(gnode);
yd_local[lnode] = mesh.yd(gnode);
zd_local[lnode] = mesh.zd(gnode);
}
Real_t dt2 = Real_t(0.5) * dt;
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
CalcElemShapeFunctionDerivatives( x_local,
y_local,
z_local,
B, &detJ );
CalcElemVelocityGradient( xd_local,
yd_local,
zd_local,
B, detJ, D );
// put velocity gradient quantities into their global arrays.
mesh.dxx(k) = D[0];
mesh.dyy(k) = D[1];
mesh.dzz(k) = D[2];
}
}
static inline
void CalcKinematicsForElems( Index_t numElem, Real_t dt, int useCPU )
{
if (useCPU) {
FC(nodelist); FC(volo); FC(v); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd);
CalcKinematicsForElems_cpu(numElem,dt);
SG(vnew); SG(delv); SG(arealg); SG(dxx); SG(dyy); SG(dzz);
}
else {
FG(nodelist); FG(volo); FG(v); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd);
CalcKinematicsForElems_gpu(numElem,dt);
SC(vnew); SC(delv); SC(arealg); SC(dxx); SC(dyy); SC(dzz);
}
}
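// One thread per element: vdov is the trace of the strain rate (volumetric
// rate of change); the diagonal terms are then made deviatoric by subtracting vdov/3.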
__global__
void CalcLagrangeElementsPart2_kernel(
Index_t numElem,
Real_t *dxx,Real_t *dyy, Real_t *dzz,
Real_t *vdov
)
{
int k=blockDim.x*blockIdx.x + threadIdx.x;
if (k<numElem) {
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = dxx[k] + dyy[k] + dzz[k] ;
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] -= vdovthird ;
dyy[k] -= vdovthird ;
dzz[k] -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
//if (mesh.vnew(k) <= Real_t(0.0))
//{
// exit(VolumeError) ;
//}
}
}
static inline
void CalcLagrangeElementsPart2_gpu()
{
Index_t numElem = mesh.numElem();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
CalcLagrangeElementsPart2_kernel<<<dimGrid,dimBlock>>>
(numElem,
meshGPU.m_dxx,meshGPU.m_dyy,meshGPU.m_dzz,
meshGPU.m_vdov);
CUDA_DEBUGSYNC;
}
static inline
void CalcLagrangeElementsPart2_cpu()
{
Index_t numElem = mesh.numElem() ;
// element loop to do some stuff not included in the elemlib function.
for ( Index_t k=0 ; k<numElem ; ++k )
{
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdov = mesh.dxx(k) + mesh.dyy(k) + mesh.dzz(k) ;
Real_t vdovthird = vdov/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
mesh.vdov(k) = vdov ;
mesh.dxx(k) -= vdovthird ;
mesh.dyy(k) -= vdovthird ;
mesh.dzz(k) -= vdovthird ;
// See if any volumes are negative, and take appropriate action.
if (mesh.vnew(k) <= Real_t(0.0))
{
exit(VolumeError) ;
}
}
}
static inline
void CalcLagrangeElementsPart2(int useCPU)
{
if (useCPU) {
FC(dxx); FC(dyy); FC(dzz);
CalcLagrangeElementsPart2_cpu();
SG(vdov); SG(dxx); SG(dyy); SG(dzz);
}
else {
FG(dxx); FG(dyy); FG(dzz);
CalcLagrangeElementsPart2_gpu();
SC(vdov); SC(dxx); SC(dyy); SC(dzz);
}
}
static inline
void CalcLagrangeElements(Real_t deltatime, int useCPU)
{
Index_t numElem = mesh.numElem() ;
if (numElem > 0) {
CalcKinematicsForElems(numElem, deltatime, useCPU);
CalcLagrangeElementsPart2(useCPU);
}
}
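// One thread per element: build the position gradients (delx_*) and velocity
// gradients (delv_*) along the xi/eta/zeta element directions, which feed the
// monotonic artificial viscosity (Q) limiter.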
__global__
void CalcMonotonicQGradientsForElems_kernel(
Index_t numElem,
Index_t *nodelist,
Real_t *x,Real_t *y,Real_t *z,Real_t *xd,Real_t *yd,Real_t *zd,
Real_t *volo,Real_t *vnew,
Real_t *delx_zeta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delv_xi,
Real_t *delx_eta,Real_t *delv_eta
)
{
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem) {
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Index_t n0 = nodelist[i+0*numElem] ;
Index_t n1 = nodelist[i+1*numElem] ;
Index_t n2 = nodelist[i+2*numElem] ;
Index_t n3 = nodelist[i+3*numElem] ;
Index_t n4 = nodelist[i+4*numElem] ;
Index_t n5 = nodelist[i+5*numElem] ;
Index_t n6 = nodelist[i+6*numElem] ;
Index_t n7 = nodelist[i+7*numElem] ;
Real_t x0 = x[n0] ;
Real_t x1 = x[n1] ;
Real_t x2 = x[n2] ;
Real_t x3 = x[n3] ;
Real_t x4 = x[n4] ;
Real_t x5 = x[n5] ;
Real_t x6 = x[n6] ;
Real_t x7 = x[n7] ;
Real_t y0 = y[n0] ;
Real_t y1 = y[n1] ;
Real_t y2 = y[n2] ;
Real_t y3 = y[n3] ;
Real_t y4 = y[n4] ;
Real_t y5 = y[n5] ;
Real_t y6 = y[n6] ;
Real_t y7 = y[n7] ;
Real_t z0 = z[n0] ;
Real_t z1 = z[n1] ;
Real_t z2 = z[n2] ;
Real_t z3 = z[n3] ;
Real_t z4 = z[n4] ;
Real_t z5 = z[n5] ;
Real_t z6 = z[n6] ;
Real_t z7 = z[n7] ;
Real_t xv0 = xd[n0] ;
Real_t xv1 = xd[n1] ;
Real_t xv2 = xd[n2] ;
Real_t xv3 = xd[n3] ;
Real_t xv4 = xd[n4] ;
Real_t xv5 = xd[n5] ;
Real_t xv6 = xd[n6] ;
Real_t xv7 = xd[n7] ;
Real_t yv0 = yd[n0] ;
Real_t yv1 = yd[n1] ;
Real_t yv2 = yd[n2] ;
Real_t yv3 = yd[n3] ;
Real_t yv4 = yd[n4] ;
Real_t yv5 = yd[n5] ;
Real_t yv6 = yd[n6] ;
Real_t yv7 = yd[n7] ;
Real_t zv0 = zd[n0] ;
Real_t zv1 = zd[n1] ;
Real_t zv2 = zd[n2] ;
Real_t zv3 = zd[n3] ;
Real_t zv4 = zd[n4] ;
Real_t zv5 = zd[n5] ;
Real_t zv6 = zd[n6] ;
Real_t zv7 = zd[n7] ;
Real_t vol = volo[i]*vnew[i] ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x0,x1,x5,x4) - SUM4(x3,x2,x6,x7)) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y0,y1,y5,y4) - SUM4(y3,y2,y6,y7)) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z0,z1,z5,z4) - SUM4(z3,z2,z6,z7)) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x1,x2,x6,x5) - SUM4(x0,x3,x7,x4)) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y1,y2,y6,y5) - SUM4(y0,y3,y7,y4)) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z1,z2,z6,z5) - SUM4(z0,z3,z7,z4)) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x4,x5,x6,x7) - SUM4(x0,x1,x2,x3)) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y4,y5,y6,y7) - SUM4(y0,y1,y2,y3)) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z4,z5,z6,z7) - SUM4(z0,z1,z2,z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
delx_zeta[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv4,xv5,xv6,xv7) - SUM4(xv0,xv1,xv2,xv3)) ;
dyv = Real_t(0.25)*(SUM4(yv4,yv5,yv6,yv7) - SUM4(yv0,yv1,yv2,yv3)) ;
dzv = Real_t(0.25)*(SUM4(zv4,zv5,zv6,zv7) - SUM4(zv0,zv1,zv2,zv3)) ;
delv_zeta[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
delx_xi[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv1,xv2,xv6,xv5) - SUM4(xv0,xv3,xv7,xv4)) ;
dyv = Real_t(0.25)*(SUM4(yv1,yv2,yv6,yv5) - SUM4(yv0,yv3,yv7,yv4)) ;
dzv = Real_t(0.25)*(SUM4(zv1,zv2,zv6,zv5) - SUM4(zv0,zv3,zv7,zv4)) ;
delv_xi[i] = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
delx_eta[i] = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv0,xv1,xv5,xv4) - SUM4(xv3,xv2,xv6,xv7)) ;
dyv = Real_t(-0.25)*(SUM4(yv0,yv1,yv5,yv4) - SUM4(yv3,yv2,yv6,yv7)) ;
dzv = Real_t(-0.25)*(SUM4(zv0,zv1,zv5,zv4) - SUM4(zv3,zv2,zv6,zv7)) ;
delv_eta[i] = ax*dxv + ay*dyv + az*dzv ;
}
#undef SUM4
}
static inline
void CalcMonotonicQGradientsForElems_gpu()
{
Index_t numElem = mesh.numElem();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
CalcMonotonicQGradientsForElems_kernel<<<dimGrid,dimBlock>>>
(numElem,
meshGPU.m_nodelist,
meshGPU.m_x,meshGPU.m_y,meshGPU.m_z,meshGPU.m_xd,meshGPU.m_yd,meshGPU.m_zd,
meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_delx_zeta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delv_xi,
meshGPU.m_delx_eta,meshGPU.m_delv_eta);
CUDA_DEBUGSYNC;
}
static inline
void CalcMonotonicQGradientsForElems_cpu()
{
#define SUM4(a,b,c,d) (a + b + c + d)
Index_t numElem = mesh.numElem() ;
const Real_t ptiny = Real_t(1.e-36) ;
for (Index_t i = 0 ; i < numElem ; ++i ) {
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Index_t n0 = mesh.nodelist(i,0) ;
Index_t n1 = mesh.nodelist(i,1) ;
Index_t n2 = mesh.nodelist(i,2) ;
Index_t n3 = mesh.nodelist(i,3) ;
Index_t n4 = mesh.nodelist(i,4) ;
Index_t n5 = mesh.nodelist(i,5) ;
Index_t n6 = mesh.nodelist(i,6) ;
Index_t n7 = mesh.nodelist(i,7) ;
Real_t x0 = mesh.x(n0) ;
Real_t x1 = mesh.x(n1) ;
Real_t x2 = mesh.x(n2) ;
Real_t x3 = mesh.x(n3) ;
Real_t x4 = mesh.x(n4) ;
Real_t x5 = mesh.x(n5) ;
Real_t x6 = mesh.x(n6) ;
Real_t x7 = mesh.x(n7) ;
Real_t y0 = mesh.y(n0) ;
Real_t y1 = mesh.y(n1) ;
Real_t y2 = mesh.y(n2) ;
Real_t y3 = mesh.y(n3) ;
Real_t y4 = mesh.y(n4) ;
Real_t y5 = mesh.y(n5) ;
Real_t y6 = mesh.y(n6) ;
Real_t y7 = mesh.y(n7) ;
Real_t z0 = mesh.z(n0) ;
Real_t z1 = mesh.z(n1) ;
Real_t z2 = mesh.z(n2) ;
Real_t z3 = mesh.z(n3) ;
Real_t z4 = mesh.z(n4) ;
Real_t z5 = mesh.z(n5) ;
Real_t z6 = mesh.z(n6) ;
Real_t z7 = mesh.z(n7) ;
Real_t xv0 = mesh.xd(n0) ;
Real_t xv1 = mesh.xd(n1) ;
Real_t xv2 = mesh.xd(n2) ;
Real_t xv3 = mesh.xd(n3) ;
Real_t xv4 = mesh.xd(n4) ;
Real_t xv5 = mesh.xd(n5) ;
Real_t xv6 = mesh.xd(n6) ;
Real_t xv7 = mesh.xd(n7) ;
Real_t yv0 = mesh.yd(n0) ;
Real_t yv1 = mesh.yd(n1) ;
Real_t yv2 = mesh.yd(n2) ;
Real_t yv3 = mesh.yd(n3) ;
Real_t yv4 = mesh.yd(n4) ;
Real_t yv5 = mesh.yd(n5) ;
Real_t yv6 = mesh.yd(n6) ;
Real_t yv7 = mesh.yd(n7) ;
Real_t zv0 = mesh.zd(n0) ;
Real_t zv1 = mesh.zd(n1) ;
Real_t zv2 = mesh.zd(n2) ;
Real_t zv3 = mesh.zd(n3) ;
Real_t zv4 = mesh.zd(n4) ;
Real_t zv5 = mesh.zd(n5) ;
Real_t zv6 = mesh.zd(n6) ;
Real_t zv7 = mesh.zd(n7) ;
Real_t vol = mesh.volo(i)*mesh.vnew(i) ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x0,x1,x5,x4) - SUM4(x3,x2,x6,x7)) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y0,y1,y5,y4) - SUM4(y3,y2,y6,y7)) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z0,z1,z5,z4) - SUM4(z3,z2,z6,z7)) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x1,x2,x6,x5) - SUM4(x0,x3,x7,x4)) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y1,y2,y6,y5) - SUM4(y0,y3,y7,y4)) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z1,z2,z6,z5) - SUM4(z0,z3,z7,z4)) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x4,x5,x6,x7) - SUM4(x0,x1,x2,x3)) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y4,y5,y6,y7) - SUM4(y0,y1,y2,y3)) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z4,z5,z6,z7) - SUM4(z0,z1,z2,z3)) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
mesh.delx_zeta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv4,xv5,xv6,xv7) - SUM4(xv0,xv1,xv2,xv3)) ;
dyv = Real_t(0.25)*(SUM4(yv4,yv5,yv6,yv7) - SUM4(yv0,yv1,yv2,yv3)) ;
dzv = Real_t(0.25)*(SUM4(zv4,zv5,zv6,zv7) - SUM4(zv0,zv1,zv2,zv3)) ;
mesh.delv_zeta(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
mesh.delx_xi(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv1,xv2,xv6,xv5) - SUM4(xv0,xv3,xv7,xv4)) ;
dyv = Real_t(0.25)*(SUM4(yv1,yv2,yv6,yv5) - SUM4(yv0,yv3,yv7,yv4)) ;
dzv = Real_t(0.25)*(SUM4(zv1,zv2,zv6,zv5) - SUM4(zv0,zv3,zv7,zv4)) ;
mesh.delv_xi(i) = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
mesh.delx_eta(i) = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv0,xv1,xv5,xv4) - SUM4(xv3,xv2,xv6,xv7)) ;
dyv = Real_t(-0.25)*(SUM4(yv0,yv1,yv5,yv4) - SUM4(yv3,yv2,yv6,yv7)) ;
dzv = Real_t(-0.25)*(SUM4(zv0,zv1,zv5,zv4) - SUM4(zv3,zv2,zv6,zv7)) ;
mesh.delv_eta(i) = ax*dxv + ay*dyv + az*dzv ;
}
#undef SUM4
}
static inline
void CalcMonotonicQGradientsForElems(int useCPU)
{
if (useCPU) {
FC(nodelist); FC(x); FC(y); FC(z); FC(xd); FC(yd); FC(zd); FC(volo); FC(vnew);
CalcMonotonicQGradientsForElems_cpu();
SG(delx_zeta); SG(delv_zeta); SG(delx_xi); SG(delv_xi); SG(delx_eta); SG(delv_eta);
}
else {
FG(nodelist); FG(x); FG(y); FG(z); FG(xd); FG(yd); FG(zd); FG(volo); FG(vnew);
CalcMonotonicQGradientsForElems_gpu();
SC(delx_zeta); SC(delv_zeta); SC(delx_xi); SC(delv_xi); SC(delx_eta); SC(delv_eta);
}
}
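// Kernel 5: per-element monotonic Q. Slope limiters phixi/phieta/phizeta are
// formed from neighbouring delv_* values (neighbours found via lxim/lxip,
// letam/letap, lzetam/lzetap, subject to the elemBC boundary mask); the linear
// and quadratic viscosity terms ql/qq are then computed for compressing
// elements (vdov <= 0) and zeroed otherwise.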
#ifdef DP_PROFILING_KERNEL5
__global__
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t *matElemlist,Index_t *elemBC,
Index_t *lxim,Index_t *lxip,
Index_t *letam,Index_t *letap,
Index_t *lzetam,Index_t *lzetap,
Real_t *delv_xi,Real_t *delv_eta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delx_eta,Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq,Real_t *ql,
int its
)
#else
__global__
void CalcMonotonicQRegionForElems_kernel(
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t *matElemlist,Index_t *elemBC,
Index_t *lxim,Index_t *lxip,
Index_t *letam,Index_t *letap,
Index_t *lzetam,Index_t *lzetap,
Real_t *delv_xi,Real_t *delv_eta,Real_t *delv_zeta,
Real_t *delx_xi,Real_t *delx_eta,Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq,Real_t *ql
)
#endif
{
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
#ifdef AFTER_KERNEL5
Index_t i = tex1Dfetch(tex_matElemlist,ielem);
Int_t bcMask = tex1Dfetch(tex_elemBC,i) ;
#else
Index_t i = matElemlist[ielem];
Int_t bcMask = elemBC[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//matElemlist
//number of array ele is edgeElems^3 = 45^3=91125
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("0 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, ielem);
//elemBC
//number of array ele is edgeElems^3 = 45^3 = 91125
//The data type is Int_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("1 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, matElemlist[ielem]);
}
#endif
Real_t delvm, delvp ;
/* phixi */
#ifdef AFTER_KERNEL5_2
Real_t norm = Real_t(1.) / ( tex1Dfetch(tex_delv_xi,i) + ptiny ) ;
#else
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3 = 45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, matElemlist[ielem]);
}
#endif
switch (bcMask & XI_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_xi[tex1Dfetch(tex_lxim,i)];
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_xi,lxim[i]);
#else
case 0: delvm = delv_xi[lxim[i]];
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lxim[i]);
//lxim
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("3 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case XI_M_SYMM: delvm = tex1Dfetch(tex_delv_xi,i);
#else
case XI_M_SYMM: delvm = delv_xi[i];
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_xi[tex1Dfetch(tex_lxip,i)] ;
#else
case 0: delvp = delv_xi[lxip[i]] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lxip[i]);
//lxip
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("4 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case XI_P_SYMM: delvp = tex1Dfetch(tex_delv_xi,i) ;
#else
case XI_P_SYMM: delvp = delv_xi[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_xi
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("2 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
#ifdef AFTER_KERNEL5_2
norm = Real_t(1.) / ( tex1Dfetch(tex_delv_eta,i) + ptiny ) ;
#else
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
switch (bcMask & ETA_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_eta[tex1Dfetch(tex_letam,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_eta,letam[i]) ;
#else
case 0: delvm = delv_eta[letam[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, letam[i]);
//letam
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("6 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ETA_M_SYMM: delvm = tex1Dfetch(tex_delv_eta,i) ;
#else
case ETA_M_SYMM: delvm = delv_eta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_eta[tex1Dfetch(tex_letap,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvp = tex1Dfetch(tex_delv_eta,letap[i]) ;
#else
case 0: delvp = delv_eta[letap[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, letap[i]);
//letap
//number of array ele is edgeElems^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("7 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ETA_P_SYMM: delvp = tex1Dfetch(tex_delv_eta,i) ;
#else
case ETA_P_SYMM: delvp = delv_eta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_eta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("5 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
#ifdef AFTER_KERNEL5_2
norm = Real_t(1.) / ( tex1Dfetch(tex_delv_zeta,i) + ptiny ) ;
#else
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
switch (bcMask & ZETA_M) {
#ifdef AFTER_KERNEL5
case 0: delvm = delv_zeta[tex1Dfetch(tex_lzetam,i)] ;
#else
#ifdef AFTER_KERNEL5_2
case 0: delvm = tex1Dfetch(tex_delv_zeta,lzetam[i]) ;
#else
case 0: delvm = delv_zeta[lzetam[i]] ;
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lzetam[i]);
//lzetam
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("9 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ZETA_M_SYMM: delvm = tex1Dfetch(tex_delv_zeta,i) ;
#else
case ZETA_M_SYMM: delvm = delv_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 1 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
#ifdef AFTER_KERNEL5
case 0: delvp = delv_zeta[tex1Dfetch(tex_lzetap,i)];
#else
#ifdef AFTER_KERNEL5_2
case 0: delvp =tex1Dfetch(tex_delv_zeta,lzetap[i]);
#else
case 0: delvp = delv_zeta[lzetap[i]];
#endif
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, lzetap[i]);
//lzetap
//number of array ele is edgeElems^3=45^3
//The data type is Index_t (i.e., int)
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("10 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
#ifdef AFTER_KERNEL5_2
case ZETA_P_SYMM: delvp = tex1Dfetch(tex_delv_zeta,i) ;
#else
case ZETA_P_SYMM: delvp = delv_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//delv_zeta
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurrence of the array, #iter, tid, array_idx
cuPrintf("8 0 2 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//vdov
//number of array ele is edgeElems^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("11 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
#ifdef AFTER_KERNEL5_2
Real_t delvxxi = tex1Dfetch(tex_delv_xi,i) * delx_xi[i] ;
Real_t delvxeta = tex1Dfetch(tex_delv_eta,i) *delx_eta[i] ;
Real_t delvxzeta = tex1Dfetch(tex_delv_zeta,i)*delx_zeta[i] ;
#else
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
#endif
#ifdef DP_PROFILING_KERNEL5
//Dong: since this is within a control flow, I am not sure if we should
//profile delv_xi/eta/zeta here. Also, the same memory access patterns
//to these three arrays have been seen before.
#endif
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
#ifdef DP_PROFILING_KERNEL5
//Dong: we should profile volo, vnew and elemMass here even if it is
//in a control flow, because they are referenced only once
if(blockIdx.x==0 && blockIdx.y==0)
{
//elemMass
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("12 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//volo
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("13 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//vnew
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("14 0 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
#ifdef DP_PROFILING_KERNEL5
if(blockIdx.x==0 && blockIdx.y==0)
{
//qq
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("15 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
//ql
//number of array ele is edgeElems^3=45^3
//The data type is Real_t
//array ID, r/w, #occurence of the array, #iter, tid, array_idx
cuPrintf("16 1 0 %d %d %d\n", its, threadIdx.y*blockDim.x+threadIdx.x, i);
}
#endif
}
}
static inline
void CalcMonotonicQRegionForElems_gpu(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength )
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(elength,dimBlock.x),1,1);
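// Optional tuning paths for this kernel: AFTER_KERNEL5 binds the element
// connectivity/index arrays to textures, AFTER_KERNEL5_2 binds the velocity-gradient
// (delv_*) arrays, so the kernel reads them through the texture cache.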
#ifdef AFTER_KERNEL5
cudaBindTexture(0,tex_matElemlist,meshGPU.m_matElemlist,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_elemBC,meshGPU.m_elemBC,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_lxim,meshGPU.m_lxim,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_lxip,meshGPU.m_lxip,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_letam,meshGPU.m_letam,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_letap,meshGPU.m_letap,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_lzetam,meshGPU.m_lzetam,mesh.numNode()*sizeof(int));
cudaBindTexture(0,tex_lzetap,meshGPU.m_lzetap,mesh.numNode()*sizeof(int));
#endif
#ifdef AFTER_KERNEL5_2
cudaBindTexture(0,tex_delv_xi,meshGPU.m_delv_xi,mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_delv_eta,meshGPU.m_delv_eta,mesh.numElem()*sizeof(Real_t));
cudaBindTexture(0,tex_delv_zeta,meshGPU.m_delv_zeta,mesh.numElem()*sizeof(Real_t));
#endif
#ifdef DP_PROFILING_KERNEL5
printf("Kernel5: dimGrid: %d %d %d. dimBlock: %d %d %d\n",
dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
printf("Kernel5: numNode=%d, numElem=%d\n", mesh.numNode(), mesh.numElem());
CalcMonotonicQRegionForElems_kernel<<<dimGrid,dimBlock>>>
(qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
meshGPU.m_matElemlist,meshGPU.m_elemBC,
meshGPU.m_lxim,meshGPU.m_lxip,
meshGPU.m_letam,meshGPU.m_letap,
meshGPU.m_lzetam,meshGPU.m_lzetap,
meshGPU.m_delv_xi,meshGPU.m_delv_eta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delx_eta,meshGPU.m_delx_zeta,
meshGPU.m_vdov,meshGPU.m_elemMass,meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_qq,meshGPU.m_ql, its);
cudaPrintfDisplay(stdout, false);
#else
cudaEvent_t k5_start, k5_stop;
cudaEventCreate(&k5_start);
cudaEventCreate(&k5_stop);
cudaEventRecord(k5_start,0);
CalcMonotonicQRegionForElems_kernel<<<dimGrid,dimBlock>>>
(qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
meshGPU.m_matElemlist,meshGPU.m_elemBC,
meshGPU.m_lxim,meshGPU.m_lxip,
meshGPU.m_letam,meshGPU.m_letap,
meshGPU.m_lzetam,meshGPU.m_lzetap,
meshGPU.m_delv_xi,meshGPU.m_delv_eta,meshGPU.m_delv_zeta,
meshGPU.m_delx_xi,meshGPU.m_delx_eta,meshGPU.m_delx_zeta,
meshGPU.m_vdov,meshGPU.m_elemMass,meshGPU.m_volo,meshGPU.m_vnew,
meshGPU.m_qq,meshGPU.m_ql);
cudaDeviceSynchronize();
cudaEventRecord(k5_stop, 0);
cudaEventSynchronize(k5_stop);
float k5_time=0.0;
cudaEventElapsedTime(&k5_time, k5_start, k5_stop);
printf("Kernel 5 time = %f\n",k5_time);
k5+=k5_time;
#endif
CUDA_DEBUGSYNC;
}
static inline
void CalcMonotonicQRegionForElems_cpu(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength )
{
for ( Index_t ielem = 0 ; ielem < elength; ++ielem ) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = mesh.matElemlist(ielem);
Int_t bcMask = mesh.elemBC(i) ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( mesh.delv_xi(i) + ptiny ) ;
switch (bcMask & XI_M) {
case 0: delvm = mesh.delv_xi(mesh.lxim(i)) ; break ;
case XI_M_SYMM: delvm = mesh.delv_xi(i) ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case 0: delvp = mesh.delv_xi(mesh.lxip(i)) ; break ;
case XI_P_SYMM: delvp = mesh.delv_xi(i) ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( mesh.delv_eta(i) + ptiny ) ;
switch (bcMask & ETA_M) {
case 0: delvm = mesh.delv_eta(mesh.letam(i)) ; break ;
case ETA_M_SYMM: delvm = mesh.delv_eta(i) ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case 0: delvp = mesh.delv_eta(mesh.letap(i)) ; break ;
case ETA_P_SYMM: delvp = mesh.delv_eta(i) ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( mesh.delv_zeta(i) + ptiny ) ;
switch (bcMask & ZETA_M) {
case 0: delvm = mesh.delv_zeta(mesh.lzetam(i)) ; break ;
case ZETA_M_SYMM: delvm = mesh.delv_zeta(i) ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case 0: delvp = mesh.delv_zeta(mesh.lzetap(i)) ; break ;
case ZETA_P_SYMM: delvp = mesh.delv_zeta(i) ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( mesh.vdov(i) > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = mesh.delv_xi(i) * mesh.delx_xi(i) ;
Real_t delvxeta = mesh.delv_eta(i) * mesh.delx_eta(i) ;
Real_t delvxzeta = mesh.delv_zeta(i) * mesh.delx_zeta(i) ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = mesh.elemMass(i) / (mesh.volo(i) * mesh.vnew(i)) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
mesh.qq(i) = qquad ;
mesh.ql(i) = qlin ;
}
}
static inline
void CalcMonotonicQRegionForElems(// parameters
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(elemBC); FC(lxim); FC(lxip); FC(letam); FC(letap); FC(lzetam); FC(lzetap);
FC(delv_xi); FC(delv_eta); FC(delv_zeta); FC(delx_xi); FC(delx_eta); FC(delx_zeta);
FC(vdov); FC(elemMass); FC(volo); FC(vnew);
CalcMonotonicQRegionForElems_cpu(qlc_monoq,qqc_monoq,
monoq_limiter_mult,monoq_max_slope,ptiny,
elength);
SG(qq); SG(ql);
}
else {
FG(matElemlist); FG(elemBC); FG(lxim); FG(lxip); FG(letam); FG(letap); FG(lzetam); FG(lzetap);
FG(delv_xi); FG(delv_eta); FG(delv_zeta); FG(delx_xi); FG(delx_eta); FG(delx_zeta);
FG(vdov); FG(elemMass); FG(volo); FG(vnew);
CalcMonotonicQRegionForElems_gpu(qlc_monoq,qqc_monoq,
monoq_limiter_mult,monoq_max_slope,ptiny,
elength);
SC(qq); SC(ql);
}
}
static inline
void CalcMonotonicQForElems(int useCPU)
{
//
// initialize parameters
//
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = mesh.monoq_max_slope() ;
Real_t monoq_limiter_mult = mesh.monoq_limiter_mult() ;
//
// calculate the monotonic q for pure regions
//
Index_t elength = mesh.numElem() ;
if (elength > 0) {
Real_t qlc_monoq = mesh.qlc_monoq();
Real_t qqc_monoq = mesh.qqc_monoq();
CalcMonotonicQRegionForElems(// parameters
qlc_monoq,
qqc_monoq,
monoq_limiter_mult,
monoq_max_slope,
ptiny,
// the elemset length
elength,
useCPU);
}
}
static inline
void CalcQForElems(int useCPU)
{
Real_t qstop = mesh.qstop() ;
Index_t numElem = mesh.numElem() ;
//
// MONOTONIC Q option
//
/* Calculate velocity gradients */
CalcMonotonicQGradientsForElems(useCPU) ;
/* Transfer velocity gradients in the first order elements */
/* problem->commElements->Transfer(CommElements::monoQ) ; */
CalcMonotonicQForElems(useCPU) ;
/* Don't allow excessive artificial viscosity */
/*
if (numElem != 0) {
Index_t idx = -1;
for (Index_t i=0; i<numElem; ++i) {
if ( mesh.q(i) > qstop ) {
idx = i ;
break ;
}
}
if(idx >= 0) {
exit(QStopError) ;
}
}
*/
}
__global__
void CalcPressureForElems_kernel(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length, Real_t c1s)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[i] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}
}
static inline
void CalcPressureForElems_gpu(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
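// Note: compression is 1/vnewc - 1 (see EvalEOSForElems), so bvc = (2/3)/v and the
// pressure update below amounts to p = (2/3)*e/v, i.e. a gamma-law ideal gas with
// gamma = 5/3.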
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
CalcPressureForElems_kernel<<<dimGrid,dimBlock>>>
(p_new,bvc,pbvc,e_old,compression,vnewc,pmin,p_cut,eosvmax,length,c1s);
CUDA_DEBUGSYNC;
}
static inline
void CalcPressureForElems_cpu(Real_t* p_new, Real_t* bvc,
Real_t* pbvc, Real_t* e_old,
Real_t* compression, Real_t *vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax,
Index_t length)
{
Real_t c1s = Real_t(2.0)/Real_t(3.0) ;
for (Index_t i = 0; i < length ; ++i) {
bvc[i] = c1s * (compression[i] + Real_t(1.));
pbvc[i] = c1s;
}
for (Index_t i = 0 ; i < length ; ++i){
p_new[i] = bvc[i] * e_old[i] ;
if (FABS(p_new[i]) < p_cut )
p_new[i] = Real_t(0.0) ;
if ( vnewc[i] >= eosvmax ) /* impossible condition here? */
p_new[i] = Real_t(0.0) ;
if (p_new[i] < pmin)
p_new[i] = pmin ;
}
}
__global__
void CalcEnergyForElemsPart1_kernel(
Index_t length,Real_t emin,
Real_t *e_old,Real_t *delvc,Real_t *p_old,Real_t *q_old,Real_t *work,
Real_t *e_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart2_kernel(
Index_t length,Real_t rho0,Real_t e_cut,Real_t emin,
Real_t *compHalfStep,Real_t *delvc,Real_t *pbvc,Real_t *bvc,
Real_t *pHalfStep,Real_t *ql,Real_t *qq,Real_t *p_old,Real_t *q_old,Real_t *work,
Real_t *e_new,
Real_t *q_new
)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc =Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart3_kernel(
Index_t length,Real_t rho0,Real_t sixth,Real_t e_cut,Real_t emin,
Real_t *pbvc,Real_t *vnewc,Real_t *bvc,Real_t *p_new,Real_t *ql,Real_t *qq,
Real_t *p_old,Real_t *q_old,Real_t *pHalfStep,Real_t *q_new,Real_t *delvc,
Real_t *e_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
}
__global__
void CalcEnergyForElemsPart4_kernel(
Index_t length,Real_t rho0,Real_t q_cut,
Real_t *delvc,Real_t *pbvc,Real_t *e_new,Real_t *vnewc,Real_t *bvc,
Real_t *p_new,Real_t *ql,Real_t *qq,
Real_t *q_new)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}
}
static inline
void CalcEnergyForElems_gpu(Real_t* p_new, Real_t* e_new, Real_t* q_new,
Real_t* bvc, Real_t* pbvc,
Real_t* p_old, Real_t* e_old, Real_t* q_old,
Real_t* compression, Real_t* compHalfStep,
Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t* qq, Real_t* ql,
Real_t rho0,
Real_t eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t *pHalfStep;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
CUDA( cudaMalloc(&pHalfStep,sizeof(Real_t)*length) );
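// The energy update is staged as four kernels interleaved with three pressure solves;
// pHalfStep is device scratch holding the half-step pressure between stages and is
// freed before returning.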
CalcEnergyForElemsPart1_kernel<<<dimGrid,dimBlock>>>
(length,emin,e_old,delvc,p_old,q_old,work,e_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length);
CalcEnergyForElemsPart2_kernel<<<dimGrid,dimBlock>>>
(length,rho0,e_cut,emin,
compHalfStep,delvc,pbvc,bvc,pHalfStep,ql,qq,p_old,q_old,work,
e_new,
q_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
CalcEnergyForElemsPart3_kernel<<<dimGrid,dimBlock>>>
(length,rho0,sixth,e_cut,emin,
pbvc,vnewc,bvc,p_new,ql,qq,
p_old,q_old,pHalfStep,q_new,delvc,
e_new);
CUDA_DEBUGSYNC;
CalcPressureForElems_gpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
CalcEnergyForElemsPart4_kernel<<<dimGrid,dimBlock>>>
(length,rho0,q_cut,
delvc,pbvc,e_new,vnewc,bvc,
p_new,ql,qq,
q_new);
CUDA_DEBUGSYNC;
CUDA( cudaFree(pHalfStep) );
return ;
}
static inline
void CalcEnergyForElems_cpu(Real_t* p_new, Real_t* e_new, Real_t* q_new,
Real_t* bvc, Real_t* pbvc,
Real_t* p_old, Real_t* e_old, Real_t* q_old,
Real_t* compression, Real_t* compHalfStep,
Real_t* vnewc, Real_t* work, Real_t* delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t* qq, Real_t* ql,
Real_t rho0,
Real_t eosvmax,
Index_t length)
{
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t *pHalfStep = Allocate<Real_t>(length) ;
for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] = e_old[i] - Real_t(0.5) * delvc[i] * (p_old[i] + q_old[i])
+ Real_t(0.5) * work[i];
if (e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i) {
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep[i]) ;
if ( delvc[i] > Real_t(0.) ) {
q_new[i] /* = qq[i] = ql[i] */ = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc =Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] + Real_t(0.5) * delvc[i]
* ( Real_t(3.0)*(p_old[i] + q_old[i])
- Real_t(4.0)*(pHalfStep[i] + q_new[i])) ;
}
for (Index_t i = 0 ; i < length ; ++i) {
e_new[i] += Real_t(0.5) * work[i];
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i){
Real_t q_tilde ;
if (delvc[i] > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql[i] + qq[i]) ;
}
e_new[i] = e_new[i] - ( Real_t(7.0)*(p_old[i] + q_old[i])
- Real_t(8.0)*(pHalfStep[i] + q_new[i])
+ (p_new[i] + q_tilde)) * delvc[i]*sixth ;
if (FABS(e_new[i]) < e_cut) {
e_new[i] = Real_t(0.) ;
}
if ( e_new[i] < emin ) {
e_new[i] = emin ;
}
}
CalcPressureForElems_cpu(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax, length);
for (Index_t i = 0 ; i < length ; ++i){
if ( delvc[i] <= Real_t(0.) ) {
Real_t ssc = ( pbvc[i] * e_new[i]
+ vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;
if ( ssc <= Real_t(0.) ) {
ssc = Real_t(.333333e-36) ;
} else {
ssc = SQRT(ssc) ;
}
q_new[i] = (ssc*ql[i] + qq[i]) ;
if (FABS(q_new[i]) < q_cut) q_new[i] = Real_t(0.) ;
}
}
Release(&pHalfStep) ;
return ;
}
__global__
void CalcSoundSpeedForElems_kernel(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz,Index_t *matElemlist,
Real_t *ss)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<nz) {
Index_t iz = matElemlist[i];
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(1.111111e-36)) {
ssTmp = Real_t(1.111111e-36);
}
ss[iz] = SQRT(ssTmp);
}
}
static inline
void CalcSoundSpeedForElems_gpu(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz)
{
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(nz,dimBlock.x),1,1);
CalcSoundSpeedForElems_kernel<<<dimGrid,dimBlock>>>
(vnewc,rho0,enewc,pnewc,pbvc,bvc,ss4o3,nz,meshGPU.m_matElemlist,meshGPU.m_ss);
CUDA_DEBUGSYNC;
}
static inline
void CalcSoundSpeedForElems_cpu(Real_t *vnewc, Real_t rho0, Real_t *enewc,
Real_t *pnewc, Real_t *pbvc,
Real_t *bvc, Real_t ss4o3, Index_t nz)
{
for (Index_t i = 0; i < nz ; ++i) {
Index_t iz = mesh.matElemlist(i);
Real_t ssTmp = (pbvc[i] * enewc[i] + vnewc[i] * vnewc[i] *
bvc[i] * pnewc[i]) / rho0;
if (ssTmp <= Real_t(1.111111e-36)) {
ssTmp = Real_t(1.111111e-36);
}
mesh.ss(iz) = SQRT(ssTmp);
}
}
__global__
void EvalEOSForElemsPart1_kernel(
Index_t length,Real_t eosvmin,Real_t eosvmax,
Index_t *matElemlist,
Real_t *e,Real_t *delv,Real_t *p,Real_t *q,Real_t *qq,Real_t *ql,
Real_t *vnewc,
Real_t *e_old,Real_t *delvc,Real_t *p_old,Real_t *q_old,
Real_t *compression,Real_t *compHalfStep,
Real_t *qq_old,Real_t *ql_old,Real_t *work)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = matElemlist[i];
e_old[i] = e[zidx];
delvc[i] = delv[zidx];
p_old[i] = p[zidx];
q_old[i] = q[zidx];
Real_t vchalf ;
compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
vchalf = vnewc[i] - delvc[i] * Real_t(.5);
compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
compHalfStep[i] = compression[i] ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */
p_old[i] = Real_t(0.) ;
compression[i] = Real_t(0.) ;
compHalfStep[i] = Real_t(0.) ;
}
}
qq_old[i] = qq[zidx] ;
ql_old[i] = ql[zidx] ;
work[i] = Real_t(0.) ;
}
}
__global__
void EvalEOSForElemsPart2_kernel(
Index_t length,
Index_t *matElemlist,Real_t *p_new,Real_t *e_new,Real_t *q_new,
Real_t *p,Real_t *e,Real_t *q)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = matElemlist[i] ;
p[zidx] = p_new[i];
e[zidx] = e_new[i];
q[zidx] = q_new[i];
}
}
static inline
void EvalEOSForElems_gpu(Real_t *vnewc, Index_t length)
{
Real_t e_cut = mesh.e_cut();
Real_t p_cut = mesh.p_cut();
Real_t ss4o3 = mesh.ss4o3();
Real_t q_cut = mesh.q_cut();
Real_t eosvmax = mesh.eosvmax() ;
Real_t eosvmin = mesh.eosvmin() ;
Real_t pmin = mesh.pmin() ;
Real_t emin = mesh.emin() ;
Real_t rho0 = mesh.refdens() ;
Real_t *e_old,*delvc,*p_old,*q_old;
Real_t *compression,*compHalfStep;
Real_t *qq,*ql,*work,*p_new,*e_new,*q_new,*bvc,*pbvc;
CUDA( cudaMalloc(&e_old,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&delvc,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&p_old,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&q_old,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&compression,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&compHalfStep,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&qq,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&ql,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&work,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&p_new,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&e_new,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&q_new,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&bvc,sizeof(Real_t)*length) );
CUDA( cudaMalloc(&pbvc,sizeof(Real_t)*length) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
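// Part1 gathers e, delv, p, q, qq, ql into compressed per-material work arrays
// (indexed through matElemlist) and computes the full- and half-step compressions;
// after the energy solve, Part2 scatters p, e, q back to mesh order.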
EvalEOSForElemsPart1_kernel<<<dimGrid,dimBlock>>>
(length,eosvmin,eosvmax,
meshGPU.m_matElemlist,
meshGPU.m_e,meshGPU.m_delv,meshGPU.m_p,meshGPU.m_q,meshGPU.m_qq,meshGPU.m_ql,
vnewc,
e_old,delvc,p_old,q_old,
compression,compHalfStep,qq,ql,work);
CUDA_DEBUGSYNC;
CalcEnergyForElems_gpu(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq, ql, rho0, eosvmax, length);
EvalEOSForElemsPart2_kernel<<<dimGrid,dimBlock>>>
(length,
meshGPU.m_matElemlist,p_new,e_new,q_new,
meshGPU.m_p,meshGPU.m_e,meshGPU.m_q);
CUDA_DEBUGSYNC;
CalcSoundSpeedForElems_gpu(vnewc, rho0, e_new, p_new,
pbvc, bvc, ss4o3, length) ;
CUDA( cudaFree(pbvc) );
CUDA( cudaFree(bvc) );
CUDA( cudaFree(q_new) );
CUDA( cudaFree(e_new) );
CUDA( cudaFree(p_new) );
CUDA( cudaFree(work) );
CUDA( cudaFree(ql) );
CUDA( cudaFree(qq) );
CUDA( cudaFree(compHalfStep) );
CUDA( cudaFree(compression) );
CUDA( cudaFree(q_old) );
CUDA( cudaFree(p_old) );
CUDA( cudaFree(delvc) );
CUDA( cudaFree(e_old) );
}
static inline
void EvalEOSForElems_cpu(Real_t *vnewc, Index_t length)
{
Real_t e_cut = mesh.e_cut();
Real_t p_cut = mesh.p_cut();
Real_t ss4o3 = mesh.ss4o3();
Real_t q_cut = mesh.q_cut();
Real_t eosvmax = mesh.eosvmax() ;
Real_t eosvmin = mesh.eosvmin() ;
Real_t pmin = mesh.pmin() ;
Real_t emin = mesh.emin() ;
Real_t rho0 = mesh.refdens() ;
Real_t *e_old = Allocate<Real_t>(length) ;
Real_t *delvc = Allocate<Real_t>(length) ;
Real_t *p_old = Allocate<Real_t>(length) ;
Real_t *q_old = Allocate<Real_t>(length) ;
Real_t *compression = Allocate<Real_t>(length) ;
Real_t *compHalfStep = Allocate<Real_t>(length) ;
Real_t *qq = Allocate<Real_t>(length) ;
Real_t *ql = Allocate<Real_t>(length) ;
Real_t *work = Allocate<Real_t>(length) ;
Real_t *p_new = Allocate<Real_t>(length) ;
Real_t *e_new = Allocate<Real_t>(length) ;
Real_t *q_new = Allocate<Real_t>(length) ;
Real_t *bvc = Allocate<Real_t>(length) ;
Real_t *pbvc = Allocate<Real_t>(length) ;
/* compress data, minimal set */
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
e_old[i] = mesh.e(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
delvc[i] = mesh.delv(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
p_old[i] = mesh.p(zidx) ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
q_old[i] = mesh.q(zidx) ;
}
for (Index_t i = 0; i < length ; ++i) {
Real_t vchalf ;
compression[i] = Real_t(1.) / vnewc[i] - Real_t(1.);
vchalf = vnewc[i] - delvc[i] * Real_t(.5);
compHalfStep[i] = Real_t(1.) / vchalf - Real_t(1.);
}
/* Check for v > eosvmax or v < eosvmin */
if ( eosvmin != Real_t(0.) ) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] <= eosvmin) { /* impossible due to calling func? */
compHalfStep[i] = compression[i] ;
}
}
}
if ( eosvmax != Real_t(0.) ) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] >= eosvmax) { /* impossible due to calling func? */
p_old[i] = Real_t(0.) ;
compression[i] = Real_t(0.) ;
compHalfStep[i] = Real_t(0.) ;
}
}
}
for (Index_t i = 0 ; i < length ; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
qq[i] = mesh.qq(zidx) ;
ql[i] = mesh.ql(zidx) ;
work[i] = Real_t(0.) ;
}
CalcEnergyForElems_cpu(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq, ql, rho0, eosvmax, length);
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.p(zidx) = p_new[i] ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.e(zidx) = e_new[i] ;
}
for (Index_t i=0; i<length; ++i) {
Index_t zidx = mesh.matElemlist(i) ;
mesh.q(zidx) = q_new[i] ;
}
CalcSoundSpeedForElems_cpu(vnewc, rho0, e_new, p_new,
pbvc, bvc, ss4o3, length) ;
Release(&pbvc) ;
Release(&bvc) ;
Release(&q_new) ;
Release(&e_new) ;
Release(&p_new) ;
Release(&work) ;
Release(&ql) ;
Release(&qq) ;
Release(&compHalfStep) ;
Release(&compression) ;
Release(&q_old) ;
Release(&p_old) ;
Release(&delvc) ;
Release(&e_old) ;
}
__global__
void ApplyMaterialPropertiesForElemsPart1_kernel(
Index_t length,Real_t eosvmin,Real_t eosvmax,
Index_t *matElemlist,Real_t *vnew,
Real_t *vnewc)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zn = matElemlist[i] ;
vnewc[i] = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc[i] < eosvmin)
vnewc[i] = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc[i] > eosvmax)
vnewc[i] = eosvmax ;
}
}
}
static inline
void ApplyMaterialPropertiesForElems_gpu()
{
Index_t length = mesh.numElem() ;
if (length != 0) {
/* Expose all of the variables needed for material evaluation */
Real_t eosvmin = mesh.eosvmin() ;
Real_t eosvmax = mesh.eosvmax() ;
Real_t *vnewc;
CUDA( cudaMalloc(&vnewc,sizeof(Real_t)*length) );
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
ApplyMaterialPropertiesForElemsPart1_kernel<<<dimGrid,dimBlock>>>
(length,eosvmin,eosvmax,
meshGPU.m_matElemlist,meshGPU.m_vnew,
vnewc);
CUDA_DEBUGSYNC;
/*
for (Index_t i=0; i<length; ++i) {
Index_t zn = mesh.matElemlist(i) ;
Real_t vc = mesh.v(zn) ;
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
exit(VolumeError) ;
}
}
*/
EvalEOSForElems_gpu(vnewc, length);
CUDA( cudaFree(vnewc) );
}
}
static inline
void ApplyMaterialPropertiesForElems_cpu()
{
Index_t length = mesh.numElem() ;
if (length != 0) {
/* Expose all of the variables needed for material evaluation */
Real_t eosvmin = mesh.eosvmin() ;
Real_t eosvmax = mesh.eosvmax() ;
Real_t *vnewc = Allocate<Real_t>(length) ;
for (Index_t i=0 ; i<length ; ++i) {
Index_t zn = mesh.matElemlist(i) ;
vnewc[i] = mesh.vnew(zn) ;
}
if (eosvmin != Real_t(0.)) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] < eosvmin)
vnewc[i] = eosvmin ;
}
}
if (eosvmax != Real_t(0.)) {
for(Index_t i=0 ; i<length ; ++i) {
if (vnewc[i] > eosvmax)
vnewc[i] = eosvmax ;
}
}
for (Index_t i=0; i<length; ++i) {
Index_t zn = mesh.matElemlist(i) ;
Real_t vc = mesh.v(zn) ;
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
exit(VolumeError) ;
}
}
EvalEOSForElems_cpu(vnewc, length);
Release(&vnewc) ;
}
}
static inline
void ApplyMaterialPropertiesForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(vnew); FC(v); FC(e); FC(delv); FC(p); FC(q); FC(qq); FC(ql);
ApplyMaterialPropertiesForElems_cpu();
SG(p); SG(e); SG(q); SG(ss);
}
else {
FG(matElemlist); FG(vnew); FG(v); FG(e); FG(delv); FG(p); FG(q); FG(qq); FG(ql);
ApplyMaterialPropertiesForElems_gpu();
SC(p); SC(e); SC(q); SC(ss);
}
}
__global__
void UpdateVolumesForElems_kernel(Index_t numElem,Real_t v_cut,
Real_t *vnew,
Real_t *v)
{
int i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<numElem) {
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}
}
static inline
void UpdateVolumesForElems_gpu()
{
Index_t numElem = mesh.numElem();
if (numElem != 0) {
Real_t v_cut = mesh.v_cut();
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(numElem,dimBlock.x),1,1);
UpdateVolumesForElems_kernel<<<dimGrid,dimBlock>>>
(numElem,v_cut,meshGPU.m_vnew,meshGPU.m_v);
}
}
static inline
void UpdateVolumesForElems_cpu()
{
Index_t numElem = mesh.numElem();
if (numElem != 0) {
Real_t v_cut = mesh.v_cut();
for(Index_t i=0 ; i<numElem ; ++i) {
Real_t tmpV ;
tmpV = mesh.vnew(i) ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
mesh.v(i) = tmpV ;
}
}
return ;
}
static inline
void UpdateVolumesForElems(int useCPU)
{
if (useCPU) {
FC(vnew);
UpdateVolumesForElems_cpu();
SG(v);
}
else {
FG(vnew);
UpdateVolumesForElems_gpu();
SC(v);
}
}
static inline
void LagrangeElements(int useCPU)
{
const Real_t deltatime = mesh.deltatime() ;
CalcLagrangeElements(deltatime, useCPU) ;
/* Calculate Q. (Monotonic q option requires communication) */
CalcQForElems(useCPU) ;
ApplyMaterialPropertiesForElems(useCPU) ;
UpdateVolumesForElems(useCPU) ;
}
__global__
void CalcCourantConstraintForElems_kernel(
Index_t length,Real_t qqc2,
Index_t *matElemlist,Real_t *ss,Real_t *vdov,Real_t *arealg,
Real_t *mindtcourant)
{
__shared__ Real_t minArray[BLOCKSIZE];
int i=blockDim.x*blockIdx.x + threadIdx.x;
Real_t dtcourant = Real_t(1.0e+20) ;
if (i<length) {
Index_t indx = matElemlist[i] ;
Real_t dtf = ss[indx] * ss[indx] ;
if ( vdov[indx] < Real_t(0.) ) {
dtf = dtf
+ qqc2 * arealg[indx] * arealg[indx]
* vdov[indx] * vdov[indx] ;
}
dtf = SQRT(dtf) ;
dtf = arealg[indx] / dtf ;
/* determine minimum timestep with its corresponding elem */
if (vdov[indx] != Real_t(0.)) {
if ( dtf < dtcourant ) {
dtcourant = dtf ;
}
}
}
minArray[threadIdx.x]=dtcourant;
reduceMin<Real_t,BLOCKSIZE>(minArray,threadIdx.x);
if (threadIdx.x==0)
mindtcourant[blockIdx.x]=minArray[0];
}
static inline
void CalcCourantConstraintForElems_gpu()
{
Real_t qqc = mesh.qqc();
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Index_t length = mesh.numElem() ;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
Real_t *dev_mindtcourant;
CUDA( cudaMalloc(&dev_mindtcourant,sizeof(Real_t)*dimGrid.x) );
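// Two-stage minimum: each block reduces its candidate timesteps in shared memory
// (reduceMin) and writes one value per block to dev_mindtcourant; the host finishes
// the reduction over blocks below.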
CalcCourantConstraintForElems_kernel<<<dimGrid,dimBlock>>>
(length,qqc2,
meshGPU.m_matElemlist,meshGPU.m_ss,meshGPU.m_vdov,meshGPU.m_arealg,
dev_mindtcourant);
CUDA_DEBUGSYNC;
Real_t *mindtcourant = (Real_t*)malloc(sizeof(Real_t)*dimGrid.x);
CUDA( cudaMemcpy(mindtcourant,dev_mindtcourant,sizeof(Real_t)*dimGrid.x,cudaMemcpyDeviceToHost) );
CUDA( cudaFree(dev_mindtcourant) );
// finish the MIN computation over the thread blocks
Real_t dtcourant;
dtcourant=mindtcourant[0];
for (int i=1; i<dimGrid.x; i++) {
MINEQ(dtcourant,mindtcourant[i]);
}
free(mindtcourant);
if (dtcourant < Real_t(1.0e+20))
mesh.dtcourant() = dtcourant ;
}
static inline
void CalcCourantConstraintForElems_cpu()
{
Real_t dtcourant = Real_t(1.0e+20) ;
Index_t courant_elem = -1 ;
Real_t qqc = mesh.qqc() ;
Index_t length = mesh.numElem() ;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
for (Index_t i = 0 ; i < length ; ++i) {
Index_t indx = mesh.matElemlist(i) ;
Real_t dtf = mesh.ss(indx) * mesh.ss(indx) ;
if ( mesh.vdov(indx) < Real_t(0.) ) {
dtf = dtf
+ qqc2 * mesh.arealg(indx) * mesh.arealg(indx)
* mesh.vdov(indx) * mesh.vdov(indx) ;
}
dtf = SQRT(dtf) ;
dtf = mesh.arealg(indx) / dtf ;
/* determine minimum timestep with its corresponding elem */
if (mesh.vdov(indx) != Real_t(0.)) {
if ( dtf < dtcourant ) {
dtcourant = dtf ;
courant_elem = indx ;
}
}
}
/* Don't try to register a time constraint if none of the elements
* were active */
if (courant_elem != -1) {
mesh.dtcourant() = dtcourant ;
}
return ;
}
static inline
void CalcCourantConstraintForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(ss); FC(vdov); FC(arealg);
CalcCourantConstraintForElems_cpu();
}
else {
FG(matElemlist); FG(ss); FG(vdov); FG(arealg);
CalcCourantConstraintForElems_gpu();
}
}
__global__
void CalcHydroConstraintForElems_kernel(
Index_t length,Real_t dvovmax,
Index_t *matElemlist,Real_t *vdov,
Real_t *mindthydro)
{
__shared__ Real_t minArray[BLOCKSIZE];
int i=blockDim.x*blockIdx.x + threadIdx.x;
Real_t dthydro = Real_t(1.0e+20) ;
if (i<length) {
Index_t indx = matElemlist[i] ;
if (vdov[indx] != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov[indx])+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
}
minArray[threadIdx.x]=dthydro;
reduceMin<Real_t,BLOCKSIZE>(minArray,threadIdx.x);
if (threadIdx.x==0)
mindthydro[blockIdx.x]=minArray[0];
}
static inline
void CalcHydroConstraintForElems_gpu()
{
Real_t dvovmax = mesh.dvovmax() ;
Index_t length = mesh.numElem() ;
dim3 dimBlock=dim3(BLOCKSIZE,1,1);
dim3 dimGrid=dim3(PAD_DIV(length,dimBlock.x),1,1);
Real_t *dev_mindthydro;
CUDA( cudaMalloc(&dev_mindthydro,sizeof(Real_t)*dimGrid.x) );
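// Same two-stage block/host min reduction as the Courant constraint, applied to
// dvovmax / |vdov|.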
CalcHydroConstraintForElems_kernel<<<dimGrid,dimBlock>>>
(length,dvovmax,
meshGPU.m_matElemlist,meshGPU.m_vdov,
dev_mindthydro);
CUDA_DEBUGSYNC;
Real_t *mindthydro = (Real_t*)malloc(sizeof(Real_t)*dimGrid.x);
CUDA( cudaMemcpy(mindthydro,dev_mindthydro,sizeof(Real_t)*dimGrid.x,cudaMemcpyDeviceToHost) );
CUDA( cudaFree(dev_mindthydro) );
// finish the MIN computation over the thread blocks
Real_t dthydro=mindthydro[0];
for (int i=1; i<dimGrid.x; i++) {
MINEQ(dthydro,mindthydro[i]);
}
free(mindthydro);
if (dthydro < Real_t(1.0e+20))
mesh.dthydro() = dthydro ;
}
static inline
void CalcHydroConstraintForElems_cpu()
{
Real_t dthydro = Real_t(1.0e+20) ;
Index_t hydro_elem = -1 ;
Real_t dvovmax = mesh.dvovmax() ;
Index_t length = mesh.numElem() ;
for (Index_t i = 0 ; i < length ; ++i) {
Index_t indx = mesh.matElemlist(i) ;
if (mesh.vdov(indx) != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(mesh.vdov(indx))+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
hydro_elem = indx ;
}
}
}
if (hydro_elem != -1) {
mesh.dthydro() = dthydro ;
}
return ;
}
static inline
void CalcHydroConstraintForElems(int useCPU)
{
if (useCPU) {
FC(matElemlist); FC(vdov);
CalcHydroConstraintForElems_cpu();
}
else {
FG(matElemlist); FG(vdov);
CalcHydroConstraintForElems_gpu();
}
}
static inline
void CalcTimeConstraintsForElems(int useCPU) {
/* evaluate time constraint */
CalcCourantConstraintForElems(useCPU) ;
/* check hydro constraint */
CalcHydroConstraintForElems(useCPU) ;
}
static inline
void LagrangeLeapFrog(int useCPU)
{
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(useCPU);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(useCPU);
CalcTimeConstraintsForElems(useCPU);
// LagrangeRelease() ; Creation/destruction of temps may be important to capture
}
int main(int argc, char *argv[])
{
struct timespec start, stop;
clock_gettime(CLOCK_MONOTONIC,&start);
if (argc < 2) {
printf("Usage: %s <edgeElems>\n", argv[0]);
return 1;
}
Index_t edgeElems = atoi(argv[1]) ; //orig
Index_t edgeNodes = edgeElems+1 ;
// Real_t ds = Real_t(1.125)/Real_t(edgeElems) ; /* may accumulate roundoff */
Real_t tx, ty, tz ;
Index_t nidx, zidx ;
Index_t meshElems ;
/* get run options to measure various metrics */
/* ... */
cuda_init();
#ifdef DP_PROFILING
cudaPrintfInit(4096*4096); //enable a 16MB buffer for cuPrintf
#endif
/****************************/
/* Initialize Sedov Mesh */
/****************************/
/* construct a uniform box for this processor */
mesh.sizeX() = edgeElems ;
mesh.sizeY() = edgeElems ;
mesh.sizeZ() = edgeElems ;
mesh.numElem() = edgeElems*edgeElems*edgeElems ;
mesh.numNode() = edgeNodes*edgeNodes*edgeNodes ;
meshElems = mesh.numElem() ;
/* allocate field memory */
mesh.AllocateElemPersistent(mesh.numElem()) ;
mesh.AllocateElemTemporary (mesh.numElem()) ;
mesh.AllocateNodalPersistent(mesh.numNode()) ;
mesh.AllocateNodesets(edgeNodes*edgeNodes) ;
/* initialize nodal coordinates */
nidx = 0 ;
tz = Real_t(0.) ;
for (Index_t plane=0; plane<edgeNodes; ++plane) {
ty = Real_t(0.) ;
for (Index_t row=0; row<edgeNodes; ++row) {
tx = Real_t(0.) ;
for (Index_t col=0; col<edgeNodes; ++col) {
mesh.x(nidx) = tx ;
mesh.y(nidx) = ty ;
mesh.z(nidx) = tz ;
++nidx ;
// tx += ds ; /* may accumulate roundoff... */
tx = Real_t(1.125)*Real_t(col+1)/Real_t(edgeElems) ;
}
// ty += ds ; /* may accumulate roundoff... */
ty = Real_t(1.125)*Real_t(row+1)/Real_t(edgeElems) ;
}
// tz += ds ; /* may accumulate roundoff... */
tz = Real_t(1.125)*Real_t(plane+1)/Real_t(edgeElems) ;
}
/* embed hexahedral elements in nodal point lattice */
nidx = 0 ;
zidx = 0 ;
for (Index_t plane=0; plane<edgeElems; ++plane) {
for (Index_t row=0; row<edgeElems; ++row) {
for (Index_t col=0; col<edgeElems; ++col) {
mesh.nodelist(zidx,0) = nidx ;
mesh.nodelist(zidx,1) = nidx + 1 ;
mesh.nodelist(zidx,2) = nidx + edgeNodes + 1 ;
mesh.nodelist(zidx,3) = nidx + edgeNodes ;
mesh.nodelist(zidx,4) = nidx + edgeNodes*edgeNodes ;
mesh.nodelist(zidx,5) = nidx + edgeNodes*edgeNodes + 1 ;
mesh.nodelist(zidx,6) = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ;
mesh.nodelist(zidx,7) = nidx + edgeNodes*edgeNodes + edgeNodes ;
++zidx ;
++nidx ;
}
++nidx ;
}
nidx += edgeNodes ;
}
/* Create a material IndexSet (entire mesh same material for now) */
for (Index_t i=0; i<meshElems; ++i) {
mesh.matElemlist(i) = i ;
}
/* initialize material parameters */
mesh.dtfixed() = Real_t(-1.0e-7) ;
mesh.deltatime() = Real_t(1.0e-7) ;
mesh.deltatimemultlb() = Real_t(1.1) ;
mesh.deltatimemultub() = Real_t(1.2) ;
mesh.stoptime() = Real_t(1.0e-2) ;
mesh.dtcourant() = Real_t(1.0e+20) ;
mesh.dthydro() = Real_t(1.0e+20) ;
mesh.dtmax() = Real_t(1.0e-2) ;
mesh.time() = Real_t(0.) ;
mesh.cycle() = 0 ;
mesh.e_cut() = Real_t(1.0e-7) ;
mesh.p_cut() = Real_t(1.0e-7) ;
mesh.q_cut() = Real_t(1.0e-7) ;
mesh.u_cut() = Real_t(1.0e-7) ;
mesh.v_cut() = Real_t(1.0e-10) ;
mesh.hgcoef() = Real_t(3.0) ;
mesh.ss4o3() = Real_t(4.0)/Real_t(3.0) ;
mesh.qstop() = Real_t(1.0e+12) ;
mesh.monoq_max_slope() = Real_t(1.0) ;
mesh.monoq_limiter_mult() = Real_t(2.0) ;
mesh.qlc_monoq() = Real_t(0.5) ;
mesh.qqc_monoq() = Real_t(2.0)/Real_t(3.0) ;
mesh.qqc() = Real_t(2.0) ;
mesh.pmin() = Real_t(0.) ;
mesh.emin() = Real_t(-1.0e+15) ;
mesh.dvovmax() = Real_t(0.1) ;
mesh.eosvmax() = Real_t(1.0e+9) ;
mesh.eosvmin() = Real_t(1.0e-9) ;
mesh.refdens() = Real_t(1.0) ;
/* initialize field data */
for (Index_t i=0; i<meshElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = mesh.nodelist(i,lnode);
x_local[lnode] = mesh.x(gnode);
y_local[lnode] = mesh.y(gnode);
z_local[lnode] = mesh.z(gnode);
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
mesh.volo(i) = volume ;
mesh.elemMass(i) = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t idx = mesh.nodelist(i,j);
mesh.nodalMass(idx) += volume / Real_t(8.0) ;
}
}
/* deposit energy */
mesh.e(0) = Real_t(3.948746e+7) ;
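/* Sedov blast: all of the initial internal energy is placed in the corner element (element 0) */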
/* set up symmetry nodesets */
nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
mesh.symmX(nidx) = planeInc + j*edgeNodes ;
mesh.symmY(nidx) = planeInc + j ;
mesh.symmZ(nidx) = rowInc + j ;
++nidx ;
}
}
/* set up element connectivity information */
mesh.lxim(0) = 0 ;
for (Index_t i=1; i<meshElems; ++i) {
mesh.lxim(i) = i-1 ;
mesh.lxip(i-1) = i ;
}
mesh.lxip(meshElems-1) = meshElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
mesh.letam(i) = i ;
mesh.letap(meshElems-edgeElems+i) = meshElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<meshElems; ++i) {
mesh.letam(i) = i-edgeElems ;
mesh.letap(i-edgeElems) = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
mesh.lzetam(i) = i ;
mesh.lzetap(meshElems-edgeElems*edgeElems+i) = meshElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<meshElems; ++i) {
mesh.lzetam(i) = i - edgeElems*edgeElems ;
mesh.lzetap(i-edgeElems*edgeElems) = i ;
}
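/* lxim/lxip, letam/letap and lzetam/lzetap hold each element's -/+ neighbor along the
   xi (stride 1), eta (stride edgeElems) and zeta (stride edgeElems*edgeElems)
   directions, clamped to the element itself on the domain boundary */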
/* set up boundary condition information */
for (Index_t i=0; i<meshElems; ++i) {
mesh.elemBC(i) = 0 ; /* clear BCs by default */
}
/* faces on "external" boundaries will be */
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
mesh.elemBC(planeInc+j*edgeElems) |= XI_M_SYMM ;
mesh.elemBC(planeInc+j*edgeElems+edgeElems-1) |= XI_P_FREE ;
mesh.elemBC(planeInc+j) |= ETA_M_SYMM ;
mesh.elemBC(planeInc+j+edgeElems*edgeElems-edgeElems) |= ETA_P_FREE ;
mesh.elemBC(rowInc+j) |= ZETA_M_SYMM ;
mesh.elemBC(rowInc+j+meshElems-edgeElems*edgeElems) |= ZETA_P_FREE ;
}
}
mesh.AllocateNodeElemIndexes();
/* initialize meshGPU */
meshGPU.init(&mesh);
meshGPU.freshenGPU();
#ifndef DP_PROFILING
/* timestep to solution */
int its=0;
#endif
#ifdef DP_PROFILING
while (its<1)
{ //only profiling the first iteration
#else
while(mesh.time() < mesh.stoptime() )
{
#endif
TimeIncrement() ;
LagrangeLeapFrog(0) ;
its++;
/* problem->commNodes->Transfer(CommNodes::syncposvel) ; */
#if LULESH_SHOW_PROGRESS
printf("time = %e, dt=%e\n",
double(mesh.time()), double(mesh.deltatime()) ) ;
#endif
printf("iterations: %d\n",its);
}
#ifdef DP_PROFILING
cudaPrintfEnd();
#endif
// FC(x);
// FILE *fp = fopen("x.asc","wb");
// for (Index_t i=0; i<mesh.numElem(); i++)
// fprintf(fp,"%.6f\n",mesh.x(i));
// fclose(fp);
clock_gettime(CLOCK_MONOTONIC,&stop);
float time=(stop.tv_sec-start.tv_sec)*1000.0f+(stop.tv_nsec-start.tv_nsec)/1000000.0f;
printf("total time= %f\n",time);
printf("k1= %f,k2= %f,k3= %f,k4 =%f,k5= %f\n",k1,k2,k3,k4,k5);
return 0;
}
|
2ecb62a036de60bc0af37ad6129016bb290bfd9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "QuaternionMath.h"
#include "TwoStepNVTRigidGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepNVTRigidGPU.cu
\brief Defines GPU kernel code for NVT integration on the GPU. Used by TwoStepNVTRigidGPU.
*/
// Flag for invalid particle index, identical to the sentinel value NO_INDEX in RigidData.h
#define INVALID_INDEX 0xffffffff
/*! Fifth-order Taylor expansion of exp(x) about x = 0
\param x Point at which to evaluate the expansion
*/
__device__ Scalar taylor_exp(Scalar x)
{
Scalar x2, x3, x4, x5;
x2 = x * x;
x3 = x2 * x;
x4 = x2 * x2;
x5 = x4 * x;
return (Scalar(1.0) + x + Scalar(1.0)/Scalar(2.0) * x2 + Scalar(1.0)/Scalar(6.0) * x3 + Scalar(1.0)/Scalar(24.0) * x4 + Scalar(1.0)/Scalar(120.0) * x5);
}
#pragma mark RIGID_STEP_ONE_KERNEL
/*! Takes the first half-step forward for rigid bodies in the velocity-verlet NVT integration
\param rdata_com Body center of mass
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_body_image Body image
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat velocity rescaling factors (translational, per direction)
\param nvt_rdata_scale_r Thermostat rescaling factor (rotational)
\param nvt_rdata_partial_Ksum_t Body translational kinetic energy
\param nvt_rdata_partial_Ksum_r Body rotational kinetic energy
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_one_body_kernel(Scalar4* rdata_com,
Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
int3* rdata_body_image,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
Scalar* nvt_rdata_partial_Ksum_t,
Scalar* nvt_rdata_partial_Ksum_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// do velocity verlet update
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// r(t+deltaT) = r(t) + v(t+deltaT/2)*deltaT
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, com, vel, orientation, ex_space, ey_space, ez_space, force, torque, conjqm;
int3 body_image;
Scalar4 mbody, tbody, fquat;
Scalar akin_t, akin_r;
Scalar dt_half = (Scalar)0.5f * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
com = rdata_com[idx_body];
vel = rdata_vel[idx_body];
orientation = rdata_orientation[idx_body];
body_image = rdata_body_image[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
// update velocity
Scalar dtfm = dt_half / body_mass;
Scalar4 vel2;
vel2.x = vel.x + dtfm * force.x;
vel2.y = vel.y + dtfm * force.y;
vel2.z = vel.z + dtfm * force.z;
vel2.x *= nvt_rdata_scale_t.x;
vel2.y *= nvt_rdata_scale_t.y;
vel2.z *= nvt_rdata_scale_t.z;
vel2.w = (Scalar)0.0f;
akin_t = body_mass * (vel2.x * vel2.x + vel2.y * vel2.y + vel2.z * vel2.z);
// update position
Scalar3 pos2;
pos2.x = com.x + vel2.x * deltaT;
pos2.y = com.y + vel2.y * deltaT;
pos2.z = com.z + vel2.z * deltaT;
// time to fix the periodic boundary conditions
box.wrap(pos2, body_image);
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2;
conjqm2.x = conjqm.x + deltaT * fquat.x;
conjqm2.y = conjqm.y + deltaT * fquat.y;
conjqm2.z = conjqm.z + deltaT * fquat.z;
conjqm2.w = conjqm.w + deltaT * fquat.w;
conjqm2.x *= nvt_rdata_scale_r;
conjqm2.y *= nvt_rdata_scale_r;
conjqm2.z *= nvt_rdata_scale_r;
conjqm2.w *= nvt_rdata_scale_r;
// step 1.4 to 1.13 - use no_squish rotate to update p and q
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(1, conjqm2, orientation, moment_inertia, deltaT);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
// update the exyz_space
// transform p back to angmom
// update angular velocity
Scalar4 angmom2;
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= (Scalar)0.5f;
angmom2.y *= (Scalar)0.5f;
angmom2.z *= (Scalar)0.5f;
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
akin_r = angmom2.x * angvel2.x + angmom2.y * angvel2.y + angmom2.z * angvel2.z;
// write out the results (MEM_TRANSFER: ? bytes)
rdata_com[idx_body] = make_scalar4(pos2.x, pos2.y, pos2.z, com.w);
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_orientation[idx_body] = orientation;
rdata_body_image[idx_body] = body_image;
rdata_conjqm[idx_body] = conjqm2;
nvt_rdata_partial_Ksum_t[group_idx] = akin_t;
nvt_rdata_partial_Ksum_r[group_idx] = akin_r;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
hipError_t gpu_nvt_rigid_step_one(const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
assert(d_net_force);
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
// setup the grid to run the kernel for rigid bodies
int block_size = 64;
int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
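// n_group_bodies / block_size + 1 rounds the grid up (with one spare block when the
// count divides evenly); the kernel's group_idx bounds check makes the extra threads a no-op.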
hipLaunchKernelGGL(( gpu_nvt_rigid_step_one_body_kernel), dim3(body_grid), dim3(body_threads) , 0, 0, rigid_data.com,
rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.body_image,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
nvt_rdata.partial_Ksum_t,
nvt_rdata.partial_Ksum_r,
box,
deltaT);
return hipSuccess;
}
#pragma mark RIGID_STEP_TWO_KERNEL
//! Takes the 2nd 1/2 step forward in the velocity-verlet NVT integration scheme
/*!
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat velocity rescaling factors (translational, per direction)
\param nvt_rdata_scale_r Thermostat rescaling factor (rotational)
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_two_body_kernel(Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, vel, ex_space, ey_space, ez_space, orientation, conjqm;
Scalar4 force, torque;
Scalar4 mbody, tbody, fquat;
Scalar dt_half = (Scalar)0.5f * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
// Update body velocity and angmom
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
vel = rdata_vel[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
orientation = rdata_orientation[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
Scalar dtfm = dt_half / body_mass;
// update the velocity
Scalar4 vel2;
vel2.x = nvt_rdata_scale_t.x * vel.x + dtfm * force.x;
vel2.y = nvt_rdata_scale_t.y * vel.y + dtfm * force.y;
vel2.z = nvt_rdata_scale_t.z * vel.z + dtfm * force.z;
vel2.w = (Scalar)0.0f;
// update angular momentum
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2, angmom2;
conjqm2.x = nvt_rdata_scale_r * conjqm.x + deltaT * fquat.x;
conjqm2.y = nvt_rdata_scale_r * conjqm.y + deltaT * fquat.y;
conjqm2.z = nvt_rdata_scale_r * conjqm.z + deltaT * fquat.z;
conjqm2.w = nvt_rdata_scale_r * conjqm.w + deltaT * fquat.w;
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= (Scalar)0.5f;
angmom2.y *= (Scalar)0.5f;
angmom2.z *= (Scalar)0.5f;
angmom2.w = (Scalar)0.0f;
// update angular velocity
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
// write out results
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_conjqm[idx_body] = conjqm2;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param d_net_virial Particle net virial
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
hipError_t gpu_nvt_rigid_step_two( const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
unsigned int block_size = 64;
unsigned int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
hipLaunchKernelGGL(( gpu_nvt_rigid_step_two_body_kernel), dim3(body_grid), dim3(body_threads) , 0, 0, rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
box,
deltaT);
return hipSuccess;
}
#pragma mark RIGID_KINETIC_ENERGY_REDUCTION
//! Shared memory for kinetic energy reduction
extern __shared__ Scalar nvt_rigid_sdata[];
/*! Summing the kinetic energy of rigid bodies
\param nvt_rdata Thermostat data for rigid bodies
*/
extern "C" __global__ void gpu_nvt_rigid_reduce_ksum_kernel(gpu_nvt_rigid_data nvt_rdata)
{
int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar* body_ke_t = nvt_rigid_sdata;
Scalar* body_ke_r = &nvt_rigid_sdata[blockDim.x];
Scalar Ksum_t = (Scalar)0.0f;
Scalar Ksum_r = (Scalar)0.0f;
// sum up the values in the partial sum via a sliding window
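// (the host launcher below uses a single block, so this loop walks the whole
// partial sum arrays in chunks of blockDim.x: each chunk is staged in shared
// memory, tree-reduced below, and the chunk total accumulated into Ksum_t/Ksum_r)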
for (int start = 0; start < nvt_rdata.n_bodies; start += blockDim.x)
{
if (start + threadIdx.x < nvt_rdata.n_bodies)
{
body_ke_t[threadIdx.x] = nvt_rdata.partial_Ksum_t[start + threadIdx.x];
body_ke_r[threadIdx.x] = nvt_rdata.partial_Ksum_r[start + threadIdx.x];
}
else
{
body_ke_t[threadIdx.x] = (Scalar)0.0f;
body_ke_r[threadIdx.x] = (Scalar)0.0f;
}
__syncthreads();
// reduce the sum within a block
int offset = blockDim.x >> 1;
while (offset > 0)
{
if (threadIdx.x < offset)
{
body_ke_t[threadIdx.x] += body_ke_t[threadIdx.x + offset];
body_ke_r[threadIdx.x] += body_ke_r[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// everybody sums up Ksum
Ksum_t += body_ke_t[0];
Ksum_r += body_ke_r[0];
}
__syncthreads();
if (global_idx == 0)
{
*nvt_rdata.Ksum_t = Ksum_t;
*nvt_rdata.Ksum_r = Ksum_r;
}
}
/*!
\param nvt_rdata Thermostat data for rigid bodies
*/
hipError_t gpu_nvt_rigid_reduce_ksum(const gpu_nvt_rigid_data& nvt_rdata)
{
// setup the grid to run the kernel
int block_size = 128;
dim3 grid( 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel: double the block size to accommodate Ksum_t and Ksum_r
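// the dynamic shared memory holds two arrays of block_size Scalars each:
// body_ke_t in the first half and body_ke_r in the second (see the kernel above)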
hipLaunchKernelGGL(( gpu_nvt_rigid_reduce_ksum_kernel), dim3(grid), dim3(threads), 2 * block_size * sizeof(Scalar) , 0, nvt_rdata);
return hipSuccess;
}
| 2ecb62a036de60bc0af37ad6129016bb290bfd9b.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: ndtrung
#include "QuaternionMath.h"
#include "TwoStepNVTRigidGPU.cuh"
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
/*! \file TwoStepNVTRigidGPU.cu
\brief Defines GPU kernel code for NVT integration on the GPU. Used by TwoStepNVTRigidGPU.
*/
// Flag for invalid particle index, identical to the sentinel value NO_INDEX in RigidData.h
#define INVALID_INDEX 0xffffffff
/*! Taylor expansion
\param x Point to take the expansion
*/
__device__ Scalar taylor_exp(Scalar x)
{
Scalar x2, x3, x4, x5;
x2 = x * x;
x3 = x2 * x;
x4 = x2 * x2;
x5 = x4 * x;
return (Scalar(1.0) + x + Scalar(1.0)/Scalar(2.0) * x2 + Scalar(1.0)/Scalar(6.0) * x3 + Scalar(1.0)/Scalar(24.0) * x4 + Scalar(1.0)/Scalar(120.0) * x5);
}
#pragma mark RIGID_STEP_ONE_KERNEL
/*! Takes the first half-step forward for rigid bodies in the velocity-verlet NVT integration
\param rdata_com Body center of mass
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_body_image Body image
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat rescaling factors applied to the translational velocities
\param nvt_rdata_scale_r Thermostat rescaling factor applied to the conjugate quaternion momenta
\param nvt_rdata_partial_Ksum_t Body translational kinetic energy
\param nvt_rdata_partial_Ksum_r Body rotation kinetic energy
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_one_body_kernel(Scalar4* rdata_com,
Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
int3* rdata_body_image,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
Scalar* nvt_rdata_partial_Ksum_t,
Scalar* nvt_rdata_partial_Ksum_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
// do velocity verlet update
// v(t+deltaT/2) = v(t) + (1/2)a*deltaT
// r(t+deltaT) = r(t) + v(t+deltaT/2)*deltaT
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, com, vel, orientation, ex_space, ey_space, ez_space, force, torque, conjqm;
int3 body_image;
Scalar4 mbody, tbody, fquat;
Scalar akin_t, akin_r;
Scalar dt_half = (Scalar)0.5f * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
com = rdata_com[idx_body];
vel = rdata_vel[idx_body];
orientation = rdata_orientation[idx_body];
body_image = rdata_body_image[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
// update velocity
Scalar dtfm = dt_half / body_mass;
Scalar4 vel2;
vel2.x = vel.x + dtfm * force.x;
vel2.y = vel.y + dtfm * force.y;
vel2.z = vel.z + dtfm * force.z;
vel2.x *= nvt_rdata_scale_t.x;
vel2.y *= nvt_rdata_scale_t.y;
vel2.z *= nvt_rdata_scale_t.z;
vel2.w = (Scalar)0.0f;
akin_t = body_mass * (vel2.x * vel2.x + vel2.y * vel2.y + vel2.z * vel2.z);
// update position
Scalar3 pos2;
pos2.x = com.x + vel2.x * deltaT;
pos2.y = com.y + vel2.y * deltaT;
pos2.z = com.z + vel2.z * deltaT;
// time to fix the periodic boundary conditions
box.wrap(pos2, body_image);
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2;
conjqm2.x = conjqm.x + deltaT * fquat.x;
conjqm2.y = conjqm.y + deltaT * fquat.y;
conjqm2.z = conjqm.z + deltaT * fquat.z;
conjqm2.w = conjqm.w + deltaT * fquat.w;
conjqm2.x *= nvt_rdata_scale_r;
conjqm2.y *= nvt_rdata_scale_r;
conjqm2.z *= nvt_rdata_scale_r;
conjqm2.w *= nvt_rdata_scale_r;
// step 1.4 to 1.13 - use no_squish rotate to update p and q
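// the symmetric 3-2-1-2-3 sequence (half, half, full, half, half steps) is the
// time-reversible splitting of the free rotor propagator used by the NO_SQUISH scheme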
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(1, conjqm2, orientation, moment_inertia, deltaT);
no_squish_rotate(2, conjqm2, orientation, moment_inertia, dt_half);
no_squish_rotate(3, conjqm2, orientation, moment_inertia, dt_half);
// update the exyz_space
// transform p back to angmom
// update angular velocity
Scalar4 angmom2;
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= (Scalar)0.5f;
angmom2.y *= (Scalar)0.5f;
angmom2.z *= (Scalar)0.5f;
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
akin_r = angmom2.x * angvel2.x + angmom2.y * angvel2.y + angmom2.z * angvel2.z;
// write out the results (MEM_TRANSFER: ? bytes)
rdata_com[idx_body] = make_scalar4(pos2.x, pos2.y, pos2.z, com.w);
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_orientation[idx_body] = orientation;
rdata_body_image[idx_body] = body_image;
rdata_conjqm[idx_body] = conjqm2;
nvt_rdata_partial_Ksum_t[group_idx] = akin_t;
nvt_rdata_partial_Ksum_r[group_idx] = akin_r;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
cudaError_t gpu_nvt_rigid_step_one(const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
assert(d_net_force);
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
// setup the grid to run the kernel for rigid bodies
int block_size = 64;
int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
gpu_nvt_rigid_step_one_body_kernel<<< body_grid, body_threads >>>(rigid_data.com,
rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.body_image,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
nvt_rdata.partial_Ksum_t,
nvt_rdata.partial_Ksum_r,
box,
deltaT);
return cudaSuccess;
}
#pragma mark RIGID_STEP_TWO_KERNEL
//! Takes the 2nd 1/2 step forward in the velocity-verlet NVT integration scheme
/*!
\param rdata_vel Body velocity
\param rdata_angmom Angular momentum
\param rdata_angvel Angular velocity
\param rdata_orientation Quaternion
\param rdata_conjqm Conjugate quaternion momentum
\param d_rigid_mass Body mass
\param d_rigid_mi Body inertia moments
\param d_rigid_force Body forces
\param d_rigid_torque Body torques
\param d_rigid_group Body indices
\param n_group_bodies Number of rigid bodies in my group
\param n_bodies Total number of rigid bodies
\param nvt_rdata_scale_t Thermostat rescaling factors applied to the translational velocities
\param nvt_rdata_scale_r Thermostat rescaling factor applied to the conjugate quaternion momenta
\param deltaT Timestep
\param box Box dimensions for periodic boundary condition handling
*/
extern "C" __global__ void gpu_nvt_rigid_step_two_body_kernel(Scalar4* rdata_vel,
Scalar4* rdata_angmom,
Scalar4* rdata_angvel,
Scalar4* rdata_orientation,
Scalar4* rdata_conjqm,
Scalar *d_rigid_mass,
Scalar4 *d_rigid_mi,
Scalar4 *d_rigid_force,
Scalar4 *d_rigid_torque,
unsigned int *d_rigid_group,
unsigned int n_group_bodies,
unsigned int n_bodies,
Scalar4 nvt_rdata_scale_t,
Scalar nvt_rdata_scale_r,
BoxDim box,
Scalar deltaT)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_group_bodies)
return;
Scalar body_mass;
Scalar4 moment_inertia, vel, ex_space, ey_space, ez_space, orientation, conjqm;
Scalar4 force, torque;
Scalar4 mbody, tbody, fquat;
Scalar dt_half = (Scalar)0.5f * deltaT;
unsigned int idx_body = d_rigid_group[group_idx];
// Update body velocity and angmom
body_mass = d_rigid_mass[idx_body];
moment_inertia = d_rigid_mi[idx_body];
vel = rdata_vel[idx_body];
force = d_rigid_force[idx_body];
torque = d_rigid_torque[idx_body];
orientation = rdata_orientation[idx_body];
conjqm = rdata_conjqm[idx_body];
exyzFromQuaternion(orientation, ex_space, ey_space, ez_space);
Scalar dtfm = dt_half / body_mass;
// update the velocity
Scalar4 vel2;
vel2.x = nvt_rdata_scale_t.x * vel.x + dtfm * force.x;
vel2.y = nvt_rdata_scale_t.y * vel.y + dtfm * force.y;
vel2.z = nvt_rdata_scale_t.z * vel.z + dtfm * force.z;
vel2.w = (Scalar)0.0f;
// update angular momentum
matrix_dot(ex_space, ey_space, ez_space, torque, tbody);
quatvec(orientation, tbody, fquat);
Scalar4 conjqm2, angmom2;
conjqm2.x = nvt_rdata_scale_r * conjqm.x + deltaT * fquat.x;
conjqm2.y = nvt_rdata_scale_r * conjqm.y + deltaT * fquat.y;
conjqm2.z = nvt_rdata_scale_r * conjqm.z + deltaT * fquat.z;
conjqm2.w = nvt_rdata_scale_r * conjqm.w + deltaT * fquat.w;
invquatvec(orientation, conjqm2, mbody);
transpose_dot(ex_space, ey_space, ez_space, mbody, angmom2);
angmom2.x *= (Scalar)0.5f;
angmom2.y *= (Scalar)0.5f;
angmom2.z *= (Scalar)0.5f;
angmom2.w = (Scalar)0.0f;
// update angular velocity
Scalar4 angvel2;
computeAngularVelocity(angmom2, moment_inertia, ex_space, ey_space, ez_space, angvel2);
// write out results
rdata_vel[idx_body] = vel2;
rdata_angmom[idx_body] = angmom2;
rdata_angvel[idx_body] = angvel2;
rdata_conjqm[idx_body] = conjqm2;
}
/*! \param rigid_data Rigid body data to step forward 1/2 step
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param d_net_force Particle net forces
\param d_net_virial Particle net virial
\param box Box dimensions for periodic boundary condition handling
\param nvt_rdata Thermostat data
\param deltaT Amount of real time to step forward in one time step
*/
cudaError_t gpu_nvt_rigid_step_two( const gpu_rigid_data_arrays& rigid_data,
unsigned int *d_group_members,
unsigned int group_size,
Scalar4 *d_net_force,
Scalar *d_net_virial,
const BoxDim& box,
const gpu_nvt_rigid_data& nvt_rdata,
Scalar deltaT)
{
unsigned int n_bodies = rigid_data.n_bodies;
unsigned int n_group_bodies = rigid_data.n_group_bodies;
unsigned int block_size = 64;
unsigned int n_blocks = n_group_bodies / block_size + 1;
dim3 body_grid(n_blocks, 1, 1);
dim3 body_threads(block_size, 1, 1);
gpu_nvt_rigid_step_two_body_kernel<<< body_grid, body_threads >>>(rigid_data.vel,
rigid_data.angmom,
rigid_data.angvel,
rigid_data.orientation,
rigid_data.conjqm,
rigid_data.body_mass,
rigid_data.moment_inertia,
rigid_data.force,
rigid_data.torque,
rigid_data.body_indices,
n_group_bodies,
n_bodies,
nvt_rdata.scale_t,
nvt_rdata.scale_r,
box,
deltaT);
return cudaSuccess;
}
#pragma mark RIGID_KINETIC_ENERGY_REDUCTION
//! Shared memory for kinetic energy reduction
extern __shared__ Scalar nvt_rigid_sdata[];
/*! Summing the kinetic energy of rigid bodies
\param nvt_rdata Thermostat data for rigid bodies
*/
extern "C" __global__ void gpu_nvt_rigid_reduce_ksum_kernel(gpu_nvt_rigid_data nvt_rdata)
{
int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
Scalar* body_ke_t = nvt_rigid_sdata;
Scalar* body_ke_r = &nvt_rigid_sdata[blockDim.x];
Scalar Ksum_t = (Scalar)0.0f;
Scalar Ksum_r = (Scalar)0.0f;
// sum up the values in the partial sum via a sliding window
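// (the host launcher below uses a single block, so this loop walks the whole
// partial sum arrays in chunks of blockDim.x: each chunk is staged in shared
// memory, tree-reduced below, and the chunk total accumulated into Ksum_t/Ksum_r)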
for (int start = 0; start < nvt_rdata.n_bodies; start += blockDim.x)
{
if (start + threadIdx.x < nvt_rdata.n_bodies)
{
body_ke_t[threadIdx.x] = nvt_rdata.partial_Ksum_t[start + threadIdx.x];
body_ke_r[threadIdx.x] = nvt_rdata.partial_Ksum_r[start + threadIdx.x];
}
else
{
body_ke_t[threadIdx.x] = (Scalar)0.0f;
body_ke_r[threadIdx.x] = (Scalar)0.0f;
}
__syncthreads();
// reduce the sum within a block
int offset = blockDim.x >> 1;
while (offset > 0)
{
if (threadIdx.x < offset)
{
body_ke_t[threadIdx.x] += body_ke_t[threadIdx.x + offset];
body_ke_r[threadIdx.x] += body_ke_r[threadIdx.x + offset];
}
offset >>= 1;
__syncthreads();
}
// everybody sums up Ksum
Ksum_t += body_ke_t[0];
Ksum_r += body_ke_r[0];
}
__syncthreads();
if (global_idx == 0)
{
*nvt_rdata.Ksum_t = Ksum_t;
*nvt_rdata.Ksum_r = Ksum_r;
}
}
/*!
\param nvt_rdata Thermostat data for rigid bodies
*/
cudaError_t gpu_nvt_rigid_reduce_ksum(const gpu_nvt_rigid_data& nvt_rdata)
{
// setup the grid to run the kernel
int block_size = 128;
dim3 grid( 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel: double the block size to accommodate Ksum_t and Ksum_r
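// the dynamic shared memory holds two arrays of block_size Scalars each:
// body_ke_t in the first half and body_ke_r in the second (see the kernel above)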
gpu_nvt_rigid_reduce_ksum_kernel<<< grid, threads, 2 * block_size * sizeof(Scalar) >>>(nvt_rdata);
return cudaSuccess;
}
|
32d2d33662ea5c2b8673d6fe78e375f9dbefc611.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pytorch_cuda_helper.hpp"
#include "sigmoid_focal_loss_cuda_kernel.cuh"
#include "softmax_focal_loss_cuda_kernel.cuh"
void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target,
Tensor weight, Tensor output,
const float gamma,
const float alpha) {
int output_size = output.numel();
int num_classes = input.size(1);
AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
"target label should be smaller than or equal to num classes");
#ifdef __NVCC__
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "sigmoid_focal_loss_forward_cuda_kernel", [&] {
hipLaunchKernelGGL(( sigmoid_focal_loss_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target,
Tensor weight,
Tensor grad_input,
const float gamma,
const float alpha) {
int output_size = grad_input.numel();
int num_classes = input.size(1);
#ifdef __NVCC__
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_input.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(grad_input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "sigmoid_focal_loss_backward_cuda_kernel", [&] {
hipLaunchKernelGGL(( sigmoid_focal_loss_backward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target,
Tensor weight, Tensor output,
const float gamma,
const float alpha) {
int output_size = output.numel();
int num_classes = softmax.size(1);
AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
"target label should be smaller than or equal to num classes");
#ifdef __NVCC__
at::hip::HIPGuardMasqueradingAsCUDA device_guard(softmax.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(softmax.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
softmax.scalar_type(), "softmax_focal_loss_forward_cuda_kernel", [&] {
hipLaunchKernelGGL(( softmax_focal_loss_forward_cuda_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target,
Tensor weight, Tensor buff,
Tensor grad_input,
const float gamma,
const float alpha) {
int num_classes = softmax.size(1);
int output_size = buff.numel();
#ifdef __NVCC__
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_input.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(grad_input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel",
[&] {
hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda1_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
buff.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
output_size = grad_input.numel();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel",
[&] {
hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda2_kernel<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), buff.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>(), num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
| 32d2d33662ea5c2b8673d6fe78e375f9dbefc611.cu | #include "pytorch_cuda_helper.hpp"
#include "sigmoid_focal_loss_cuda_kernel.cuh"
#include "softmax_focal_loss_cuda_kernel.cuh"
void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target,
Tensor weight, Tensor output,
const float gamma,
const float alpha) {
int output_size = output.numel();
int num_classes = input.size(1);
AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
"target label should be smaller than or equal to num classes");
#ifdef __NVCC__
at::cuda::CUDAGuard device_guard(input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "sigmoid_focal_loss_forward_cuda_kernel", [&] {
sigmoid_focal_loss_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target,
Tensor weight,
Tensor grad_input,
const float gamma,
const float alpha) {
int output_size = grad_input.numel();
int num_classes = input.size(1);
#ifdef __NVCC__
at::cuda::CUDAGuard device_guard(grad_input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(grad_input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "sigmoid_focal_loss_backward_cuda_kernel", [&] {
sigmoid_focal_loss_backward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target,
Tensor weight, Tensor output,
const float gamma,
const float alpha) {
int output_size = output.numel();
int num_classes = softmax.size(1);
AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
"target label should be smaller than or equal to num classes");
#ifdef __NVCC__
at::cuda::CUDAGuard device_guard(softmax.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(softmax.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
softmax.scalar_type(), "softmax_focal_loss_forward_cuda_kernel", [&] {
softmax_focal_loss_forward_cuda_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target,
Tensor weight, Tensor buff,
Tensor grad_input,
const float gamma,
const float alpha) {
int num_classes = softmax.size(1);
int output_size = buff.numel();
#ifdef __NVCC__
at::cuda::CUDAGuard device_guard(grad_input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
#endif
#ifdef __HIP_PLATFORM_HCC__
// at::cuda::HIPGuard device_guard(grad_input.device());
hipStream_t stream = at::cuda::getCurrentHIPStream();
#endif
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel",
[&] {
softmax_focal_loss_backward_cuda1_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(),
buff.data_ptr<scalar_t>(), gamma, alpha, num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
output_size = grad_input.numel();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel",
[&] {
softmax_focal_loss_backward_cuda2_kernel<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, softmax.data_ptr<scalar_t>(),
target.data_ptr<int64_t>(), buff.data_ptr<scalar_t>(),
grad_input.data_ptr<scalar_t>(), num_classes);
});
#ifdef __NVCC__
AT_CUDA_CHECK(cudaGetLastError());
#endif
#ifdef __HIP_PLATFORM_HCC__
AT_CUDA_CHECK(hipGetLastError());
#endif
}
|
7d45b0e1e4d149f4789ad4e99cc68e4f44dc1cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/group_norm.h>
#include <type_traits>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/block_reduce.cuh>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kReduceTileSize = 32;
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>;
using WelfordOp =
WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>;
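// Each block reduces one row of N elements (the caller passes one (sample, group)
// slice of D * HxW values per block): Welford's algorithm accumulates count, mean
// and M2 in a single pass, and thread 0 stores mean and rstd = 1/sqrt(var + eps).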
const int64_t i = blockIdx.x;
WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false};
WelfordType val(0, 0, 0, 0);
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index);
}
if (blockDim.x <= C10_WARP_SIZE) {
val = cuda_utils::WarpReduce(val, welford_op);
} else {
// There will be a warning if we declare a __shared__ WelfordType array.
// https://github.com/pytorch/pytorch/pull/13967
__shared__ typename std::aligned_storage<
sizeof(WelfordType),
alignof(WelfordType)>::type val_shared[C10_WARP_SIZE];
WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared);
val = cuda_utils::BlockReduce(
val,
welford_op,
/*identity_element=*/WelfordType(0, 0, 0, 0),
val_shared_ptr);
}
if (threadIdx.x == 0) {
T_ACC m1;
T_ACC m2;
thrust::tie(m2, m1) = welford_op.project(val);
mean[i] = m1;
rstd[i] = c10::hip::compat::rsqrt(m2 + static_cast<T_ACC>(eps));
}
}
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
acc_type<T, true>* a,
acc_type<T, true>* b) {
using T_ACC = acc_type<T, true>;
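// Folds y = (x - mean[ng]) * rstd[ng] * gamma[c] + beta[c] into y = a * x + b with
// a = rstd * gamma and b = beta - a * mean (gamma/beta treated as 1/0 when absent),
// so the elementwise kernel launched afterwards is a single fused multiply-add.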
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C) {
const int64_t ng = index / (C / group);
const int64_t c = index % C;
const T_ACC scale = (gamma == nullptr)
? static_cast<T_ACC>(rstd[ng])
: static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]);
a[index] = scale;
b[index] = -scale * static_cast<T_ACC>(mean[ng]) +
((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c]));
}
}
template <typename T>
__global__ void Compute1dBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += dY[index] * X[index] * gamma_v;
sum2 += dY[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]);
const T_ACC x_acc = static_cast<T_ACC>(X[nc]);
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc;
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice for the 1st and 2nd 16 rows
// of each chunk of 32 contiguous rows.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]);
const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]);
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc;
if (n2 < N) {
const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]);
const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]);
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc;
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t HxW,
const T* dY,
const T* X,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
const int64_t nc = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) {
const int64_t index = nc * HxW + hw;
sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(dY[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
ds[nc] = sum1;
db[nc] = sum2;
}
}
template <typename T>
__global__ void ComputeBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t HxW,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
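// Computes the per-(sample, group) coefficients of the input gradient
// dX = c1 * dY + c2 * X + c3 (c1 = rstd * gamma is formed elementwise later);
// sum1/sum2 below are the gamma-weighted reductions of ds = sum(dY * X) and
// db = sum(dY) over the channels of the group.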
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += ds[index] * gamma_v;
sum2 += db[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc];
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice for the 1st and 2nd 16 rows
// of each chunk of 32 contiguous rows.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1];
if (n2 < N) {
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2];
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
template <typename T>
void GroupNorm1dForward(
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t group,
Tensor& Y) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
if (gamma.defined() && beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.add_owned_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) +
static_cast<T_ACC>(beta);
});
} else if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
} else if (beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) +
static_cast<T_ACC>(beta);
});
} else {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N * G, D}))
.add_owned_input(X.view({N * G, D}))
.add_owned_input(mean.view({N * G, 1}))
.add_owned_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
}
AT_CUDA_CHECK(hipGetLastError());
}
template <typename T>
void GroupNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
T eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
using T_ACC = acc_type<T, true>;
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
TORCH_CHECK(!beta.defined() || beta.numel() == C);
if (N == 0) {
return;
}
const int64_t G = group;
const int64_t D = C / G;
const T* X_data = X.data_ptr<T>();
T* mean_data = mean.data_ptr<T>();
T* rstd_data = rstd.data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads
? at::cuda::warp_size()
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>), dim3(N * G), dim3(num_threads), 0, cuda_stream,
D * HxW, eps, X_data, mean_data, rstd_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
if (HxW == 1) {
GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y);
} else if (!gamma.defined() && !beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N * G, D * HxW}))
.add_owned_input(X.view({N * G, D * HxW}))
.add_owned_input(mean.view({N * G, 1}))
.add_owned_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
} else {
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor a = at::empty({N, C}, X.options().dtype(kAccType));
Tensor b = at::empty({N, C}, X.options().dtype(kAccType));
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T_ACC* a_data = a.data_ptr<T_ACC>();
T_ACC* b_data = b.data_ptr<T_ACC>();
// TODO: Since there are some issues in gpu_kernel_multiple_outputs, we are
// using a manual kernel here. Switch to gpu_kernel_multiple_outputs once
// the issue is fixed.
const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(Y.view({N * C, HxW}))
.add_owned_input(X.view({N * C, HxW}))
.add_owned_input(a.view({N * C, 1}))
.add_owned_input(b.view({N * C, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T {
return a * static_cast<T_ACC>(x) + b;
});
}
AT_CUDA_CHECK(hipGetLastError());
}
void GroupNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
double eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormKernelImpl",
[&]() {
GroupNormKernelImplInternal<scalar_t>(
X,
gamma,
beta,
N,
C,
HxW,
group,
static_cast<scalar_t>(eps),
Y,
mean,
rstd);
});
}
template <typename T>
void GroupNorm1dBackward(
const Tensor dY,
const Tensor X,
const Tensor mean,
const Tensor rstd,
const Tensor gamma,
int64_t N,
int64_t C,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (dX.defined()) {
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? at::cuda::warp_size()
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( Compute1dBackwardFusedParamsCUDAKernel<T>)
, dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
c2_data,
c3_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N, G, D}))
.add_owned_input(dY.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.add_owned_input(c2.view({N, G, 1}))
.add_owned_input(c3.view({N, G, 1}))
.build();
gpu_kernel(
iter,
[] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 =
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D}))
.add_owned_input(dY.view({N * G, D}))
.add_owned_input(X.view({N * G, D}))
.add_owned_input(rstd.view({N * G, 1}))
.add_owned_input(c2.view({N * G, 1}))
.add_owned_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 = static_cast<T_ACC>(rstd);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBeta1dBackwardCUDAKernel2<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
template <typename T>
void GroupNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
TORCH_CHECK(dY.numel() == N * C * HxW);
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(mean.numel() == N * G);
TORCH_CHECK(rstd.numel() == N * G);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
hipStream_t cuda_stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (N == 0) {
if (dgamma.defined()) {
dgamma.fill_(T(0));
}
if (dbeta.defined()) {
dbeta.fill_(T(0));
}
return;
}
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor ds = at::empty({N, C}, X.options().dtype(kAccType));
Tensor db = at::empty({N, C}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.data_ptr<T_ACC>();
T_ACC* db_data = db.data_ptr<T_ACC>();
if (HxW == 1) {
GroupNorm1dBackward<T>(
dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta);
return;
}
int warp_size = at::cuda::warp_size();
int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads
? warp_size
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>), dim3(N * C), dim3(num_threads), 0, cuda_stream,
HxW, dY_data, X_data, ds_data, db_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
if (dX.defined()) {
Tensor c1 = at::empty({0}, X.options().dtype(kAccType));
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.add_output(c1)
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC {
return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
}
num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? warp_size
: cuda_utils::kCUDABlockReduceNumThreads;
hipLaunchKernelGGL(( ComputeBackwardFusedParamsCUDAKernel<T>)
, dim3(dim3(N, G)), dim3(num_threads), 0, cuda_stream,
C,
HxW,
G,
mean_data,
rstd_data,
gamma_data,
ds_data,
db_data,
c2_data,
c3_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D, HxW}))
.add_owned_input(dY.view({N * G, D, HxW}))
.add_owned_input(X.view({N * G, D, HxW}))
.add_owned_input(c1.view({N * G, D, 1}))
.add_owned_input(c2.view({N * G, 1, 1}))
.add_owned_input(c3.view({N * G, 1, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D * HxW}))
.add_owned_input(dY.view({N * G, D * HxW}))
.add_owned_input(X.view({N * G, D * HxW}))
.add_owned_input(rstd.view({N * G, 1}))
.add_owned_input(c2.view({N * G, 1}))
.add_owned_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
// For small batch size, do colwise reduce directly.
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel1<T>), dim3(B), dim3(kCUDANumThreads), 0, cuda_stream,
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for colwise reduction here is to accumulate each 32 cols
// to a 32 * 32 tile and write the tile to shared memory. Then do warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
hipLaunchKernelGGL(( GammaBetaBackwardCUDAKernel2<T>)
, dim3(B), dim3(dim3(kThreadX, kThreadY)), 0, cuda_stream,
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
}
}
void GroupNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormBackwardKernelImpl",
[&]() {
GroupNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta);
});
}
} // namespace
REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl);
REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl);
} // namespace native
} // namespace at
| 7d45b0e1e4d149f4789ad4e99cc68e4f44dc1cc2.cu | #include <ATen/native/group_norm.h>
#include <type_traits>
#include <thrust/tuple.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/native/TensorIterator.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/block_reduce.cuh>
namespace at {
namespace native {
namespace {
constexpr int kCUDANumThreads = 256;
constexpr int kReduceTileSize = 32;
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
int64_t N,
T eps,
const T* X,
T* mean,
T* rstd) {
using T_ACC = acc_type<T, true>;
using WelfordType = WelfordData<T_ACC, int64_t, T_ACC>;
using WelfordOp =
WelfordOps<T_ACC, T_ACC, int64_t, T_ACC, thrust::pair<T_ACC, T_ACC>>;
const int64_t i = blockIdx.x;
WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false};
WelfordType val(0, 0, 0, 0);
for (int64_t j = threadIdx.x; j < N; j += blockDim.x) {
const int64_t index = i * N + j;
val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index);
}
if (blockDim.x <= C10_WARP_SIZE) {
val = cuda_utils::WarpReduce(val, welford_op);
} else {
// There will be a warning if we declare a __shared__ WelfordType array.
// https://github.com/pytorch/pytorch/pull/13967
__shared__ typename std::aligned_storage<
sizeof(WelfordType),
alignof(WelfordType)>::type val_shared[C10_WARP_SIZE];
WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared);
val = cuda_utils::BlockReduce(
val,
welford_op,
/*identity_element=*/WelfordType(0, 0, 0, 0),
val_shared_ptr);
}
if (threadIdx.x == 0) {
T_ACC m1;
T_ACC m2;
thrust::tie(m2, m1) = welford_op.project(val);
mean[i] = m1;
rstd[i] = c10::cuda::compat::rsqrt(m2 + static_cast<T_ACC>(eps));
}
}
template <typename T>
__global__ void ComputeFusedParamsCUDAKernel(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const T* beta,
acc_type<T, true>* a,
acc_type<T, true>* b) {
using T_ACC = acc_type<T, true>;
const int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C) {
const int64_t ng = index / (C / group);
const int64_t c = index % C;
const T_ACC scale = (gamma == nullptr)
? static_cast<T_ACC>(rstd[ng])
: static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(gamma[c]);
a[index] = scale;
b[index] = -scale * static_cast<T_ACC>(mean[ng]) +
((beta == nullptr) ? 0 : static_cast<T_ACC>(beta[c]));
}
}
template <typename T>
__global__ void Compute1dBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += dY[index] * X[index] * gamma_v;
sum2 += dY[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
const T_ACC dy_acc = static_cast<T_ACC>(dY[nc]);
const T_ACC x_acc = static_cast<T_ACC>(X[nc]);
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((dy_acc * x_acc - dy_acc * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : dy_acc;
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
template <typename T>
__global__ void GammaBeta1dBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice per loop iteration: once
// for the 1st and once for the 2nd group of 16 rows in each 32-row chunk.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
const T_ACC dy1_acc = static_cast<T_ACC>(dY[nc1]);
const T_ACC x1_acc = static_cast<T_ACC>(X[nc1]);
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((dy1_acc * x1_acc - dy1_acc * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : dy1_acc;
if (n2 < N) {
const T_ACC dy2_acc = static_cast<T_ACC>(dY[nc2]);
const T_ACC x2_acc = static_cast<T_ACC>(X[nc2]);
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((dy2_acc * x2_acc - dy2_acc * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : dy2_acc;
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
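// Reduces ds = sum(dY * X) and db = sum(dY) over the spatial dimension HxW,
// one block per (sample, channel) pair.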
template <typename T>
__global__ void ComputeInternalGradientsCUDAKernel(
int64_t HxW,
const T* dY,
const T* X,
acc_type<T, true>* ds,
acc_type<T, true>* db) {
using T_ACC = acc_type<T, true>;
const int64_t nc = blockIdx.x;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t hw = threadIdx.x; hw < HxW; hw += blockDim.x) {
const int64_t index = nc * HxW + hw;
sum1 += static_cast<T_ACC>(dY[index]) * static_cast<T_ACC>(X[index]);
sum2 += static_cast<T_ACC>(dY[index]);
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
ds[nc] = sum1;
db[nc] = sum2;
}
}
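// Computes the fused backward coefficients c2 and c3 for each (sample, group)
// pair from the per-channel sums ds and db, using a block-wide reduction.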
template <typename T>
__global__ void ComputeBackwardFusedParamsCUDAKernel(
int64_t C,
int64_t HxW,
int64_t group,
const T* mean,
const T* rstd,
const T* gamma,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
acc_type<T, true>* c2,
acc_type<T, true>* c3) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const int64_t n = blockIdx.x;
const int64_t g = blockIdx.y;
const int64_t ng = n * G + g;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t i = threadIdx.x; i < D; i += blockDim.x) {
const int64_t index = ng * D + i;
const int64_t c = g * D + i;
const T_ACC gamma_v =
gamma == nullptr ? T_ACC(1) : static_cast<T_ACC>(gamma[c]);
sum1 += ds[index] * gamma_v;
sum2 += db[index] * gamma_v;
}
if (blockDim.x <= C10_WARP_SIZE) {
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
} else {
__shared__ T_ACC ds_shared[C10_WARP_SIZE];
__shared__ T_ACC db_shared[C10_WARP_SIZE];
sum1 = cuda_utils::BlockReduceSum<T_ACC>(sum1, ds_shared);
sum2 = cuda_utils::BlockReduceSum<T_ACC>(sum2, db_shared);
}
if (threadIdx.x == 0) {
const T_ACC s = T_ACC(1) / static_cast<T_ACC>(D * HxW);
const T_ACC x = (sum2 * static_cast<T_ACC>(mean[ng]) - sum1) *
static_cast<T_ACC>(rstd[ng]) * static_cast<T_ACC>(rstd[ng]) *
static_cast<T_ACC>(rstd[ng]) * s;
c2[ng] = x;
c3[ng] = -x * static_cast<T_ACC>(mean[ng]) -
sum2 * static_cast<T_ACC>(rstd[ng]) * s;
}
}
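// Gamma/beta backward used for small batch sizes: one thread per channel loops
// over the batch dimension and accumulates dgamma and dbeta directly.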
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel1(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
T_ACC sum1 = 0;
T_ACC sum2 = 0;
for (int64_t n = 0; n < N; ++n) {
const int64_t nc = n * C + c;
const int64_t ng = n * G + c / D;
sum1 += (dgamma == nullptr)
? T_ACC(0)
: ((ds[nc] - db[nc] * static_cast<T_ACC>(mean[ng])) *
static_cast<T_ACC>(rstd[ng]));
sum2 += (dbeta == nullptr) ? T_ACC(0) : db[nc];
}
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
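// Gamma/beta backward for larger batch sizes: partial sums are staged in a
// 32 x 32 shared-memory tile and finished with warp reductions over the tile.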
template <typename T>
__global__ void GammaBetaBackwardCUDAKernel2(
int64_t N,
int64_t C,
int64_t group,
const T* mean,
const T* rstd,
const acc_type<T, true>* ds,
const acc_type<T, true>* db,
T* dgamma,
T* dbeta) {
using T_ACC = acc_type<T, true>;
__shared__ T_ACC g_shared[kReduceTileSize][kReduceTileSize + 1];
__shared__ T_ACC b_shared[kReduceTileSize][kReduceTileSize + 1];
const int64_t c = blockIdx.x * blockDim.x + threadIdx.x;
T_ACC dg_sum1 = 0;
T_ACC dg_sum2 = 0;
T_ACC db_sum1 = 0;
T_ACC db_sum2 = 0;
if (c < C) {
const int64_t G = group;
const int64_t D = C / G;
// Accumulate each 32 cols into a 32 * 32 tile.
// Since the blockDim is (32, 16), accumulate twice per loop iteration: once
// for the 1st and once for the 2nd group of 16 rows in each 32-row chunk.
for (int64_t n = threadIdx.y; n < N; n += blockDim.y * 2) {
const int64_t n1 = n;
const int64_t n2 = n + blockDim.y;
const int64_t nc1 = n1 * C + c;
const int64_t nc2 = n2 * C + c;
const int64_t ng1 = n1 * G + c / D;
const int64_t ng2 = n2 * G + c / D;
dg_sum1 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc1] - db[nc1] * static_cast<T_ACC>(mean[ng1])) *
static_cast<T_ACC>(rstd[ng1]));
db_sum1 += dbeta == nullptr ? T_ACC(0) : db[nc1];
if (n2 < N) {
dg_sum2 += dgamma == nullptr
? T_ACC(0)
: ((ds[nc2] - db[nc2] * static_cast<T_ACC>(mean[ng2])) *
static_cast<T_ACC>(rstd[ng2]));
db_sum2 += dbeta == nullptr ? T_ACC(0) : db[nc2];
}
}
}
// Write accumulated tile to shared memory.
g_shared[threadIdx.y][threadIdx.x] = dg_sum1;
g_shared[threadIdx.y + blockDim.y][threadIdx.x] = dg_sum2;
b_shared[threadIdx.y][threadIdx.x] = db_sum1;
b_shared[threadIdx.y + blockDim.y][threadIdx.x] = db_sum2;
__syncthreads();
// Do warp reduce for the 1st 16 cols in the tile.
T_ACC sum1 = g_shared[threadIdx.x][threadIdx.y];
T_ACC sum2 = b_shared[threadIdx.x][threadIdx.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
// Do warp reduce for the 2nd 16 cols in the tile.
sum1 = g_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum2 = b_shared[threadIdx.x][threadIdx.y + blockDim.y];
sum1 = cuda_utils::WarpReduceSum<T_ACC>(sum1);
sum2 = cuda_utils::WarpReduceSum<T_ACC>(sum2);
if (threadIdx.x == 0) {
const int64_t c = blockIdx.x * blockDim.x + threadIdx.y + blockDim.y;
if (c < C) {
if (dgamma != nullptr) {
dgamma[c] = sum1;
}
if (dbeta != nullptr) {
dbeta[c] = sum2;
}
}
}
}
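// Forward pass for the HxW == 1 case: applies the per-group normalization and
// the optional affine transform with element-wise TensorIterator kernels.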
template <typename T>
void GroupNorm1dForward(
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t group,
Tensor& Y) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
if (gamma.defined() && beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.add_owned_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma) +
static_cast<T_ACC>(beta);
});
} else if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T gamma) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
} else if (beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(mean.view({N, G, 1}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(beta.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd, T beta) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd) +
static_cast<T_ACC>(beta);
});
} else {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N * G, D}))
.add_owned_input(X.view({N * G, D}))
.add_owned_input(mean.view({N * G, 1}))
.add_owned_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
}
AT_CUDA_CHECK(cudaGetLastError());
}
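// Forward kernel driver: computes per-group mean and rstd with
// RowwiseMomentsCUDAKernel, then normalizes X, fusing gamma and beta into
// per-(sample, channel) scale and shift coefficients when they are defined.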
template <typename T>
void GroupNormKernelImplInternal(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
T eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
using T_ACC = acc_type<T, true>;
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
TORCH_CHECK(!beta.defined() || beta.numel() == C);
if (N == 0) {
return;
}
const int64_t G = group;
const int64_t D = C / G;
const T* X_data = X.data_ptr<T>();
T* mean_data = mean.data_ptr<T>();
T* rstd_data = rstd.data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
const int64_t num_threads = D * HxW < cuda_utils::kCUDABlockReduceNumThreads
? at::cuda::warp_size()
: cuda_utils::kCUDABlockReduceNumThreads;
RowwiseMomentsCUDAKernel<T><<<N * G, num_threads, 0, cuda_stream>>>(
D * HxW, eps, X_data, mean_data, rstd_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (HxW == 1) {
GroupNorm1dForward<T>(X, mean, rstd, gamma, beta, N, C, G, Y);
} else if (!gamma.defined() && !beta.defined()) {
auto iter = TensorIteratorConfig()
.resize_outputs(false)
.add_owned_output(Y.view({N * G, D * HxW}))
.add_owned_input(X.view({N * G, D * HxW}))
.add_owned_input(mean.view({N * G, 1}))
.add_owned_input(rstd.view({N * G, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T mean, T rstd) -> T {
return (static_cast<T_ACC>(x) - static_cast<T_ACC>(mean)) *
static_cast<T_ACC>(rstd);
});
} else {
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor a = at::empty({N, C}, X.options().dtype(kAccType));
Tensor b = at::empty({N, C}, X.options().dtype(kAccType));
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const T* beta_data = beta.defined() ? beta.data_ptr<T>() : nullptr;
T_ACC* a_data = a.data_ptr<T_ACC>();
T_ACC* b_data = b.data_ptr<T_ACC>();
// TODO: Since there are some issues in gpu_kernel_multiple_outputs, we are
// using a manual kernel here. Switch to gpu_kernel_multiple_outputs once
// the issue is fixed.
const int64_t B = (N * C + kCUDANumThreads - 1) / kCUDANumThreads;
ComputeFusedParamsCUDAKernel<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N, C, G, mean_data, rstd_data, gamma_data, beta_data, a_data, b_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(Y.view({N * C, HxW}))
.add_owned_input(X.view({N * C, HxW}))
.add_owned_input(a.view({N * C, 1}))
.add_owned_input(b.view({N * C, 1}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T x, T_ACC a, T_ACC b) -> T {
return a * static_cast<T_ACC>(x) + b;
});
}
AT_CUDA_CHECK(cudaGetLastError());
}
void GroupNormKernelImpl(
const Tensor& X,
const Tensor& gamma,
const Tensor& beta,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
double eps,
Tensor& Y,
Tensor& mean,
Tensor& rstd) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormKernelImpl",
[&]() {
GroupNormKernelImplInternal<scalar_t>(
X,
gamma,
beta,
N,
C,
HxW,
group,
static_cast<scalar_t>(eps),
Y,
mean,
rstd);
});
}
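// Backward pass for the HxW == 1 case: computes the fused coefficients c2 and
// c3 per (sample, group), then dX, dgamma and dbeta.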
template <typename T>
void GroupNorm1dBackward(
const Tensor dY,
const Tensor X,
const Tensor mean,
const Tensor rstd,
const Tensor gamma,
int64_t N,
int64_t C,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (dX.defined()) {
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
const int64_t num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? at::cuda::warp_size()
: cuda_utils::kCUDABlockReduceNumThreads;
Compute1dBackwardFusedParamsCUDAKernel<T>
<<<dim3(N, G), num_threads, 0, cuda_stream>>>(
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
c2_data,
c3_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N, G, D}))
.add_owned_input(dY.view({N, G, D}))
.add_owned_input(X.view({N, G, D}))
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.add_owned_input(c2.view({N, G, 1}))
.add_owned_input(c3.view({N, G, 1}))
.build();
gpu_kernel(
iter,
[] GPU_LAMBDA(T dy, T x, T rstd, T gamma, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 =
static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D}))
.add_owned_input(dY.view({N * G, D}))
.add_owned_input(X.view({N * G, D}))
.add_owned_input(rstd.view({N * G, 1}))
.add_owned_input(c2.view({N * G, 1}))
.add_owned_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T rstd, T_ACC c2, T_ACC c3) -> T {
const T_ACC c1 = static_cast<T_ACC>(rstd);
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBeta1dBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for the colwise reduction here is to accumulate each 32 cols
// into a 32 * 32 tile and write the tile to shared memory, then do a warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
GammaBeta1dBackwardCUDAKernel2<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
N,
C,
G,
dY_data,
X_data,
mean_data,
rstd_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
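// Backward kernel driver: reduces ds and db over the spatial dimension, builds
// the fused coefficients, and computes dX, dgamma and dbeta (delegating to
// GroupNorm1dBackward when HxW == 1).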
template <typename T>
void GroupNormBackwardKernelImplInternal(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
using T_ACC = acc_type<T, true>;
const int64_t G = group;
const int64_t D = C / G;
TORCH_CHECK(dY.numel() == N * C * HxW);
TORCH_CHECK(X.numel() == N * C * HxW);
TORCH_CHECK(mean.numel() == N * G);
TORCH_CHECK(rstd.numel() == N * G);
TORCH_CHECK(!gamma.defined() || gamma.numel() == C);
cudaStream_t cuda_stream = at::cuda::getCurrentCUDAStream();
if (N == 0) {
if (dgamma.defined()) {
dgamma.fill_(T(0));
}
if (dbeta.defined()) {
dbeta.fill_(T(0));
}
return;
}
const T* dY_data = dY.data_ptr<T>();
const T* X_data = X.data_ptr<T>();
const T* mean_data = mean.data_ptr<T>();
const T* rstd_data = rstd.data_ptr<T>();
const T* gamma_data = gamma.defined() ? gamma.data_ptr<T>() : nullptr;
const auto kAccType =
(X.scalar_type() == kHalf || X.scalar_type() == kBFloat16)
? kFloat
: X.scalar_type();
Tensor ds = at::empty({N, C}, X.options().dtype(kAccType));
Tensor db = at::empty({N, C}, X.options().dtype(kAccType));
T_ACC* ds_data = ds.data_ptr<T_ACC>();
T_ACC* db_data = db.data_ptr<T_ACC>();
if (HxW == 1) {
GroupNorm1dBackward<T>(
dY, X, mean, rstd, gamma, N, C, G, dX, dgamma, dbeta);
return;
}
int warp_size = at::cuda::warp_size();
int64_t num_threads = HxW < cuda_utils::kCUDABlockReduceNumThreads
? warp_size
: cuda_utils::kCUDABlockReduceNumThreads;
ComputeInternalGradientsCUDAKernel<T><<<N * C, num_threads, 0, cuda_stream>>>(
HxW, dY_data, X_data, ds_data, db_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (dX.defined()) {
Tensor c1 = at::empty({0}, X.options().dtype(kAccType));
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
T_ACC* c2_data = c2.data_ptr<T_ACC>();
T_ACC* c3_data = c3.data_ptr<T_ACC>();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.add_output(c1)
.add_owned_input(rstd.view({N, G, 1}))
.add_owned_input(gamma.view({1, G, D}))
.build();
gpu_kernel(iter, [] GPU_LAMBDA(T rstd, T gamma) -> T_ACC {
return static_cast<T_ACC>(rstd) * static_cast<T_ACC>(gamma);
});
}
num_threads = (C / G) < cuda_utils::kCUDABlockReduceNumThreads
? warp_size
: cuda_utils::kCUDABlockReduceNumThreads;
ComputeBackwardFusedParamsCUDAKernel<T>
<<<dim3(N, G), num_threads, 0, cuda_stream>>>(
C,
HxW,
G,
mean_data,
rstd_data,
gamma_data,
ds_data,
db_data,
c2_data,
c3_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
if (gamma.defined()) {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D, HxW}))
.add_owned_input(dY.view({N * G, D, HxW}))
.add_owned_input(X.view({N * G, D, HxW}))
.add_owned_input(c1.view({N * G, D, 1}))
.add_owned_input(c2.view({N * G, 1, 1}))
.add_owned_input(c3.view({N * G, 1, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
} else {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(std::is_same<T, T_ACC>::value)
.resize_outputs(false)
.add_owned_output(dX.view({N * G, D * HxW}))
.add_owned_input(dY.view({N * G, D * HxW}))
.add_owned_input(X.view({N * G, D * HxW}))
.add_owned_input(rstd.view({N * G, 1}))
.add_owned_input(c2.view({N * G, 1}))
.add_owned_input(c3.view({N * G, 1}))
.build();
gpu_kernel(
iter, [] GPU_LAMBDA(T dy, T x, T_ACC c1, T_ACC c2, T_ACC c3) -> T {
return c1 * static_cast<T_ACC>(dy) + c2 * static_cast<T_ACC>(x) +
c3;
});
}
}
if (dgamma.defined() || dbeta.defined()) {
T* dgamma_data = dgamma.defined() ? dgamma.data_ptr<T>() : nullptr;
T* dbeta_data = dbeta.defined() ? dbeta.data_ptr<T>() : nullptr;
if (N <= 128) {
// For small batch size, do colwise reduce directly.
const int64_t B = (C + kCUDANumThreads - 1) / kCUDANumThreads;
GammaBetaBackwardCUDAKernel1<T><<<B, kCUDANumThreads, 0, cuda_stream>>>(
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
} else {
const int64_t B = (C + kReduceTileSize - 1) / kReduceTileSize;
// The algorithm for the colwise reduction here is to accumulate each 32 cols
// into a 32 * 32 tile and write the tile to shared memory, then do a warp
// reduce for each col in the tile. So here the blockDim must be (32, 16).
constexpr int kThreadX = kReduceTileSize;
constexpr int kThreadY = kReduceTileSize / 2;
GammaBetaBackwardCUDAKernel2<T>
<<<B, dim3(kThreadX, kThreadY), 0, cuda_stream>>>(
N,
C,
G,
mean_data,
rstd_data,
ds_data,
db_data,
dgamma_data,
dbeta_data);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
}
}
void GroupNormBackwardKernelImpl(
const Tensor& dY,
const Tensor& X,
const Tensor& mean,
const Tensor& rstd,
const Tensor& gamma,
int64_t N,
int64_t C,
int64_t HxW,
int64_t group,
Tensor& dX,
Tensor& dgamma,
Tensor& dbeta) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
X.scalar_type(),
"GroupNormBackwardKernelImpl",
[&]() {
GroupNormBackwardKernelImplInternal<scalar_t>(
dY, X, mean, rstd, gamma, N, C, HxW, group, dX, dgamma, dbeta);
});
}
} // namespace
REGISTER_DISPATCH(GroupNormKernel, &GroupNormKernelImpl);
REGISTER_DISPATCH(GroupNormBackwardKernel, &GroupNormBackwardKernelImpl);
} // namespace native
} // namespace at
|
1fe97b4ac3f99a9e9c327889fe35b7c614ad0813.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <assert.h>
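// Direct N-body acceleration kernel: each thread owns one body and loops over
// all others in tiles of blockDim.x bodies staged in dynamically allocated
// shared memory; eps is a small softening term added to the squared distance.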
__global__ void GPUkernel(int N, double * x, double * y, double * z, double * m,
double * ax, double * ay, double * az, double G, double eps) {
int i, j, jb;
double axi, ayi, azi, xi, yi, zi, dx, dy, dz, R2, invR, invR3;
i = blockIdx.x * blockDim.x + threadIdx.x;
axi = 0;
ayi = 0;
azi = 0;
xi = x[i];
yi = y[i];
zi = z[i];
extern __shared__ double xj[];
double *yj = &xj[blockDim.x];
double *zj = &yj[blockDim.x];
double *mj = &zj[blockDim.x];
for ( jb=0; jb<N/blockDim.x; jb++ ) {
__syncthreads();
xj[threadIdx.x] = x[jb*blockDim.x+threadIdx.x];
yj[threadIdx.x] = y[jb*blockDim.x+threadIdx.x];
zj[threadIdx.x] = z[jb*blockDim.x+threadIdx.x];
mj[threadIdx.x] = m[jb*blockDim.x+threadIdx.x];
__syncthreads();
#pragma unroll
for( j=0; j<blockDim.x; j++ ) {
dx = xi - xj[j];
dy = yi - yj[j];
dz = zi - zj[j];
R2 = dx * dx + dy * dy + dz * dz + eps;
invR = rsqrtf(R2);
invR3 = invR * invR * invR * G * mj[j];
axi -= dx * invR3;
ayi -= dy * invR3;
azi -= dz * invR3;
}
}
ax[i] = axi;
ay[i] = ayi;
az[i] = azi;
}
int main() {
int N, threads;
struct timeval tic, toc;
double OPS, G, eps, time;
double *x, *y, *z, *m, *ax, *ay, *az;
FILE *file;
if ( (file = fopen("initial.dat","rb")) == NULL ) {
fprintf(stderr, "File open error.\n");
exit(EXIT_FAILURE);
}
assert( fread(&N,sizeof(int),1,file) == 1 );
OPS = 20. * N * N * 1e-9;
hipMallocManaged((void**)&x, N * sizeof(double));
hipMallocManaged((void**)&y, N * sizeof(double));
hipMallocManaged((void**)&z, N * sizeof(double));
hipMallocManaged((void**)&m, N * sizeof(double));
hipMallocManaged((void**)&ax, N * sizeof(double));
hipMallocManaged((void**)&ay, N * sizeof(double));
hipMallocManaged((void**)&az, N * sizeof(double));
assert( fread(x,sizeof(double),N,file) == N );
assert( fread(y,sizeof(double),N,file) == N );
assert( fread(z,sizeof(double),N,file) == N );
assert( fread(m,sizeof(double),N,file) == N );
threads = 500;
G = 6.6743e-11;
eps = 1e-8;
gettimeofday(&tic,NULL);
hipLaunchKernelGGL(( GPUkernel), dim3(N/threads),dim3(threads),threads*4*sizeof(double), 0, N, x, y, z, m, ax, ay, az, G, eps);
hipDeviceSynchronize();
gettimeofday(&toc,NULL);
time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("GPU : %e s : %lf GFlops\n",time, OPS/time);
file = fopen("direct.dat","wb");
fwrite(&N,sizeof(int),1,file);
fwrite(ax,sizeof(double),N,file);
fwrite(ay,sizeof(double),N,file);
fwrite(az,sizeof(double),N,file);
fclose(file);
hipFree(x);
hipFree(y);
hipFree(z);
hipFree(m);
hipFree(ax);
hipFree(ay);
hipFree(az);
return 0;
}
| 1fe97b4ac3f99a9e9c327889fe35b7c614ad0813.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <assert.h>
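// Direct N-body acceleration kernel: each thread owns one body and loops over
// all others in tiles of blockDim.x bodies staged in dynamically allocated
// shared memory; eps is a small softening term added to the squared distance.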
__global__ void GPUkernel(int N, double * x, double * y, double * z, double * m,
double * ax, double * ay, double * az, double G, double eps) {
int i, j, jb;
double axi, ayi, azi, xi, yi, zi, dx, dy, dz, R2, invR, invR3;
i = blockIdx.x * blockDim.x + threadIdx.x;
axi = 0;
ayi = 0;
azi = 0;
xi = x[i];
yi = y[i];
zi = z[i];
extern __shared__ double xj[];
double *yj = &xj[blockDim.x];
double *zj = &yj[blockDim.x];
double *mj = &zj[blockDim.x];
for ( jb=0; jb<N/blockDim.x; jb++ ) {
__syncthreads();
xj[threadIdx.x] = x[jb*blockDim.x+threadIdx.x];
yj[threadIdx.x] = y[jb*blockDim.x+threadIdx.x];
zj[threadIdx.x] = z[jb*blockDim.x+threadIdx.x];
mj[threadIdx.x] = m[jb*blockDim.x+threadIdx.x];
__syncthreads();
#pragma unroll
for( j=0; j<blockDim.x; j++ ) {
dx = xi - xj[j];
dy = yi - yj[j];
dz = zi - zj[j];
R2 = dx * dx + dy * dy + dz * dz + eps;
invR = rsqrtf(R2);
invR3 = invR * invR * invR * G * mj[j];
axi -= dx * invR3;
ayi -= dy * invR3;
azi -= dz * invR3;
}
}
ax[i] = axi;
ay[i] = ayi;
az[i] = azi;
}
int main() {
int N, threads;
struct timeval tic, toc;
double OPS, G, eps, time;
double *x, *y, *z, *m, *ax, *ay, *az;
FILE *file;
if ( (file = fopen("initial.dat","rb")) == NULL ) {
fprintf(stderr, "File open error.\n");
exit(EXIT_FAILURE);
}
assert( fread(&N,sizeof(int),1,file) == 1 );
OPS = 20. * N * N * 1e-9;
cudaMallocManaged((void**)&x, N * sizeof(double));
cudaMallocManaged((void**)&y, N * sizeof(double));
cudaMallocManaged((void**)&z, N * sizeof(double));
cudaMallocManaged((void**)&m, N * sizeof(double));
cudaMallocManaged((void**)&ax, N * sizeof(double));
cudaMallocManaged((void**)&ay, N * sizeof(double));
cudaMallocManaged((void**)&az, N * sizeof(double));
assert( fread(x,sizeof(double),N,file) == N );
assert( fread(y,sizeof(double),N,file) == N );
assert( fread(z,sizeof(double),N,file) == N );
assert( fread(m,sizeof(double),N,file) == N );
threads = 500;
G = 6.6743e-11;
eps = 1e-8;
gettimeofday(&tic,NULL);
GPUkernel<<<N/threads,threads,threads*4*sizeof(double)>>>(N, x, y, z, m, ax, ay, az, G, eps);
cudaThreadSynchronize();
gettimeofday(&toc,NULL);
time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("GPU : %e s : %lf GFlops\n",time, OPS/time);
file = fopen("direct.dat","wb");
fwrite(&N,sizeof(int),1,file);
fwrite(ax,sizeof(double),N,file);
fwrite(ay,sizeof(double),N,file);
fwrite(az,sizeof(double),N,file);
fclose(file);
cudaFree(x);
cudaFree(y);
cudaFree(z);
cudaFree(m);
cudaFree(ax);
cudaFree(ay);
cudaFree(az);
return 0;
}
|
GalerkinCuda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GalerkinCuda.cuh"
#include "GalerkinData.h"
__global__ void GalerkinCuda::ElementInfluenceMatrix(float* infMatr, float* beinfo, float* fright)
{
uint beNum = blockIdx.x / 3; // each boundary element generates 3 equations
uint funcNum = blockIdx.x % 3 + 1; // equation number (1..3) for the fixed boundary element
uint beNumLocal = blockIdx.y / 3; // index of the boundary element that influences the fixed global element
uint funcNumLocal = blockIdx.y % 3 + 1; // basis function number of the element that influences the fixed global element
uint termNum = threadIdx.x; // numeric integral term (1..numIntDiscr)
// TODO: check that the coefficient index is correct
uint coeffNumGlobal = blockIdx.x * gridDim.y + blockIdx.y; // global number of coefficient in full influence matrix
uint beNumInfoK = 8; // shift multiplier = size of beinfo struct
// global boundary element info
float xBE = beinfo[beNum * beNumInfoK+2];
float yBE = beinfo[beNum * beNumInfoK+3];
float alphaBE = beinfo[beNum * beNumInfoK+6];
float lngBE = beinfo[beNum * beNumInfoK+7];
//local boundary element info
float xBELoc = beinfo[beNumLocal * beNumInfoK+2];
float yBELoc = beinfo[beNumLocal * beNumInfoK+3];
float alphaBELoc = beinfo[beNumLocal * beNumInfoK+6];
float lngBELoc = beinfo[beNumLocal * beNumInfoK+7];
// info for the discrete integral
float discrStep = 2 * lngBE / blockDim.x;
float xSub = -lngBE + discrStep * termNum;
float ySub = 0;
float xSubTransofrmed = 0, ySubTransformed = 0;
Transform2D(xBE,yBE,alphaBE,
0,0,0,
xSub,ySub,
xSubTransofrmed,ySubTransformed);
float increment = 0;
float frightIncrement = 0;
switch (funcNum) {
case 1:
increment = discrStep * f1(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f1(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
case 2:
increment = discrStep * f2(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f2(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
case 3:
increment = discrStep * f3(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f3(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
default:
break;
}
atomicAdd(&infMatr[coeffNumGlobal], increment);
atomicAdd(&fright[blockIdx.x], frightIncrement);
}
using namespace GalerkinMethod;
void GalerkinCuda::CalculateInfMatrix()
{
if (!initialisedData) {
printf("\nFalse while reading input data");
return;
}
ResetData();
hipSetDevice(0);
// try to parallelize as effectively as possible:
// we have 3N equations [N - number of boundary elements];
// each equation is the result of a numeric integral and is a sum of p_j*k [j=1..N],
// so we need to calculate each k
//hipDeviceProp_t prop;
//hipGetDeviceProperties(&prop, 0);
//printf("Device is %s\nnumber of blocks %dx%dx%d (each %dx%dx%d) = number of threads %d\n", prop.name,
// prop.maxGridSize[0],
// prop.maxGridSize[1],
// prop.maxGridSize[2],
// prop.maxThreadsDim[0],
// prop.maxThreadsDim[1],
// prop.maxThreadsDim[2],
// prop.maxThreadsPerBlock);
dim3 blockSize = dim3(numIntDiscr, 1, 1); // each coefficient is a sum of numIntDiscr terms
dim3 gridSize = dim3(beNum*3, beNum*3, 1); // each boundary element has 3 equations, each consisting of (beNum * 3) coefficients
// data pointers for a kernel
float* cudaInfMatr;
float* cudaBeInfo;
float* cudaFright;
hipMalloc((void**)&cudaInfMatr, infMatrSize * sizeof(float));
hipMalloc((void**)&cudaBeInfo, beInfoSize * sizeof(float));
hipMalloc((void**)&cudaFright, fRightSize * sizeof(float));
hipMemcpy(cudaInfMatr, infMatr, infMatrSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaBeInfo, beInfo, beInfoSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaFright, fRight, fRightSize * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ElementInfluenceMatrix) , dim3(gridSize), dim3(blockSize) , 0, 0, cudaInfMatr, cudaBeInfo, cudaFright);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
hipEvent_t syncEvent;
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipMemcpy(infMatr, cudaInfMatr, infMatrSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(fRight, cudaFright, fRightSize * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceReset();
printf("\nSolved success!");
}
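// Evaluates the potential at one field node per block: each thread adds the
// contribution of one (boundary element, basis function) pair via atomicAdd.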
__global__ void GalerkinCuda::NodePotential(float* nodes, float* beinfo, float* coeffs)
{
uint nodeInd = (blockIdx.x * gridDim.y + blockIdx.y) * 3;
float x = nodes[nodeInd + 0];
float y = nodes[nodeInd + 1];
// local boundary element info
uint beNumInfoK = 8; // shift multiplier = size of beinfo struct
uint localBE = threadIdx.x;
float xBELoc = beinfo[localBE * beNumInfoK + 2];
float yBELoc = beinfo[localBE * beNumInfoK + 3];
float alphaBELoc = beinfo[localBE * beNumInfoK + 6];
float lngBELoc = beinfo[localBE * beNumInfoK + 7];
uint func = threadIdx.y + 1;
uint coeffInd = threadIdx.x * 3;
float increment = 0;
switch (func) {
case 1:
increment = coeffs[coeffInd + 0] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 1);
break;
case 2:
increment = coeffs[coeffInd + 1] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 2);
break;
case 3:
increment = coeffs[coeffInd + 2] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 3);
break;
}
atomicAdd(&nodes[nodeInd + 2], increment);
}
void GalerkinCuda::CalculatePotentialField()
{
if (!initialisedData || !initialisedCoeffs) {
printf("\nFalse while reading input data");
return;
}
ResetData();
hipSetDevice(0);
// evaluate the potential at every field node by summing the contributions
// of all boundary elements and their three basis functions
//hipDeviceProp_t prop;
//hipGetDeviceProperties(&prop, 0);
//printf("Device is %s\nnumber of blocks %dx%dx%d (each %dx%dx%d) = number of threads %d\n", prop.name,
// prop.maxGridSize[0],
// prop.maxGridSize[1],
// prop.maxGridSize[2],
// prop.maxThreadsDim[0],
// prop.maxThreadsDim[1],
// prop.maxThreadsDim[2],
// prop.maxThreadsPerBlock);
dim3 blockSize = dim3(beNum, 3, 1); // one thread per (boundary element, basis function) pair
dim3 gridSize = dim3(fdSizeX, fdSizeY, 1); // one block per field node
// data pointers for a kernel
float* cudaPotField;
float* cudaBeInfo;
float* cudaCoeffs;
hipMalloc((void**)&cudaPotField, potFieldSize * sizeof(float));
hipMalloc((void**)&cudaBeInfo, beInfoSize * sizeof(float));
hipMalloc((void**)&cudaCoeffs, coeffsSize * sizeof(float));
hipMemcpy(cudaPotField, potField, potFieldSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaBeInfo, beInfo, beInfoSize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cudaCoeffs, coeffs, coeffsSize * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( NodePotential) , dim3(gridSize), dim3(blockSize) , 0, 0, cudaPotField, cudaBeInfo, cudaCoeffs);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
hipEvent_t syncEvent;
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipMemcpy(potField, cudaPotField, potFieldSize * sizeof(float), hipMemcpyDeviceToHost);
hipDeviceReset();
printf("\nSolved success!");
} | GalerkinCuda.cu | #include "GalerkinCuda.cuh"
#include "GalerkinData.h"
__global__ void GalerkinCuda::ElementInfluenceMatrix(float* infMatr, float* beinfo, float* fright)
{
uint beNum = blockIdx.x / 3; // each boundary element generates 3 equations
uint funcNum = blockIdx.x % 3 + 1; // equation number (1..3) for the fixed boundary element
uint beNumLocal = blockIdx.y / 3; // index of the boundary element that influences the fixed global element
uint funcNumLocal = blockIdx.y % 3 + 1; // basis function number of the element that influences the fixed global element
uint termNum = threadIdx.x; // numeric integral term (1..numIntDiscr)
// TODO: check that the coefficient index is correct
uint coeffNumGlobal = blockIdx.x * gridDim.y + blockIdx.y; // global number of coefficient in full influence matrix
uint beNumInfoK = 8; // shift multiplier = size of beinfo struct
// global boundary element info
float xBE = beinfo[beNum * beNumInfoK+2];
float yBE = beinfo[beNum * beNumInfoK+3];
float alphaBE = beinfo[beNum * beNumInfoK+6];
float lngBE = beinfo[beNum * beNumInfoK+7];
//local boundary element info
float xBELoc = beinfo[beNumLocal * beNumInfoK+2];
float yBELoc = beinfo[beNumLocal * beNumInfoK+3];
float alphaBELoc = beinfo[beNumLocal * beNumInfoK+6];
float lngBELoc = beinfo[beNumLocal * beNumInfoK+7];
// info for the discrete integral
float discrStep = 2 * lngBE / blockDim.x;
float xSub = -lngBE + discrStep * termNum;
float ySub = 0;
float xSubTransofrmed = 0, ySubTransformed = 0;
Transform2D(xBE,yBE,alphaBE,
0,0,0,
xSub,ySub,
xSubTransofrmed,ySubTransformed);
float increment = 0;
float frightIncrement = 0;
switch (funcNum) {
case 1:
increment = discrStep * f1(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f1(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
case 2:
increment = discrStep * f2(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f2(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
case 3:
increment = discrStep * f3(xSub, lngBE) * IG(xSubTransofrmed, ySubTransformed, xBELoc, yBELoc, lngBELoc, alphaBELoc, funcNumLocal);
if (blockIdx.x == blockIdx.y)
frightIncrement = discrStep * f3(xSub, lngBE) * Problem::InitCondition(xSubTransofrmed, ySubTransformed);
break;
default:
break;
}
atomicAdd(&infMatr[coeffNumGlobal], increment);
atomicAdd(&fright[blockIdx.x], frightIncrement);
}
using namespace GalerkinMethod;
void GalerkinCuda::CalculateInfMatrix()
{
if (!initialisedData) {
printf("\nFalse while reading input data");
return;
}
ResetData();
cudaSetDevice(0);
// try to parallelize as effectively as possible:
// we have 3N equations [N - number of boundary elements];
// each equation is the result of a numeric integral and is a sum of p_j*k [j=1..N],
// so we need to calculate each k
//cudaDeviceProp prop;
//cudaGetDeviceProperties(&prop, 0);
//printf("Device is %s\nnumber of blocks %dx%dx%d (each %dx%dx%d) = number of threads %d\n", prop.name,
// prop.maxGridSize[0],
// prop.maxGridSize[1],
// prop.maxGridSize[2],
// prop.maxThreadsDim[0],
// prop.maxThreadsDim[1],
// prop.maxThreadsDim[2],
// prop.maxThreadsPerBlock);
dim3 blockSize = dim3(numIntDiscr, 1, 1); // each coefficient is a sum of numIntDiscr terms
dim3 gridSize = dim3(beNum*3, beNum*3, 1); // each boundary element has 3 equations, each consisting of (beNum * 3) coefficients
// data pointers for a kernel
float* cudaInfMatr;
float* cudaBeInfo;
float* cudaFright;
cudaMalloc((void**)&cudaInfMatr, infMatrSize * sizeof(float));
cudaMalloc((void**)&cudaBeInfo, beInfoSize * sizeof(float));
cudaMalloc((void**)&cudaFright, fRightSize * sizeof(float));
cudaMemcpy(cudaInfMatr, infMatr, infMatrSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaBeInfo, beInfo, beInfoSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaFright, fRight, fRightSize * sizeof(float), cudaMemcpyHostToDevice);
ElementInfluenceMatrix <<< gridSize, blockSize >>> (cudaInfMatr, cudaBeInfo, cudaFright);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
cudaEvent_t syncEvent;
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaMemcpy(infMatr, cudaInfMatr, infMatrSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fRight, cudaFright, fRightSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceReset();
printf("\nSolved success!");
}
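// Evaluates the potential at one field node per block: each thread adds the
// contribution of one (boundary element, basis function) pair via atomicAdd.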
__global__ void GalerkinCuda::NodePotential(float* nodes, float* beinfo, float* coeffs)
{
uint nodeInd = (blockIdx.x * gridDim.y + blockIdx.y) * 3;
float x = nodes[nodeInd + 0];
float y = nodes[nodeInd + 1];
// local boundary element info
uint beNumInfoK = 8; // shift multiplier = size of beinfo struct
uint localBE = threadIdx.x;
float xBELoc = beinfo[localBE * beNumInfoK + 2];
float yBELoc = beinfo[localBE * beNumInfoK + 3];
float alphaBELoc = beinfo[localBE * beNumInfoK + 6];
float lngBELoc = beinfo[localBE * beNumInfoK + 7];
uint func = threadIdx.y + 1;
uint coeffInd = threadIdx.x * 3;
float increment = 0;
switch (func) {
case 1:
increment = coeffs[coeffInd + 0] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 1);
break;
case 2:
increment = coeffs[coeffInd + 1] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 2);
break;
case 3:
increment = coeffs[coeffInd + 2] * IG(x, y, xBELoc, yBELoc, lngBELoc, alphaBELoc, 3);
break;
}
atomicAdd(&nodes[nodeInd + 2], increment);
}
void GalerkinCuda::CalculatePotentialField()
{
if (!initialisedData || !initialisedCoeffs) {
printf("\nFalse while reading input data");
return;
}
ResetData();
cudaSetDevice(0);
// evaluate the potential at every field node by summing the contributions
// of all boundary elements and their three basis functions
//cudaDeviceProp prop;
//cudaGetDeviceProperties(&prop, 0);
//printf("Device is %s\nnumber of blocks %dx%dx%d (each %dx%dx%d) = number of threads %d\n", prop.name,
// prop.maxGridSize[0],
// prop.maxGridSize[1],
// prop.maxGridSize[2],
// prop.maxThreadsDim[0],
// prop.maxThreadsDim[1],
// prop.maxThreadsDim[2],
// prop.maxThreadsPerBlock);
dim3 blockSize = dim3(beNum, 3, 1); // one thread per (boundary element, basis function) pair
dim3 gridSize = dim3(fdSizeX, fdSizeY, 1); // one block per field node
// data pointers for a kernel
float* cudaPotField;
float* cudaBeInfo;
float* cudaCoeffs;
cudaMalloc((void**)&cudaPotField, potFieldSize * sizeof(float));
cudaMalloc((void**)&cudaBeInfo, beInfoSize * sizeof(float));
cudaMalloc((void**)&cudaCoeffs, coeffsSize * sizeof(float));
cudaMemcpy(cudaPotField, potField, potFieldSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaBeInfo, beInfo, beInfoSize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cudaCoeffs, coeffs, coeffsSize * sizeof(float), cudaMemcpyHostToDevice);
NodePotential <<< gridSize, blockSize >>> (cudaPotField, cudaBeInfo, cudaCoeffs);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
cudaEvent_t syncEvent;
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaMemcpy(potField, cudaPotField, potFieldSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceReset();
printf("\nSolved success!");
} |
a3fb5e938e411f34c1d1e93e50b263b28b224d3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdio.h>
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
////////////////////////OPERATIONS//////////////////////////////////////////////
//ADD=1
__global__ void add(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]+array2[i];
}
//SUBTRACT=2
__global__ void subtract(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]-array2[i];
}
//MULTIPLY=3
__global__ void multiply(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]*array2[i];
}
//MOD=4
__global__ void mod(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]%array2[i];
}
//////////////////////////GPU FUNCTION//////////////////////////////////
void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation)
{
/* Declare statically three arrays of ARRAY_SIZE each */
int array1[ARRAY_SIZE];
int array2[ARRAY_SIZE];
int array3[ARRAY_SIZE];
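// Note: ARRAY_SIZE expands to the runtime parameter N, so these are
// variable-length arrays on the stack (a compiler extension); large N such as
// the default 1 << 20 allocates several MB per array and may overflow the stack.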
for(int i = 0; i < ARRAY_SIZE; i++)
{
array1[i] = i;
array2[i] = (rand()%4);
//Check that array1 and array 2 inputs are correct
//printf("ARRAY1 at %i\nARRAY2 at %i\n\n", array1[i], array2[i]);
}
/* Declare pointers for GPU based params */
int *gpu_block1;
int *gpu_block2;
int *gpu_block3;
hipMalloc((void **)&gpu_block1, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block2, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block3, ARRAY_SIZE_IN_BYTES);
hipMemcpy( gpu_block1, array1, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
hipMemcpy( gpu_block2, array2, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
hipMemcpy( gpu_block3, array3, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
/* Execute our kernel */
switch(whichOperation) {
//ADD
case 1 :
printf("///////////////////////OUTPUT ADD///////////////\n");
hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block1,gpu_block2,gpu_block3);
break;
//SUBTRACT
case 2 :
printf("///////////////////////OUTPUT SUBTRACT///////////////\n");
hipLaunchKernelGGL(( subtract), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block1,gpu_block2,gpu_block3);
break;
//MULTIPLY
case 3 :
printf("///////////////////////OUTPUT MULTIPLY///////////////\n");
hipLaunchKernelGGL(( multiply), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block1,gpu_block2,gpu_block3);
break;
//MOD
case 4 :
printf("///////////////////////OUTPUT MOD///////////////\n");
hipLaunchKernelGGL(( mod), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block1,gpu_block2,gpu_block3);
break;
}
/* Copy the results back, then free the arrays on the GPU as we're done with them */
hipMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipMemcpy( array2, gpu_block2, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipMemcpy( array3, gpu_block3, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipFree(gpu_block1);
hipFree(gpu_block2);
hipFree(gpu_block3);
/* Iterate through the arrays and print */
for(int i = 0; i < ARRAY_SIZE; i+=4)
{
printf("Index %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\n", i, array3[i], i+1, array3[i+1],i+2, array3[i+2], i+3, array3[i+3]);
}
}
//////////////////////////MAIN///////////////////////////////////
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = (1 << 20);
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
main_sub(totalThreads,blockSize,numBlocks, 1);
main_sub(totalThreads,blockSize,numBlocks, 2);
main_sub(totalThreads,blockSize,numBlocks, 3);
main_sub(totalThreads,blockSize,numBlocks, 4);
}
| a3fb5e938e411f34c1d1e93e50b263b28b224d3b.cu | //Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdio.h>
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
////////////////////////OPERATIONS//////////////////////////////////////////////
//ADD=1
__global__ void add(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]+array2[i];
}
//SUBTRACT=2
__global__ void subtract(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]-array2[i];
}
//MULTIPLY=3
__global__ void multiply(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]*array2[i];
}
//MOD=4
__global__ void mod(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]%array2[i];
}
//////////////////////////GPU FUNCTION//////////////////////////////////
void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation)
{
/* Declare statically three arrays of ARRAY_SIZE each */
int array1[ARRAY_SIZE];
int array2[ARRAY_SIZE];
int array3[ARRAY_SIZE];
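// Note: ARRAY_SIZE expands to the runtime parameter N, so these are
// variable-length arrays on the stack (a compiler extension); large N such as
// the default 1 << 20 allocates several MB per array and may overflow the stack.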
for(int i = 0; i < ARRAY_SIZE; i++)
{
array1[i] = i;
array2[i] = (rand()%4);
//Check that array1 and array 2 inputs are correct
//printf("ARRAY1 at %i\nARRAY2 at %i\n\n", array1[i], array2[i]);
}
/* Declare pointers for GPU based params */
int *gpu_block1;
int *gpu_block2;
int *gpu_block3;
cudaMalloc((void **)&gpu_block1, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block2, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block3, ARRAY_SIZE_IN_BYTES);
cudaMemcpy( gpu_block1, array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_block2, array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_block3, array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
/* Execute our kernel */
switch(whichOperation) {
//ADD
case 1 :
printf("///////////////////////OUTPUT ADD///////////////\n");
add<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//SUBTRACT
case 2 :
printf("///////////////////////OUTPUT SUBTRACT///////////////\n");
subtract<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MULTIPLY
case 3 :
printf("///////////////////////OUTPUT MULTIPLY///////////////\n");
multiply<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MOD
case 4 :
printf("///////////////////////OUTPUT MOD///////////////\n");
mod<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
}
/* Copy the results back, then free the arrays on the GPU as we're done with them */
cudaMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaMemcpy( array2, gpu_block2, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaMemcpy( array3, gpu_block3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaFree(gpu_block1);
cudaFree(gpu_block2);
cudaFree(gpu_block3);
/* Iterate through the arrays and print */
for(int i = 0; i < ARRAY_SIZE; i+=4)
{
printf("Index %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\n", i, array3[i], i+1, array3[i+1],i+2, array3[i+2], i+3, array3[i+3]);
}
}
//////////////////////////MAIN///////////////////////////////////
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = (1 << 20);
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
main_sub(totalThreads,blockSize,numBlocks, 1);
main_sub(totalThreads,blockSize,numBlocks, 2);
main_sub(totalThreads,blockSize,numBlocks, 3);
main_sub(totalThreads,blockSize,numBlocks, 4);
}
|
SobelFilter_kernels.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//This code was reused by Mohd Hakimie to be used with Smalltalk
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_string.h>
#include "SobelFilter_kernels.h"
// Texture object for reading image
hipTextureObject_t texObject;
extern __shared__ unsigned char LocalBlock[];
static hipArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
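// Approximates the Sobel gradient magnitude for one pixel: |Gx| + |Gy| scaled by fScale,
// then clamped to the 8-bit range [0, 255].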
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale)
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short)(fScale*(abs((int)Horz)+abs((int)Vert)));
if (Sum < 0)
{
return 0;
}
else if (Sum > 0xff)
{
return 0xff;
}
return (unsigned char) Sum;
}
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale, hipTextureObject_t tex)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D<unsigned char>(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale, hipTextureObject_t tex)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
unsigned char pix00 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x-1);
unsigned char pix01 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x-1);
unsigned char pix02 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x-1);
unsigned char pix10 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x+0);
unsigned char pix11 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x+0);
unsigned char pix12 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x+0);
unsigned char pix20 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x+1);
unsigned char pix21 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x+1);
unsigned char pix22 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x+1);
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
hipChannelFormatDesc desc;
if (Bpp == 1)
{
desc = hipCreateChannelDesc<unsigned char>();
}
else
{
desc = hipCreateChannelDesc<uchar4>();
}
checkCudaErrors(hipMallocArray(&array, &desc, iw, ih));
checkCudaErrors(hipMemcpy2DToArray(array, 0, 0, data, iw * Bpp * sizeof(Pixel),
iw * Bpp * sizeof(Pixel), ih, hipMemcpyHostToDevice));
hipResourceDesc texRes;
memset(&texRes,0,sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = array;
hipTextureDesc texDescr;
memset(&texDescr,0,sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModePoint;
texDescr.addressMode[0] = hipAddressModeWrap;
texDescr.readMode = hipReadModeElementType;
checkCudaErrors(hipCreateTextureObject(&texObject, &texRes, &texDescr, NULL));
}
extern "C" void deleteTexture(void)
{
checkCudaErrors(hipFreeArray(array));
checkCudaErrors(hipDestroyTextureObject(texObject));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
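    // Launch one thread block per image row (ih blocks of 384 threads); inside the
    // kernels each thread strides across the row width.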
switch (mode)
{
case SOBELDISPLAY_IMAGE:
hipLaunchKernelGGL(( SobelCopyImage), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale, texObject);
break;
case SOBELDISPLAY_SOBELTEX:
hipLaunchKernelGGL(( SobelTex), dim3(ih), dim3(384), 0, 0, odata, iw, iw, ih, fScale, texObject);
break;
}
}
| SobelFilter_kernels.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
//This code was reused by Mohd Hakimie to be used with Smalltalk
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_string.h>
#include "SobelFilter_kernels.h"
// Texture object for reading image
cudaTextureObject_t texObject;
extern __shared__ unsigned char LocalBlock[];
static cudaArray *array = NULL;
#define RADIUS 1
#ifdef FIXED_BLOCKWIDTH
#define BlockWidth 80
#define SharedPitch 384
#endif
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
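// Approximates the Sobel gradient magnitude for one pixel: |Gx| + |Gy| scaled by fScale,
// then clamped to the 8-bit range [0, 255].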
__device__ unsigned char
ComputeSobel(unsigned char ul, // upper left
unsigned char um, // upper middle
unsigned char ur, // upper right
unsigned char ml, // middle left
unsigned char mm, // middle (unused)
unsigned char mr, // middle right
unsigned char ll, // lower left
unsigned char lm, // lower middle
unsigned char lr, // lower right
float fScale)
{
short Horz = ur + 2*mr + lr - ul - 2*ml - ll;
short Vert = ul + 2*um + ur - ll - 2*lm - lr;
short Sum = (short)(fScale*(abs((int)Horz)+abs((int)Vert)));
if (Sum < 0)
{
return 0;
}
else if (Sum > 0xff)
{
return 0xff;
}
return (unsigned char) Sum;
}
__global__ void
SobelCopyImage(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fscale, cudaTextureObject_t tex)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
pSobel[i] = min(max((tex2D<unsigned char>(tex, (float) i, (float) blockIdx.x) * fscale), 0.f), 255.f);
}
}
__global__ void
SobelTex(Pixel *pSobelOriginal, unsigned int Pitch,
int w, int h, float fScale, cudaTextureObject_t tex)
{
unsigned char *pSobel =
(unsigned char *)(((char *) pSobelOriginal)+blockIdx.x*Pitch);
for (int i = threadIdx.x; i < w; i += blockDim.x)
{
unsigned char pix00 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x-1);
unsigned char pix01 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x-1);
unsigned char pix02 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x-1);
unsigned char pix10 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x+0);
unsigned char pix11 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x+0);
unsigned char pix12 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x+0);
unsigned char pix20 = tex2D<unsigned char>(tex, (float) i-1, (float) blockIdx.x+1);
unsigned char pix21 = tex2D<unsigned char>(tex, (float) i+0, (float) blockIdx.x+1);
unsigned char pix22 = tex2D<unsigned char>(tex, (float) i+1, (float) blockIdx.x+1);
pSobel[i] = ComputeSobel(pix00, pix01, pix02,
pix10, pix11, pix12,
pix20, pix21, pix22, fScale);
}
}
extern "C" void setupTexture(int iw, int ih, Pixel *data, int Bpp)
{
cudaChannelFormatDesc desc;
if (Bpp == 1)
{
desc = cudaCreateChannelDesc<unsigned char>();
}
else
{
desc = cudaCreateChannelDesc<uchar4>();
}
checkCudaErrors(cudaMallocArray(&array, &desc, iw, ih));
checkCudaErrors(cudaMemcpy2DToArray(array, 0, 0, data, iw * Bpp * sizeof(Pixel),
iw * Bpp * sizeof(Pixel), ih, cudaMemcpyHostToDevice));
cudaResourceDesc texRes;
memset(&texRes,0,sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = array;
cudaTextureDesc texDescr;
memset(&texDescr,0,sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModePoint;
texDescr.addressMode[0] = cudaAddressModeWrap;
texDescr.readMode = cudaReadModeElementType;
checkCudaErrors(cudaCreateTextureObject(&texObject, &texRes, &texDescr, NULL));
}
extern "C" void deleteTexture(void)
{
checkCudaErrors(cudaFreeArray(array));
checkCudaErrors(cudaDestroyTextureObject(texObject));
}
// Wrapper for the __global__ call that sets up the texture and threads
extern "C" void sobelFilter(Pixel *odata, int iw, int ih, enum SobelDisplayMode mode, float fScale)
{
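    // Launch one thread block per image row (ih blocks of 384 threads); inside the
    // kernels each thread strides across the row width.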
switch (mode)
{
case SOBELDISPLAY_IMAGE:
SobelCopyImage<<<ih, 384>>>(odata, iw, iw, ih, fScale, texObject);
break;
case SOBELDISPLAY_SOBELTEX:
SobelTex<<<ih, 384>>>(odata, iw, iw, ih, fScale, texObject);
break;
}
}
|
d356b3b76ede94a0eab8eea241fb35a2f09df963.hip | // !!! This is a file automatically generated by hipify!!!
/*
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hip/device_functions.h>
#include <string.h>
__global__ void whatever(char *d_a,char *d_res,int len)
{
int i=0;
int flag=1;
int in=blockIdx.x*blockDim.x+threadIdx.x;
}
int main()
{
int i;
char h_a[1000];
char h_res[1000];
char *d_a;
char *d_res;
printf("Enter the string: \n");
gets(h_a);
//puts(h_a);
int len=strlen(h_a);
int size=sizeof(char)*len;
hipMalloc((void **)&d_a,size);
hipMalloc((void **)&d_res,size);
hipMemcpy(d_a,h_a,size,hipMemcpyHostToDevice);
whatever<<<1,len>>>(d_a,d_res,len);
hipMemcpy(h_res,d_res,size,hipMemcpyDeviceToHost);
printf("OUTPUT :\n");
for(i=0;i<len;i++)
printf("%c, ",h_res[i]);
printf("\n\n");
hipFree(d_a);
hipFree(d_res);
}
*/ | d356b3b76ede94a0eab8eea241fb35a2f09df963.cu | /*
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <device_functions.h>
#include <string.h>
__global__ void whatever(char *d_a,char *d_res,int len)
{
int i=0;
int flag=1;
int in=blockIdx.x*blockDim.x+threadIdx.x;
}
int main()
{
int i;
char h_a[1000];
char h_res[1000];
char *d_a;
char *d_res;
printf("Enter the string: \n");
gets(h_a);
//puts(h_a);
int len=strlen(h_a);
int size=sizeof(char)*len;
cudaMalloc((void **)&d_a,size);
cudaMalloc((void **)&d_res,size);
cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
whatever<<<1,len>>>(d_a,d_res,len);
cudaMemcpy(h_res,d_res,size,cudaMemcpyDeviceToHost);
printf("OUTPUT :\n");
for(i=0;i<len;i++)
printf("%c, ",h_res[i]);
printf("\n\n");
cudaFree(d_a);
cudaFree(d_res);
}
*/ |
fcf948585c851bec594df51da4ed7a59266da21e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_out_frame(
const scalar_t* input,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* output) {
size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
float scale_factor = (float)src_dim_d / (float)dst_dim_d;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nearest_neighbor_compute_source_index(scale_factor, dst_z, src_dim_d);
scale_factor = (float)src_dim_h / (float)dst_dim_h;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nearest_neighbor_compute_source_index(scale_factor, dst_y, src_dim_h);
scale_factor = (float)src_dim_w / (float)dst_dim_w;
int dst_x = dst_idx % dst_dim_w;
int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w);
size_t src_idx = c * src_c_stride + src_z * src_dim_h * src_dim_w +
src_y * src_dim_w + src_x;
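  // Step through the batch: the spatial offsets are identical for every sample, so
  // advance src_idx/dst_idx by one full (C, D, H, W) volume per iteration.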
for (int b = 0; b < dim_b; b++) {
output[dst_idx] = input[src_idx];
src_idx += dim_c * src_c_stride;
dst_idx += dim_c * dst_c_stride;
}
}
// Backward operation
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i) {
size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
float scale_factor = (float)src_dim_d / (float)dst_dim_d;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nearest_neighbor_compute_source_index(scale_factor, dst_z, src_dim_d);
int src_z_up = nearest_neighbor_compute_source_index(scale_factor, dst_z+1, src_dim_d+1);
scale_factor = (float)src_dim_h / (float)dst_dim_h;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nearest_neighbor_compute_source_index(scale_factor, dst_y, src_dim_h);
int src_y_up = nearest_neighbor_compute_source_index(scale_factor, dst_y+1, src_dim_h+1);
scale_factor = (float)src_dim_w / (float)dst_dim_w;
int dst_x = dst_idx % dst_dim_w;
int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w);
int src_x_up = nearest_neighbor_compute_source_index(scale_factor, dst_x+1, src_dim_w+1);
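  // Each thread owns one grad_input element and accumulates grad_output over all
  // output positions whose nearest-neighbor source is this element.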
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int z = src_z; z < src_z_up; z++) {
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
size_t src_idx = b * dim_c * src_c_stride + c * src_c_stride +
z * src_dim_h * src_dim_w + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
static void upsample_nearest3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_nearest3d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 3,
"It is expected output_size equals to 3, but got size ",
output_size.size());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_depth = input_.size(2);
int input_height = input_.size(3);
int input_width = input_.size(4);
upsample_3d_shape_check(
input_,
Tensor(),
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width);
AT_ASSERT(
input_depth > 0 && input_height > 0 && input_width > 0 &&
output_depth > 0 && output_height > 0 && output_width > 0);
Tensor input = input_.contiguous();
output.resize_({input.size(0),
input.size(1),
output_depth,
output_height,
output_width});
// upsample_3d_shape_check makes sure `nbatch != 0`
unsigned int n = output.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_nearest3d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output.data_ptr<scalar_t>();
hipLaunchKernelGGL(( upsample_nearest3d_out_frame<scalar_t>), dim3(gdim), dim3(bdim), 0, stream,
idata,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
odata);
});
AT_CUDA_CHECK(hipGetLastError());
}
static void upsample_nearest3d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_nearest3d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 3,
"It is expected output_size equals to 3, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 5,
"It is expected input_size equals to 5, but got size ",
input_size.size());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_size[0];
int channels = input_size[1];
int input_depth = input_size[2];
int input_height = input_size[3];
int input_width = input_size[4];
upsample_3d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_(
{nbatch, channels, input_depth, input_height, input_width});
// upsample_3d_shape_check makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_nearest3d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
hipLaunchKernelGGL(( upsample_nearest3d_backward_out_frame<scalar_t, accscalar_t>)
, dim3(gdim), dim3(bdim), 0, stream,
odata,
nbatch,
channels,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
idata);
});
AT_CUDA_CHECK(hipGetLastError());
}
} // namespace
Tensor& upsample_nearest3d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size) {
upsample_nearest3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor upsample_nearest3d_cuda(const Tensor& input, IntArrayRef output_size) {
Tensor output = at::empty_like(input);
upsample_nearest3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor& upsample_nearest3d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size) {
upsample_nearest3d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size);
return grad_input;
}
Tensor upsample_nearest3d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size) {
Tensor grad_input = at::empty_like(grad_output);
upsample_nearest3d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size);
return grad_input;
}
} // namespace native
} // namespace at
| fcf948585c851bec594df51da4ed7a59266da21e.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/UpSample.cuh>
namespace at {
namespace native {
namespace {
#define MAX_THREADS 512
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_out_frame(
const scalar_t* input,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* output) {
size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
float scale_factor = (float)src_dim_d / (float)dst_dim_d;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nearest_neighbor_compute_source_index(scale_factor, dst_z, src_dim_d);
scale_factor = (float)src_dim_h / (float)dst_dim_h;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nearest_neighbor_compute_source_index(scale_factor, dst_y, src_dim_h);
scale_factor = (float)src_dim_w / (float)dst_dim_w;
int dst_x = dst_idx % dst_dim_w;
int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w);
size_t src_idx = c * src_c_stride + src_z * src_dim_h * src_dim_w +
src_y * src_dim_w + src_x;
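  // Step through the batch: the spatial offsets are identical for every sample, so
  // advance src_idx/dst_idx by one full (C, D, H, W) volume per iteration.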
for (int b = 0; b < dim_b; b++) {
output[dst_idx] = input[src_idx];
src_idx += dim_c * src_c_stride;
dst_idx += dim_c * dst_c_stride;
}
}
// Backward operation
template <typename scalar_t, typename accscalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void upsample_nearest3d_backward_out_frame(
const scalar_t* grad_o,
size_t dim_b,
size_t dim_c,
size_t src_dim_d,
size_t src_dim_h,
size_t src_dim_w,
size_t dst_dim_d,
size_t dst_dim_h,
size_t dst_dim_w,
scalar_t* grad_i) {
size_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx >= dim_c * dst_dim_d * dst_dim_h * dst_dim_w)
return;
int dst_c_stride = dst_dim_d * dst_dim_h * dst_dim_w;
int src_c_stride = src_dim_d * src_dim_h * src_dim_w;
int c = (dst_idx / (dst_c_stride)) % dim_c;
float scale_factor = (float)src_dim_d / (float)dst_dim_d;
int dst_z = (dst_idx / dst_dim_h / dst_dim_w) % dst_dim_d;
int src_z = nearest_neighbor_compute_source_index(scale_factor, dst_z, src_dim_d);
int src_z_up = nearest_neighbor_compute_source_index(scale_factor, dst_z+1, src_dim_d+1);
scale_factor = (float)src_dim_h / (float)dst_dim_h;
int dst_y = (dst_idx / dst_dim_w) % dst_dim_h;
int src_y = nearest_neighbor_compute_source_index(scale_factor, dst_y, src_dim_h);
int src_y_up = nearest_neighbor_compute_source_index(scale_factor, dst_y+1, src_dim_h+1);
scale_factor = (float)src_dim_w / (float)dst_dim_w;
int dst_x = dst_idx % dst_dim_w;
int src_x = nearest_neighbor_compute_source_index(scale_factor, dst_x, src_dim_w);
int src_x_up = nearest_neighbor_compute_source_index(scale_factor, dst_x+1, src_dim_w+1);
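  // Each thread owns one grad_input element and accumulates grad_output over all
  // output positions whose nearest-neighbor source is this element.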
for (int b = 0; b < dim_b; b++) {
accscalar_t grad = 0;
for (int z = src_z; z < src_z_up; z++) {
for (int y = src_y; y < src_y_up; y++) {
for (int x = src_x; x < src_x_up; x++) {
size_t src_idx = b * dim_c * src_c_stride + c * src_c_stride +
z * src_dim_h * src_dim_w + y * src_dim_w + x;
grad += grad_o[src_idx];
}
}
}
grad_i[dst_idx] = grad;
dst_idx += dim_c * dst_c_stride;
}
}
static void upsample_nearest3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef output_size) {
TensorArg input_arg{input_, "input_", 1}, output_arg{output, "output", 2};
checkAllSameGPU("upsample_nearest3d_out_cuda", {input_arg, output_arg});
TORCH_CHECK(
output_size.size() == 3,
"It is expected output_size equals to 3, but got size ",
output_size.size());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_.size(0);
int channels = input_.size(1);
int input_depth = input_.size(2);
int input_height = input_.size(3);
int input_width = input_.size(4);
upsample_3d_shape_check(
input_,
Tensor(),
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width);
AT_ASSERT(
input_depth > 0 && input_height > 0 && input_width > 0 &&
output_depth > 0 && output_height > 0 && output_width > 0);
Tensor input = input_.contiguous();
output.resize_({input.size(0),
input.size(1),
output_depth,
output_height,
output_width});
// upsample_3d_shape_check makes sure `nbatch != 0`
unsigned int n = output.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "upsample_nearest3d_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = input.data_ptr<scalar_t>();
auto odata = output.data_ptr<scalar_t>();
upsample_nearest3d_out_frame<scalar_t><<<gdim, bdim, 0, stream>>>(
idata,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width,
odata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
static void upsample_nearest3d_backward_out_cuda_template(
Tensor& grad_input,
const Tensor& grad_output_,
IntArrayRef output_size,
IntArrayRef input_size) {
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2};
checkAllSameGPU(
"upsample_nearest3d_backward_out_cuda",
{grad_output_arg, grad_input_arg});
TORCH_CHECK(
output_size.size() == 3,
"It is expected output_size equals to 3, but got size ",
output_size.size());
TORCH_CHECK(
input_size.size() == 5,
"It is expected input_size equals to 5, but got size ",
input_size.size());
int output_depth = output_size[0];
int output_height = output_size[1];
int output_width = output_size[2];
int nbatch = input_size[0];
int channels = input_size[1];
int input_depth = input_size[2];
int input_height = input_size[3];
int input_width = input_size[4];
upsample_3d_shape_check(
Tensor(),
grad_output_,
nbatch,
channels,
input_depth,
input_height,
input_width,
output_depth,
output_height,
output_width);
Tensor grad_output = grad_output_.contiguous();
grad_input.resize_(
{nbatch, channels, input_depth, input_height, input_width});
// upsample_3d_shape_check makes sure `nbatch != 0`
unsigned int n = grad_input.numel() / nbatch;
dim3 bdim{std::min<unsigned int>(
at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, MAX_THREADS)};
dim3 gdim{cuda::ATenCeilDiv(n, bdim.x)};
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "upsample_nearest3d_backward_out_frame", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
auto idata = grad_input.data_ptr<scalar_t>();
auto odata = grad_output.data_ptr<scalar_t>();
upsample_nearest3d_backward_out_frame<scalar_t, accscalar_t>
<<<gdim, bdim, 0, stream>>>(
odata,
nbatch,
channels,
output_depth,
output_height,
output_width,
input_depth,
input_height,
input_width,
idata);
});
AT_CUDA_CHECK(cudaGetLastError());
}
} // namespace
Tensor& upsample_nearest3d_out_cuda(
Tensor& output,
const Tensor& input,
IntArrayRef output_size) {
upsample_nearest3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor upsample_nearest3d_cuda(const Tensor& input, IntArrayRef output_size) {
Tensor output = at::empty_like(input);
upsample_nearest3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor& upsample_nearest3d_backward_out_cuda(
Tensor& grad_input,
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size) {
upsample_nearest3d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size);
return grad_input;
}
Tensor upsample_nearest3d_backward_cuda(
const Tensor& grad_output,
IntArrayRef output_size,
IntArrayRef input_size) {
Tensor grad_input = at::empty_like(grad_output);
upsample_nearest3d_backward_out_cuda_template(
grad_input, grad_output, output_size, input_size);
return grad_input;
}
} // namespace native
} // namespace at
|
10e9d1a0bbc07557c146fc23f802184c8f2df8a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include "grav_rfi_ompcuda.h"
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <math.h>
# include <omp.h>
# include <hip/hip_runtime.h>
# include <hip/hip_runtime.h>
# include <device_launch_parameters.h>
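// Host-side inputs: observed gravity data (Vz), observation grid coordinates
// (x_obs, y_obs) and prism corner coordinates (m1_x, m1_y, m1_z).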
struct Vz_struct
{
double* Vz, * x_obs, * y_obs;
double* m1_x, * m1_y, * m1_z;
};
struct input_struct
{
double* h_Vz, * h_x_obs, * h_y_obs, * h_m1_x, * h_m1_y, * h_m1_z;
double* d_Vz, * d_x_obs, * d_y_obs, * d_m1_x, * d_m1_y, * d_m1_z;
hipStream_t stream;
};
struct Vz_mat_mc_struct
{
double* d_Vz_mat_mc;
int nBlocks;
hipStream_t stream;
};
struct rfi_struct
{
int localNum, nBlocks, base, nBlocks_Am, nBlocks_Gm;
double* d_local_Wm, * d_local_Wv, * d_local_W, * d_local_g, * d_local_g0, * d_local_p, * d_local_p0, * d_local_q1, * d_local_q2;
double* d_local_m_temp, * d_local_m_real, * d_local_d_fit1, * d_local_d_fit2, * d_local_d_fit_temp1, * d_local_d_fit_temp2, * d_d_fit_temp;
double* h_local_g, * h_local_g0, * h_local_q, * h_local_m_real, * h_local_d_fit, * h_local_d_fit_temp;
hipStream_t stream;
};
void xy_cmp(int point_count, int lx, int ly, double* x, double* y, double* xmin, double* xmax, double* dx, double* ymin, double* ymax, double* dy);
double* rwt_foc_inv(int deviceCount, int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax,
double h_z_obs, double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma, struct Vz_struct VzX, int Max_GPU_Number, int nThreadPerBlock,double wn);
__global__ void Vz_mat_mc_sln(double* Vz_mat_mc, double* x_obs, double* y_obs, double* m1_x, double* m1_y, double* m1_z, double z_obs, int lx, int lz, int point_count, int prism_count);
__global__ void W_init_sln(double* Wm, double* Wv, double* W, double* m_temp, double* m_real, double* Vz_mat_mc, double sigma, int localNum, int base, int point_count, int lx, double wn);
__global__ void g0_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, int localNum, int base, int point_count, int lx);
__global__ void A_mult_v_col_sln(double* q, double* Vz_mat_mc, double* W, double* vector, int localNum, int base, int point_count, int lx, int nThreadPerBlock);
__global__ void A_mult_v_sum_sln(double* q2, double* q1, double* vector, double lambda, int localNum, int base, int point_count, int prism_count, int nBlocks);
__global__ void m_sln(double* m_temp, double* m_real, double* p, double* W, double alpha, double m_min, double m_max, int localNum);
__global__ void G_mult_m_col_sln(double* d_fit, double* Vz_mat_mc, double* m_temp, int localNum, int base, int point_count, int lx, int nThreadPerBlock);
__global__ void G_mult_m_sum_sln(double* d_fit2, double* d_fit1, int localNum, int point_count, int nBlocks);
__global__ void p_sln(double* p, double* g, double* p0, double beta, int localNum);
__global__ void update_sln(double* p0, double* g0, double* W, double* m_temp, double* p, double* g, double* m_real, double* Wm, double* Wv, int localNum);
__global__ void g_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, double* d_fit_temp, double lambda, int localNum, int base, int point_count, int lx);
double beta_sln(double* g, double* g0, int prism_count);
double vector_dot_product(double* a, double* b, int count);
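// Reports how many GPU devices are visible to the process.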
int CheckCount()
{
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
return deviceCount;
}
double* foo(int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax, int Max_GPU_Number, int nThreadPerBlock, double h_z_obs, double h_dz, double h_zmax,
double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma,double wn, double *zc, double *thick, double* Vz, double* x, double* y)
{
struct Vz_struct VzX;
double xmin, xmax, ymin, ymax, dx, dy;
double* h_m_result, * m1_x, * m1_y, * m1_z, * x_obs, * y_obs;
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
if (deviceCount > Max_GPU_Number)
{
deviceCount = Max_GPU_Number;
}
xy_cmp(h_point_count, h_lx, h_ly, x, y, &xmin, &xmax, &dx, &ymin, &ymax, &dy);
x_obs = (double*)malloc(h_lx * sizeof(double));
y_obs = (double*)malloc(h_ly * sizeof(double));
for (int xi = 0; xi < h_lx; xi++)
{
*(x_obs + xi) = x[xi];
}
for (int yi = 0; yi < h_ly; yi++)
{
*(y_obs + yi) = y[yi * h_lx];
}
m1_x = (double*)malloc(2 * sizeof(double)); *(m1_x + 0) = xmin - 0.5 * dx; *(m1_x + 1) = xmin + 0.5 * dx;
m1_y = (double*)malloc(2 * sizeof(double)); *(m1_y + 0) = ymin - 0.5 * dy; *(m1_y + 1) = ymin + 0.5 * dy;
m1_z = (double*)malloc(2 * h_lz * sizeof(double));
for (int zi = 0; zi < h_lz; zi++)
{
*(m1_z + zi) = zc[zi] - 0.5 * thick[zi];
*(m1_z + zi + h_lz) = zc[zi] + 0.5 * thick[zi];
}
VzX.Vz = Vz;
VzX.x_obs = x_obs;
VzX.y_obs = y_obs;
VzX.m1_x = m1_x;
VzX.m1_y = m1_y;
VzX.m1_z = m1_z;
h_m_result = rwt_foc_inv(deviceCount, h_point_count, h_prism_count, h_lx, h_ly, h_lz, h_kmax, h_z_obs, h_m_min, h_m_max, h_epsilon, h_lambda, h_sigma, VzX, Max_GPU_Number, nThreadPerBlock,wn);
hipDeviceReset();
return h_m_result;
}
void xy_cmp(int point_count, int lx, int ly, double* x, double* y, double* xmin, double* xmax, double* dx, double* ymin, double* ymax, double* dy)
{
*xmin = x[0]; *xmax = x[0];
*ymin = y[0]; *ymax = y[0];
for (int ni = 1; ni < point_count; ni++)
{
*xmin = *xmin < x[ni] ? *xmin : x[ni];
*xmax = *xmax > x[ni] ? *xmax : x[ni];
*ymin = *ymin < y[ni] ? *ymin : y[ni];
*ymax = *ymax > y[ni] ? *ymax : y[ni];
}
*dx = (*xmax - *xmin) / (lx - 1);
*dy = (*ymax - *ymin) / (ly - 1);
}
double* rwt_foc_inv(int deviceCount, int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax,
double h_z_obs, double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma, struct Vz_struct VzX, int Max_GPU_Number, int nThreadPerBlock,double wn)
{
/*struct input_struct i_struct[Max_GPU_Number_list];
struct Vz_mat_mc_struct v_struct[Max_GPU_Number_list];
struct rfi_struct r_struct[Max_GPU_Number_list];*/
struct input_struct *i_struct = new struct input_struct[Max_GPU_Number];
struct Vz_mat_mc_struct *v_struct = new struct Vz_mat_mc_struct[Max_GPU_Number];
struct rfi_struct *r_struct = new struct rfi_struct[Max_GPU_Number];
int k = 0;
double alpha, beta, rms, h_d_square, h_phi_m;
double* h_data_misfit = (double*)malloc(h_point_count * sizeof(double));
double* h_data_fitting = (double*)malloc(h_point_count * sizeof(double));
double* h_g = (double*)malloc(h_prism_count * sizeof(double));
double* h_g0 = (double*)malloc(h_prism_count * sizeof(double));
double* h_q = (double*)malloc((h_prism_count + h_point_count) * sizeof(double));
double* h_d_fit_temp = (double*)malloc((h_prism_count + h_point_count) * sizeof(double));
double* inv_result = (double*)malloc(h_prism_count * sizeof(double));
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipStreamCreate(&i_struct[i].stream);
hipMalloc((void**)&i_struct[i].d_Vz, h_point_count * sizeof(double));
hipMalloc((void**)&i_struct[i].d_x_obs, h_lx * sizeof(double));
hipMalloc((void**)&i_struct[i].d_y_obs, h_ly * sizeof(double));
hipMalloc((void**)&i_struct[i].d_m1_x, 2 * sizeof(double));
hipMalloc((void**)&i_struct[i].d_m1_y, 2 * sizeof(double));
hipMalloc((void**)&i_struct[i].d_m1_z, 2 * h_lz * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_Vz, h_point_count * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_x_obs, h_lx * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_y_obs, h_ly * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_m1_x, 2 * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_m1_y, 2 * sizeof(double));
hipHostMalloc((void**)&i_struct[i].h_m1_z, 2 * h_lz * sizeof(double));
memcpy(i_struct[i].h_Vz, VzX.Vz, h_point_count * sizeof(double));
memcpy(i_struct[i].h_x_obs, VzX.x_obs, h_lx * sizeof(double));
memcpy(i_struct[i].h_y_obs, VzX.y_obs, h_ly * sizeof(double));
memcpy(i_struct[i].h_m1_x, VzX.m1_x, 2 * sizeof(double));
memcpy(i_struct[i].h_m1_y, VzX.m1_y, 2 * sizeof(double));
memcpy(i_struct[i].h_m1_z, VzX.m1_z, 2 * h_lz * sizeof(double));
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpyAsync(i_struct[i].d_Vz, i_struct[i].h_Vz, h_point_count * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipMemcpyAsync(i_struct[i].d_x_obs, i_struct[i].h_x_obs, h_lx * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipMemcpyAsync(i_struct[i].d_y_obs, i_struct[i].h_y_obs, h_ly * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipMemcpyAsync(i_struct[i].d_m1_x, i_struct[i].h_m1_x, 2 * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipMemcpyAsync(i_struct[i].d_m1_y, i_struct[i].h_m1_y, 2 * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipMemcpyAsync(i_struct[i].d_m1_z, i_struct[i].h_m1_z, 2 * h_lz * sizeof(double), hipMemcpyHostToDevice, i_struct[i].stream);
hipStreamSynchronize(i_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipStreamDestroy(i_struct[i].stream);
hipStreamCreate(&v_struct[i].stream);
hipMalloc((void**)&v_struct[i].d_Vz_mat_mc, h_prism_count * sizeof(double));
v_struct[i].nBlocks = h_prism_count / nThreadPerBlock + ((h_prism_count % nThreadPerBlock) ? 1 : 0);
Vz_mat_mc_sln << <v_struct[i].nBlocks, nThreadPerBlock, 0, v_struct[i].stream >> > (v_struct[i].d_Vz_mat_mc, i_struct[i].d_x_obs, i_struct[i].d_y_obs,
i_struct[i].d_m1_x, i_struct[i].d_m1_y, i_struct[i].d_m1_z, h_z_obs, h_lx, h_lz, h_point_count, h_prism_count);
hipStreamSynchronize(v_struct[i].stream);
}
for (int i = 0; i < deviceCount; i++)
{
r_struct[i].localNum = h_prism_count / deviceCount;
}
for (int i = 0; i < h_prism_count % deviceCount; i++)
{
r_struct[i].localNum++;
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipStreamDestroy(v_struct[i].stream);
hipStreamCreate(&r_struct[i].stream);
r_struct[i].nBlocks = r_struct[i].localNum / nThreadPerBlock + ((r_struct[i].localNum % nThreadPerBlock) ? 1 : 0);
r_struct[i].nBlocks_Am = (h_prism_count + h_point_count) / nThreadPerBlock + (((h_prism_count + h_point_count) % nThreadPerBlock) ? 1 : 0);
r_struct[i].nBlocks_Gm = h_point_count / nThreadPerBlock + ((h_point_count % nThreadPerBlock) ? 1 : 0);
r_struct[i].base = 0;
for (int j = 0; j < i; j++)
{
r_struct[i].base += r_struct[j].localNum;
}
hipMalloc((void**)&r_struct[i].d_local_Wm, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_Wv, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_W, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_g0, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_p, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_p0, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_q1, r_struct[i].nBlocks * h_point_count * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_q2, (h_prism_count + h_point_count) * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_m_temp, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_d_fit1, r_struct[i].nBlocks * h_point_count * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_d_fit2, h_point_count * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_d_fit_temp1, r_struct[i].nBlocks * h_point_count * sizeof(double));
hipMalloc((void**)&r_struct[i].d_local_d_fit_temp2, (h_prism_count + h_point_count) * sizeof(double));
hipMalloc((void**)&r_struct[i].d_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_g, r_struct[i].localNum * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_g0, r_struct[i].localNum * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_q, (h_prism_count + h_point_count) * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_m_real, r_struct[i].localNum * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_d_fit, h_point_count * sizeof(double));
hipHostMalloc((void**)&r_struct[i].h_local_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double));
W_init_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_Wm, r_struct[i].d_local_Wv, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].d_local_m_real, v_struct[i].d_Vz_mat_mc, h_sigma, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx,wn);
g0_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_g, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
i_struct[i].d_Vz, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx);
hipMemcpyAsync(r_struct[i].h_local_g, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_g[j + r_struct[i].base] = r_struct[i].h_local_g[j];
}
}
while (k < h_kmax)
{
k++;
if (k == 1)
{
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpy(r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToDevice);
hipStreamSynchronize(r_struct[i].stream);
}
}
else
{
if (k == 2)
{
h_phi_m = 0;
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpyAsync(r_struct[i].h_local_m_real, r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_phi_m += (r_struct[i].h_local_m_real[j] * r_struct[i].h_local_m_real[j]) / (r_struct[i].h_local_m_real[j] * r_struct[i].h_local_m_real[j] + h_sigma * h_sigma);
}
}
h_lambda = h_d_square / h_phi_m;
}
else
{
h_lambda = h_lambda / 2;
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
update_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_p0, r_struct[i].d_local_g0, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].d_local_m_real, r_struct[i].d_local_Wm, r_struct[i].d_local_Wv, r_struct[i].localNum);
A_mult_v_col_sln << <r_struct[i].nBlocks, nThreadPerBlock, nThreadPerBlock * sizeof(double), r_struct[i].stream>> > (r_struct[i].d_local_d_fit_temp1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
A_mult_v_sum_sln << <r_struct[i].nBlocks_Am, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_d_fit_temp2, r_struct[i].d_local_d_fit_temp1, r_struct[i].d_local_m_temp,
h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_prism_count, r_struct[i].nBlocks);
hipMemcpyAsync(r_struct[i].h_local_d_fit_temp, r_struct[i].d_local_d_fit_temp2, (h_prism_count + h_point_count) * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
memset(h_d_fit_temp, 0, (h_prism_count + h_point_count) * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < (h_prism_count + h_point_count); j++)
{
h_d_fit_temp[j] += r_struct[i].h_local_d_fit_temp[j];
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpyAsync(r_struct[i].d_d_fit_temp, h_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double), hipMemcpyHostToDevice, r_struct[i].stream);
g_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_g, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
i_struct[i].d_Vz, r_struct[i].d_d_fit_temp, h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx);
hipMemcpyAsync(r_struct[i].h_local_g, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipMemcpyAsync(r_struct[i].h_local_g0, r_struct[i].d_local_g0, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_g[j + r_struct[i].base] = r_struct[i].h_local_g[j];
h_g0[j + r_struct[i].base] = r_struct[i].h_local_g0[j];
}
}
beta = beta_sln(h_g, h_g0, h_prism_count);
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
p_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].d_local_p0, beta, r_struct[i].localNum);
hipStreamSynchronize(r_struct[i].stream);
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
A_mult_v_col_sln << <r_struct[i].nBlocks, nThreadPerBlock,nThreadPerBlock * sizeof(double), r_struct[i].stream >> > (r_struct[i].d_local_q1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W, r_struct[i].d_local_p, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
A_mult_v_sum_sln << <r_struct[i].nBlocks_Am, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_q2, r_struct[i].d_local_q1, r_struct[i].d_local_p, h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_prism_count, r_struct[i].nBlocks);
hipMemcpyAsync(r_struct[i].h_local_q, r_struct[i].d_local_q2, (h_prism_count + h_point_count) * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
memset(h_q, 0, (h_prism_count + h_point_count) * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < (h_prism_count + h_point_count); j++)
{
h_q[j] += r_struct[i].h_local_q[j];
}
}
alpha = vector_dot_product(h_g, h_g, h_prism_count) / vector_dot_product(h_q, h_q, (h_prism_count + h_point_count));
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
m_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_m_temp, r_struct[i].d_local_m_real,
r_struct[i].d_local_p, r_struct[i].d_local_W, alpha, h_m_min, h_m_max, r_struct[i].localNum);
G_mult_m_col_sln << <r_struct[i].nBlocks, nThreadPerBlock, nThreadPerBlock * sizeof(double), r_struct[i].stream >> > (r_struct[i].d_local_d_fit1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_m_real, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
G_mult_m_sum_sln << <r_struct[i].nBlocks_Gm, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_d_fit2, r_struct[i].d_local_d_fit1, r_struct[i].localNum, h_point_count, r_struct[i].nBlocks);
hipMemcpyAsync(r_struct[i].h_local_d_fit, r_struct[i].d_local_d_fit2, h_point_count * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
}
memset(h_data_fitting, 0, h_point_count * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < h_point_count; j++)
{
h_data_fitting[j] += r_struct[i].h_local_d_fit[j];
}
}
for (int j = 0; j < h_point_count; j++)
{
h_data_misfit[j] = VzX.Vz[j] - h_data_fitting[j];
}
h_d_square = vector_dot_product(h_data_misfit, h_data_misfit, h_point_count);
rms = sqrt(h_d_square / h_point_count);
if (rms <= h_epsilon)
{
break;
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
hipSetDevice(i);
hipMemcpyAsync(r_struct[i].h_local_m_real, r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double), hipMemcpyDeviceToHost, r_struct[i].stream);
hipStreamSynchronize(r_struct[i].stream);
for (int j = 0; j < r_struct[i].localNum; j++)
{
inv_result[j + r_struct[i].base] = r_struct[i].h_local_m_real[j];
}
hipHostFree(i_struct[i].h_Vz);
hipHostFree(i_struct[i].h_x_obs);
hipHostFree(i_struct[i].h_y_obs);
hipHostFree(i_struct[i].h_m1_x);
hipHostFree(i_struct[i].h_m1_y);
hipHostFree(i_struct[i].h_m1_z);
hipHostFree(r_struct[i].h_local_g);
hipHostFree(r_struct[i].h_local_g0);
hipHostFree(r_struct[i].h_local_q);
hipHostFree(r_struct[i].h_local_m_real);
hipHostFree(r_struct[i].h_local_d_fit);
hipHostFree(r_struct[i].h_local_d_fit_temp);
hipFree(i_struct[i].d_Vz);
hipFree(i_struct[i].d_x_obs);
hipFree(i_struct[i].d_y_obs);
hipFree(i_struct[i].d_m1_x);
hipFree(i_struct[i].d_m1_y);
hipFree(i_struct[i].d_m1_z);
hipFree(v_struct[i].d_Vz_mat_mc);
hipFree(r_struct[i].d_local_Wm);
hipFree(r_struct[i].d_local_Wv);
hipFree(r_struct[i].d_local_W);
hipFree(r_struct[i].d_local_g);
hipFree(r_struct[i].d_local_g0);
hipFree(r_struct[i].d_local_p);
hipFree(r_struct[i].d_local_p0);
hipFree(r_struct[i].d_local_q1);
hipFree(r_struct[i].d_local_q2);
hipFree(r_struct[i].d_local_m_temp);
hipFree(r_struct[i].d_local_m_real);
hipFree(r_struct[i].d_local_d_fit1);
hipFree(r_struct[i].d_local_d_fit2);
hipFree(r_struct[i].d_local_d_fit_temp1);
hipFree(r_struct[i].d_local_d_fit_temp2);
hipFree(r_struct[i].d_d_fit_temp);
hipStreamDestroy(r_struct[i].stream);
}
return inv_result;
}
__global__ void Vz_mat_mc_sln(double* Vz_mat_mc, double* x_obs, double* y_obs, double* m1_x, double* m1_y, double* m1_z, double z_obs, int lx, int lz, int point_count, int prism_count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int obs_x, obs_y, prm_z;
double r0, r1, r2, r3, r4, r5, r6, r7;
double xt1, xt2, yt1, yt2, zt1, zt2;
double d_G = 66.7;
if (i < prism_count)
{
obs_x = (i % point_count) % lx;
obs_y = (i % point_count) / lx;
prm_z = i / point_count;
xt1 = x_obs[obs_x] - m1_x[0]; xt2 = x_obs[obs_x] - m1_x[1];
yt1 = y_obs[obs_y] - m1_y[0]; yt2 = y_obs[obs_y] - m1_y[1];
zt1 = z_obs - m1_z[prm_z]; zt2 = z_obs - m1_z[prm_z + lz];
r0 = sqrt(pow(xt1, 2) + pow(yt1, 2) + pow(zt1, 2));
r1 = sqrt(pow(xt1, 2) + pow(yt1, 2) + pow(zt2, 2));
r2 = sqrt(pow(xt1, 2) + pow(yt2, 2) + pow(zt1, 2));
r3 = sqrt(pow(xt1, 2) + pow(yt2, 2) + pow(zt2, 2));
r4 = sqrt(pow(xt2, 2) + pow(yt1, 2) + pow(zt1, 2));
r5 = sqrt(pow(xt2, 2) + pow(yt1, 2) + pow(zt2, 2));
r6 = sqrt(pow(xt2, 2) + pow(yt2, 2) + pow(zt1, 2));
r7 = sqrt(pow(xt2, 2) + pow(yt2, 2) + pow(zt2, 2));
Vz_mat_mc[i] += d_G * (-atan(xt1 * yt1 / zt1 / r0) + atan(xt1 * yt1 / zt2 / r1) + atan(xt1 * yt2 / zt1 / r2) - atan(xt1 * yt2 / zt2 / r3)
+ atan(xt2 * yt1 / zt1 / r4) - atan(xt2 * yt1 / zt2 / r5) - atan(xt2 * yt2 / zt1 / r6) + atan(xt2 * yt2 / zt2 / r7));
}
}
__global__ void W_init_sln(double* Wm, double* Wv, double* W, double* m_temp, double* m_real, double* Vz_mat_mc, double sigma, int localNum, int base, int point_count, int lx,double wn)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
Wm[i] = 0; m_temp[i] = 0; m_real[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
Wm[i] += pow(Vz_mat_mc[index], 2);
}
/*Wm[i] = 1 / sqrt(sqrt(Wm[i]));
Wv[i] = pow(sigma, 2) * Wm[i];*/
Wm[i] = 1 / pow(Wm[i], wn);
Wv[i] = pow(sigma, 2) * Wm[i];
W[i] = sqrt(Wv[i]);
}
}
__global__ void g0_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, int localNum, int base, int point_count, int lx)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
g[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
g[i] += Vz_mat_mc[index] * W[i] * Vz[j];
}
}
}
__global__ void A_mult_v_col_sln(double* q, double* Vz_mat_mc, double* W, double* vector, int localNum, int base, int point_count, int lx, int nThreadPerBlock)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
double temp;
extern __shared__ double V_temp_shared[];
V_temp_shared[threadIdx.x]=0;
if (i < localNum)
{
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
V_temp_shared[threadIdx.x] = Vz_mat_mc[index] * W[i] * vector[i];
__syncthreads();
temp = 0;
if (threadIdx.x == 0)
{
for (int k = 0; k < nThreadPerBlock; k++)
{
temp += V_temp_shared[k];
}
q[blockIdx.x * point_count + j] = temp;
}
__syncthreads();
}
}
}
__global__ void A_mult_v_sum_sln(double* q2, double* q1, double* vector, double lambda, int localNum, int base, int point_count, int prism_count, int nBlocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < (prism_count + point_count))
{
q2[i] = 0;
if (i < point_count)
{
for (int j = 0; j < nBlocks; j++)
{
q2[i] += q1[j * point_count + i];
}
}
else if (i >= (point_count + base) && i < (point_count + base + localNum))
{
q2[i] = sqrt(lambda) * vector[i - point_count - base];
}
}
}
__global__ void m_sln(double* m_temp, double* m_real, double* p, double* W, double alpha, double m_min, double m_max, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
m_temp[i] += alpha * p[i];
m_real[i] = W[i] * m_temp[i];
if (m_real[i] < m_min)
{
m_real[i] = m_min;
}
else if (m_real[i] > m_max)
{
m_real[i] = m_max;
}
}
}
__global__ void G_mult_m_col_sln(double* d_fit, double* Vz_mat_mc, double* m_temp, int localNum, int base, int point_count, int lx, int nThreadPerBlock)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
double temp;
extern __shared__ double V_temp_shared[];
V_temp_shared[threadIdx.x]=0;
if (i < localNum)
{
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
V_temp_shared[threadIdx.x] = Vz_mat_mc[index] * m_temp[i];
__syncthreads();
temp = 0;
if (threadIdx.x == 0)
{
for (int k = 0; k < nThreadPerBlock; k++)
{
temp += V_temp_shared[k];
}
d_fit[blockIdx.x * point_count + j] = temp;
}
__syncthreads();
}
}
}
__global__ void G_mult_m_sum_sln(double* d_fit2, double* d_fit1, int localNum, int point_count, int nBlocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < point_count)
{
d_fit2[i] = 0;
for (int j = 0; j < nBlocks; j++)
{
d_fit2[i] += d_fit1[j * point_count + i];
}
}
}
__global__ void p_sln(double* p, double* g, double* p0, double beta, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
p[i] = g[i] + beta * p0[i];
}
}
__global__ void update_sln(double* p0, double* g0, double* W, double* m_temp, double* p, double* g, double* m_real, double* Wm, double* Wv, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
p0[i] = p[i];
g0[i] = g[i];
W[i] = sqrt(pow(m_real[i], 2) * Wm[i] + Wv[i]);
m_temp[i] = m_real[i] / W[i];
}
}
__global__ void g_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, double* d_fit_temp, double lambda, int localNum, int base, int point_count, int lx)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
g[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
g[i] += Vz_mat_mc[index] * W[i] * (Vz[j] - d_fit_temp[j]);
}
g[i] += sqrt(lambda) * (0 - d_fit_temp[i_base + point_count]);
}
}
double beta_sln(double* g, double* g0, int prism_count)
{
double beta;
double beta_temp = 0;
for (int i = 0; i < prism_count; i++)
{
beta_temp += g[i] * (g[i] - g0[i]);
}
beta = beta_temp / vector_dot_product(g0, g0, prism_count);
return beta;
}
double vector_dot_product(double* a, double* b, int count)
{
double result = 0;
for (int vi = 0; vi < count; vi++)
{
result += a[vi] * b[vi];
}
return result;
}
| 10e9d1a0bbc07557c146fc23f802184c8f2df8a0.cu | # include "grav_rfi_ompcuda.h"
# include <stdlib.h>
# include <stdio.h>
# include <string.h>
# include <math.h>
# include <omp.h>
# include <cuda.h>
# include <cuda_runtime.h>
# include <device_launch_parameters.h>
struct Vz_struct
{
double* Vz, * x_obs, * y_obs;
double* m1_x, * m1_y, * m1_z;
};
struct input_struct
{
double* h_Vz, * h_x_obs, * h_y_obs, * h_m1_x, * h_m1_y, * h_m1_z;
double* d_Vz, * d_x_obs, * d_y_obs, * d_m1_x, * d_m1_y, * d_m1_z;
cudaStream_t stream;
};
struct Vz_mat_mc_struct
{
double* d_Vz_mat_mc;
int nBlocks;
cudaStream_t stream;
};
struct rfi_struct
{
int localNum, nBlocks, base, nBlocks_Am, nBlocks_Gm;
double* d_local_Wm, * d_local_Wv, * d_local_W, * d_local_g, * d_local_g0, * d_local_p, * d_local_p0, * d_local_q1, * d_local_q2;
double* d_local_m_temp, * d_local_m_real, * d_local_d_fit1, * d_local_d_fit2, * d_local_d_fit_temp1, * d_local_d_fit_temp2, * d_d_fit_temp;
double* h_local_g, * h_local_g0, * h_local_q, * h_local_m_real, * h_local_d_fit, * h_local_d_fit_temp;
cudaStream_t stream;
};
void xy_cmp(int point_count, int lx, int ly, double* x, double* y, double* xmin, double* xmax, double* dx, double* ymin, double* ymax, double* dy);
double* rwt_foc_inv(int deviceCount, int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax,
double h_z_obs, double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma, struct Vz_struct VzX, int Max_GPU_Number, int nThreadPerBlock,double wn);
__global__ void Vz_mat_mc_sln(double* Vz_mat_mc, double* x_obs, double* y_obs, double* m1_x, double* m1_y, double* m1_z, double z_obs, int lx, int lz, int point_count, int prism_count);
__global__ void W_init_sln(double* Wm, double* Wv, double* W, double* m_temp, double* m_real, double* Vz_mat_mc, double sigma, int localNum, int base, int point_count, int lx, double wn);
__global__ void g0_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, int localNum, int base, int point_count, int lx);
__global__ void A_mult_v_col_sln(double* q, double* Vz_mat_mc, double* W, double* vector, int localNum, int base, int point_count, int lx, int nThreadPerBlock);
__global__ void A_mult_v_sum_sln(double* q2, double* q1, double* vector, double lambda, int localNum, int base, int point_count, int prism_count, int nBlocks);
__global__ void m_sln(double* m_temp, double* m_real, double* p, double* W, double alpha, double m_min, double m_max, int localNum);
__global__ void G_mult_m_col_sln(double* d_fit, double* Vz_mat_mc, double* m_temp, int localNum, int base, int point_count, int lx, int nThreadPerBlock);
__global__ void G_mult_m_sum_sln(double* d_fit2, double* d_fit1, int localNum, int point_count, int nBlocks);
__global__ void p_sln(double* p, double* g, double* p0, double beta, int localNum);
__global__ void update_sln(double* p0, double* g0, double* W, double* m_temp, double* p, double* g, double* m_real, double* Wm, double* Wv, int localNum);
__global__ void g_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, double* d_fit_temp, double lambda, int localNum, int base, int point_count, int lx);
double beta_sln(double* g, double* g0, int prism_count);
double vector_dot_product(double* a, double* b, int count);
int CheckCount()
{
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
return deviceCount;
}
double* foo(int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax, int Max_GPU_Number, int nThreadPerBlock, double h_z_obs, double h_dz, double h_zmax,
double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma,double wn, double *zc, double *thick, double* Vz, double* x, double* y)
{
struct Vz_struct VzX;
double xmin, xmax, ymin, ymax, dx, dy;
double* h_m_result, * m1_x, * m1_y, * m1_z, * x_obs, * y_obs;
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
if (deviceCount > Max_GPU_Number)
{
deviceCount = Max_GPU_Number;
}
xy_cmp(h_point_count, h_lx, h_ly, x, y, &xmin, &xmax, &dx, &ymin, &ymax, &dy);
x_obs = (double*)malloc(h_lx * sizeof(double));
y_obs = (double*)malloc(h_ly * sizeof(double));
for (int xi = 0; xi < h_lx; xi++)
{
*(x_obs + xi) = x[xi];
}
for (int yi = 0; yi < h_ly; yi++)
{
*(y_obs + yi) = y[yi * h_lx];
}
m1_x = (double*)malloc(2 * sizeof(double)); *(m1_x + 0) = xmin - 0.5 * dx; *(m1_x + 1) = xmin + 0.5 * dx;
m1_y = (double*)malloc(2 * sizeof(double)); *(m1_y + 0) = ymin - 0.5 * dy; *(m1_y + 1) = ymin + 0.5 * dy;
m1_z = (double*)malloc(2 * h_lz * sizeof(double));
for (int zi = 0; zi < h_lz; zi++)
{
*(m1_z + zi) = zc[zi] - 0.5 * thick[zi];
*(m1_z + zi + h_lz) = zc[zi] + 0.5 * thick[zi];
}
VzX.Vz = Vz;
VzX.x_obs = x_obs;
VzX.y_obs = y_obs;
VzX.m1_x = m1_x;
VzX.m1_y = m1_y;
VzX.m1_z = m1_z;
h_m_result = rwt_foc_inv(deviceCount, h_point_count, h_prism_count, h_lx, h_ly, h_lz, h_kmax, h_z_obs, h_m_min, h_m_max, h_epsilon, h_lambda, h_sigma, VzX, Max_GPU_Number, nThreadPerBlock,wn);
cudaDeviceReset();
return h_m_result;
}
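/*
   Usage sketch: a minimal, hypothetical driver for foo(). Every size and value
   below is a made-up placeholder; the only relationships assumed are the ones
   visible in foo() itself: point_count = lx * ly, prism_count = point_count * lz,
   x / y / Vz hold one value per observation point, zc / thick one value per layer.
*/
static double* example_run_inversion_sketch(void)
{
	int lx = 32, ly = 32, lz = 8;                     /* hypothetical grid */
	int point_count = lx * ly;
	int prism_count = point_count * lz;
	double* x = (double*)malloc(point_count * sizeof(double));
	double* y = (double*)malloc(point_count * sizeof(double));
	double* Vz = (double*)malloc(point_count * sizeof(double));
	double* zc = (double*)malloc(lz * sizeof(double));
	double* thick = (double*)malloc(lz * sizeof(double));
	for (int j = 0; j < point_count; j++)
	{
		x[j] = (j % lx) * 100.0;                      /* placeholder spacing; x varies fastest */
		y[j] = (j / lx) * 100.0;
		Vz[j] = 0.0;                                  /* observed data goes here */
	}
	for (int zi = 0; zi < lz; zi++)
	{
		zc[zi] = 50.0 + 100.0 * zi;                   /* placeholder layer centres */
		thick[zi] = 100.0;                            /* placeholder layer thickness */
	}
	/* kmax, GPU limit, threads per block, z_obs, dz, zmax, m_min, m_max,
	   epsilon, lambda, sigma, wn -- all placeholder values */
	double* m = foo(point_count, prism_count, lx, ly, lz, 100, 2, 256,
		0.0, 100.0, 800.0, -1.0, 1.0, 0.05, 1.0, 0.01, 0.25,
		zc, thick, Vz, x, y);
	free(x); free(y); free(Vz); free(zc); free(thick);
	return m;                                         /* caller owns the recovered model */
}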
void xy_cmp(int point_count, int lx, int ly, double* x, double* y, double* xmin, double* xmax, double* dx, double* ymin, double* ymax, double* dy)
{
*xmin = x[0]; *xmax = x[0];
*ymin = y[0]; *ymax = y[0];
for (int ni = 1; ni < point_count; ni++)
{
*xmin = *xmin < x[ni] ? *xmin : x[ni];
*xmax = *xmax > x[ni] ? *xmax : x[ni];
*ymin = *ymin < y[ni] ? *ymin : y[ni];
*ymax = *ymax > y[ni] ? *ymax : y[ni];
}
*dx = (*xmax - *xmin) / (lx - 1);
*dy = (*ymax - *ymin) / (ly - 1);
}
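/*
   rwt_foc_inv: multi-GPU inversion driver (the name suggests re-weighted focusing
   inversion); one OpenMP host thread drives each device, which owns a contiguous
   slice of the model (localNum cells starting at 'base'). After building the
   compressed sensitivity table and the initial weights/gradient, each iteration k:
     - k == 1: takes the search direction p = g;
     - k >= 2: re-estimates lambda (from the data misfit at k == 2, halved after),
       refreshes W and m_temp (update_sln), recomputes g (g_sln), the
       Polak-Ribiere coefficient beta (beta_sln) and p = g + beta * p0 (p_sln);
     - then forms q = A * p (A_mult_v_*_sln), the step alpha = (g.g) / (q.q),
       the clipped model update (m_sln), the forward response d = G * m
       (G_mult_m_*_sln), and stops early once the data RMS drops below h_epsilon.
   Per-block and per-device partial results are summed on the host.
*/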
double* rwt_foc_inv(int deviceCount, int h_point_count, int h_prism_count, int h_lx, int h_ly, int h_lz, int h_kmax,
double h_z_obs, double h_m_min, double h_m_max, double h_epsilon, double h_lambda, double h_sigma, struct Vz_struct VzX, int Max_GPU_Number, int nThreadPerBlock,double wn)
{
/*struct input_struct i_struct[Max_GPU_Number_list];
struct Vz_mat_mc_struct v_struct[Max_GPU_Number_list];
struct rfi_struct r_struct[Max_GPU_Number_list];*/
struct input_struct *i_struct = new struct input_struct[Max_GPU_Number];
struct Vz_mat_mc_struct *v_struct = new struct Vz_mat_mc_struct[Max_GPU_Number];
struct rfi_struct *r_struct = new struct rfi_struct[Max_GPU_Number];
int k = 0;
double alpha, beta, rms, h_d_square, h_phi_m;
double* h_data_misfit = (double*)malloc(h_point_count * sizeof(double));
double* h_data_fitting = (double*)malloc(h_point_count * sizeof(double));
double* h_g = (double*)malloc(h_prism_count * sizeof(double));
double* h_g0 = (double*)malloc(h_prism_count * sizeof(double));
double* h_q = (double*)malloc((h_prism_count + h_point_count) * sizeof(double));
double* h_d_fit_temp = (double*)malloc((h_prism_count + h_point_count) * sizeof(double));
double* inv_result = (double*)malloc(h_prism_count * sizeof(double));
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaStreamCreate(&i_struct[i].stream);
cudaMalloc((void**)&i_struct[i].d_Vz, h_point_count * sizeof(double));
cudaMalloc((void**)&i_struct[i].d_x_obs, h_lx * sizeof(double));
cudaMalloc((void**)&i_struct[i].d_y_obs, h_ly * sizeof(double));
cudaMalloc((void**)&i_struct[i].d_m1_x, 2 * sizeof(double));
cudaMalloc((void**)&i_struct[i].d_m1_y, 2 * sizeof(double));
cudaMalloc((void**)&i_struct[i].d_m1_z, 2 * h_lz * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_Vz, h_point_count * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_x_obs, h_lx * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_y_obs, h_ly * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_m1_x, 2 * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_m1_y, 2 * sizeof(double));
cudaMallocHost((void**)&i_struct[i].h_m1_z, 2 * h_lz * sizeof(double));
memcpy(i_struct[i].h_Vz, VzX.Vz, h_point_count * sizeof(double));
memcpy(i_struct[i].h_x_obs, VzX.x_obs, h_lx * sizeof(double));
memcpy(i_struct[i].h_y_obs, VzX.y_obs, h_ly * sizeof(double));
memcpy(i_struct[i].h_m1_x, VzX.m1_x, 2 * sizeof(double));
memcpy(i_struct[i].h_m1_y, VzX.m1_y, 2 * sizeof(double));
memcpy(i_struct[i].h_m1_z, VzX.m1_z, 2 * h_lz * sizeof(double));
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpyAsync(i_struct[i].d_Vz, i_struct[i].h_Vz, h_point_count * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaMemcpyAsync(i_struct[i].d_x_obs, i_struct[i].h_x_obs, h_lx * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaMemcpyAsync(i_struct[i].d_y_obs, i_struct[i].h_y_obs, h_ly * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaMemcpyAsync(i_struct[i].d_m1_x, i_struct[i].h_m1_x, 2 * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaMemcpyAsync(i_struct[i].d_m1_y, i_struct[i].h_m1_y, 2 * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaMemcpyAsync(i_struct[i].d_m1_z, i_struct[i].h_m1_z, 2 * h_lz * sizeof(double), cudaMemcpyHostToDevice, i_struct[i].stream);
cudaStreamSynchronize(i_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaStreamDestroy(i_struct[i].stream);
cudaStreamCreate(&v_struct[i].stream);
cudaMalloc((void**)&v_struct[i].d_Vz_mat_mc, h_prism_count * sizeof(double));
v_struct[i].nBlocks = h_prism_count / nThreadPerBlock + ((h_prism_count % nThreadPerBlock) ? 1 : 0);
Vz_mat_mc_sln << <v_struct[i].nBlocks, nThreadPerBlock, 0, v_struct[i].stream >> > (v_struct[i].d_Vz_mat_mc, i_struct[i].d_x_obs, i_struct[i].d_y_obs,
i_struct[i].d_m1_x, i_struct[i].d_m1_y, i_struct[i].d_m1_z, h_z_obs, h_lx, h_lz, h_point_count, h_prism_count);
cudaStreamSynchronize(v_struct[i].stream);
}
for (int i = 0; i < deviceCount; i++)
{
r_struct[i].localNum = h_prism_count / deviceCount;
}
for (int i = 0; i < h_prism_count % deviceCount; i++)
{
r_struct[i].localNum++;
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaStreamDestroy(v_struct[i].stream);
cudaStreamCreate(&r_struct[i].stream);
r_struct[i].nBlocks = r_struct[i].localNum / nThreadPerBlock + ((r_struct[i].localNum % nThreadPerBlock) ? 1 : 0);
r_struct[i].nBlocks_Am = (h_prism_count + h_point_count) / nThreadPerBlock + (((h_prism_count + h_point_count) % nThreadPerBlock) ? 1 : 0);
r_struct[i].nBlocks_Gm = h_point_count / nThreadPerBlock + ((h_point_count % nThreadPerBlock) ? 1 : 0);
r_struct[i].base = 0;
for (int j = 0; j < i; j++)
{
r_struct[i].base += r_struct[j].localNum;
}
cudaMalloc((void**)&r_struct[i].d_local_Wm, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_Wv, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_W, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_g0, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_p, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_p0, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_q1, r_struct[i].nBlocks * h_point_count * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_q2, (h_prism_count + h_point_count) * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_m_temp, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_d_fit1, r_struct[i].nBlocks * h_point_count * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_d_fit2, h_point_count * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_d_fit_temp1, r_struct[i].nBlocks * h_point_count * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_local_d_fit_temp2, (h_prism_count + h_point_count) * sizeof(double));
cudaMalloc((void**)&r_struct[i].d_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_g, r_struct[i].localNum * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_g0, r_struct[i].localNum * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_q, (h_prism_count + h_point_count) * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_m_real, r_struct[i].localNum * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_d_fit, h_point_count * sizeof(double));
cudaMallocHost((void**)&r_struct[i].h_local_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double));
W_init_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_Wm, r_struct[i].d_local_Wv, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].d_local_m_real, v_struct[i].d_Vz_mat_mc, h_sigma, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx,wn);
g0_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_g, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
i_struct[i].d_Vz, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx);
cudaMemcpyAsync(r_struct[i].h_local_g, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_g[j + r_struct[i].base] = r_struct[i].h_local_g[j];
}
}
while (k < h_kmax)
{
k++;
if (k == 1)
{
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpy(r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToDevice);
cudaStreamSynchronize(r_struct[i].stream);
}
}
else
{
if (k == 2)
{
h_phi_m = 0;
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpyAsync(r_struct[i].h_local_m_real, r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_phi_m += (r_struct[i].h_local_m_real[j] * r_struct[i].h_local_m_real[j]) / (r_struct[i].h_local_m_real[j] * r_struct[i].h_local_m_real[j] + h_sigma * h_sigma);
}
}
h_lambda = h_d_square / h_phi_m;
}
else
{
h_lambda = h_lambda / 2;
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
update_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_p0, r_struct[i].d_local_g0, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].d_local_m_real, r_struct[i].d_local_Wm, r_struct[i].d_local_Wv, r_struct[i].localNum);
A_mult_v_col_sln << <r_struct[i].nBlocks, nThreadPerBlock, nThreadPerBlock * sizeof(double), r_struct[i].stream>> > (r_struct[i].d_local_d_fit_temp1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
r_struct[i].d_local_m_temp, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
A_mult_v_sum_sln << <r_struct[i].nBlocks_Am, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_d_fit_temp2, r_struct[i].d_local_d_fit_temp1, r_struct[i].d_local_m_temp,
h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_prism_count, r_struct[i].nBlocks);
cudaMemcpyAsync(r_struct[i].h_local_d_fit_temp, r_struct[i].d_local_d_fit_temp2, (h_prism_count + h_point_count) * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
memset(h_d_fit_temp, 0, (h_prism_count + h_point_count) * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < (h_prism_count + h_point_count); j++)
{
h_d_fit_temp[j] += r_struct[i].h_local_d_fit_temp[j];
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpyAsync(r_struct[i].d_d_fit_temp, h_d_fit_temp, (h_prism_count + h_point_count) * sizeof(double), cudaMemcpyHostToDevice, r_struct[i].stream);
g_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_g, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W,
i_struct[i].d_Vz, r_struct[i].d_d_fit_temp, h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx);
cudaMemcpyAsync(r_struct[i].h_local_g, r_struct[i].d_local_g, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaMemcpyAsync(r_struct[i].h_local_g0, r_struct[i].d_local_g0, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
for (int j = 0; j < r_struct[i].localNum; j++)
{
h_g[j + r_struct[i].base] = r_struct[i].h_local_g[j];
h_g0[j + r_struct[i].base] = r_struct[i].h_local_g0[j];
}
}
beta = beta_sln(h_g, h_g0, h_prism_count);
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
p_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_p, r_struct[i].d_local_g, r_struct[i].d_local_p0, beta, r_struct[i].localNum);
cudaStreamSynchronize(r_struct[i].stream);
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
A_mult_v_col_sln << <r_struct[i].nBlocks, nThreadPerBlock,nThreadPerBlock * sizeof(double), r_struct[i].stream >> > (r_struct[i].d_local_q1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_W, r_struct[i].d_local_p, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
A_mult_v_sum_sln << <r_struct[i].nBlocks_Am, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_q2, r_struct[i].d_local_q1, r_struct[i].d_local_p, h_lambda, r_struct[i].localNum, r_struct[i].base, h_point_count, h_prism_count, r_struct[i].nBlocks);
cudaMemcpyAsync(r_struct[i].h_local_q, r_struct[i].d_local_q2, (h_prism_count + h_point_count) * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
memset(h_q, 0, (h_prism_count + h_point_count) * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < (h_prism_count + h_point_count); j++)
{
h_q[j] += r_struct[i].h_local_q[j];
}
}
alpha = vector_dot_product(h_g, h_g, h_prism_count) / vector_dot_product(h_q, h_q, (h_prism_count + h_point_count));
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
m_sln << <r_struct[i].nBlocks, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_m_temp, r_struct[i].d_local_m_real,
r_struct[i].d_local_p, r_struct[i].d_local_W, alpha, h_m_min, h_m_max, r_struct[i].localNum);
G_mult_m_col_sln << <r_struct[i].nBlocks, nThreadPerBlock, nThreadPerBlock * sizeof(double), r_struct[i].stream >> > (r_struct[i].d_local_d_fit1, v_struct[i].d_Vz_mat_mc, r_struct[i].d_local_m_real, r_struct[i].localNum, r_struct[i].base, h_point_count, h_lx, nThreadPerBlock);
G_mult_m_sum_sln << <r_struct[i].nBlocks_Gm, nThreadPerBlock, 0, r_struct[i].stream >> > (r_struct[i].d_local_d_fit2, r_struct[i].d_local_d_fit1, r_struct[i].localNum, h_point_count, r_struct[i].nBlocks);
cudaMemcpyAsync(r_struct[i].h_local_d_fit, r_struct[i].d_local_d_fit2, h_point_count * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
}
memset(h_data_fitting, 0, h_point_count * sizeof(double));
for (int i = 0; i < deviceCount; i++)
{
for (int j = 0; j < h_point_count; j++)
{
h_data_fitting[j] += r_struct[i].h_local_d_fit[j];
}
}
for (int j = 0; j < h_point_count; j++)
{
h_data_misfit[j] = VzX.Vz[j] - h_data_fitting[j];
}
h_d_square = vector_dot_product(h_data_misfit, h_data_misfit, h_point_count);
rms = sqrt(h_d_square / h_point_count);
if (rms <= h_epsilon)
{
break;
}
}
#pragma omp parallel num_threads(deviceCount)
{
int i = omp_get_thread_num();
cudaSetDevice(i);
cudaMemcpyAsync(r_struct[i].h_local_m_real, r_struct[i].d_local_m_real, r_struct[i].localNum * sizeof(double), cudaMemcpyDeviceToHost, r_struct[i].stream);
cudaStreamSynchronize(r_struct[i].stream);
for (int j = 0; j < r_struct[i].localNum; j++)
{
inv_result[j + r_struct[i].base] = r_struct[i].h_local_m_real[j];
}
cudaFreeHost(i_struct[i].h_Vz);
cudaFreeHost(i_struct[i].h_x_obs);
cudaFreeHost(i_struct[i].h_y_obs);
cudaFreeHost(i_struct[i].h_m1_x);
cudaFreeHost(i_struct[i].h_m1_y);
cudaFreeHost(i_struct[i].h_m1_z);
cudaFreeHost(r_struct[i].h_local_g);
cudaFreeHost(r_struct[i].h_local_g0);
cudaFreeHost(r_struct[i].h_local_q);
cudaFreeHost(r_struct[i].h_local_m_real);
cudaFreeHost(r_struct[i].h_local_d_fit);
cudaFreeHost(r_struct[i].h_local_d_fit_temp);
cudaFree(i_struct[i].d_Vz);
cudaFree(i_struct[i].d_x_obs);
cudaFree(i_struct[i].d_y_obs);
cudaFree(i_struct[i].d_m1_x);
cudaFree(i_struct[i].d_m1_y);
cudaFree(i_struct[i].d_m1_z);
cudaFree(v_struct[i].d_Vz_mat_mc);
cudaFree(r_struct[i].d_local_Wm);
cudaFree(r_struct[i].d_local_Wv);
cudaFree(r_struct[i].d_local_W);
cudaFree(r_struct[i].d_local_g);
cudaFree(r_struct[i].d_local_g0);
cudaFree(r_struct[i].d_local_p);
cudaFree(r_struct[i].d_local_p0);
cudaFree(r_struct[i].d_local_q1);
cudaFree(r_struct[i].d_local_q2);
cudaFree(r_struct[i].d_local_m_temp);
cudaFree(r_struct[i].d_local_m_real);
cudaFree(r_struct[i].d_local_d_fit1);
cudaFree(r_struct[i].d_local_d_fit2);
cudaFree(r_struct[i].d_local_d_fit_temp1);
cudaFree(r_struct[i].d_local_d_fit_temp2);
cudaFree(r_struct[i].d_d_fit_temp);
cudaStreamDestroy(r_struct[i].stream);
}
return inv_result;
}
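/*
   Vz_mat_mc_sln: fills the compressed sensitivity table d_Vz_mat_mc. Because the
   observation grid and the prism grid share the same regular lx-by-ly layout, the
   response of prism column (m, n) in layer pk at station (px, py) depends only on
   (|m - px|, |n - py|, pk); the mat-vec kernels below rebuild the full matrix from
   this table via that symmetry. Each entry appears to be the standard closed-form
   vertical gravity of one right rectangular prism (reading of the signs in the code):

       Vz = G * sum_{i,j,k in {1,2}} (-1)^(i+j+k) * atan( x_i * y_j / (z_k * r_ijk) ),
       r_ijk = sqrt(x_i^2 + y_j^2 + z_k^2),

   where x_i, y_j, z_k are the offsets from the station to the prism faces.
*/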
__global__ void Vz_mat_mc_sln(double* Vz_mat_mc, double* x_obs, double* y_obs, double* m1_x, double* m1_y, double* m1_z, double z_obs, int lx, int lz, int point_count, int prism_count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int obs_x, obs_y, prm_z;
double r0, r1, r2, r3, r4, r5, r6, r7;
double xt1, xt2, yt1, yt2, zt1, zt2;
double d_G = 66.7;
if (i < prism_count)
{
obs_x = (i % point_count) % lx;
obs_y = (i % point_count) / lx;
prm_z = i / point_count;
xt1 = x_obs[obs_x] - m1_x[0]; xt2 = x_obs[obs_x] - m1_x[1];
yt1 = y_obs[obs_y] - m1_y[0]; yt2 = y_obs[obs_y] - m1_y[1];
zt1 = z_obs - m1_z[prm_z]; zt2 = z_obs - m1_z[prm_z + lz];
r0 = sqrt(pow(xt1, 2) + pow(yt1, 2) + pow(zt1, 2));
r1 = sqrt(pow(xt1, 2) + pow(yt1, 2) + pow(zt2, 2));
r2 = sqrt(pow(xt1, 2) + pow(yt2, 2) + pow(zt1, 2));
r3 = sqrt(pow(xt1, 2) + pow(yt2, 2) + pow(zt2, 2));
r4 = sqrt(pow(xt2, 2) + pow(yt1, 2) + pow(zt1, 2));
r5 = sqrt(pow(xt2, 2) + pow(yt1, 2) + pow(zt2, 2));
r6 = sqrt(pow(xt2, 2) + pow(yt2, 2) + pow(zt1, 2));
r7 = sqrt(pow(xt2, 2) + pow(yt2, 2) + pow(zt2, 2));
		// Write (rather than accumulate): d_Vz_mat_mc is allocated with cudaMalloc and
		// is not zeroed before this kernel, and each entry is computed exactly once here.
		Vz_mat_mc[i] = d_G * (-atan(xt1 * yt1 / zt1 / r0) + atan(xt1 * yt1 / zt2 / r1) + atan(xt1 * yt2 / zt1 / r2) - atan(xt1 * yt2 / zt2 / r3)
			+ atan(xt2 * yt1 / zt1 / r4) - atan(xt2 * yt1 / zt2 / r5) - atan(xt2 * yt2 / zt1 / r6) + atan(xt2 * yt2 / zt2 / r7));
}
}
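/*
   W_init_sln: initial weighting. For each model cell i it accumulates the column
   energy of the sensitivity matrix and derives (in the notation of the code)

       Wm_i = ( sum_j G_ji^2 )^(-wn),   Wv_i = sigma^2 * Wm_i,   W_i = sqrt(Wv_i),

   and zeroes m_temp / m_real. Wm acts as a sensitivity (depth) weighting; sigma is
   the focusing threshold reused later in update_sln and in the lambda estimate.
*/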
__global__ void W_init_sln(double* Wm, double* Wv, double* W, double* m_temp, double* m_real, double* Vz_mat_mc, double sigma, int localNum, int base, int point_count, int lx,double wn)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
Wm[i] = 0; m_temp[i] = 0; m_real[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
Wm[i] += pow(Vz_mat_mc[index], 2);
}
/*Wm[i] = 1 / sqrt(sqrt(Wm[i]));
Wv[i] = pow(sigma, 2) * Wm[i];*/
Wm[i] = 1 / pow(Wm[i], wn);
Wv[i] = pow(sigma, 2) * Wm[i];
W[i] = sqrt(Wv[i]);
}
}
__global__ void g0_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, int localNum, int base, int point_count, int lx)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
g[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
g[i] += Vz_mat_mc[index] * W[i] * Vz[j];
}
}
}
// A_mult_v_col_sln: per-block partial sums for the data-space rows of q = A * v,
// where the upper block of A is the weighted kernel G * W (expanded from the
// compressed table by grid symmetry). Thread i forms W[i] * vector[i] times its
// kernel entry, the block reduces the products in shared memory, and thread 0
// stores the block's partial sum for data row j; A_mult_v_sum_sln adds the
// partials across blocks and appends the sqrt(lambda) damping rows.
__global__ void A_mult_v_col_sln(double* q, double* Vz_mat_mc, double* W, double* vector, int localNum, int base, int point_count, int lx, int nThreadPerBlock)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int i_base, m, n, px, py, pi, pj, pk, index;
	double temp;
	extern __shared__ double V_temp_shared[];
	V_temp_shared[threadIdx.x] = 0;
	// Keep every thread of the block inside the loop so __syncthreads() is reached
	// uniformly; threads with i >= localNum simply contribute 0 to the reduction.
	bool active = (i < localNum);
	if (active)
	{
		i_base = i + base;
		px = (i_base % point_count) % lx + 1;
		py = (i_base % point_count) / lx + 1;
		pk = i_base / point_count + 1;
	}
	for (int j = 0; j < point_count; j++)
	{
		if (active)
		{
			m = j % lx + 1; n = j / lx + 1;
			pi = abs(m - px) + 1;
			pj = abs(n - py) + 1;
			index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
			V_temp_shared[threadIdx.x] = Vz_mat_mc[index] * W[i] * vector[i];
		}
		__syncthreads();
		temp = 0;
		if (threadIdx.x == 0)
		{
			for (int k = 0; k < nThreadPerBlock; k++)
			{
				temp += V_temp_shared[k];
			}
			q[blockIdx.x * point_count + j] = temp;
		}
		__syncthreads();
	}
}
__global__ void A_mult_v_sum_sln(double* q2, double* q1, double* vector, double lambda, int localNum, int base, int point_count, int prism_count, int nBlocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < (prism_count + point_count))
{
q2[i] = 0;
if (i < point_count)
{
for (int j = 0; j < nBlocks; j++)
{
q2[i] += q1[j * point_count + i];
}
}
else if (i >= (point_count + base) && i < (point_count + base + localNum))
{
q2[i] = sqrt(lambda) * vector[i - point_count - base];
}
}
}
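/*
   m_sln: model update. Takes the step m_temp += alpha * p in the weighted
   (preconditioned) variable, maps it back to the physical model m_real = W * m_temp,
   and clips m_real to the bound constraints [m_min, m_max].
*/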
__global__ void m_sln(double* m_temp, double* m_real, double* p, double* W, double alpha, double m_min, double m_max, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
m_temp[i] += alpha * p[i];
m_real[i] = W[i] * m_temp[i];
if (m_real[i] < m_min)
{
m_real[i] = m_min;
}
else if (m_real[i] > m_max)
{
m_real[i] = m_max;
}
}
}
// G_mult_m_col_sln: per-block partial sums of the forward response d = G * m
// (the driver passes m_real as the model argument). Thread i multiplies its
// (symmetry-expanded) kernel entry by its model value, the block reduces the
// products in shared memory, and thread 0 stores the block's partial sum for
// data row j; G_mult_m_sum_sln adds the partials across blocks.
__global__ void G_mult_m_col_sln(double* d_fit, double* Vz_mat_mc, double* m_temp, int localNum, int base, int point_count, int lx, int nThreadPerBlock)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	int i_base, m, n, px, py, pi, pj, pk, index;
	double temp;
	extern __shared__ double V_temp_shared[];
	V_temp_shared[threadIdx.x] = 0;
	// Keep every thread of the block inside the loop so __syncthreads() is reached
	// uniformly; threads with i >= localNum simply contribute 0 to the reduction.
	bool active = (i < localNum);
	if (active)
	{
		i_base = i + base;
		px = (i_base % point_count) % lx + 1;
		py = (i_base % point_count) / lx + 1;
		pk = i_base / point_count + 1;
	}
	for (int j = 0; j < point_count; j++)
	{
		if (active)
		{
			m = j % lx + 1; n = j / lx + 1;
			pi = abs(m - px) + 1;
			pj = abs(n - py) + 1;
			index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
			V_temp_shared[threadIdx.x] = Vz_mat_mc[index] * m_temp[i];
		}
		__syncthreads();
		temp = 0;
		if (threadIdx.x == 0)
		{
			for (int k = 0; k < nThreadPerBlock; k++)
			{
				temp += V_temp_shared[k];
			}
			d_fit[blockIdx.x * point_count + j] = temp;
		}
		__syncthreads();
	}
}
__global__ void G_mult_m_sum_sln(double* d_fit2, double* d_fit1, int localNum, int point_count, int nBlocks)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < point_count)
{
d_fit2[i] = 0;
for (int j = 0; j < nBlocks; j++)
{
d_fit2[i] += d_fit1[j * point_count + i];
}
}
}
__global__ void p_sln(double* p, double* g, double* p0, double beta, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
p[i] = g[i] + beta * p0[i];
}
}
__global__ void update_sln(double* p0, double* g0, double* W, double* m_temp, double* p, double* g, double* m_real, double* Wm, double* Wv, int localNum)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < localNum)
{
p0[i] = p[i];
g0[i] = g[i];
W[i] = sqrt(pow(m_real[i], 2) * Wm[i] + Wv[i]);
m_temp[i] = m_real[i] / W[i];
}
}
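/*
   g_sln: per-cell gradient-type direction for the damped, weighted data fit.
   For model cell i,

       g_i = sum_j G_ji * W_i * (Vz_j - d_fit_j) - sqrt(lambda) * d_fit_temp[point_count + base + i],

   i.e. the data residual back-projected through the weighted kernel, minus the
   damping contribution (the entries of d_fit_temp past point_count hold
   sqrt(lambda) * m_temp from A_mult_v_sum_sln).
*/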
__global__ void g_sln(double* g, double* Vz_mat_mc, double* W, double* Vz, double* d_fit_temp, double lambda, int localNum, int base, int point_count, int lx)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int i_base, m, n, px, py, pi, pj, pk, index;
if (i < localNum)
{
g[i] = 0;
i_base = i + base;
px = (i_base % point_count) % lx + 1;
py = (i_base % point_count) / lx + 1;
pk = i_base / point_count + 1;
for (int j = 0; j < point_count; j++)
{
m = j % lx + 1; n = j / lx + 1;
pi = abs(m - px) + 1;
pj = abs(n - py) + 1;
index = (pk - 1) * point_count + (pj - 1) * lx + pi - 1;
g[i] += Vz_mat_mc[index] * W[i] * (Vz[j] - d_fit_temp[j]);
}
g[i] += sqrt(lambda) * (0 - d_fit_temp[i_base + point_count]);
}
}
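/*
   beta_sln / vector_dot_product: host-side helpers for the conjugate-gradient
   update. beta is the Polak-Ribiere coefficient

       beta = g . (g - g0) / (g0 . g0),

   computed over the full model vector gathered from all devices.
*/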
double beta_sln(double* g, double* g0, int prism_count)
{
double beta;
double beta_temp = 0;
for (int i = 0; i < prism_count; i++)
{
beta_temp += g[i] * (g[i] - g0[i]);
}
beta = beta_temp / vector_dot_product(g0, g0, prism_count);
return beta;
}
double vector_dot_product(double* a, double* b, int count)
{
double result = 0;
for (int vi = 0; vi < count; vi++)
{
result += a[vi] * b[vi];
}
return result;
}
|
3d78478db28f6782f61225e4dc8969585485d550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
Name: GPU_ALL_KNN_LBT.cu
Desc: This file contains the ALL-KNN kd-tree GPU kernel
in left-balanced array order
Log: Created by Shawn D. Brown (2/01/10)
-----------------------------------------------------------------------------*/
#ifndef _GPU_ALL_KNN_LBT_H_
#define _GPU_ALL_KNN_LBT_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTree_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_ALL_KNN_2D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_2D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_2D_LBT * kdTree, // IN: KD Tree (Nodes)
	unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_2D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int prevAxis, currAxis, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float diff, diff2, diffDist2;
float dx, dy;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info (decompress 3 fields from 1)
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1;
rightIdx = leftIdx + 1;
nextAxis = ((currAxis == 1u) ? 0u : 1u);
prevAxis = ((currAxis == 0u) ? 1u : 0u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
					// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
						// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
			// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
				// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
		knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
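/*---------------------------------------------------------
	Usage sketch: a minimal, hypothetical host-side launch of
	GPU_ALL_KNN_2D_LBT. The helper name and the block shape are
	assumptions, not part of the original API. The kernel has no
	bounds check on 'qidx', so the grid must tile the query set
	exactly (gridDim * blockDim == number of query points); the
	only shape requirement visible in the kernel is that
	blockDim.x * blockDim.y == ALL_KNN_THREADS_PER_BLOCK, and the
	results buffer must hold k * nQuery entries.
---------------------------------------------------------*/
static void Launch_ALL_KNN_2D_LBT_Sketch
(
	GPU_NN_Result * d_qrs,      // OUT: device results buffer (k * nQuery entries)
	GPUNode_2D_LBT * d_kdTree,  // IN:  kd-tree nodes in left-balanced layout
	unsigned int * d_ids,       // IN:  node index -> point ID remap table
	unsigned int cNodes,        // IN:  number of nodes in the kd-tree
	unsigned int k,             // IN:  number of nearest neighbors per query
	unsigned int nQuery         // IN:  query count (multiple of the block size)
)
{
	dim3 block( ALL_KNN_THREADS_PER_BLOCK, 1, 1 );          // hypothetical 1-D block
	dim3 grid( nQuery / ALL_KNN_THREADS_PER_BLOCK, 1, 1 );  // exact tiling, no remainder
	GPU_ALL_KNN_2D_LBT<<< grid, block >>>( d_qrs, d_kdTree, d_ids, cNodes, k );
}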
/*---------------------------------------------------------
Name: GPU_ALL_KNN_3D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_3D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_3D_LBT * kdTree, // IN: KD Tree (Nodes)
	unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_3D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_3D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, dz;
float diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 2u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 2u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
					// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
						// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
			// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
				// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
		knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // Really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
/*---------------------------------------------------------
Name: GPU_ALL_KNN_4D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_4D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_4D_LBT * kdTree, // IN: KD Tree (Nodes)
	unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_4D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_4D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 3u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 3u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
					// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
						// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
			// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
				// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Assume answers should be stored along z axis of 3 dimensional cube
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
/*---------------------------------------------------------
Name: GPU_ALL_KNN_6D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
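// Note (added for clarity): the zero-distance test inside the search loop below
// (diffDist2 <= 0.0f) is what implements note 2 above -- a query point that lands
// on its own kd-tree node is skipped, so it never reports itself as a neighbor.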
__global__ void
GPU_ALL_KNN_6D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_6D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_6D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_6D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw, ds, dt;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 5u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 5u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
ds = currNodes[tidx].pos[4] - queryVals[4];
dt = currNodes[tidx].pos[5] - queryVals[5];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw) + (ds*ds) + (dt*dt);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Assume answers should be stored along z axis of 3 dimensional cube
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
#endif // _GPU_ALL_KNN_LBT_H_
| 3d78478db28f6782f61225e4dc8969585485d550.cu | /*-----------------------------------------------------------------------------
Name: GPU_ALL_KNN_LBT.cu
Desc: This file contains the ALL-KNN kd-tree GPU kernel
in left-balanced array order
Log: Created by Shawn D. Brown (2/01/10)
-----------------------------------------------------------------------------*/
#ifndef _GPU_ALL_KNN_LBT_H_
#define _GPU_ALL_KNN_LBT_H_
/*---------------------------------------------------------
Includes
---------------------------------------------------------*/
#include <stdio.h>
//#include <float.h>
#include "GPUTree_API.h"
/*---------------------------------------------------------
Function Definitions
---------------------------------------------------------*/
/*---------------------------------------------------------
Name: GPU_ALL_KNN_2D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
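// Illustrative helpers (a sketch added for clarity; the kernels in this file inline
// these shifts directly and never call them): with the root stored at index 1 in the
// left-balanced layout, parent/child indices reduce to simple bit shifts.
__host__ __device__ inline unsigned int LBT_LeftChild ( unsigned int idx ) { return idx << 1u; }
__host__ __device__ inline unsigned int LBT_RightChild( unsigned int idx ) { return (idx << 1u) + 1u; }
__host__ __device__ inline unsigned int LBT_Parent ( unsigned int idx ) { return idx >> 1u; }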
__global__ void
GPU_ALL_KNN_2D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_2D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_2D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_2D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int prevAxis, currAxis, nextAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float diff, diff2, diffDist2;
float dx, dy;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info (decompress 3 fields from 1)
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1;
rightIdx = leftIdx + 1;
nextAxis = ((currAxis == 1u) ? 0u : 1u);
prevAxis = ((currAxis == 0u) ? 1u : 0u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryPoints[tidx].pos[0];
dy = currNodes[tidx].pos[1] - queryPoints[tidx].pos[1];
diffDist2 = (dx*dx) + (dy*dy);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
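// Illustrative host-side launch of the kernel above (a sketch; this wrapper and its
// parameter names are not part of the original code). The kernel derives its query
// index from a 2-D grid, and its shared-memory arrays are sized by
// ALL_KNN_THREADS_PER_BLOCK, so blockDim.x * blockDim.y must equal that constant and
// the grid must cover every query point.
inline void Example_Launch_ALL_KNN_2D_LBT
(
GPU_NN_Result * d_results, // OUT: device results buffer ('k' rows of per-query results)
GPUNode_2D_LBT * d_kdTree, // IN: device kd-tree nodes (queries == search points)
unsigned int * d_ids, // IN: node index -> point ID remap table
unsigned int nNodes, // IN: number of nodes in the kd-tree
unsigned int k, // IN: number of nearest neighbors per query
dim3 knnGrid, // IN: grid covering all query points
dim3 knnBlock // IN: block shape (x*y == ALL_KNN_THREADS_PER_BLOCK)
)
{
GPU_ALL_KNN_2D_LBT<<< knnGrid, knnBlock >>>( d_results, d_kdTree, d_ids, nNodes, k );
}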
/*---------------------------------------------------------
Name: GPU_ALL_KNN_3D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_3D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_3D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_3D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_3D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float dx, dy, dz;
float diff, diff2, diffDist2;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float * queryVals;
int tidx, width, currRow, currCol, qidx;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 2u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 2u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
//| ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
/*---------------------------------------------------------
Name: GPU_ALL_KNN_4D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_4D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_4D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_4D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_4D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 3u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 3u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Assume answers should be stored along z axis of 3 dimensional cube
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
/*---------------------------------------------------------
Name: GPU_ALL_KNN_6D_LBT
Desc: Finds the 'k' Nearest Neighbors in
a search set 'S' for each query point in set 'Q'
Notes:
1. The search set S and query set Q are the same
for the All-KNN search.
2. We need to exclude zero distance results
Otherwise, each point will return itself as
its own nearest neighbor
3. The search set S is represented by a
static balanced cyclical KDTree
with one search point stored per kd-tree node
---------------------------------------------------------*/
__global__ void
GPU_ALL_KNN_6D_LBT
(
GPU_NN_Result * qrs, // OUT: Results of KD Nearest Neighbor Algorithm
GPUNode_6D_LBT * kdTree, // IN: KD Tree (Nodes)
unsigned int * ids, // IN: IDs (from indices)
unsigned int cNodes, // IN: 'n' number of nodes in tree
unsigned int k // IN: number of nearest neighbors to find
)
{
// Per Thread Local Parameters (shared memory)
__shared__ GPUNode_6D_LBT currNodes[ALL_KNN_THREADS_PER_BLOCK]; // Current kd-tree node
__shared__ GPU_Search searchStack[ALL_KNN_STACK_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // Search Stack
__shared__ GPU_NN_Result knnHeap[KD_KNN_SIZE][ALL_KNN_THREADS_PER_BLOCK]; // 'k' NN Closest Heap
__shared__ GPUNode_6D_LBT queryPoints[ALL_KNN_THREADS_PER_BLOCK]; // Query Point
// Per Thread Local Parameters (registers)
unsigned int currIdx, currInOut;
unsigned int leftIdx, rightIdx;
unsigned int currAxis, nextAxis, prevAxis;
unsigned int stackTop, maxHeap, countHeap;
float queryValue, splitValue;
float dist2Heap, bestDist2;
float dx, dy, dz, dw, ds, dt;
float diff, diff2, diffDist2;
int tidx, width, currRow, currCol, qidx;
float * queryVals;
// Compute Thread index
tidx = (threadIdx.y*blockDim.x) + threadIdx.x;
// Compute Query Index
width = gridDim.x * blockDim.x;
currRow = (blockIdx.y * blockDim.y) + threadIdx.y;
currCol = (blockIdx.x * blockDim.x) + threadIdx.x;
qidx = (currRow * width) + currCol;
// Load current Query Point into local (fast) memory
// Slow read from RAM into shared memory
queryPoints[tidx] = kdTree[qidx];
queryVals = (float *)&(queryPoints[tidx]);
// Compute number of elements (in grid)
int height = gridDim.y * blockDim.y;
int nElems = height * width;
// Search Stack Variables
stackTop = 0;
// 'k' NN Heap variables
maxHeap = k; // Maximum # elements on knnHeap
countHeap = 0; // Current # elements on knnHeap
dist2Heap = 0.0f; // Max Dist of any element on heap
bestDist2 = 3.0e38f;
// Put root search info on stack
searchStack[stackTop][tidx].nodeFlags = FLAGS_ROOT_START;
searchStack[stackTop][tidx].splitVal = 3.0e+38F;
stackTop++;
while (stackTop != 0)
{
// Statistics
//best.cNodes++;
// Get Current Node from top of stack
stackTop--;
// Get Node Info
currIdx = (searchStack[stackTop][tidx].nodeFlags & NODE_INDEX_MASK);
currAxis = (searchStack[stackTop][tidx].nodeFlags & SPLIT_AXIS_MASK) >> SPLIT_AXIS_SHIFT;
currInOut = (searchStack[stackTop][tidx].nodeFlags & ON_OFF_MASK) >> ON_OFF_SHIFT;
// Get left and right child indices from binary array layout
leftIdx = currIdx << 1u;
rightIdx = leftIdx + 1u;
nextAxis = ((currAxis == 5u) ? 0u : currAxis+1u);
prevAxis = ((currAxis == 0u) ? 5u : currAxis-1u);
// Early Exit Check
if (currInOut == 1) // KD_OUT
{
if (countHeap == maxHeap) // Is heap full yet ?!?
{
queryValue = queryVals[prevAxis];
splitValue = searchStack[stackTop][tidx].splitVal; // Split Value of Parent Node
diff = splitValue - queryValue;
diff2 = diff*diff;
if (diff2 >= dist2Heap)
{
// We can do an early exit for this node
continue;
}
}
}
// WARNING - It's Much faster to load this node from global memory after the "Early Exit check" !!!
// Load current node
// Slow read from RAM into shared memory
currNodes[tidx] = kdTree[currIdx];
// Get Best Fit Dist for checking child ranges
queryValue = queryVals[currAxis];
splitValue = currNodes[tidx].pos[currAxis];
diff = splitValue - queryValue;
diff2 = diff*diff;
// Calc Dist from Median Node to queryLocation
dx = currNodes[tidx].pos[0] - queryVals[0];
dy = currNodes[tidx].pos[1] - queryVals[1];
dz = currNodes[tidx].pos[2] - queryVals[2];
dw = currNodes[tidx].pos[3] - queryVals[3];
ds = currNodes[tidx].pos[4] - queryVals[4];
dt = currNodes[tidx].pos[5] - queryVals[5];
diffDist2 = (dx*dx) + (dy*dy) + (dz*dz) + (dw*dw) + (ds*ds) + (dt*dt);
// See if we should add this point to the 'k' NN Heap
if (diffDist2 <= 0.0f)
{
// Do nothing, The query point found itself in the kd-tree
// We don't want to add ourselves as a NN.
}
else if (countHeap < maxHeap)
{
//-------------------------------
// < 'k' elements on heap
// Do Simple Array append
//-------------------------------
countHeap++;
knnHeap[countHeap][tidx].Id = currIdx;
knnHeap[countHeap][tidx].Dist = diffDist2;
// Do we need to convert the array into a max distance heap ?!?
if (countHeap == maxHeap)
{
// Yes, turn array into a heap, takes O(k) time
for (unsigned int z = countHeap/2; z >= 1; z--)
{
//
// Demote each element in turn (to correct position in heap)
//
unsigned int parentHIdx = z; // Start at specified element
unsigned int childHIdx = z << 1; // left child of parent
// Compare Parent to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent is larger than both children, exit loop
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
}
// Update trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
}
else if (diffDist2 < dist2Heap)
{
//-------------------------------
// >= k elements on heap
// Do Heap Replacement
//-------------------------------
// Replace Root Element with new element
knnHeap[1][tidx].Id = currIdx;
knnHeap[1][tidx].Dist = diffDist2;
//
// Demote new element (to correct position in heap)
//
unsigned int parentHIdx = 1; // Start at Root
unsigned int childHIdx = 2; // left child of parent
// Compare current index to its children
while (childHIdx <= maxHeap)
{
// Update Distances
float parentD2 = knnHeap[parentHIdx][tidx].Dist;
float childD2 = knnHeap[childHIdx][tidx].Dist;
// Find largest child
if (childHIdx < maxHeap)
{
float rightD2 = knnHeap[childHIdx+1][tidx].Dist;
if (childD2 < rightD2)
{
// Use right child
childHIdx++;
childD2 = rightD2;
}
}
// Compare largest child to parent
if (parentD2 >= childD2)
{
// Parent node is larger than both children, exit
break;
}
// Demote parent by swapping with its largest child
GPU_NN_Result closeTemp = knnHeap[parentHIdx][tidx];
knnHeap[parentHIdx][tidx] = knnHeap[childHIdx][tidx];
knnHeap[childHIdx][tidx] = closeTemp;
// Update indices
parentHIdx = childHIdx;
childHIdx = parentHIdx<<1; // left child of parent
}
// Update Trim distances
dist2Heap = knnHeap[1][tidx].Dist;
bestDist2 = dist2Heap;
}
// update bestDist2
if (queryValue <= splitValue)
{
// [...QL...BD]...SV -> Include Left range only
// or
// [...QL...SV...BD] -> Include Both Left and Right Sub Ranges
// Check if we should add Right Sub-range to stack
if (diff2 < bestDist2)
{
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Left Sub-range to search path
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
else
{
// SV...[BD...QL...] -> Include Right sub range only
// or
// [BD...SV...QL...] -> Include Both Left and Right Sub Ranges
// Check if we should add left sub-range to search path
if (diff2 < bestDist2)
{
// Add to search stack
if (leftIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (leftIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK)
| OFFSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
// Always Add Right Sub-range
if (rightIdx <= cNodes)
{
// Push Onto top of stack
searchStack[stackTop][tidx].nodeFlags = (rightIdx & NODE_INDEX_MASK)
| ((nextAxis << SPLIT_AXIS_SHIFT) & SPLIT_AXIS_MASK);
// | ONSIDE_VALUE;
searchStack[stackTop][tidx].splitVal = splitValue;
stackTop++;
}
}
}
/*-----------------------
Output Results
-----------------------*/
unsigned int i, offset, outIdx;
// Remap query node idx to query point idx
// Slow read from RAM memory
outIdx = ids[qidx];
// We now have a heap of the 'k' nearest neighbors
// Assume answers should be stored along z axis of 3 dimensional cube
// Write heap elements to the results array row by row
for (i = 1; i <= countHeap; i++)
{
offset = (i-1) * nElems;
// REMAP: Convert Nearest Neighbor Info to final format
// Slow read from RAM memory
knnHeap[i][tidx].Id = ids[knnHeap[i][tidx].Id]; // We really need IDs, not indices
knnHeap[i][tidx].Dist = sqrtf( knnHeap[i][tidx].Dist ); // Get True distance (not distance squared)
// Store Result
// Slow write to RAM memory
qrs[outIdx+offset] = knnHeap[i][tidx];
}
}
#endif // _GPU_ALL_KNN_LBT_H_
|
15029f45772e9740991a967babdf94225656a7e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********HEADERS**********/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, float, int, float, float, float, float);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
float norm(host_ptr<float>, int, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int);
__global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int);
__global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int);
__global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int);
__global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int);
__global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int);
__global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int);
__global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int);
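// Note (an assumption stated for clarity): grid_size() is only declared above and is
// defined elsewhere in this file; a typical ceil-division form would be
// inline int grid_size(int n, int threads) { return (n + threads - 1) / threads; }
// i.e. just enough blocks to cover n elements with 'threads' threads per block.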
__global__ void weights_differential1(
kernel_ptr<float> norm,
kernel_ptr<float> const df,
kernel_ptr<float> const df_avg,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float val = df(i, j, g) - (df_avg(i, j) / Ng);
atomicAdd(
&norm(i, g),
val * val);
}
}
__global__ void weights_differential2(
kernel_ptr<float> weights,
kernel_ptr<float> total_weight,
kernel_ptr<float> const norm,
int Ng)
{
int g = threadIdx.x + blockIdx.x * blockDim.x;
if (g < Ng) {
float sum = 0.f;
for (int i = 0; i < NX; ++i) {
sum += norm(i, g);
}
weights(g) = 1.f / sqrtf(sum);
atomicAdd(
&total_weight(0),
weights(g));
}
}
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
kernel_ptr<float> const total_weight,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float weight = weights(g) / total_weight(0);
atomicAdd(
&df_avg(i, j),
df(i, j, g) * weight);
}
}
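// Summary (added for clarity): the three kernels above implement one weighted
// averaging step over the per-group differentials,
// norm(i,g) = sum_j ( df(i,j,g) - df_avg(i,j)/Ng )^2 (weights_differential1)
// w(g) = 1 / sqrt( sum_i norm(i,g) ), W = sum_g w(g) (weights_differential2)
// df_avg(i,j) = sum_g ( w(g) / W ) * df(i,j,g) (average_differential)
// so sensor groups whose differential deviates strongly from the plain mean
// df_avg/Ng are down-weighted before the field update.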
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 9) {
cerr << "Usage: " << argv[0] << " <fo_filename> <sensor group size> <target epsilon> <max iterations> <omega> <alpha> <beta> <gamma>\n\n";
exit(1);
}
string fo_filename = argv[1];
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
int group_size = stoi(argv[2]);
float target_epsilon = stof(argv[3]);
int max_iterations = stoi(argv[4]);
float omega = stof(argv[5]);
float alpha = stof(argv[6]);
float beta = stof(argv[7]);
float gamma = stof(argv[8]);
if (max_iterations == -1)
max_iterations = numeric_limits<int>::max();
cout << setprecision(9);
cerr << setprecision(9);
Ultrasonic_Tomography(fo_filename, group_size, target_epsilon, max_iterations, omega, alpha, beta, gamma);
hipDeviceReset();
}
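/* Example invocation (illustrative; the executable and phantom file names below are
placeholders, not taken from this project):
./sirt phantom.txt 16 1e-3 -1 1.0 0.5 0.5 0.5
This reads 'phantom.txt' plus the pre-computed sensor data files
'phantom-data-{bottom,right,top,left}-16.txt', groups the NS = 640 sensors into
batches of 16 (Ng = 40 groups), and iterates until epsilon drops below 1e-3
(a max-iterations value of -1 means "no limit"). */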
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, float target_epsilon, int max_iterations, float omega, float alpha, float beta, float gamma)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// gg_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group
// e.g g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// u(i, j, k, g) =
// wave propagation at pos (i, j) of field, at time k, from sensor group g
device_ptr<float> dev_u(NX, NY, NT, Ng);
// Kaczmarz method
// propagation
// rr_xxx(i, k, g) =
// difference signal between xxx sensors in u and gg_xxx
// at time k, from sensor group g
device_ptr<float> dev_rr_bottom(NX, NT, Ng);
device_ptr<float> dev_rr_right(NX, NT, Ng);
device_ptr<float> dev_rr_top(NX, NT, Ng);
device_ptr<float> dev_rr_left(NX, NT, Ng);
// z(i, j, k, g) =
// wave back propagation at pos (i, j) of field,
// at time k, from sensor group g
device_ptr<float> dev_z(NX, NY, NT+1, Ng);
// Lu(i, j, k, g) =
// result of applying the Laplace operator to u(i, j, k, g)
device_ptr<float> dev_Lu(NX, NY, NT, Ng);
// f(i, j) =
// current reconstruction of field at pos (i, j)
host_ptr<float> f(NX, NY);
device_ptr<float> dev_f(NX, NY);
// df(i, j, g) =
// discretized differential of f(i, j) from sensor group g
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> dev_norm(NX, Ng);
device_ptr<float> dev_weights(Ng);
device_ptr<float> dev_total_weight(1);
// f_minus_fo(i, j)
// difference of field and ground truth at pos (i, j)
host_ptr<float> f_minus_fo(NX, NY);
device_ptr<float> dev_f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */
/* << "writing time to 'sirt_time.txt'...\n\n"; */
ofstream convergence_file("sirt_convergence.txt");
ofstream time_file("sirt_time.txt");
// kernel launch parameters for propagation
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y),
grid_size(Ng, threads_propagation.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(NT, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Ng, threads_prop_corners.y));
// kernel launch parameters for difference_signal
dim3 threads_diff_signal(NX, 1, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y),
grid_size(Ng, threads_diff_signal.z));
// kernel launch parameters for backpropagation1
dim3 threads_backpropagation1(NX, 1, 1);
dim3 grid_backpropagation1(
grid_size(NX, threads_backpropagation1.x),
grid_size(NY, threads_backpropagation1.y),
grid_size(Ng, threads_backpropagation1.z));
// kernel launch parameters for backpropagation2
dim3 threads_backpropagation2(Ng, 1);
dim3 grid_backpropagation2(
grid_size(NX, threads_backpropagation2.x),
grid_size(Ng, threads_backpropagation2.y));
// kernel launch parameters for laplace
dim3 threads_laplace(NX, 1, 1);
dim3 grid_laplace(
grid_size(NX * NY, threads_laplace.x),
grid_size(NT, threads_laplace.y),
grid_size(Ng, threads_laplace.z));
// kernel launch parameters for laplace_corners
dim3 threads_laplace_corners(NX, 1, 1);
dim3 grid_laplace_corners(
grid_size(NX * NY, threads_laplace_corners.x),
grid_size(NT, threads_laplace_corners.y),
grid_size(Ng, threads_laplace_corners.z));
// kernel launch parameters for update_differential
dim3 threads_differential(NX, 1, 1);
dim3 grid_differential(
grid_size(NX * NY, threads_differential.x),
grid_size(NT, threads_differential.y),
grid_size(Ng, threads_differential.z));
// kernel launch parameters for field kernels
dim3 threads_field(NX, 1);
dim3 grid_field(
grid_size(NX, threads_field.x),
grid_size(NY, threads_field.y));
dim3 threads_weights1(NX, 1, 1);
dim3 grid_weights1(
grid_size(NX, threads_weights1.x),
grid_size(NY, threads_weights1.y),
grid_size(Ng, threads_weights1.z));
dim3 threads_weights2(1);
dim3 grid_weights2(
grid_size(Ng, threads_weights2.x));
cerr << "group size: " << group_size << "\n"
<< "target epsilon: " << target_epsilon << "\n"
<< "omega: " << omega << "\n"
<< "alpha: " << alpha << "\n"
<< "beta: " << beta << "\n"
<< "gamma: " << gamma << "\n\n";
int w_iter = 6;
int w_eps = 12;
int w_diff = 15;
cout
<< setw(w_iter) << "iter" << " "
<< setw(w_eps) << "epsilon" << " "
<< setw(w_diff) << "difference" << " \n"
<< string(w_iter, '-') << " "
<< string(w_eps, '-') << " "
<< string(w_diff, '-') << " \n";
hipDeviceSynchronize();
int ti = clock();
for(int iter = 0; iter < max_iterations; iter++)
{
dev_u.set(0.f);
dev_z.set(0.f);
dev_df.set(0.f);
dev_df_avg.set(0.f);
dev_total_weight.set(0.f);
dev_norm.set(0.f);
// propagate wave over field, store in u
for (int k = 1; k < NT - 1; ++k)
hipLaunchKernelGGL(( propagation), dim3(grid_propagation), dim3(threads_propagation), 0, 0, dev_ii, dev_jj, dev_f, dev_u, k, group_size, Ng);
hipLaunchKernelGGL(( propagation_at_corners), dim3(grid_prop_corners), dim3(threads_prop_corners), 0, 0, dev_u, Ng);
// store difference signal of u at sensor positions and initial signal at g in rr
hipLaunchKernelGGL(( difference_signal), dim3(grid_diff_signal), dim3(threads_diff_signal), 0, 0, dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, Ng);
// do back propagation of wave over field, store in z
for(int k = NT - 2; k > 0; k--)
{
hipLaunchKernelGGL(( backpropagation1), dim3(grid_backpropagation1), dim3(threads_backpropagation1), 0, 0, dev_z, dev_f, k, Ng);
hipLaunchKernelGGL(( backpropagation2), dim3(grid_backpropagation2), dim3(threads_backpropagation2), 0, 0, dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, Ng);
}
// apply Laplace operator to u, store in Lu
hipLaunchKernelGGL(( laplace), dim3(grid_laplace), dim3(threads_laplace), 0, 0, dev_u, dev_Lu, Ng);
hipLaunchKernelGGL(( laplace_corners), dim3(grid_laplace_corners), dim3(threads_laplace_corners), 0, 0, dev_u, dev_Lu, Ng);
// update differential of f, store in df
hipLaunchKernelGGL(( update_differential), dim3(grid_differential), dim3(threads_differential), 0, 0, dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, Ng);
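// weight each group's differential inversely to its spread from the across-group mean, then rebuild df_avg as the weighted average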
hipLaunchKernelGGL(( weights_differential1), dim3(grid_weights1), dim3(threads_weights1), 0, 0, dev_norm, dev_df, dev_df_avg, Ng);
dev_df_avg.set(0.f);
hipLaunchKernelGGL(( weights_differential2), dim3(grid_weights2), dim3(threads_weights2), 0, 0, dev_weights, dev_total_weight, dev_norm, Ng);
hipLaunchKernelGGL(( average_differential), dim3(grid_propagation), dim3(threads_propagation), 0, 0, dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng);
float scale = omega * (alpha + (beta / pow(iter+1, gamma)));
// update f and f_minus_fo
hipLaunchKernelGGL(( update_field), dim3(grid_field), dim3(threads_field), 0, 0, dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng);
// error calculation
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
if (abs(file_epsilon - curr_epsilon) > 0.2f) {
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
file_epsilon = curr_epsilon;
}
cout << setw(w_iter) << iter << " "
<< setw(w_eps) << curr_epsilon << " "
<< setw(w_diff) << prev_epsilon - curr_epsilon << " "
<< scale << " \n";
// stop if reached target epsilon
if (curr_epsilon <= target_epsilon) {
cerr << "reached target epsilon = " << target_epsilon << ", at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// stop if epsilon diverges
if (curr_epsilon > prev_epsilon ||
std::isnan(curr_epsilon)) {
cerr << "diverged at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
hipDeviceSynchronize();
int tf = clock();
cout << endl;
cerr << "time (s): " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
// copy from device to host
copy(f, dev_f);
string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt";
/* cerr << "writing to '" << f_name << "'...\n\n"; */
ofstream f_out(f_name);
write(f_out, f);
/* size_t free, total; */
/* hipMemGetInfo(&free, &total); */
/* cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n" */
/* << "free mem: " << float(free) / (1024 * 1024) << " MB\n" */
/* << "total mem: " << float(total) / (1024 * 1024) << " MB\n\n"; */
}
float norm(host_ptr<float> A, int nx, int ny)
{
float sum = 0;
for (int j = 0; j < ny; ++j)
for (int i = 0; i < nx; ++i)
sum += A(i, j) * A(i, j);
return sqrtf(sum);
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
// fills ii and jj with the (x, y) grid coordinates of the num transducers, placed along the four sides of the square sensor region
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i < NX && j < NY && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, g) +
u(i-1, j, k, g) +
u(i, j-1, k, g) +
u(i, j+1, k, g)) +
s * u(i, j, k, g) -
u(i, j, k-1, g);
int p = g * group_size;
int jp1 = jj(p);
int jp2 = jj(p + group_size - 1);
int ip1 = ii(p);
int ip2 = ii(p + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, g) +
2.f * r * (1.f + r) * u(ia, ja, k, g) -
r * r * u(ib, jb, k, g) +
(2.f * r - 1.f) * u(i, j, k-1, g) -
2.f * r * u(ia, ja, k-1, g);
}
u(i, j, k+1, g) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (k < NT && g < Ng) {
u(0, 0, k, g) =
1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g));
u(NX-1, 0, k, g) =
1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g));
u(0, NY-1, k, g) =
1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g));
u(NX-1, NY-1, k, g) =
1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g));
}
}
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) {
// store difference at time k of original signal
// and current signal at bottom sensor row
rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, g);
// store difference at time k of original signal
// and current signal at top sensor row
rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, g);
// store difference at time k of original signal
// and current signal at right sensor column
rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, g);
// store difference at time k of original signal
// and current signal at left sensor column
rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, g);
}
}
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng)
{
z(i, j, k, g) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1, g) +
(1.f + f(i, j+1)) * z(i, j+1, k+1, g) +
(1.f + f(i-1, j)) * z(i-1, j, k+1, g) +
(1.f + f(i+1, j)) * z(i+1, j, k+1, g) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1, g)) / (H * H) +
2.f * z(i, j, k+1, g) -
z(i, j, k+2, g);
}
}
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (g < Ng) {
if(i >= 21 && i < 180) {
z(i, 180, k, g) =
z(i, 179, k, g) +
rr_bottom(i, k, g) * H * 1000.f;
z(i, 20, k, g) =
z(i, 21, k, g) +
rr_top(i, k, g) * H * 1000.f;
z(180, i, k, g) =
z(179, i, k, g) +
rr_right(i, k, g) * H * 1000.f;
z(20, i, k, g) =
z(21, i, k, g) +
rr_left(i, k, g) * H * 1000.f;
}
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k, g) =
z(i, 1, k, g);
z(i, NY-1, k, g) =
z(i, NY-2, k, g);
z(0, i, k, g) =
z(1, i, k, g);
z(NX-1, i, k, g) =
z(NX-2, i, k, g);
}
else if (i == 0) {
z(0, 0, k, g) =
(z(1, 0, k, g) +
z(0, 1, k, g)) / 2.f;
z(NX-1, 0, k, g) =
(z(NX-2, 0, k, g) +
z(NX-1, 1, k, g)) / 2.f;
z(0, NY-1, k, g) =
(z(1, NY-1, k, g) +
z(0, NY-2, k, g)) / 2.f;
z(NX-1, NY-1, k, g) =
(z(NX-2, NY-1, k, g) +
z(NX-1, NY-2, k, g)) / 2.f;
}
}
}
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng) {
int i = tx % NX;
int j = tx / NX;
int ja = (j > 0) ? (j - 1) : j;
int jb = (j < NY - 1) ? (j + 1) : j;
int ia = (i > 0) ? (i - 1) : i;
int ib = (i < NX - 1) ? (i + 1) : i;
Lu(i, j, k+1, g) =
(u(i, ja, k+1, g) +
u(i, jb, k+1, g) +
u(ia, j, k+1, g) +
u(ib, j, k+1, g) -
4.f * u(i, j, k+1, g)) / (H * H);
}
}
__global__ void laplace_corners(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if ((k + 1) < NT && g < Ng) {
Lu(0, 0, k+1, g) =
(Lu(1, 0, k+1, g) +
Lu(0, 1, k+1, g)) / 2.f;
Lu(NX-1, 0, k+1, g) =
(Lu(NX-2, 0, k+1, g) +
Lu(NX-1, 1, k+1, g)) / 2.f;
Lu(0, NY-1, k+1, g) =
(Lu(1, NY-1, k+1, g) +
Lu(0, NY-2, k+1, g)) / 2.f;
Lu(NX-1, NY-1, k+1, g) =
(Lu(NX-2, NY-1, k+1, g) +
Lu(NX-1, NY-2, k+1, g)) / 2.f;
}
}
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng) {
int i = tx % NX;
int j = tx / NX;
float val =
z(i, j, k+1, g) *
Lu(i, j, k+1, g) /
(1.f + f(i, j));
atomicAdd(&df(i, j, g), val);
atomicAdd(&df_avg(i, j), val);
}
}
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < NX && j < NY)
{
bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
if (in_sensor_field)
f(i, j) += scale * df_avg(i, j);
f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
T t = a;
a = b;
b = t;
}
}
| 15029f45772e9740991a967babdf94225656a7e1.cu | /**********HEADERS**********/
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <string>
#include <limits>
#include <stdlib.h>
#include <fstream>
#include <math.h>
#include <time.h>
#include "cuda_ptr.cuh"
#include "mimo-io.cuh"
using namespace std;
/**********DEFINING CONSTANTS***********/
#define NX 192 //was 201
#define NY 192 //was 201
#define NT 401
#define NS 640 //number of sensors
#define HX 0.001f
#define HY 0.001f
#define H 0.001f
#define DT 3.3333e-07f
#define OMEGAC 7.8540e+05f
#define TAO 4.0000e-06f
#define TT 8.1573e-06f
/**********FUNCTION DECLARATION**********/
//Host Functions
void Ultrasonic_Tomography(const string&, int, float, int, float, float, float, float);
void Position_Transducers(host_ptr<int>, host_ptr<int>, int);
float norm(host_ptr<float>, int, int);
//In-Line Functions
inline int grid_size(int, int);
template <typename T> __host__ __device__ void minmax(T &a, T &b);
//Device Functions
__global__ void propagation(kernel_ptr<int> const, kernel_ptr<int> const, kernel_ptr<float> const, kernel_ptr<float>, int, int, int);
__global__ void propagation_at_corners(kernel_ptr<float>, int);
__global__ void difference_signal(kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float>, int);
__global__ void backpropagation1(kernel_ptr<float>, kernel_ptr<float> const, int, int);
__global__ void backpropagation2(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int, int);
__global__ void laplace(kernel_ptr<float> const, kernel_ptr<float>, int);
__global__ void laplace_corners(kernel_ptr<float> const, kernel_ptr<float>, int);
__global__ void update_differential(kernel_ptr<float>, kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float> const, kernel_ptr<float> const, int);
__global__ void update_field(kernel_ptr<float>, kernel_ptr<float> const, kernel_ptr<float>, kernel_ptr<float> const, float, int);
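// for each column i and group g, accumulates over j the squared deviation of df(i, j, g) from the across-group mean df_avg(i, j) / Ng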
__global__ void weights_differential1(
kernel_ptr<float> norm,
kernel_ptr<float> const df,
kernel_ptr<float> const df_avg,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float val = df(i, j, g) - (df_avg(i, j) / Ng);
atomicAdd(
&norm(i, g),
val * val);
}
}
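// computes one weight per group, weights(g) = 1 / sqrt(sum_i norm(i, g)), and accumulates the sum of all group weights into total_weight(0)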
__global__ void weights_differential2(
kernel_ptr<float> weights,
kernel_ptr<float> total_weight,
kernel_ptr<float> const norm,
int Ng)
{
int g = threadIdx.x + blockIdx.x * blockDim.x;
if (g < Ng) {
float sum = 0.f;
for (int i = 0; i < NX; ++i) {
sum += norm(i, g);
}
weights(g) = 1.f / sqrtf(sum);
atomicAdd(
&total_weight(0),
weights(g));
}
}
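// fills df_avg (cleared by the caller) with the weighted average of the per-group differentials: sum_g df(i, j, g) * weights(g) / total_weight(0)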
__global__ void average_differential(
kernel_ptr<float> df_avg,
kernel_ptr<float> const df,
kernel_ptr<float> const weights,
kernel_ptr<float> const total_weight,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i < NX && j < NY && g < Ng) {
float weight = weights(g) / total_weight(0);
atomicAdd(
&df_avg(i, j),
df(i, j, g) * weight);
}
}
/***************MAIN PROGRAM***************/
int main(int argc, char **argv)
{
//Command Line Argument Processing
if (argc != 9) {
cerr << "Usage: " << argv[0] << " <fo_filename> <sensor group size> <target epsilon> <max iterations> <omega> <alpha> <beta> <gamma>\n\n";
exit(1);
}
string fo_filename = argv[1];
if (count(fo_filename.begin(), fo_filename.end(), '.') != 1) {
cerr << "Error: '" << fo_filename << "' should have only one period.\n"
<< " It should be in the current directory "
<< "and have only one filetype extension.\n\n";
exit(1);
}
int group_size = stoi(argv[2]);
float target_epsilon = stof(argv[3]);
int max_iterations = stoi(argv[4]);
float omega = stof(argv[5]);
float alpha = stof(argv[6]);
float beta = stof(argv[7]);
float gamma = stof(argv[8]);
if (max_iterations == -1)
max_iterations = numeric_limits<int>::max();
cout << setprecision(9);
cerr << setprecision(9);
Ultrasonic_Tomography(fo_filename, group_size, target_epsilon, max_iterations, omega, alpha, beta, gamma);
cudaDeviceReset();
}
/**********HOST FUNCTION DEFINITIONS**********/
void Ultrasonic_Tomography(const string &fo_filename, int group_size, float target_epsilon, int max_iterations, float omega, float alpha, float beta, float gamma)
{
// fo(i, j) =
// ground truth value at pos (i, j) of field
host_ptr<float> fo(NX, NY);
device_ptr<float> dev_fo(NX, NY);
// Ng = number of sensor groups that will be launched in parallel
int Ng = NS / group_size;
// g_xxx(i, k, g) =
// initial signal at pos i in row/column xxx
// at time k, from sensor group g
// e.g. g_bottom stores the bottom row,
// g_right stores the right column
device_ptr<float> dev_g_bottom(NX, NT, Ng);
device_ptr<float> dev_g_right(NY, NT, Ng);
device_ptr<float> dev_g_top(NX, NT, Ng);
device_ptr<float> dev_g_left(NY, NT, Ng);
host_ptr<float> g_bottom(NX, NT, Ng);
host_ptr<float> g_right(NY, NT, Ng);
host_ptr<float> g_top(NX, NT, Ng);
host_ptr<float> g_left(NY, NT, Ng);
auto idx = fo_filename.find('.');
string basename = fo_filename.substr(0, idx);
{
ifstream fo_in(fo_filename);
if (!fo_in) {
cerr << "Error: '" << fo_filename << "' file not found in current directory.\n\n";
return;
}
string prefix = basename + "-data-";
string suffix = "-" + to_string(group_size) + ".txt";
string gb_name = prefix + "bottom" + suffix;
string gr_name = prefix + "right" + suffix;
string gt_name = prefix + "top" + suffix;
string gl_name = prefix + "left" + suffix;
ifstream gb_in(gb_name);
ifstream gr_in(gr_name);
ifstream gt_in(gt_name);
ifstream gl_in(gl_name);
if (!gb_in) {
cerr << "Error: '" << gb_name << "' file not found in current directory.\n\n";
return;
}
if (!gr_in) {
cerr << "Error: '" << gr_name << "' file not found in current directory.\n\n";
return;
}
if (!gt_in) {
cerr << "Error: '" << gt_name << "' file not found in current directory.\n\n";
return;
}
if (!gl_in) {
cerr << "Error: '" << gl_name << "' file not found in current directory.\n\n";
return;
}
read(fo_in, fo);
copy(dev_fo, fo);
read(gb_in, g_bottom);
copy(dev_g_bottom, g_bottom);
read(gr_in, g_right);
copy(dev_g_right, g_right);
read(gt_in, g_top);
copy(dev_g_top, g_top);
read(gl_in, g_left);
copy(dev_g_left, g_left);
}
// Position of the transducers
host_ptr<int> ii(NS);
host_ptr<int> jj(NS);
device_ptr<int> dev_ii(NS);
device_ptr<int> dev_jj(NS);
Position_Transducers(ii, jj, NS);
// copy from host to device
copy(dev_ii, ii);
copy(dev_jj, jj);
// u(i, j, k, g) =
// wave propagation at pos (i, j) of field, at time k, from sensor group g
device_ptr<float> dev_u(NX, NY, NT, Ng);
// Kaczmarz method
// propagation
// rr_xxx(i, k, g) =
// difference signal between xxx sensors in u and g_xxx
// at time k, from sensor group g
device_ptr<float> dev_rr_bottom(NX, NT, Ng);
device_ptr<float> dev_rr_right(NX, NT, Ng);
device_ptr<float> dev_rr_top(NX, NT, Ng);
device_ptr<float> dev_rr_left(NX, NT, Ng);
// z(i, j, k, g) =
// wave back propagation at pos (i, j) of field,
// at time k, from sensor group g
device_ptr<float> dev_z(NX, NY, NT+1, Ng);
// Lu(i, j, k, g) =
// result of applying the Laplace operator to u(i, j, k, g)
device_ptr<float> dev_Lu(NX, NY, NT, Ng);
// f(i, j) =
// current reconstruction of field at pos (i, j)
host_ptr<float> f(NX, NY);
device_ptr<float> dev_f(NX, NY);
// df(i, j, g) =
// discretized differential of f(i, j) from sensor group g
device_ptr<float> dev_df(NX, NY, Ng);
device_ptr<float> dev_df_avg(NX, NY);
device_ptr<float> dev_norm(NX, Ng);
device_ptr<float> dev_weights(Ng);
device_ptr<float> dev_total_weight(1);
// f_minus_fo(i, j)
// difference of field and ground truth at pos (i, j)
host_ptr<float> f_minus_fo(NX, NY);
device_ptr<float> dev_f_minus_fo(NX, NY);
// initialize epsilon values
float prev_epsilon = 100.f;
float curr_epsilon = -std::numeric_limits<float>::infinity();
float file_epsilon = std::numeric_limits<float>::infinity();
/* cerr << "writing convergence to 'sirt_convergence.txt'...\n" */
/* << "writing time to 'sirt_time.txt'...\n\n"; */
ofstream convergence_file("sirt_convergence.txt");
ofstream time_file("sirt_time.txt");
// kernel launch parameters for propagation
dim3 threads_propagation(NX, 1, 1);
dim3 grid_propagation(
grid_size(NX, threads_propagation.x),
grid_size(NY, threads_propagation.y),
grid_size(Ng, threads_propagation.z));
// kernel launch parameters for propagation_at_corners
dim3 threads_prop_corners(NT, 1);
dim3 grid_prop_corners(
grid_size(NT, threads_prop_corners.x),
grid_size(Ng, threads_prop_corners.y));
// kernel launch parameters for difference_signal
dim3 threads_diff_signal(NX, 1, 1);
dim3 grid_diff_signal(
grid_size(NX, threads_diff_signal.x),
grid_size(NT, threads_diff_signal.y),
grid_size(Ng, threads_diff_signal.z));
// kernel launch parameters for backpropagation1
dim3 threads_backpropagation1(NX, 1, 1);
dim3 grid_backpropagation1(
grid_size(NX, threads_backpropagation1.x),
grid_size(NY, threads_backpropagation1.y),
grid_size(Ng, threads_backpropagation1.z));
// kernel launch parameters for backpropagation2
dim3 threads_backpropagation2(Ng, 1);
dim3 grid_backpropagation2(
grid_size(NX, threads_backpropagation2.x),
grid_size(Ng, threads_backpropagation2.y));
// kernel launch parameters for laplace
dim3 threads_laplace(NX, 1, 1);
dim3 grid_laplace(
grid_size(NX * NY, threads_laplace.x),
grid_size(NT, threads_laplace.y),
grid_size(Ng, threads_laplace.z));
// kernel launch parameters for laplace_corners
dim3 threads_laplace_corners(NX, 1, 1);
dim3 grid_laplace_corners(
grid_size(NX * NY, threads_laplace_corners.x),
grid_size(NT, threads_laplace_corners.y),
grid_size(Ng, threads_laplace_corners.z));
// kernel launch parameters for update_differential
dim3 threads_differential(NX, 1, 1);
dim3 grid_differential(
grid_size(NX * NY, threads_differential.x),
grid_size(NT, threads_differential.y),
grid_size(Ng, threads_differential.z));
// kernel launch parameters for field kernels
dim3 threads_field(NX, 1);
dim3 grid_field(
grid_size(NX, threads_field.x),
grid_size(NY, threads_field.y));
dim3 threads_weights1(NX, 1, 1);
dim3 grid_weights1(
grid_size(NX, threads_weights1.x),
grid_size(NY, threads_weights1.y),
grid_size(Ng, threads_weights1.z));
dim3 threads_weights2(1);
dim3 grid_weights2(
grid_size(Ng, threads_weights2.x));
cerr << "group size: " << group_size << "\n"
<< "target epsilon: " << target_epsilon << "\n"
<< "omega: " << omega << "\n"
<< "alpha: " << alpha << "\n"
<< "beta: " << beta << "\n"
<< "gamma: " << gamma << "\n\n";
int w_iter = 6;
int w_eps = 12;
int w_diff = 15;
cout
<< setw(w_iter) << "iter" << " "
<< setw(w_eps) << "epsilon" << " "
<< setw(w_diff) << "difference" << " \n"
<< string(w_iter, '-') << " "
<< string(w_eps, '-') << " "
<< string(w_diff, '-') << " \n";
cudaDeviceSynchronize();
int ti = clock();
for(int iter = 0; iter < max_iterations; iter++)
{
dev_u.set(0.f);
dev_z.set(0.f);
dev_df.set(0.f);
dev_df_avg.set(0.f);
dev_total_weight.set(0.f);
dev_norm.set(0.f);
// propagate wave over field, store in u
for (int k = 1; k < NT - 1; ++k)
propagation<<<grid_propagation, threads_propagation>>>(dev_ii, dev_jj, dev_f, dev_u, k, group_size, Ng);
propagation_at_corners<<<grid_prop_corners, threads_prop_corners>>>(dev_u, Ng);
// store difference signal of u at sensor positions and initial signal at g in rr
difference_signal<<<grid_diff_signal, threads_diff_signal>>>(dev_u, dev_g_bottom, dev_g_right, dev_g_top, dev_g_left, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, Ng);
// do back propagation of wave over field, store in z
for(int k = NT - 2; k > 0; k--)
{
backpropagation1<<<grid_backpropagation1, threads_backpropagation1>>>(dev_z, dev_f, k, Ng);
backpropagation2<<<grid_backpropagation2, threads_backpropagation2>>>(dev_z, dev_rr_bottom, dev_rr_right, dev_rr_top, dev_rr_left, k, Ng);
}
// apply Laplace operator to u, store in Lu
laplace<<<grid_laplace, threads_laplace>>>(dev_u, dev_Lu, Ng);
laplace_corners<<<grid_laplace_corners, threads_laplace_corners>>>(dev_u, dev_Lu, Ng);
// update differential of f, store in df
update_differential<<<grid_differential, threads_differential>>>(dev_df, dev_df_avg, dev_z, dev_Lu, dev_f, Ng);
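// weight each group's differential inversely to its spread from the across-group mean, then rebuild df_avg as the weighted average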
weights_differential1<<<grid_weights1, threads_weights1>>>(dev_norm, dev_df, dev_df_avg, Ng);
dev_df_avg.set(0.f);
weights_differential2<<<grid_weights2, threads_weights2>>>(dev_weights, dev_total_weight, dev_norm, Ng);
average_differential<<<grid_propagation, threads_propagation>>>(dev_df_avg, dev_df, dev_weights, dev_total_weight, Ng);
float scale = omega * (alpha + (beta / pow(iter+1, gamma)));
// update f and f_minus_fo
update_field<<<grid_field, threads_field>>>(dev_f, dev_df_avg, dev_f_minus_fo, dev_fo, scale, Ng);
// error calculation
// copy from device to host
copy(f_minus_fo, dev_f_minus_fo);
curr_epsilon = norm(f_minus_fo, NX, NY) / norm(fo, NX, NY) * 100.f;
float current_t = (float)(clock()-ti) / CLOCKS_PER_SEC;
if (abs(file_epsilon - curr_epsilon) > 0.2f) {
convergence_file << curr_epsilon << " ";
time_file << current_t << " ";
file_epsilon = curr_epsilon;
}
cout << setw(w_iter) << iter << " "
<< setw(w_eps) << curr_epsilon << " "
<< setw(w_diff) << prev_epsilon - curr_epsilon << " "
<< scale << " \n";
// stop if reached target epsilon
if (curr_epsilon <= target_epsilon) {
cerr << "reached target epsilon = " << target_epsilon << ", at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// stop if epsilon diverges
if (curr_epsilon > prev_epsilon ||
std::isnan(curr_epsilon)) {
cerr << "diverged at iter = " << iter << ", epsilon = " << curr_epsilon << "\n\n";
break;
}
// update prev_epsilon
prev_epsilon = curr_epsilon;
}
cudaDeviceSynchronize();
int tf = clock();
cout << endl;
cerr << "time (s): " << (float)(tf - ti) / CLOCKS_PER_SEC << "\n";
// copy from device to host
copy(f, dev_f);
string f_name = "sirt-norm-" + to_string(group_size) + "-" + basename + ".txt";
/* cerr << "writing to '" << f_name << "'...\n\n"; */
ofstream f_out(f_name);
write(f_out, f);
/* size_t free, total; */
/* cudaMemGetInfo(&free, &total); */
/* cerr << "used mem: " << float(total - free) / (1024 * 1024) << " MB\n" */
/* << "free mem: " << float(free) / (1024 * 1024) << " MB\n" */
/* << "total mem: " << float(total) / (1024 * 1024) << " MB\n\n"; */
}
float norm(host_ptr<float> A, int nx, int ny)
{
float sum = 0;
for (int j = 0; j < ny; ++j)
for (int i = 0; i < nx; ++i)
sum += A(i, j) * A(i, j);
return sqrtf(sum);
}
void Position_Transducers(host_ptr<int> ii, host_ptr<int> jj, int num)
{
// fills ii and jj with the (x, y) grid coordinates of the num transducers, placed along the four sides of the square sensor region
int p = 0;
for(p = 0; p < 160; p++)
{
ii(p) = 21 + (p + 1);
jj(p) = 181;
}
for(p = 160; p < 320; p++)
{
ii(p) = 181;
jj(p) = 181 - ((p + 1) - 160);
}
for(p = 320; p < 480; p++)
{
ii(p) = 181 - ((p + 1) - 320);
jj(p) = 21;
}
for(p = 480; p < num; p++)
{
ii(p) = 21;
jj(p) = 21 + ((p + 1) - 480);
}
}
/**********DEVICE FUNCTION DEFINITIONS***********/
__global__ void propagation(
kernel_ptr<int> const ii,
kernel_ptr<int> const jj,
kernel_ptr<float> const f,
kernel_ptr<float> u,
int k, int group_size, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i < NX && j < NY && g < Ng) {
float v = 1500.f * sqrtf(1.f + f(i, j));
float r = v * DT / HX;
float s = 2.f - 4.f * r * r;
float val; // will hold new u at (i, j, k + 1)
// not at boundary
if (i != 0 && i != NX - 1 && j != 0 && j != NY - 1) {
val =
r * r *
(u(i+1, j, k, g) +
u(i-1, j, k, g) +
u(i, j-1, k, g) +
u(i, j+1, k, g)) +
s * u(i, j, k, g) -
u(i, j, k-1, g);
int p = g * group_size;
int jp1 = jj(p);
int jp2 = jj(p + group_size - 1);
int ip1 = ii(p);
int ip2 = ii(p + group_size - 1);
minmax(jp1, jp2);
minmax(ip1, ip2);
// at sensor, k <= 24
if (j + 1 >= jp1 && j + 1 <= jp2 && i + 1 >= ip1 && i + 1 <= ip2 && k + 1 <= 24) {
float t = k * DT - TT;
// add wave value
val +=
v * v * DT * DT *
cosf(OMEGAC * t) *
expf(-(t * t) / (2.f * TAO * TAO));
}
}
// at boundary
else {
// boundary booleans
bool top = (j == 0);
bool bottom = (j == NY - 1);
bool left = (i == 0);
bool right = (i == NX - 1);
// index variables for different boundary cases
int ja = top ? (j + 1) : bottom ? (j - 1) : j;
int jb = top ? (j + 2) : bottom ? (j - 2) : j;
int ia = left ? (i + 1) : right ? (i - 1) : i;
int ib = left ? (i + 2) : right ? (i - 2) : i;
val =
(2.f - 2.f * r - r * r) * u(i, j, k, g) +
2.f * r * (1.f + r) * u(ia, ja, k, g) -
r * r * u(ib, jb, k, g) +
(2.f * r - 1.f) * u(i, j, k-1, g) -
2.f * r * u(ia, ja, k-1, g);
}
u(i, j, k+1, g) = val;
}
}
__global__ void propagation_at_corners(
kernel_ptr<float> u,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (k < NT && g < Ng) {
u(0, 0, k, g) =
1.f / 2.f * (u(0, 1, k, g) + u(1, 0, k, g));
u(NX-1, 0, k, g) =
1.f / 2.f * (u(NX-2, 0, k, g) + u(NX-1, 1, k, g));
u(0, NY-1, k, g) =
1.f / 2.f * (u(0, NY-2, k, g) + u(1, NY-1, k, g));
u(NX-1, NY-1, k, g) =
1.f / 2.f * (u(NX-2, NY-1, k, g) + u(NX-1, NY-2, k, g));
}
}
__global__ void difference_signal(
kernel_ptr<float> const u,
kernel_ptr<float> const g_bottom,
kernel_ptr<float> const g_right,
kernel_ptr<float> const g_top,
kernel_ptr<float> const g_left,
kernel_ptr<float> rr_bottom,
kernel_ptr<float> rr_right,
kernel_ptr<float> rr_top,
kernel_ptr<float> rr_left,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 20 && i < 180 && k > 1 && k < NT && g < Ng) {
// store difference at time k of original signal
// and current signal at bottom sensor row
rr_bottom(i, k, g) = g_bottom(i, k, g) - u(i, 180, k, g);
// store difference at time k of original signal
// and current signal at top sensor row
rr_top(i, k, g) = g_top(i, k, g) - u(i, 20, k, g);
// store difference at time k of original signal
// and current signal at right sensor column
rr_right(i, k, g) = g_right(i, k, g) - u(180, i, k, g);
// store difference at time k of original signal
// and current signal at left sensor column
rr_left(i, k, g) = g_left(i, k, g) - u(20, i, k, g);
}
}
__global__ void backpropagation1(
kernel_ptr<float> z,
kernel_ptr<float> const f,
int k, int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if(i >= 1 && i < (NX - 1) && j >= 1 && j < (NY - 1) && g < Ng)
{
z(i, j, k, g) =
1500.f * 1500.f * (DT * DT) *
((1.f + f(i, j-1)) * z(i, j-1, k+1, g) +
(1.f + f(i, j+1)) * z(i, j+1, k+1, g) +
(1.f + f(i-1, j)) * z(i-1, j, k+1, g) +
(1.f + f(i+1, j)) * z(i+1, j, k+1, g) -
4.f * (1.f + f(i, j)) *
z(i, j, k+1, g)) / (H * H) +
2.f * z(i, j, k+1, g) -
z(i, j, k+2, g);
}
}
__global__ void backpropagation2(
kernel_ptr<float> z,
kernel_ptr<float> const rr_bottom,
kernel_ptr<float> const rr_right,
kernel_ptr<float> const rr_top,
kernel_ptr<float> const rr_left,
int k, int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if (g < Ng) {
if(i >= 21 && i < 180) {
z(i, 180, k, g) =
z(i, 179, k, g) +
rr_bottom(i, k, g) * H * 1000.f;
z(i, 20, k, g) =
z(i, 21, k, g) +
rr_top(i, k, g) * H * 1000.f;
z(180, i, k, g) =
z(179, i, k, g) +
rr_right(i, k, g) * H * 1000.f;
z(20, i, k, g) =
z(21, i, k, g) +
rr_left(i, k, g) * H * 1000.f;
}
if (i >= 1 && i < (NX - 1)) {
z(i, 0, k, g) =
z(i, 1, k, g);
z(i, NY-1, k, g) =
z(i, NY-2, k, g);
z(0, i, k, g) =
z(1, i, k, g);
z(NX-1, i, k, g) =
z(NX-2, i, k, g);
}
else if (i == 0) {
z(0, 0, k, g) =
(z(1, 0, k, g) +
z(0, 1, k, g)) / 2.f;
z(NX-1, 0, k, g) =
(z(NX-2, 0, k, g) +
z(NX-1, 1, k, g)) / 2.f;
z(0, NY-1, k, g) =
(z(1, NY-1, k, g) +
z(0, NY-2, k, g)) / 2.f;
z(NX-1, NY-1, k, g) =
(z(NX-2, NY-1, k, g) +
z(NX-1, NY-2, k, g)) / 2.f;
}
}
}
__global__ void laplace(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng) {
int i = tx % NX;
int j = tx / NX;
int ja = (j > 0) ? (j - 1) : j;
int jb = (j < NY - 1) ? (j + 1) : j;
int ia = (i > 0) ? (i - 1) : i;
int ib = (i < NX - 1) ? (i + 1) : i;
Lu(i, j, k+1, g) =
(u(i, ja, k+1, g) +
u(i, jb, k+1, g) +
u(ia, j, k+1, g) +
u(ib, j, k+1, g) -
4.f * u(i, j, k+1, g)) / (H * H);
}
}
__global__ void laplace_corners(
kernel_ptr<float> const u,
kernel_ptr<float> Lu,
int Ng)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
int g = threadIdx.y + blockIdx.y * blockDim.y;
if ((k + 1) < NT && g < Ng) {
Lu(0, 0, k+1, g) =
(Lu(1, 0, k+1, g) +
Lu(0, 1, k+1, g)) / 2.f;
Lu(NX-1, 0, k+1, g) =
(Lu(NX-2, 0, k+1, g) +
Lu(NX-1, 1, k+1, g)) / 2.f;
Lu(0, NY-1, k+1, g) =
(Lu(1, NY-1, k+1, g) +
Lu(0, NY-2, k+1, g)) / 2.f;
Lu(NX-1, NY-1, k+1, g) =
(Lu(NX-2, NY-1, k+1, g) +
Lu(NX-1, NY-2, k+1, g)) / 2.f;
}
}
__global__ void update_differential(
kernel_ptr<float> df,
kernel_ptr<float> df_avg,
kernel_ptr<float> const z,
kernel_ptr<float> const Lu,
kernel_ptr<float> const f,
int Ng)
{
// Map from threadIdx / BlockIdx to pixel position
int tx = threadIdx.x + blockIdx.x * blockDim.x;
int k = threadIdx.y + blockIdx.y * blockDim.y;
int g = threadIdx.z + blockIdx.z * blockDim.z;
if (tx < (NX * NY) && (k + 1) < NT && g < Ng) {
int i = tx % NX;
int j = tx / NX;
float val =
z(i, j, k+1, g) *
Lu(i, j, k+1, g) /
(1.f + f(i, j));
atomicAdd(&df(i, j, g), val);
atomicAdd(&df_avg(i, j), val);
}
}
__global__ void update_field(
kernel_ptr<float> f,
kernel_ptr<float> const df_avg,
kernel_ptr<float> f_minus_fo,
kernel_ptr<float> const fo,
float scale,
int Ng)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < NX && j < NY)
{
bool in_sensor_field = (i >= 21) && (i < 180) && (j >= 21) && (j < 180);
if (in_sensor_field)
f(i, j) += scale * df_avg(i, j);
f_minus_fo(i, j) = f(i, j) - fo(i, j);
}
}
/**********INLINE FUNCTION DEFINITIONS**********/
inline int grid_size(int n, int threads)
{
return ceil(float(n) / threads);
}
// POST-CONDITION: a <= b
template <typename T>
__host__ __device__
void minmax(T &a, T &b)
{
if (a > b) {
T t = a;
a = b;
b = t;
}
}
|
176b730ec33dd423ce01bb43a60cf8a2a48b2a03.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <float.h>
#include <cmath>
/*
* This macro checks for API errors in the CUDA calls.
*/
#define gpuErrchk(ans) { gpuAssert( (ans), __FILE__, __LINE__ ); }
inline void
gpuAssert( hipError_t code, const char * file, int line, bool abort = true )
{
if ( hipSuccess != code )
{
fprintf( stderr, "\nGPUassert: %s %s %d\n", hipGetErrorString( code ), file, line );
if ( abort )
exit( code );
}
return;
} /* gpuAssert */
/* ========================================================================== */
/* Voronoi2D */
/* -------------------------------------------------------------------------- */
/*!
* @function Voronoi2D
*
* @abstract
*
* @discussion Calculates Voronoi cells
*
* @param inNbOfSites [input] The number of the sites (seeds).
* type: const size_t
*
* @param inWidth [input] The width of the Voronoi image.
* type: const size_t
*
* @param inHeight [input] The height of the Voronoi image.
* type: const size_t
*
* @param inX [input] The x coordinates of the points
* Dimensions : Nx , type: float
*
* @param inY [input] The y coordinates of the points
* Dimensions : Ny , type: float
*
* @param inV [input] The inV holds for applying a threshold/color
* to the cell region
* Dimensions : inNbOfSites, type: int
*
* @param ouVoronoi [output] The output data (pixels)
* Dimensions : The total number of threads in the grid
* ( theBlocksPerGridX * theBlocksPerGridY * theThreadsPerBlockX * theThreadsPerBlockY )
* type: int
*/
/* ========================================================================== */
__global__ void Voronoi2D(
const size_t inNbOfSites,
const size_t inWidth,
const size_t inHeight,
float * const inX,
float * const inY,
int * const inV,
int * const ouVoronoi )
{
float distX , distY;
float theTempDistance ,theDistance = FLT_MAX;
int theThreshold;
//loop through all points calculating distance
for ( int y = ( ( blockIdx.y * blockDim.y ) + threadIdx.y ); y < inHeight; y += blockDim.y * gridDim.y )
{
for ( int x = ( ( blockIdx.x * blockDim.x ) + threadIdx.x ); x < inWidth; x += blockDim.x * gridDim.x )
{
int theGlobalIdx = y * ( blockDim.x * gridDim.x ) + x;
// reset the running minimum for this pixel (a thread may visit several pixels in the grid-stride loops)
theDistance = FLT_MAX;
//Calculate distances for all the points
for ( int i = 0; i < inNbOfSites; i++ )
{
distX = inX[ i ] - x;
distY = inY[ i ] - y;
theTempDistance = distX * distX + distY * distY;
//if this Point is closer , assign proper threshold
if ( theTempDistance < theDistance )
{
theDistance = theTempDistance;
theThreshold = inV[ i ];
}
}
//write result back to global memory
*( ouVoronoi + theGlobalIdx ) = theThreshold;
} /* x */
} /* y */
}
int main()
{
const size_t Width = 256 , Height = 256;
const size_t Nx = 128 , Ny = 128;
const size_t NbOfSites = 100; //should be <= Nx and Ny
const size_t ThreadsPerBlockX = 16 , ThreadsPerBlockY = 16 ,BlocksPerGridX = Width / 16 , BlocksPerGridY = Height / 16;
const size_t TotalNbOfPixels = ( Width * Height );
// Allocate host memory
float * X = (float*) malloc( Nx * sizeof (*X) );
assert( NULL != X );
float * Y = (float*) malloc( Ny * sizeof (*Y) );
assert( NULL != Y );
int * V = (int*) malloc( NbOfSites * sizeof (*V) );
assert( NULL != V );
int * VoronoiDiagram = (int*) malloc ( TotalNbOfPixels * sizeof(*VoronoiDiagram) );
assert( NULL != VoronoiDiagram );
float * devX , * devY;
int * devVoronoiDiagram , * devV;
// Allocate device memory
gpuErrchk( hipMalloc( (void**) &devX, Nx * sizeof(*devX) ) );
gpuErrchk( hipMalloc( (void**) &devY, Ny * sizeof(*devY) ) );
gpuErrchk( hipMalloc( (void**) &devV, NbOfSites * sizeof(*devV) ) );
gpuErrchk( hipMalloc( (void**) &devVoronoiDiagram, TotalNbOfPixels * sizeof(*devVoronoiDiagram) ) );
// Create random coordinates
srand((unsigned int)time(NULL));
for ( int i = 0; i < Nx; i++ ) X[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Width );
for ( int i = 0; i < Ny; i++ ) Y[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Height );
for ( int i = 0; i < NbOfSites; i++ ) V[ i ] = i;
// Define grid dimensions
dim3 BlocksDim ( BlocksPerGridX , BlocksPerGridY );
dim3 ThreadsPerBlock ( ThreadsPerBlockX , ThreadsPerBlockY );
gpuErrchk( hipMemcpy( devV , V , NbOfSites * sizeof( *V ), hipMemcpyHostToDevice ) );
gpuErrchk( hipMemcpy( devX , X , Nx * sizeof( *X ), hipMemcpyHostToDevice ) );
gpuErrchk( hipMemcpy( devY , Y , Ny * sizeof( *Y ), hipMemcpyHostToDevice ) );
hipEvent_t CurrentEventPre,
CurrentEventPost;
float CurrentPostPreTimeMS;
gpuErrchk( hipEventCreate( &CurrentEventPre ) );
gpuErrchk( hipEventCreate( &CurrentEventPost ) );
gpuErrchk( hipEventRecord( CurrentEventPre ) );
hipLaunchKernelGGL(( Voronoi2D), dim3(BlocksDim),dim3(ThreadsPerBlock) , 0, 0, NbOfSites,
Width,
Height,
devX,
devY,
devV,
devVoronoiDiagram );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipEventRecord( CurrentEventPost ) );
gpuErrchk( hipEventSynchronize( CurrentEventPost ) );
gpuErrchk( hipEventElapsedTime( &CurrentPostPreTimeMS, CurrentEventPre, CurrentEventPost ) );
printf( "\nGPU time for calling Voronoi: %f ms\n", CurrentPostPreTimeMS );
gpuErrchk( hipMemcpy( VoronoiDiagram,
devVoronoiDiagram ,
TotalNbOfPixels * sizeof(*devVoronoiDiagram), hipMemcpyDeviceToHost ) );
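// write the diagram to disk as raw binary int values, one per pixel, in row-major order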
{
FILE * theFile;
theFile = fopen( "Voronoi2D", "wb" );
assert( NULL != theFile );
// do the write outside assert() so it is not compiled away when NDEBUG is defined
const size_t theNbWritten = fwrite( VoronoiDiagram , sizeof(*devVoronoiDiagram), TotalNbOfPixels , theFile );
assert( TotalNbOfPixels == theNbWritten );
(void) theNbWritten; // avoid an unused-variable warning in NDEBUG builds
fclose( theFile );
}
//free memory
gpuErrchk( hipFree( devX ) );
gpuErrchk( hipFree( devY ) );
gpuErrchk( hipFree( devV ) );
gpuErrchk( hipFree( devVoronoiDiagram ) );
free( X );
free( Y );
free( V );
free( VoronoiDiagram );
return 0;
}
| 176b730ec33dd423ce01bb43a60cf8a2a48b2a03.cu | #include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <float.h>
#include <cmath>
/*
* This macro checks for API errors in the CUDA calls.
*/
#define gpuErrchk(ans) { gpuAssert( (ans), __FILE__, __LINE__ ); }
inline void
gpuAssert( cudaError_t code, const char * file, int line, bool abort = true )
{
if ( cudaSuccess != code )
{
fprintf( stderr, "\nGPUassert: %s %s %d\n", cudaGetErrorString( code ), file, line );
if ( abort )
exit( code );
}
return;
} /* gpuAssert */
/* ========================================================================== */
/* Voronoi2D */
/* -------------------------------------------------------------------------- */
/*!
* @function Voronoi2D
*
* @abstract
*
* @discussion Calculates Voronoi cells
*
* @param inNbOfSites [input] The number of the sites (seeds).
* type: const size_t
*
* @param inWidth [input] The width of the Voronoi image.
* type: const size_t
*
* @param inHeight [input] The height of the Voronoi image.
* type: const size_t
*
* @param inX [input] The x coordinates of the points
* Dimensions : Nx , type: float
*
* @param inY [input] The y coordinates of the points
* Dimensions : Ny , type: float
*
* @param inV [input] The inV holds for applying a threshold/color
* to the cell region
* Dimensions : inNbOfSites, type: int
*
* @param ouVoronoi [output] The output data (pixels)
* Dimensions : The total number of threads in the grid
* ( theBlocksPerGridX * theBlocksPerGridY * theThreadsPerBlockX * theThreadsPerBlockY )
* type: int
*/
/* ========================================================================== */
__global__ void Voronoi2D(
const size_t inNbOfSites,
const size_t inWidth,
const size_t inHeight,
float * const inX,
float * const inY,
int * const inV,
int * const ouVoronoi )
{
float distX , distY;
float theTempDistance ,theDistance = FLT_MAX;
int theThreshold;
//loop through all points calculating distance
for ( int y = ( ( blockIdx.y * blockDim.y ) + threadIdx.y ); y < inHeight; y += blockDim.y * gridDim.y )
{
for ( int x = ( ( blockIdx.x * blockDim.x ) + threadIdx.x ); x < inWidth; x += blockDim.x * gridDim.x )
{
int theGlobalIdx = y * ( blockDim.x * gridDim.x ) + x;
// reset the running minimum for this pixel (a thread may visit several pixels in the grid-stride loops)
theDistance = FLT_MAX;
//Calculate distances for all the points
for ( int i = 0; i < inNbOfSites; i++ )
{
distX = inX[ i ] - x;
distY = inY[ i ] - y;
theTempDistance = distX * distX + distY * distY;
//if this Point is closer , assign proper threshold
if ( theTempDistance < theDistance )
{
theDistance = theTempDistance;
theThreshold = inV[ i ];
}
}
//write result back to global memory
*( ouVoronoi + theGlobalIdx ) = theThreshold;
} /* x */
} /* y */
}
int main()
{
const size_t Width = 256 , Height = 256;
const size_t Nx = 128 , Ny = 128;
const size_t NbOfSites = 100; //should be <= Nx and Ny
const size_t ThreadsPerBlockX = 16 , ThreadsPerBlockY = 16 ,BlocksPerGridX = Width / 16 , BlocksPerGridY = Height / 16;
const size_t TotalNbOfPixels = ( Width * Height );
// Allocate host memory
float * X = (float*) malloc( Nx * sizeof (*X) );
assert( NULL != X );
float * Y = (float*) malloc( Ny * sizeof (*Y) );
assert( NULL != Y );
int * V = (int*) malloc( NbOfSites * sizeof (*V) );
assert( NULL != V );
int * VoronoiDiagram = (int*) malloc ( TotalNbOfPixels * sizeof(*VoronoiDiagram) );
assert( NULL != VoronoiDiagram );
float * devX , * devY;
int * devVoronoiDiagram , * devV;
// Allocate device memory
gpuErrchk( cudaMalloc( (void**) &devX, Nx * sizeof(*devX) ) );
gpuErrchk( cudaMalloc( (void**) &devY, Ny * sizeof(*devY) ) );
gpuErrchk( cudaMalloc( (void**) &devV, NbOfSites * sizeof(*devV) ) );
gpuErrchk( cudaMalloc( (void**) &devVoronoiDiagram, TotalNbOfPixels * sizeof(*devVoronoiDiagram) ) );
// Create random coordinates
srand((unsigned int)time(NULL));
for ( int i = 0; i < Nx; i++ ) X[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Width );
for ( int i = 0; i < Ny; i++ ) Y[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Height );
for ( int i = 0; i < NbOfSites; i++ ) V[ i ] = i;
// Define grid dimensions
dim3 BlocksDim ( BlocksPerGridX , BlocksPerGridY );
dim3 ThreadsPerBlock ( ThreadsPerBlockX , ThreadsPerBlockY );
gpuErrchk( cudaMemcpy( devV , V , NbOfSites * sizeof( *V ), cudaMemcpyHostToDevice ) );
gpuErrchk( cudaMemcpy( devX , X , Nx * sizeof( *X ), cudaMemcpyHostToDevice ) );
gpuErrchk( cudaMemcpy( devY , Y , Ny * sizeof( *Y ), cudaMemcpyHostToDevice ) );
cudaEvent_t CurrentEventPre,
CurrentEventPost;
float CurrentPostPreTimeMS;
gpuErrchk( cudaEventCreate( &CurrentEventPre ) );
gpuErrchk( cudaEventCreate( &CurrentEventPost ) );
gpuErrchk( cudaEventRecord( CurrentEventPre ) );
Voronoi2D<<< BlocksDim,ThreadsPerBlock >>>( NbOfSites,
Width,
Height,
devX,
devY,
devV,
devVoronoiDiagram );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaEventRecord( CurrentEventPost ) );
gpuErrchk( cudaEventSynchronize( CurrentEventPost ) );
gpuErrchk( cudaEventElapsedTime( &CurrentPostPreTimeMS, CurrentEventPre, CurrentEventPost ) );
printf( "\nGPU time for calling Voronoi: %f ms\n", CurrentPostPreTimeMS );
gpuErrchk( cudaMemcpy( VoronoiDiagram,
devVoronoiDiagram ,
TotalNbOfPixels * sizeof(*devVoronoiDiagram), cudaMemcpyDeviceToHost ) );
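// write the diagram to disk as raw binary int values, one per pixel, in row-major order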
{
FILE * theFile;
theFile = fopen( "Voronoi2D", "wb" );
assert( NULL != theFile );
// do the write outside assert() so it is not compiled away when NDEBUG is defined
const size_t theNbWritten = fwrite( VoronoiDiagram , sizeof(*devVoronoiDiagram), TotalNbOfPixels , theFile );
assert( TotalNbOfPixels == theNbWritten );
(void) theNbWritten; // avoid an unused-variable warning in NDEBUG builds
fclose( theFile );
}
//free memory
gpuErrchk( cudaFree( devX ) );
gpuErrchk( cudaFree( devY ) );
gpuErrchk( cudaFree( devV ) );
gpuErrchk( cudaFree( devVoronoiDiagram ) );
free( X );
free( Y );
free( V );
free( VoronoiDiagram );
return 0;
}
|
9a00d443661e9039aded739bcd5127a227cb749f.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef __HIPCC__
#define CUDA_CONST_VAR __device__
#endif
#include "PixelQuintuplet.cuh"
#include "allocate.h"
SDL::pixelQuintuplets::pixelQuintuplets()
{
pixelIndices = nullptr;
T5Indices = nullptr;
nPixelQuintuplets = nullptr;
isDup = nullptr;
score = nullptr;
}
SDL::pixelQuintuplets::~pixelQuintuplets()
{
}
void SDL::pixelQuintuplets::freeMemory()
{
hipFree(pixelIndices);
hipFree(T5Indices);
hipFree(nPixelQuintuplets);
hipFree(isDup);
hipFree(score);
}
void SDL::createPixelQuintupletsInUnifiedMemory(struct SDL::pixelQuintuplets& pixelQuintupletsInGPU, unsigned int maxPixelQuintuplets)
{
hipMallocManaged(&pixelQuintupletsInGPU.pixelIndices, maxPixelQuintuplets * sizeof(unsigned int));
hipMallocManaged(&pixelQuintupletsInGPU.T5Indices, maxPixelQuintuplets * sizeof(unsigned int));
hipMallocManaged(&pixelQuintupletsInGPU.nPixelQuintuplets, sizeof(unsigned int));
hipMallocManaged(&pixelQuintupletsInGPU.isDup, maxPixelQuintuplets * sizeof(bool));
hipMallocManaged(&pixelQuintupletsInGPU.score, maxPixelQuintuplets * sizeof(float));
#ifdef CUT_VALUE_DEBUG
hipMallocManaged(&pixelQuintupletsInGPU.rzChiSquared, maxPixelQuintuplets * sizeof(unsigned int));
hipMallocManaged(&pixelQuintupletsInGPU.rPhiChiSquared, maxPixelQuintuplets * sizeof(unsigned int));
hipMallocManaged(&pixelQuintupletsInGPU.rPhiChiSquaredInwards, maxPixelQuintuplets * sizeof(unsigned int));
#endif
hipMemset(pixelQuintupletsInGPU.nPixelQuintuplets, 0, sizeof(unsigned int));
}
void SDL::createPixelQuintupletsInExplicitMemory(struct SDL::pixelQuintuplets& pixelQuintupletsInGPU, unsigned int maxPixelQuintuplets)
{
hipMalloc(&pixelQuintupletsInGPU.pixelIndices, maxPixelQuintuplets * sizeof(unsigned int));
hipMalloc(&pixelQuintupletsInGPU.T5Indices, maxPixelQuintuplets * sizeof(unsigned int));
hipMalloc(&pixelQuintupletsInGPU.nPixelQuintuplets, sizeof(unsigned int));
hipMalloc(&pixelQuintupletsInGPU.isDup, maxPixelQuintuplets * sizeof(bool));
hipMalloc(&pixelQuintupletsInGPU.score, maxPixelQuintuplets * sizeof(float));
hipMemset(pixelQuintupletsInGPU.nPixelQuintuplets, 0, sizeof(unsigned int));
}
__device__ void SDL::rmPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelQuintupletIndex)
{
pixelQuintupletsInGPU.isDup[pixelQuintupletIndex] = 1;
}
#ifdef CUT_VALUE_DEBUG
__device__ void SDL::addPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelIndex, unsigned int T5Index, unsigned int pixelQuintupletIndex, float& rzChiSquared, float& rPhiChiSquared, float& rPhiChiSquaredInwards, float score)
#else
__device__ void SDL::addPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelIndex, unsigned int T5Index, unsigned int pixelQuintupletIndex, float score)
#endif
{
pixelQuintupletsInGPU.pixelIndices[pixelQuintupletIndex] = pixelIndex;
pixelQuintupletsInGPU.T5Indices[pixelQuintupletIndex] = T5Index;
pixelQuintupletsInGPU.isDup[pixelQuintupletIndex] = 0;
pixelQuintupletsInGPU.score[pixelQuintupletIndex] = score;
#ifdef CUT_VALUE_DEBUG
pixelQuintupletsInGPU.rzChiSquared[pixelQuintupletIndex] = rzChiSquared;
pixelQuintupletsInGPU.rPhiChiSquared[pixelQuintupletIndex] = rPhiChiSquared;
pixelQuintupletsInGPU.rPhiChiSquaredInwards[pixelQuintupletIndex] = rPhiChiSquaredInwards;
#endif
}
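// builds a pT5 candidate by pairing a pixel segment with an outer tracker quintuplet (T5):
// checks pixel-triplet compatibility against the inner T3 of the T5, computes the rz and r-phi
// chi-squared quantities, and applies the layer-dependent cuts when the fitted radii are small enough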
__device__ bool SDL::runPixelQuintupletDefaultAlgo(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU, struct segments& segmentsInGPU, struct triplets& tripletsInGPU, struct quintuplets& quintupletsInGPU, unsigned int& pixelSegmentIndex, unsigned int& quintupletIndex, float& rzChiSquared, float& rPhiChiSquared, float& rPhiChiSquaredInwards)
{
bool pass = true;
unsigned int pixelModuleIndex = segmentsInGPU.innerLowerModuleIndices[pixelSegmentIndex];
unsigned int pixelSegmentArrayIndex = pixelSegmentIndex - (600 * pixelModuleIndex);
unsigned int T5InnerT3Index = quintupletsInGPU.tripletIndices[2 * quintupletIndex];
unsigned int T5OuterT3Index = quintupletsInGPU.tripletIndices[2 * quintupletIndex + 1];
unsigned int firstSegmentIndex = tripletsInGPU.segmentIndices[2 * T5InnerT3Index];
unsigned int secondSegmentIndex = tripletsInGPU.segmentIndices[2 * T5InnerT3Index + 1];
unsigned int thirdSegmentIndex = tripletsInGPU.segmentIndices[2 * T5OuterT3Index];
unsigned int fourthSegmentIndex = tripletsInGPU.segmentIndices[2 * T5OuterT3Index + 1];
unsigned int pixelInnerMDIndex = segmentsInGPU.mdIndices[2 * pixelSegmentIndex];
unsigned int pixelOuterMDIndex = segmentsInGPU.mdIndices[2 * pixelSegmentIndex + 1];
unsigned int pixelAnchorHitIndex1 = mdsInGPU.hitIndices[2 * pixelInnerMDIndex];
unsigned int pixelNonAnchorHitIndex1 = mdsInGPU.hitIndices[2 * pixelInnerMDIndex + 1];
unsigned int pixelAnchorHitIndex2 = mdsInGPU.hitIndices[2 * pixelOuterMDIndex];
unsigned int pixelNonAnchorHitIndex2 = mdsInGPU.hitIndices[2 * pixelOuterMDIndex + 1];
unsigned int anchorHitIndex1 = segmentsInGPU.innerMiniDoubletAnchorHitIndices[firstSegmentIndex];
unsigned int anchorHitIndex2 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[firstSegmentIndex]; //same as second segment inner MD anchorhit index
unsigned int anchorHitIndex3 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[secondSegmentIndex]; //same as third segment inner MD anchor hit index
unsigned int anchorHitIndex4 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[thirdSegmentIndex]; //same as fourth segment inner MD anchor hit index
unsigned int anchorHitIndex5 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[fourthSegmentIndex];
unsigned int lowerModuleIndex1 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex];
unsigned int lowerModuleIndex2 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 1];
unsigned int lowerModuleIndex3 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 2];
unsigned int lowerModuleIndex4 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 3];
unsigned int lowerModuleIndex5 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 4];
unsigned int lowerModuleIndices[] = {lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5};
unsigned int anchorHits[] = {anchorHitIndex1, anchorHitIndex2, anchorHitIndex3, anchorHitIndex4, anchorHitIndex5};
unsigned int pixelHits[] = {pixelAnchorHitIndex1, pixelAnchorHitIndex2};
float pixelRadius, pixelRadiusError, tripletRadius, rPhiChiSquaredTemp;
pass = pass & runPixelTripletDefaultAlgo(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, tripletsInGPU, pixelSegmentIndex, T5InnerT3Index, pixelRadius, pixelRadiusError, tripletRadius, rPhiChiSquaredTemp);
rzChiSquared = computePT5RZChiSquared(modulesInGPU, hitsInGPU, pixelAnchorHitIndex1, pixelAnchorHitIndex2, anchorHits, lowerModuleIndices);
rPhiChiSquared = computePT5RPhiChiSquared(modulesInGPU, hitsInGPU, segmentsInGPU, pixelSegmentArrayIndex, anchorHits, lowerModuleIndices);
rPhiChiSquaredInwards = computePT5RPhiChiSquaredInwards(modulesInGPU, hitsInGPU, quintupletsInGPU, quintupletIndex, pixelHits);
if(segmentsInGPU.circleRadius[pixelSegmentArrayIndex] < 5.0/(2 * k2Rinv1GeVf))
{
pass = pass & passPT5RZChiSquaredCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rzChiSquared);
pass = pass & passPT5RPhiChiSquaredCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rPhiChiSquared);
}
if(quintupletsInGPU.regressionRadius[quintupletIndex] < 5.0/(2 * k2Rinv1GeVf))
{
pass = pass & passPT5RPhiChiSquaredInwardsCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rPhiChiSquaredInwards);
}
//other cuts will be filled here!
return pass;
}
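// Layer-dependent cut on the pT5 r-phi chi squared. The layer variables below
// re-encode module position as: barrel layers 1-6, endcap PS layers 7-11 (layer + 6),
// endcap 2S layers 12-16 (layer + 11). Layer combinations without an explicit
// threshold pass by default.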
__device__ bool SDL::passPT5RPhiChiSquaredCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float rPhiChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 48.921;
}
else if(layer4 == 4 and layer5 == 12)
{
return rPhiChiSquared < 97.948;
}
else if(layer4 == 4 and layer5 == 5)
{
return rPhiChiSquared < 129.3;
}
else if(layer4 == 7 and layer5 == 13)
{
return rPhiChiSquared < 56.21;
}
else if(layer4 == 7 and layer5 == 8)
{
return rPhiChiSquared < 74.198;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 21.265;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 37.058;
}
else if(layer4 == 8 and layer5 == 9)
{
return rPhiChiSquared < 42.578;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 32.253;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 37.058;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 97.947;
}
else if(layer4 == 5 and layer5 == 12)
{
return rPhiChiSquared < 129.3;
}
else if(layer4 == 5 and layer5 == 6)
{
return rPhiChiSquared < 170.68;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 48.92;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 74.2;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 42.58;
}
else if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 37.06;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 48.92;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 85.25;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rPhiChiSquared < 42.58;
}
else if(layer4 == 10 and layer5 == 16)
{
return rPhiChiSquared < 37.06;
}
else if(layer4 == 15 and layer5 == 16)
{
return rPhiChiSquared < 37.06;
}
}
return true;
}
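// Same layer encoding as passPT5RPhiChiSquaredCuts, but the cut is applied to the
// "inwards" chi squared of the two pixel hits against the T5 regression circle.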
__device__ bool SDL::passPT5RPhiChiSquaredInwardsCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float rPhiChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 451.141;
}
else if(layer4 == 4 and layer5 == 12)
{
return rPhiChiSquared < 786.173;
}
else if(layer4 == 4 and layer5 == 5)
{
return rPhiChiSquared < 595.545;
}
else if(layer4 == 7 and layer5 == 13)
{
return rPhiChiSquared < 581.339;
}
else if(layer4 == 7 and layer5 == 8)
{
return rPhiChiSquared < 112.537;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 225.322;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 1192.402;
}
else if(layer4 == 8 and layer5 == 9)
{
return rPhiChiSquared < 786.173;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 1037.817;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 1808.536;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 684.253;
}
else if(layer4 == 5 and layer5 == 12)
{
return rPhiChiSquared < 684.253;
}
else if(layer4 == 5 and layer5 == 6)
{
return rPhiChiSquared < 684.253;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 451.141;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 518.34;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 2077.92;
}
else if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 74.20;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 1808.536;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 786.173;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rPhiChiSquared < 1574.076;
}
else if(layer4 == 10 and layer5 == 16)
{
return rPhiChiSquared < 5492.11;
}
else if(layer4 == 15 and layer5 == 16)
{
return rPhiChiSquared < 2743.037;
}
}
return true;
}
__device__ float SDL::computePT5RPhiChiSquared(struct modules& modulesInGPU, struct hits& hitsInGPU, struct segments& segmentsInGPU, unsigned int pixelSegmentArrayIndex, unsigned int* anchorHits, unsigned int* lowerModuleIndices)
{
/*
Compute circle parameters from 3 pixel hits, and then use them to compute the chi squared for the outer hits
*/
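// The five outer anchor hits are compared against the circle (g, f, radius) seeded by
// the pixel segment; computeSigmasForRegression supplies the per-module uncertainties
// (delta1/delta2, slopes, isFlat) used as weights in computeChiSquared.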
float g = segmentsInGPU.circleCenterX[pixelSegmentArrayIndex];
float f = segmentsInGPU.circleCenterY[pixelSegmentArrayIndex];
float radius = segmentsInGPU.circleRadius[pixelSegmentArrayIndex];
float delta1[5], delta2[5], slopes[5];
bool isFlat[5];
float xs[5];
float ys[5];
float chiSquared = 0;
for(size_t i = 0; i < 5; i++)
{
xs[i] = hitsInGPU.xs[anchorHits[i]];
ys[i] = hitsInGPU.ys[anchorHits[i]];
}
computeSigmasForRegression(modulesInGPU, lowerModuleIndices, delta1, delta2, slopes, isFlat);
chiSquared = computeChiSquared(5, xs, ys, delta1, delta2, slopes, isFlat, g, f, radius);
return chiSquared;
}
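// Layer-dependent cut on the r-z chi squared from computePT5RZChiSquared, using the
// same barrel/endcap layer encoding as the r-phi cuts above.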
__device__ bool SDL::passPT5RZChiSquaredCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float& rzChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rzChiSquared < 451.141;
}
else if(layer4 == 4 and layer5 == 12)
{
return rzChiSquared < 392.654;
}
else if(layer4 == 4 and layer5 == 5)
{
return rzChiSquared < 225.322;
}
else if(layer4 == 7 and layer5 == 13)
{
return rzChiSquared < 595.546;
}
else if(layer4 == 7 and layer5 == 8)
{
return rzChiSquared < 196.111;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rzChiSquared < 297.446;
}
else if(layer4 == 8 and layer5 == 14)
{
return rzChiSquared < 451.141;
}
else if(layer4 == 8 and layer5 == 9)
{
return rzChiSquared < 518.339;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rzChiSquared < 341.75;
}
else if(layer4 == 9 and layer5 == 15)
{
return rzChiSquared < 341.75;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rzChiSquared < 392.655;
}
else if(layer4 == 5 and layer5 == 12)
{
return rzChiSquared < 341.75;
}
else if(layer4 == 5 and layer5 == 6)
{
return rzChiSquared < 112.537;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rzChiSquared < 595.545;
}
else if(layer4 == 8 and layer5 == 14)
{
return rzChiSquared < 74.198;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rzChiSquared < 518.339;
}
else if(layer4 == 9 and layer5 == 10)
{
return rzChiSquared < 8.046;
}
else if(layer4 == 9 and layer5 == 15)
{
return rzChiSquared < 451.141;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rzChiSquared < 56.207;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rzChiSquared < 64.578;
}
else if(layer4 == 10 and layer5 == 16)
{
return rzChiSquared < 85.250;
}
else if(layer4 == 15 and layer5 == 16)
{
return rzChiSquared < 85.250;
}
}
return true;
}
__device__ float SDL::computePT5RPhiChiSquaredInwards(struct modules& modulesInGPU, struct hits& hitsInGPU, struct quintuplets& quintupletsInGPU, unsigned int quintupletIndex, unsigned int* pixelHits)
{
/*Using the computed regression center and radius, compute the chi squared for the pixels*/
float g = quintupletsInGPU.regressionG[quintupletIndex];
float f = quintupletsInGPU.regressionF[quintupletIndex];
float r = quintupletsInGPU.regressionRadius[quintupletIndex];
float x, y;
float chiSquared = 0;
for(size_t i = 0; i < 2; i++)
{
x = hitsInGPU.xs[pixelHits[i]];
y = hitsInGPU.ys[pixelHits[i]];
float residual = (x - g) * (x - g) + (y - f) * (y - f) - r * r;
chiSquared += residual * residual;
}
chiSquared /= 2;
return chiSquared;
}
__device__ float SDL::computePT5RZChiSquared(struct modules& modulesInGPU, struct hits& hitsInGPU, unsigned int& pixelAnchorHitIndex1, unsigned int& pixelAnchorHitIndex2, unsigned int* anchorHits, unsigned int* lowerModuleIndices)
{
//use the two anchor hits of the pixel segment to compute the slope
//then compute the pseudo chi squared of the five outer hits
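// The residual is taken in z for barrel layers (layer <= 6) and in rt for endcap layers,
// relative to the straight line through the two pixel anchor hits; the returned value is
// sqrt((1/5) * sum_i (residual_i / error_i)^2), i.e. an error-weighted RMS rather than a
// conventional chi squared.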
float& rtPix1 = hitsInGPU.rts[pixelAnchorHitIndex1];
float& rtPix2 = hitsInGPU.rts[pixelAnchorHitIndex2];
float& zPix1 = hitsInGPU.zs[pixelAnchorHitIndex1];
float& zPix2 = hitsInGPU.zs[pixelAnchorHitIndex2];
float slope = (zPix2 - zPix1)/(rtPix2 - rtPix1);
float rtAnchor, zAnchor;
float residual = 0;
float error = 0;
//hardcoded array indices!!!
float RMSE = 0;
float drdz;
for(size_t i = 0; i < 5; i++)
{
unsigned int& anchorHitIndex = anchorHits[i];
unsigned int& lowerModuleIndex = lowerModuleIndices[i];
rtAnchor = hitsInGPU.rts[anchorHitIndex];
zAnchor = hitsInGPU.zs[anchorHitIndex];
const int moduleType = modulesInGPU.moduleType[lowerModuleIndex];
const int moduleSide = modulesInGPU.sides[lowerModuleIndex];
const int moduleLayerType = modulesInGPU.moduleLayerType[lowerModuleIndex];
const int layer = modulesInGPU.layers[lowerModuleIndex] + 6 * (modulesInGPU.subdets[lowerModuleIndex] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex] == SDL::TwoS);
residual = (layer <= 6) ? (zAnchor - zPix1) - slope * (rtAnchor - rtPix1) : (rtAnchor - rtPix1) - (zAnchor - zPix1)/slope;
//PS Modules
if(moduleType == 0)
{
error = 0.15;
}
else //2S modules
{
error = 5.0;
}
//special dispensation to tilted PS modules!
if(moduleType == 0 and layer <= 6 and moduleSide != Center)
{
if(moduleLayerType == Strip)
{
drdz = modulesInGPU.drdzs[lowerModuleIndex];
}
else
{
drdz = modulesInGPU.drdzs[modulesInGPU.partnerModuleIndex(lowerModuleIndex)];
}
error *= 1/sqrtf(1 + drdz * drdz);
}
RMSE += (residual * residual)/(error * error);
}
RMSE = sqrtf(0.2 * RMSE);
return RMSE;
}
| 9a00d443661e9039aded739bcd5127a227cb749f.cu | #ifdef __CUDACC__
#define CUDA_CONST_VAR __device__
#endif
# include "PixelQuintuplet.cuh"
#include "allocate.h"
SDL::pixelQuintuplets::pixelQuintuplets()
{
pixelIndices = nullptr;
T5Indices = nullptr;
nPixelQuintuplets = nullptr;
isDup = nullptr;
score = nullptr;
}
SDL::pixelQuintuplets::~pixelQuintuplets()
{
}
void SDL::pixelQuintuplets::freeMemory()
{
cudaFree(pixelIndices);
cudaFree(T5Indices);
cudaFree(nPixelQuintuplets);
cudaFree(isDup);
cudaFree(score);
}
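// Allocate pixel-quintuplet storage; the managed (unified memory) variant also allocates
// the per-candidate chi-squared arrays when CUT_VALUE_DEBUG is defined, while the explicit
// variant allocates plain device memory only.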
void SDL::createPixelQuintupletsInUnifiedMemory(struct SDL::pixelQuintuplets& pixelQuintupletsInGPU, unsigned int maxPixelQuintuplets)
{
cudaMallocManaged(&pixelQuintupletsInGPU.pixelIndices, maxPixelQuintuplets * sizeof(unsigned int));
cudaMallocManaged(&pixelQuintupletsInGPU.T5Indices, maxPixelQuintuplets * sizeof(unsigned int));
cudaMallocManaged(&pixelQuintupletsInGPU.nPixelQuintuplets, sizeof(unsigned int));
cudaMallocManaged(&pixelQuintupletsInGPU.isDup, maxPixelQuintuplets * sizeof(bool));
cudaMallocManaged(&pixelQuintupletsInGPU.score, maxPixelQuintuplets * sizeof(float));
#ifdef CUT_VALUE_DEBUG
cudaMallocManaged(&pixelQuintupletsInGPU.rzChiSquared, maxPixelQuintuplets * sizeof(float));
cudaMallocManaged(&pixelQuintupletsInGPU.rPhiChiSquared, maxPixelQuintuplets * sizeof(float));
cudaMallocManaged(&pixelQuintupletsInGPU.rPhiChiSquaredInwards, maxPixelQuintuplets * sizeof(float));
#endif
cudaMemset(pixelQuintupletsInGPU.nPixelQuintuplets, 0, sizeof(unsigned int));
}
void SDL::createPixelQuintupletsInExplicitMemory(struct SDL::pixelQuintuplets& pixelQuintupletsInGPU, unsigned int maxPixelQuintuplets)
{
cudaMalloc(&pixelQuintupletsInGPU.pixelIndices, maxPixelQuintuplets * sizeof(unsigned int));
cudaMalloc(&pixelQuintupletsInGPU.T5Indices, maxPixelQuintuplets * sizeof(unsigned int));
cudaMalloc(&pixelQuintupletsInGPU.nPixelQuintuplets, sizeof(unsigned int));
cudaMalloc(&pixelQuintupletsInGPU.isDup, maxPixelQuintuplets * sizeof(bool));
cudaMalloc(&pixelQuintupletsInGPU.score, maxPixelQuintuplets * sizeof(float));
cudaMemset(pixelQuintupletsInGPU.nPixelQuintuplets, 0, sizeof(unsigned int));
}
__device__ void SDL::rmPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelQuintupletIndex)
{
pixelQuintupletsInGPU.isDup[pixelQuintupletIndex] = 1;
}
#ifdef CUT_VALUE_DEBUG
__device__ void SDL::addPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelIndex, unsigned int T5Index, unsigned int pixelQuintupletIndex, float& rzChiSquared, float& rPhiChiSquared, float& rPhiChiSquaredInwards, float score)
#else
__device__ void SDL::addPixelQuintupletToMemory(struct pixelQuintuplets& pixelQuintupletsInGPU, unsigned int pixelIndex, unsigned int T5Index, unsigned int pixelQuintupletIndex, float score)
#endif
{
pixelQuintupletsInGPU.pixelIndices[pixelQuintupletIndex] = pixelIndex;
pixelQuintupletsInGPU.T5Indices[pixelQuintupletIndex] = T5Index;
pixelQuintupletsInGPU.isDup[pixelQuintupletIndex] = 0;
pixelQuintupletsInGPU.score[pixelQuintupletIndex] = score;
#ifdef CUT_VALUE_DEBUG
pixelQuintupletsInGPU.rzChiSquared[pixelQuintupletIndex] = rzChiSquared;
pixelQuintupletsInGPU.rPhiChiSquared[pixelQuintupletIndex] = rPhiChiSquared;
pixelQuintupletsInGPU.rPhiChiSquaredInwards[pixelQuintupletIndex] = rPhiChiSquaredInwards;
#endif
}
__device__ bool SDL::runPixelQuintupletDefaultAlgo(struct modules& modulesInGPU, struct hits& hitsInGPU, struct miniDoublets& mdsInGPU, struct segments& segmentsInGPU, struct triplets& tripletsInGPU, struct quintuplets& quintupletsInGPU, unsigned int& pixelSegmentIndex, unsigned int& quintupletIndex, float& rzChiSquared, float& rPhiChiSquared, float& rPhiChiSquaredInwards)
{
bool pass = true;
unsigned int pixelModuleIndex = segmentsInGPU.innerLowerModuleIndices[pixelSegmentIndex];
unsigned int pixelSegmentArrayIndex = pixelSegmentIndex - (600 * pixelModuleIndex);
unsigned int T5InnerT3Index = quintupletsInGPU.tripletIndices[2 * quintupletIndex];
unsigned int T5OuterT3Index = quintupletsInGPU.tripletIndices[2 * quintupletIndex + 1];
unsigned int firstSegmentIndex = tripletsInGPU.segmentIndices[2 * T5InnerT3Index];
unsigned int secondSegmentIndex = tripletsInGPU.segmentIndices[2 * T5InnerT3Index + 1];
unsigned int thirdSegmentIndex = tripletsInGPU.segmentIndices[2 * T5OuterT3Index];
unsigned int fourthSegmentIndex = tripletsInGPU.segmentIndices[2 * T5OuterT3Index + 1];
unsigned int pixelInnerMDIndex = segmentsInGPU.mdIndices[2 * pixelSegmentIndex];
unsigned int pixelOuterMDIndex = segmentsInGPU.mdIndices[2 * pixelSegmentIndex + 1];
unsigned int pixelAnchorHitIndex1 = mdsInGPU.hitIndices[2 * pixelInnerMDIndex];
unsigned int pixelNonAnchorHitIndex1 = mdsInGPU.hitIndices[2 * pixelInnerMDIndex + 1];
unsigned int pixelAnchorHitIndex2 = mdsInGPU.hitIndices[2 * pixelOuterMDIndex];
unsigned int pixelNonAnchorHitIndex2 = mdsInGPU.hitIndices[2 * pixelOuterMDIndex + 1];
unsigned int anchorHitIndex1 = segmentsInGPU.innerMiniDoubletAnchorHitIndices[firstSegmentIndex];
unsigned int anchorHitIndex2 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[firstSegmentIndex]; //same as second segment inner MD anchorhit index
unsigned int anchorHitIndex3 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[secondSegmentIndex]; //same as third segment inner MD anchor hit index
unsigned int anchorHitIndex4 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[thirdSegmentIndex]; //same as fourth segment inner MD anchor hit index
unsigned int anchorHitIndex5 = segmentsInGPU.outerMiniDoubletAnchorHitIndices[fourthSegmentIndex];
unsigned int lowerModuleIndex1 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex];
unsigned int lowerModuleIndex2 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 1];
unsigned int lowerModuleIndex3 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 2];
unsigned int lowerModuleIndex4 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 3];
unsigned int lowerModuleIndex5 = quintupletsInGPU.lowerModuleIndices[5 * quintupletIndex + 4];
unsigned int lowerModuleIndices[] = {lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5};
unsigned int anchorHits[] = {anchorHitIndex1, anchorHitIndex2, anchorHitIndex3, anchorHitIndex4, anchorHitIndex5};
unsigned int pixelHits[] = {pixelAnchorHitIndex1, pixelAnchorHitIndex2};
float pixelRadius, pixelRadiusError, tripletRadius, rPhiChiSquaredTemp;
pass = pass & runPixelTripletDefaultAlgo(modulesInGPU, hitsInGPU, mdsInGPU, segmentsInGPU, tripletsInGPU, pixelSegmentIndex, T5InnerT3Index, pixelRadius, pixelRadiusError, tripletRadius, rPhiChiSquaredTemp);
rzChiSquared = computePT5RZChiSquared(modulesInGPU, hitsInGPU, pixelAnchorHitIndex1, pixelAnchorHitIndex2, anchorHits, lowerModuleIndices);
rPhiChiSquared = computePT5RPhiChiSquared(modulesInGPU, hitsInGPU, segmentsInGPU, pixelSegmentArrayIndex, anchorHits, lowerModuleIndices);
rPhiChiSquaredInwards = computePT5RPhiChiSquaredInwards(modulesInGPU, hitsInGPU, quintupletsInGPU, quintupletIndex, pixelHits);
if(segmentsInGPU.circleRadius[pixelSegmentArrayIndex] < 5.0/(2 * k2Rinv1GeVf))
{
pass = pass & passPT5RZChiSquaredCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rzChiSquared);
pass = pass & passPT5RPhiChiSquaredCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rPhiChiSquared);
}
if(quintupletsInGPU.regressionRadius[quintupletIndex] < 5.0/(2 * k2Rinv1GeVf))
{
pass = pass & passPT5RPhiChiSquaredInwardsCuts(modulesInGPU, lowerModuleIndex1, lowerModuleIndex2, lowerModuleIndex3, lowerModuleIndex4, lowerModuleIndex5, rPhiChiSquaredInwards);
}
//other cuts will be filled here!
return pass;
}
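// Layer-dependent cut on the pT5 r-phi chi squared. The layer variables below
// re-encode module position as: barrel layers 1-6, endcap PS layers 7-11 (layer + 6),
// endcap 2S layers 12-16 (layer + 11). Layer combinations without an explicit
// threshold pass by default.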
__device__ bool SDL::passPT5RPhiChiSquaredCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float rPhiChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 48.921;
}
else if(layer4 == 4 and layer5 == 12)
{
return rPhiChiSquared < 97.948;
}
else if(layer4 == 4 and layer5 == 5)
{
return rPhiChiSquared < 129.3;
}
else if(layer4 == 7 and layer5 == 13)
{
return rPhiChiSquared < 56.21;
}
else if(layer4 == 7 and layer5 == 8)
{
return rPhiChiSquared < 74.198;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 21.265;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 37.058;
}
else if(layer4 == 8 and layer5 == 9)
{
return rPhiChiSquared < 42.578;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 32.253;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 37.058;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 97.947;
}
else if(layer4 == 5 and layer5 == 12)
{
return rPhiChiSquared < 129.3;
}
else if(layer4 == 5 and layer5 == 6)
{
return rPhiChiSquared < 170.68;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 48.92;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 74.2;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 42.58;
}
else if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 37.06;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 48.92;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 85.25;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rPhiChiSquared < 42.58;
}
else if(layer4 == 10 and layer5 == 16)
{
return rPhiChiSquared < 37.06;
}
else if(layer4 == 15 and layer5 == 16)
{
return rPhiChiSquared < 37.06;
}
}
return true;
}
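// Same layer encoding as passPT5RPhiChiSquaredCuts, but the cut is applied to the
// "inwards" chi squared of the two pixel hits against the T5 regression circle.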
__device__ bool SDL::passPT5RPhiChiSquaredInwardsCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float rPhiChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 451.141;
}
else if(layer4 == 4 and layer5 == 12)
{
return rPhiChiSquared < 786.173;
}
else if(layer4 == 4 and layer5 == 5)
{
return rPhiChiSquared < 595.545;
}
else if(layer4 == 7 and layer5 == 13)
{
return rPhiChiSquared < 581.339;
}
else if(layer4 == 7 and layer5 == 8)
{
return rPhiChiSquared < 112.537;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 225.322;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 1192.402;
}
else if(layer4 == 8 and layer5 == 9)
{
return rPhiChiSquared < 786.173;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 1037.817;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 1808.536;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rPhiChiSquared < 684.253;
}
else if(layer4 == 5 and layer5 == 12)
{
return rPhiChiSquared < 684.253;
}
else if(layer4 == 5 and layer5 == 6)
{
return rPhiChiSquared < 684.253;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rPhiChiSquared < 451.141;
}
else if(layer4 == 8 and layer5 == 14)
{
return rPhiChiSquared < 518.34;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 2077.92;
}
else if(layer4 == 9 and layer5 == 10)
{
return rPhiChiSquared < 74.20;
}
else if(layer4 == 9 and layer5 == 15)
{
return rPhiChiSquared < 1808.536;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rPhiChiSquared < 786.173;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rPhiChiSquared < 1574.076;
}
else if(layer4 == 10 and layer5 == 16)
{
return rPhiChiSquared < 5492.11;
}
else if(layer4 == 15 and layer5 == 16)
{
return rPhiChiSquared < 2743.037;
}
}
return true;
}
__device__ float SDL::computePT5RPhiChiSquared(struct modules& modulesInGPU, struct hits& hitsInGPU, struct segments& segmentsInGPU, unsigned int pixelSegmentArrayIndex, unsigned int* anchorHits, unsigned int* lowerModuleIndices)
{
/*
Compute circle parameters from 3 pixel hits, and then use them to compute the chi squared for the outer hits
*/
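// The five outer anchor hits are compared against the circle (g, f, radius) seeded by
// the pixel segment; computeSigmasForRegression supplies the per-module uncertainties
// (delta1/delta2, slopes, isFlat) used as weights in computeChiSquared.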
float g = segmentsInGPU.circleCenterX[pixelSegmentArrayIndex];
float f = segmentsInGPU.circleCenterY[pixelSegmentArrayIndex];
float radius = segmentsInGPU.circleRadius[pixelSegmentArrayIndex];
float delta1[5], delta2[5], slopes[5];
bool isFlat[5];
float xs[5];
float ys[5];
float chiSquared = 0;
for(size_t i = 0; i < 5; i++)
{
xs[i] = hitsInGPU.xs[anchorHits[i]];
ys[i] = hitsInGPU.ys[anchorHits[i]];
}
computeSigmasForRegression(modulesInGPU, lowerModuleIndices, delta1, delta2, slopes, isFlat);
chiSquared = computeChiSquared(5, xs, ys, delta1, delta2, slopes, isFlat, g, f, radius);
return chiSquared;
}
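// Layer-dependent cut on the r-z chi squared from computePT5RZChiSquared, using the
// same barrel/endcap layer encoding as the r-phi cuts above.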
__device__ bool SDL::passPT5RZChiSquaredCuts(struct modules& modulesInGPU, unsigned int lowerModuleIndex1, unsigned int lowerModuleIndex2, unsigned int lowerModuleIndex3, unsigned int lowerModuleIndex4, unsigned int lowerModuleIndex5, float& rzChiSquared)
{
const int layer1 = modulesInGPU.layers[lowerModuleIndex1] + 6 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex1] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex1] == SDL::TwoS);
const int layer2 = modulesInGPU.layers[lowerModuleIndex2] + 6 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex2] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex2] == SDL::TwoS);
const int layer3 = modulesInGPU.layers[lowerModuleIndex3] + 6 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex3] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex3] == SDL::TwoS);
const int layer4 = modulesInGPU.layers[lowerModuleIndex4] + 6 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex4] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex4] == SDL::TwoS);
const int layer5 = modulesInGPU.layers[lowerModuleIndex5] + 6 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex5] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex5] == SDL::TwoS);
if(layer1 == 1 and layer2 == 2 and layer3 == 3)
{
if(layer4 == 12 and layer5 == 13)
{
return rzChiSquared < 451.141;
}
else if(layer4 == 4 and layer5 == 12)
{
return rzChiSquared < 392.654;
}
else if(layer4 == 4 and layer5 == 5)
{
return rzChiSquared < 225.322;
}
else if(layer4 == 7 and layer5 == 13)
{
return rzChiSquared < 595.546;
}
else if(layer4 == 7 and layer5 == 8)
{
return rzChiSquared < 196.111;
}
}
else if(layer1 == 1 and layer2 == 2 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rzChiSquared < 297.446;
}
else if(layer4 == 8 and layer5 == 14)
{
return rzChiSquared < 451.141;
}
else if(layer4 == 8 and layer5 == 9)
{
return rzChiSquared < 518.339;
}
}
else if(layer1 == 1 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 9 and layer5 == 10)
{
return rzChiSquared < 341.75;
}
else if(layer4 == 9 and layer5 == 15)
{
return rzChiSquared < 341.75;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 4)
{
if(layer4 == 12 and layer5 == 13)
{
return rzChiSquared < 392.655;
}
else if(layer4 == 5 and layer5 == 12)
{
return rzChiSquared < 341.75;
}
else if(layer4 == 5 and layer5 == 6)
{
return rzChiSquared < 112.537;
}
}
else if(layer1 == 2 and layer2 == 3 and layer3 == 7)
{
if(layer4 == 13 and layer5 == 14)
{
return rzChiSquared < 595.545;
}
else if(layer4 == 8 and layer5 == 14)
{
return rzChiSquared < 74.198;
}
}
else if(layer1 == 2 and layer2 == 7 and layer3 == 8)
{
if(layer4 == 14 and layer5 == 15)
{
return rzChiSquared < 518.339;
}
else if(layer4 == 9 and layer5 == 10)
{
return rzChiSquared < 8.046;
}
else if(layer4 == 9 and layer5 == 15)
{
return rzChiSquared < 451.141;
}
}
else if(layer1 == 3 and layer2 == 7 and layer3 == 8 and layer4 == 14 and layer5 == 15)
{
return rzChiSquared < 56.207;
}
else if(layer1 == 7 and layer2 == 8 and layer3 == 9)
{
if(layer4 == 10 and layer5 == 11)
{
return rzChiSquared < 64.578;
}
else if(layer4 == 10 and layer5 == 16)
{
return rzChiSquared < 85.250;
}
else if(layer4 == 15 and layer5 == 16)
{
return rzChiSquared < 85.250;
}
}
return true;
}
__device__ float SDL::computePT5RPhiChiSquaredInwards(struct modules& modulesInGPU, struct hits& hitsInGPU, struct quintuplets& quintupletsInGPU, unsigned int quintupletIndex, unsigned int* pixelHits)
{
/*Using the computed regression center and radius, compute the chi squared for the pixels*/
float g = quintupletsInGPU.regressionG[quintupletIndex];
float f = quintupletsInGPU.regressionF[quintupletIndex];
float r = quintupletsInGPU.regressionRadius[quintupletIndex];
float x, y;
float chiSquared = 0;
for(size_t i = 0; i < 2; i++)
{
x = hitsInGPU.xs[pixelHits[i]];
y = hitsInGPU.ys[pixelHits[i]];
float residual = (x - g) * (x - g) + (y - f) * (y - f) - r * r;
chiSquared += residual * residual;
}
chiSquared /= 2;
return chiSquared;
}
__device__ float SDL::computePT5RZChiSquared(struct modules& modulesInGPU, struct hits& hitsInGPU, unsigned int& pixelAnchorHitIndex1, unsigned int& pixelAnchorHitIndex2, unsigned int* anchorHits, unsigned int* lowerModuleIndices)
{
//use the two anchor hits of the pixel segment to compute the slope
//then compute the pseudo chi squared of the five outer hits
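// The residual is taken in z for barrel layers (layer <= 6) and in rt for endcap layers,
// relative to the straight line through the two pixel anchor hits; the returned value is
// sqrt((1/5) * sum_i (residual_i / error_i)^2), i.e. an error-weighted RMS rather than a
// conventional chi squared.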
float& rtPix1 = hitsInGPU.rts[pixelAnchorHitIndex1];
float& rtPix2 = hitsInGPU.rts[pixelAnchorHitIndex2];
float& zPix1 = hitsInGPU.zs[pixelAnchorHitIndex1];
float& zPix2 = hitsInGPU.zs[pixelAnchorHitIndex2];
float slope = (zPix2 - zPix1)/(rtPix2 - rtPix1);
float rtAnchor, zAnchor;
float residual = 0;
float error = 0;
//hardcoded array indices!!!
float RMSE = 0;
float drdz;
for(size_t i = 0; i < 5; i++)
{
unsigned int& anchorHitIndex = anchorHits[i];
unsigned int& lowerModuleIndex = lowerModuleIndices[i];
rtAnchor = hitsInGPU.rts[anchorHitIndex];
zAnchor = hitsInGPU.zs[anchorHitIndex];
const int moduleType = modulesInGPU.moduleType[lowerModuleIndex];
const int moduleSide = modulesInGPU.sides[lowerModuleIndex];
const int moduleLayerType = modulesInGPU.moduleLayerType[lowerModuleIndex];
const int layer = modulesInGPU.layers[lowerModuleIndex] + 6 * (modulesInGPU.subdets[lowerModuleIndex] == SDL::Endcap) + 5 * (modulesInGPU.subdets[lowerModuleIndex] == SDL::Endcap and modulesInGPU.moduleType[lowerModuleIndex] == SDL::TwoS);
residual = (layer <= 6) ? (zAnchor - zPix1) - slope * (rtAnchor - rtPix1) : (rtAnchor - rtPix1) - (zAnchor - zPix1)/slope;
//PS Modules
if(moduleType == 0)
{
error = 0.15;
}
else //2S modules
{
error = 5.0;
}
//special dispensation to tilted PS modules!
if(moduleType == 0 and layer <= 6 and moduleSide != Center)
{
if(moduleLayerType == Strip)
{
drdz = modulesInGPU.drdzs[lowerModuleIndex];
}
else
{
drdz = modulesInGPU.drdzs[modulesInGPU.partnerModuleIndex(lowerModuleIndex)];
}
error *= 1/sqrtf(1 + drdz * drdz);
}
RMSE += (residual * residual)/(error * error);
}
RMSE = sqrtf(0.2 * RMSE);
return RMSE;
}
|
15da8d45888ea5eb29d17c13d44d7d61908f67f1.hip | // !!! This is a file automatically generated by hipify!!!
//
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
////#include <cudnn.h>
//
//#include <stdio.h>
//#include <windows.h>
//
//
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// fprintf(stderr, "%d", GetLastError());
// system("pause");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // hipDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = hipDeviceReset();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceReset failed!");
// return 1;
// }
// system("pause");
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// hipError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = hipSetDevice(0);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = hipGetLastError();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
// goto Error;
// }
//
// // hipDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//Error:
// hipFree(dev_c);
// hipFree(dev_a);
// hipFree(dev_b);
//
// return cudaStatus;
//}
//
////#include <iostream>
////#include "cudnn.h"
////#include "hip/hip_runtime.h"
//
//
////#pragma comment(lib, "cudnn.lib")
////
////using namespace std;
////
////void main() {
//// cudnnHandle_t handle;
//// cudnnStatus_t t = cudnnCreate(&handle);
//// cout << cudnnGetErrorString(t);
//// getchar();
////}
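// Minimal cuDNN sanity check: create a handle and print the resulting status string.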
#include <iostream>
#include <hip/hip_runtime.h>
#include <cudnn.h>
using namespace std;
int main() {
cudnnHandle_t handle;
cudnnStatus_t t = cudnnCreate(&handle);
cout << cudnnGetErrorString(t);
getchar();
return 0;
}
| 15da8d45888ea5eb29d17c13d44d7d61908f67f1.cu | //
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
////#include <cudnn.h>
//
//#include <stdio.h>
//#include <windows.h>
//
//
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
//
//__global__ void addKernel(int *c, const int *a, const int *b)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
//}
//
//int main()
//{
// const int arraySize = 5;
// const int a[arraySize] = { 1, 2, 3, 4, 5 };
// const int b[arraySize] = { 10, 20, 30, 40, 50 };
// int c[arraySize] = { 0 };
//
// // Add vectors in parallel.
// cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addWithCuda failed!");
// fprintf(stderr, "%d", GetLastError());
// system("pause");
// return 1;
// }
//
// printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
// c[0], c[1], c[2], c[3], c[4]);
//
// // cudaDeviceReset must be called before exiting in order for profiling and
// // tracing tools such as Nsight and Visual Profiler to show complete traces.
// cudaStatus = cudaDeviceReset();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceReset failed!");
// return 1;
// }
// system("pause");
// return 0;
//}
//
//// Helper function for using CUDA to add vectors in parallel.
//cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
//{
// int *dev_a = 0;
// int *dev_b = 0;
// int *dev_c = 0;
// cudaError_t cudaStatus;
//
// // Choose which GPU to run on, change this on a multi-GPU system.
// cudaStatus = cudaSetDevice(0);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
// goto Error;
// }
//
// // Allocate GPU buffers for three vectors (two input, one output) .
// cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
// // Launch a kernel on the GPU with one thread for each element.
// addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
//
// // Check for any errors launching the kernel
// cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// goto Error;
// }
//
// // cudaDeviceSynchronize waits for the kernel to finish, and returns
// // any errors encountered during the launch.
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//Error:
// cudaFree(dev_c);
// cudaFree(dev_a);
// cudaFree(dev_b);
//
// return cudaStatus;
//}
//
////#include <iostream>
////#include "cudnn.h"
////#include "cuda_runtime.h"
//
//
////#pragma comment(lib, "cudnn.lib")
////
////using namespace std;
////
////void main() {
//// cudnnHandle_t handle;
//// cudnnStatus_t t = cudnnCreate(&handle);
//// cout << cudnnGetErrorString(t);
//// getchar();
////}
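// Minimal cuDNN sanity check: create a handle and print the resulting status string.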
#include <iostream>
#include <cuda_runtime.h>
#include <cudnn.h>
using namespace std;
int main() {
cudnnHandle_t handle;
cudnnStatus_t t = cudnnCreate(&handle);
cout << cudnnGetErrorString(t);
getchar();
return 0;
}
|
beae6ace5f3762392e9e874dfd3ea76c88eb570d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#if __CUDA_ARCH__ >= 300
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
// Feature hashing multiply and multiply-transpose.
// This one enumerates, hashes and multiplies all pairs of features.
//
// NOTE: The single-matrix version (hashmult) uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs)
// Hash functions
// Adler32
__forceinline__ __device__ unsigned int adler32(const void *buf, size_t buflength) {
const unsigned char *buffer = (const unsigned char*)buf;
unsigned int s1 = 1;
unsigned int s2 = 0;
for (size_t n = 0; n < buflength; n++) {
s1 = (s1 + buffer[n]) % 65521;
s2 = (s2 + s1) % 65521;
}
return (s2 << 16) | s1;
}
// MurmurHash3
static const unsigned int c1 = 0xcc9e2d51;
static const unsigned int c2 = 0x1b873593;
static const unsigned int r1 = 15;
static const unsigned int r2 = 13;
static const unsigned int m = 5;
static const unsigned int n = 0xe6546b64;
__forceinline__ __device__ unsigned int h1(unsigned int k, unsigned int hash) {
k *= c1;
k = (k << r1) | (k >> (32-r1));
k *= c2;
hash ^= k;
hash = ((hash << r2) | (hash >> (32-r2)) * m) + n;
return hash;
}
const unsigned int seed = 3413413;
__forceinline__ __device__ unsigned int mmhashend(unsigned int hash, unsigned int mod)
{
hash ^= (hash >> 16);
hash *= 0x85ebca6b;
hash ^= (hash >> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >> 16);
return (hash % mod);
}
__forceinline__ __device__ unsigned int mmhash1(unsigned int v1, unsigned int mod) {
unsigned int hash = seed;
hash = h1(v1, hash);
return mmhashend(hash, mod);
}
__forceinline__ __device__ unsigned int mmhash2(unsigned int v1, unsigned int v2, unsigned int mod) {
unsigned int hash = seed;
hash = h1(v1, hash);
hash = h1(v2, hash);
return mmhashend(hash, mod);
}
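// Invert the triangular-number pair index: given a linear index j, find j1 such that
// j1*(j1+1)/2 <= j, by running a few Newton steps on v*(v+1) - 2*j = 0.
// Example: for nr = 3 nonzeros there are todo = 6 pairs, enumerated as
// (j1, j2) = (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).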
__forceinline__ __device__ int solve1(int j) {
float v = sqrtf((float)j);
#pragma unroll
for (int k = 0; k < 5; k++) {
v = v - (v*(v+1)-2*j)/(2*v+1); // Newton iterations to find first index.
}
return (int)(v+2e-5f);
}
// Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C
__global__ void __hashmult(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i]; // Range of nz rows in this column
int jend = Bjc[i+1];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1]; // And their row indices
int r2 = Bir[jstart + j2];
int ind = mmhash2(r1, r2, nfeats); // Hash the indices
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * f1 * f2; // Do the product
atomicAdd(&C[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * f1 * f2; // Do the product
atomicAdd(&C[threadIdx.x + nrows * i], sum);
}
}
}
}
int hashmult(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __hashmult), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, A, Bdata, Bir, Bjc, C, transpose);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
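// Cross-feature version: for each column, combinations of nonzeros from B and C are
// hashed (via their row indices) into one of nfeats buckets and the product of their
// values is accumulated into D, analogous to __hashmult above.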
__global__ void __hashcross(int nrows, int nfeats, int ncols,
float *A,
float *Bdata, int *Bir, int *Bjc,
float *Cdata, int *Cir, int *Cjc,
float *D, int transpose) {
int r1, r2, ind;
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart1 = Bjc[i]; // Range of nz rows in this column of B
int jend1 = Bjc[i+1];
int jstart2 = Cjc[i]; // Range of nz rows in this column of C
int jend2 = Cjc[i+1];
int nr1 = jend1 - jstart1; // Number of nz rows
int nr2 = jend2 - jstart2; // Number of nz rows
int todo = (nr1+1) * (nr2+1) - 1; // Number of pairs + singletons to process
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = j / nr2;
int j2 = j - j1 * nr2;
float prod = 1.0f;
int hash = seed;
if (j1 < nr1) {
prod *= Bdata[jstart1 + j1]; // Get the two features
r1 = Bir[jstart1 + j1]; // And their row indices
hash = h1(r1, hash);
}
if (j2 < nr2) {
prod *= Cdata[jstart2 + j2];
r2 = Cir[jstart2 + j2];
hash = h1(r2, hash); // Hash the indices
}
ind = mmhashend(hash, nfeats);
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * prod; // Do the product
atomicAdd(&D[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * prod;
atomicAdd(&D[threadIdx.x + nrows * i], sum);
}
}
}
}
int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
hipLaunchKernelGGL(( __hashcross), dim3(nblocks),dim3(threadDim), 0, 0, nrows, nfeats, ncols, A, Bdata, Bir, Bjc, Cdata, Cir, Cjc, D, transpose);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
return err;
}
| beae6ace5f3762392e9e874dfd3ea76c88eb570d.cu | #include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#if __CUDA_ARCH__ >= 300
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
// Feature hashing multiply and multiply-transpose.
// This one enumerates, hashes and multiplies all pairs of features.
//
// NOTE: The single-matrix version (hashmult) uses a fast lookup recurrence which is only valid up to 3000 base features per column (approx 4.5 million pairs)
// Hash functions
// Adler32
__forceinline__ __device__ unsigned int adler32(const void *buf, size_t buflength) {
const unsigned char *buffer = (const unsigned char*)buf;
unsigned int s1 = 1;
unsigned int s2 = 0;
for (size_t n = 0; n < buflength; n++) {
s1 = (s1 + buffer[n]) % 65521;
s2 = (s2 + s1) % 65521;
}
return (s2 << 16) | s1;
}
// MurmurHash3
static const unsigned int c1 = 0xcc9e2d51;
static const unsigned int c2 = 0x1b873593;
static const unsigned int r1 = 15;
static const unsigned int r2 = 13;
static const unsigned int m = 5;
static const unsigned int n = 0xe6546b64;
__forceinline__ __device__ unsigned int h1(unsigned int k, unsigned int hash) {
k *= c1;
k = (k << r1) | (k >> (32-r1));
k *= c2;
hash ^= k;
hash = ((hash << r2) | (hash >> (32-r2)) * m) + n;
return hash;
}
const unsigned int seed = 3413413;
__forceinline__ __device__ unsigned int mmhashend(unsigned int hash, unsigned int mod)
{
hash ^= (hash >> 16);
hash *= 0x85ebca6b;
hash ^= (hash >> 13);
hash *= 0xc2b2ae35;
hash ^= (hash >> 16);
return (hash % mod);
}
__forceinline__ __device__ unsigned int mmhash1(unsigned int v1, unsigned int mod) {
unsigned int hash = seed;
hash = h1(v1, hash);
return mmhashend(hash, mod);
}
__forceinline__ __device__ unsigned int mmhash2(unsigned int v1, unsigned int v2, unsigned int mod) {
unsigned int hash = seed;
hash = h1(v1, hash);
hash = h1(v2, hash);
return mmhashend(hash, mod);
}
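// Invert the triangular-number pair index: given a linear index j, find j1 such that
// j1*(j1+1)/2 <= j, by running a few Newton steps on v*(v+1) - 2*j = 0.
// Example: for nr = 3 nonzeros there are todo = 6 pairs, enumerated as
// (j1, j2) = (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).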
__forceinline__ __device__ int solve1(int j) {
float v = sqrtf((float)j);
#pragma unroll
for (int k = 0; k < 5; k++) {
v = v - (v*(v+1)-2*j)/(2*v+1); // Newton iterations to find first index.
}
return (int)(v+2e-5f);
}
// Given dense A and sparse B, for each column of B, enumerate all pairs of features, hash to a single feature index, and multiply by A into C
__global__ void __hashmult(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart = Bjc[i]; // Range of nz rows in this column
int jend = Bjc[i+1];
int nr = jend - jstart; // Number of nz rows
int todo = nr * (nr + 1) / 2; // Number of pairs to process (including k,k pairs)
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = solve1(j); // Compute the first and second indices
int j2 = j - j1*(j1+1)/2;
float f1 = Bdata[jstart + j1]; // Get the two features
float f2 = Bdata[jstart + j2];
int r1 = Bir[jstart + j1]; // And their row indices
int r2 = Bir[jstart + j2];
int ind = mmhash2(r1, r2, nfeats); // Hash the indices
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * f1 * f2; // Do the product
atomicAdd(&C[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * f1 * f2; // Do the product
atomicAdd(&C[threadIdx.x + nrows * i], sum);
}
}
}
}
int hashmult(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *C, int transpose) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
__hashmult<<<nblocks,threadDim>>>(nrows, nfeats, ncols, A, Bdata, Bir, Bjc, C, transpose);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
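// Cross-feature version: for each column, combinations of nonzeros from B and C are
// hashed (via their row indices) into one of nfeats buckets and the product of their
// values is accumulated into D, analogous to __hashmult above.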
__global__ void __hashcross(int nrows, int nfeats, int ncols,
float *A,
float *Bdata, int *Bir, int *Bjc,
float *Cdata, int *Cir, int *Cjc,
float *D, int transpose) {
int r1, r2, ind;
int istart = ((long long)blockIdx.x) * ncols/ gridDim.x;
int iend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x;
for (int i = istart; i < iend ; i++) { // i is the column index
int jstart1 = Bjc[i]; // Range of nz rows in this column of B
int jend1 = Bjc[i+1];
int jstart2 = Cjc[i]; // Range of nz rows in this column of C
int jend2 = Cjc[i+1];
int nr1 = jend1 - jstart1; // Number of nz rows
int nr2 = jend2 - jstart2; // Number of nz rows
int todo = (nr1+1) * (nr2+1) - 1; // Number of pairs + singletons to process
for (int j = threadIdx.y; j < todo; j += blockDim.y) { // j indexes a worker for this column
int j1 = j / nr2;
int j2 = j - j1 * nr2;
float prod = 1.0f;
int hash = seed;
if (j1 < nr1) {
prod *= Bdata[jstart1 + j1]; // Get the two features
r1 = Bir[jstart1 + j1]; // And their row indices
hash = h1(r1, hash);
}
if (j2 < nr2) {
prod *= Cdata[jstart2 + j2];
r2 = Cir[jstart2 + j2];
hash = h1(r2, hash); // Hash the indices
}
ind = mmhashend(hash, nfeats);
if (transpose > 0) {
float sum = A[threadIdx.x + nrows * i] * prod; // Do the product
atomicAdd(&D[threadIdx.x + nrows * ind], sum);
} else {
float sum = A[threadIdx.x + nrows * ind] * prod;
atomicAdd(&D[threadIdx.x + nrows * i], sum);
}
}
}
}
int hashcross(int nrows, int nfeats, int ncols, float *A, float *Bdata, int *Bir, int *Bjc, float *Cdata, int *Cir, int *Cjc, float *D, int transpose) {
int nt = max(1, 256/nrows);
dim3 threadDim(nrows, nt, 1);
int nblocks = min(MAXXGRID, ncols);
__hashcross<<<nblocks,threadDim>>>(nrows, nfeats, ncols, A, Bdata, Bir, Bjc, Cdata, Cir, Cjc, D, transpose);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
return err;
}
|
9ab079b83f0d1c439b80f4de9da8f22076028cd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::add(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::add(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD, name);
return ele;
}
Tensor FFModel::subtract(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::subtract(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB, name);
return ele;
}
Tensor FFModel::multiply(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::multiply(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL, name);
return ele;
}
Tensor FFModel::divide(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::divide(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV, name);
return ele;
}
ElementBinary::ElementBinary(FFModel& model,
ElementBinary::OpType _op_type,
const std::string& pcname,
const Tensor& in1,
const Tensor& in2)
: Op(pcname, in1, in2), op_type(_op_type)
{
//TODO: implement broadcast op
assert(in1.numDim == in2.numDim);
int dim = in1.numDim;
for (int i = 0; i < dim; i++)
assert(in1.adim[i] == in2.adim[i]);
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition<4>(model);
break;
}
default:
{
      // Unsupported dim for the element-wise binary operator
assert(false);
}
}
}
ElementBinary::ElementBinary(FFModel& model,
ElementBinary::OpType _op_type,
const std::string& pcname)
: Op(pcname), op_type(_op_type)
{
}
Tensor ElementBinary::init_inout(FFModel& model,
const Tensor& input)
{
// TODO: currently disable this functional API since
// FlexFlow assumes a single tensor as input
assert(false);
Tensor in1 = input, in2 = input;
add_to_model(model);
inputs[0] = in1;
inputs[1] = in2;
//TODO: implement broadcast op
assert(in1.numDim == in2.numDim);
int dim = in1.numDim;
for (int i = 0; i < dim; i++)
assert(in1.adim[i] == in2.adim[i]);
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition<4>(model);
break;
}
default:
{
      // Unsupported dim for the element-wise binary operator
assert(false);
}
}
return outputs[0];
}
void ElementBinary::add_to_model(FFModel& model)
{
model.layers.push_back(this);
}
template<int NDIM>
void ElementBinary::create_output_and_partition(FFModel& model)
{
  // Retrieve the task index space for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT);
Rect<NDIM> input_rect;
for (int i = 0; i < 2; i++) {
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
}
}
}
__host__
void ElementBinary::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{}
void ElementBinary::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
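// Forward kernel: out[i] = alpha * op(in1[i], in2[i]) + beta * out[i], with op chosen by the OpType argument.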
__global__
void elewise_binary_forward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementBinary::OpType type,
const float* in1,
const float* in2,
float* out)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementBinary::OP_ADD:
{
out[i] = alpha * (in1[i] + in2[i]) + beta * out[i];
break;
}
case ElementBinary::OP_SUB:
{
out[i] = alpha * (in1[i] - in2[i]) + beta * out[i];
break;
}
case ElementBinary::OP_MUL:
{
out[i] = alpha * in1[i] * in2[i] + beta * out[i];
break;
}
case ElementBinary::OP_DIV:
{
out[i] = alpha * (in1[i] / in2[i]) + beta * out[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): in1
regions[1](I): in2
regions[2](O): output
*/
__host__
void ElementBinary::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
float beta = 0.0f;
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const ElementBinary* ele = (const ElementBinary*) task->args;
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain in2_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(in1_domain == in2_domain);
assert(out_domain == in1_domain);
const float* in1_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const float* in2_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
float* out_ptr = helperGetTensorPointerWO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
hipLaunchKernelGGL(( elewise_binary_forward_kernel), dim3(GET_BLOCKS(out_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr);
}
void ElementBinary::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
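// Backward kernel: for the chosen op, accumulates in1_grad[i] and in2_grad[i] as
// alpha * (d op / d in) * out_grad[i] + beta * existing gradient.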
__global__
void elewise_binary_backward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementBinary::OpType type,
const float* out_grad,
const float* in1,
const float* in2,
float* in1_grad,
float* in2_grad)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementBinary::OP_ADD:
{
in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_SUB:
{
in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_MUL:
{
in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i];
in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_DIV:
{
in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i];
in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): out_grad
regions[1](I): in0
regions[2](I): in1
regions[3](I/O): in0_grad
regions[4](I/O): in1_grad
*/
void ElementBinary::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
const ElementBinary* ele = (const ElementBinary*) task->args;
assert(regions.size() == 5);
assert(task->regions.size() == 5);
Domain out_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain in0_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Domain in0_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Domain in1_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
assert(out_grad_domain == in0_domain);
assert(out_grad_domain == in1_domain);
assert(out_grad_domain == in0_grad_domain);
assert(out_grad_domain == in1_grad_domain);
const float* out_grad_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const float* in1_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
const float* in2_ptr = helperGetTensorPointerRO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
float* in1_grad_ptr = helperGetTensorPointerRW<float>(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
float* in2_grad_ptr = helperGetTensorPointerRW<float>(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
hipLaunchKernelGGL(( elewise_binary_backward_kernel), dim3(GET_BLOCKS(out_grad_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0,
out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr,
in1_grad_ptr, in2_grad_ptr);
}
void ElementBinary::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// regions[1](I): input0
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(1, FID_DATA);
// regions[2](I): input1
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): input0_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I/O): input1_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[1], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
launcher.add_field(4, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
| 9ab079b83f0d1c439b80f4de9da8f22076028cd6.cu | /* Copyright 2020 Stanford
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
Tensor FFModel::add(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::add(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD, name);
return ele;
}
Tensor FFModel::subtract(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::subtract(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB, name);
return ele;
}
Tensor FFModel::multiply(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::multiply(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL, name);
return ele;
}
Tensor FFModel::divide(std::string name,
const Tensor& in1,
const Tensor& in2)
{
ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, name, in1, in2);
ele->add_to_model(*this);
return ele->outputs[0];
}
ElementBinary* FFModel::divide(std::string name)
{
ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV, name);
return ele;
}
ElementBinary::ElementBinary(FFModel& model,
ElementBinary::OpType _op_type,
const std::string& pcname,
const Tensor& in1,
const Tensor& in2)
: Op(pcname, in1, in2), op_type(_op_type)
{
//TODO: implement broadcast op
assert(in1.numDim == in2.numDim);
int dim = in1.numDim;
for (int i = 0; i < dim; i++)
assert(in1.adim[i] == in2.adim[i]);
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition<4>(model);
break;
}
default:
{
      // Unsupported dim for the element-wise binary operator
assert(false);
}
}
}
ElementBinary::ElementBinary(FFModel& model,
ElementBinary::OpType _op_type,
const std::string& pcname)
: Op(pcname), op_type(_op_type)
{
}
Tensor ElementBinary::init_inout(FFModel& model,
const Tensor& input)
{
// TODO: currently disable this functional API since
// FlexFlow assumes a single tensor as input
assert(false);
Tensor in1 = input, in2 = input;
add_to_model(model);
inputs[0] = in1;
inputs[1] = in2;
//TODO: implement broadcast op
assert(in1.numDim == in2.numDim);
int dim = in1.numDim;
for (int i = 0; i < dim; i++)
assert(in1.adim[i] == in2.adim[i]);
switch (dim) {
case 1:
{
task_is = model.get_or_create_task_is(1, name);
create_output_and_partition<1>(model);
break;
}
case 2:
{
task_is = model.get_or_create_task_is(2, name);
create_output_and_partition<2>(model);
break;
}
case 3:
{
task_is = model.get_or_create_task_is(3, name);
create_output_and_partition<3>(model);
break;
}
case 4:
{
task_is = model.get_or_create_task_is(4, name);
create_output_and_partition<4>(model);
break;
}
default:
{
      // Unsupported dim for the element-wise binary operator
assert(false);
}
}
return outputs[0];
}
void ElementBinary::add_to_model(FFModel& model)
{
model.layers.push_back(this);
}
template<int NDIM>
void ElementBinary::create_output_and_partition(FFModel& model)
{
  // Retrieve the task index space for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT);
Rect<NDIM> input_rect;
for (int i = 0; i < 2; i++) {
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[i].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[i] = inputs[i].part;
input_grad_lps[i] = inputs[i].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]);
}
}
}
__host__
void ElementBinary::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{}
void ElementBinary::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void elewise_binary_forward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementBinary::OpType type,
const float* in1,
const float* in2,
float* out)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementBinary::OP_ADD:
{
out[i] = alpha * (in1[i] + in2[i]) + beta * out[i];
break;
}
case ElementBinary::OP_SUB:
{
out[i] = alpha * (in1[i] - in2[i]) + beta * out[i];
break;
}
case ElementBinary::OP_MUL:
{
out[i] = alpha * in1[i] * in2[i] + beta * out[i];
break;
}
case ElementBinary::OP_DIV:
{
out[i] = alpha * (in1[i] / in2[i]) + beta * out[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): in1
regions[1](I): in2
regions[2](O): output
*/
__host__
void ElementBinary::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
float beta = 0.0f;
assert(regions.size() == 3);
assert(task->regions.size() == 3);
const ElementBinary* ele = (const ElementBinary*) task->args;
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain in2_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain out_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(in1_domain == in2_domain);
assert(out_domain == in1_domain);
const float* in1_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const float* in2_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
float* out_ptr = helperGetTensorPointerWO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
elewise_binary_forward_kernel<<<GET_BLOCKS(out_domain.get_volume()), CUDA_NUM_THREADS>>>(
out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr);
}
void ElementBinary::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is,
TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
__global__
void elewise_binary_backward_kernel(coord_t volume,
const float alpha,
const float beta,
ElementBinary::OpType type,
const float* out_grad,
const float* in1,
const float* in2,
float* in1_grad,
float* in2_grad)
{
CUDA_KERNEL_LOOP(i, volume)
{
switch (type) {
case ElementBinary::OP_ADD:
{
in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_SUB:
{
in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i];
in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_MUL:
{
in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i];
in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i];
break;
}
case ElementBinary::OP_DIV:
{
in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i];
in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i];
break;
}
default:
assert(false);
}
}
}
/*
regions[0](I): out_grad
regions[1](I): in0
regions[2](I): in1
regions[3](I/O): in0_grad
regions[4](I/O): in1_grad
*/
void ElementBinary::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
float alpha = 1.0f;
const ElementBinary* ele = (const ElementBinary*) task->args;
assert(regions.size() == 5);
assert(task->regions.size() == 5);
Domain out_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain in0_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
Domain in0_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[3].region.get_index_space());
Domain in1_grad_domain = runtime->get_index_space_domain(
ctx, task->regions[4].region.get_index_space());
assert(out_grad_domain == in0_domain);
assert(out_grad_domain == in1_domain);
assert(out_grad_domain == in0_grad_domain);
assert(out_grad_domain == in1_grad_domain);
const float* out_grad_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const float* in1_ptr = helperGetTensorPointerRO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
const float* in2_ptr = helperGetTensorPointerRO<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
float* in1_grad_ptr = helperGetTensorPointerRW<float>(
regions[3], task->regions[3], FID_DATA, ctx, runtime);
float* in2_grad_ptr = helperGetTensorPointerRW<float>(
regions[4], task->regions[4], FID_DATA, ctx, runtime);
elewise_binary_backward_kernel<<<GET_BLOCKS(out_grad_domain.get_volume()), CUDA_NUM_THREADS>>>(
out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr,
in1_grad_ptr, in2_grad_ptr);
}
void ElementBinary::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is,
                         TaskArgument(this, sizeof(ElementBinary)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): output_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// regions[1](I): input0
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(1, FID_DATA);
// regions[2](I): input1
launcher.add_region_requirement(
RegionRequirement(input_lps[1], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[1].region));
launcher.add_field(2, FID_DATA);
// regions[3](I/O): input0_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(3, FID_DATA);
// regions[4](I/O): input1_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[1], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[1].region_grad));
launcher.add_field(4, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
|
74e00b1d07b63186166781fe4e587e54f5100899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DIELECTRIC_OFFSET 0.009f
#define PROBE_RADIUS 0.14f
#define WARPS_PER_GROUP (FORCE_WORK_GROUP_SIZE/TILE_SIZE)
/**
* Reduce the Born sums to compute the Born radii.
*/
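// OBC-style recipe implemented below: with psi = 0.5 * offsetRadius * (reduced pairwise sum),
//   bornRadius = 1 / ( 1/offsetRadius - tanh(alpha*psi - beta*psi^2 + gamma*psi^3) / (offsetRadius + DIELECTRIC_OFFSET) )
// obcChain holds the matching derivative factor that reduceBornForce multiplies into the Born force.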
extern "C" __global__ void reduceBornSum(float alpha, float beta, float gamma, const long long* __restrict__ bornSum,
const float2* __restrict__ params, real* __restrict__ bornRadii, real* __restrict__ obcChain) {
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Get summed Born data
real sum = RECIP(0x100000000)*bornSum[index];
// Now calculate Born radius and OBC term.
float offsetRadius = params[index].x;
sum *= 0.5f*offsetRadius;
real sum2 = sum*sum;
real sum3 = sum*sum2;
real tanhSum = tanh(alpha*sum - beta*sum2 + gamma*sum3);
real nonOffsetRadius = offsetRadius + DIELECTRIC_OFFSET;
real radius = RECIP(RECIP(offsetRadius) - tanhSum/nonOffsetRadius);
real chain = offsetRadius*(alpha - 2.0f*beta*sum + 3.0f*gamma*sum2);
chain = (1-tanhSum*tanhSum)*chain / nonOffsetRadius;
bornRadii[index] = radius;
obcChain[index] = chain;
}
}
/**
* Reduce the Born force.
*/
extern "C" __global__ void reduceBornForce(long long* __restrict__ bornForce, real* __restrict__ energyBuffer,
const float2* __restrict__ params, const real* __restrict__ bornRadii, const real* __restrict__ obcChain) {
real energy = 0;
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Get summed Born force
real force = RECIP(0x100000000)*bornForce[index];
// Now calculate the actual force
float offsetRadius = params[index].x;
real bornRadius = bornRadii[index];
real r = offsetRadius+DIELECTRIC_OFFSET+PROBE_RADIUS;
real ratio6 = POW((offsetRadius+DIELECTRIC_OFFSET)/bornRadius, 6);
real saTerm = SURFACE_AREA_FACTOR*r*r*ratio6;
force += saTerm/bornRadius;
energy += saTerm;
force *= bornRadius*bornRadius*obcChain[index];
bornForce[index] = (long long) (force*0x100000000);
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy/-6;
}
typedef struct {
real x, y, z;
real q;
float radius, scaledRadius;
real bornSum;
} AtomData1;
/**
* Compute the Born sum.
*/
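// Pairwise descreening term accumulated below (transcribed from the code for reference):
//   I = l - u + (1/(2r)) * log(u/l) + (r/4) * (u^2 - l^2) + (S_j^2 / (4r)) * (l^2 - u^2)
// with l = 1/max(R_i, |r - S_j|) and u = 1/(r + S_j), plus an extra 2*(1/R_i - l) when R_i < S_j - r,
// where R_i is the offset radius of atom i and S_j the scaled radius of atom j.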
extern "C" __global__ void computeBornSum(unsigned long long* __restrict__ global_bornSum, const real4* __restrict__ posq, const float2* __restrict__ global_params,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms,
#else
unsigned int numTiles,
#endif
const ushort2* __restrict__ exclusionTiles) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
__shared__ AtomData1 localData[FORCE_WORK_GROUP_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real bornSum = 0;
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
float2 params1 = global_params[atom1];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
localData[threadIdx.x].radius = params1.x;
localData[threadIdx.x].scaledRadius = params1.y;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+j].x-posq1.x, localData[tbx+j].y-posq1.y, localData[tbx+j].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+j].radius, localData[tbx+j].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if ((j != tgx) && (params1.x < rScaledRadiusJ)) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
}
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
float2 tempParams = global_params[j];
localData[threadIdx.x].radius = tempParams.x;
localData[threadIdx.x].scaledRadius = tempParams.y;
localData[threadIdx.x].bornSum = 0.0f;
// Compute the full set of interactions in this tile.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&global_bornSum[offset], static_cast<unsigned long long>((long long) (bornSum*0x100000000)));
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&global_bornSum[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].bornSum*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[FORCE_WORK_GROUP_SIZE];
__shared__ volatile int skipTiles[FORCE_WORK_GROUP_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
real bornSum = 0;
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
float2 params1 = global_params[atom1];
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
float2 tempParams = global_params[j];
localData[threadIdx.x].radius = tempParams.x;
localData[threadIdx.x].scaledRadius = tempParams.y;
localData[threadIdx.x].bornSum = 0.0f;
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
int atom2 = atomIndices[tbx+tj];
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&global_bornSum[atom1], static_cast<unsigned long long>((long long) (bornSum*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS)
atomicAdd(&global_bornSum[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].bornSum*0x100000000)));
}
pos++;
}
}
typedef struct {
real x, y, z;
real q;
real fx, fy, fz, fw;
real bornRadius;
} AtomData2;
/**
* First part of computing the GBSA interaction.
*/
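// Pair interaction computed below (generalized Born, Still-style functional form):
//   E_ij = PREFACTOR * q_i * q_j / sqrt(r^2 + R_i*R_j * exp(-r^2 / (4*R_i*R_j)))
// force.w accumulates dE/d(alpha2_ij) * R_j terms that are later reduced into per-atom Born forces.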
extern "C" __global__ void computeGBSAForce1(unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ global_bornForce,
real* __restrict__ energyBuffer, const real4* __restrict__ posq, const real* __restrict__ global_bornRadii,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms,
#else
unsigned int numTiles,
#endif
const ushort2* __restrict__ exclusionTiles) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
real energy = 0;
__shared__ AtomData2 localData[FORCE_WORK_GROUP_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real4 force = make_real4(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
real bornRadius1 = global_bornRadii[atom1];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
localData[threadIdx.x].bornRadius = bornRadius1;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+j].x, localData[tbx+j].y, localData[tbx+j].z, localData[tbx+j].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+j].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
if (atom1 != y*TILE_SIZE+j)
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += 0.5f*tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef USE_CUTOFF
}
#endif
}
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
localData[threadIdx.x].bornRadius = global_bornRadii[j];
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
localData[threadIdx.x].fw = 0.0f;
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
#ifdef USE_CUTOFF
}
#endif
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
atomicAdd(&global_bornForce[offset], static_cast<unsigned long long>((long long) (force.w*0x100000000)));
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
atomicAdd(&global_bornForce[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fw*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[FORCE_WORK_GROUP_SIZE];
__shared__ volatile int skipTiles[FORCE_WORK_GROUP_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
real4 force = make_real4(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
real bornRadius1 = global_bornRadii[atom1];
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
localData[threadIdx.x].bornRadius = global_bornRadii[j];
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
localData[threadIdx.x].fw = 0.0f;
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 < CUTOFF_SQUARED) {
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
#ifdef USE_CUTOFF
}
#endif
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
atomicAdd(&global_bornForce[atom1], static_cast<unsigned long long>((long long) (force.w*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
atomicAdd(&global_bornForce[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fw*0x100000000)));
}
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
| 74e00b1d07b63186166781fe4e587e54f5100899.cu | #define DIELECTRIC_OFFSET 0.009f
#define PROBE_RADIUS 0.14f
#define WARPS_PER_GROUP (FORCE_WORK_GROUP_SIZE/TILE_SIZE)
/**
* Reduce the Born sums to compute the Born radii.
*/
extern "C" __global__ void reduceBornSum(float alpha, float beta, float gamma, const long long* __restrict__ bornSum,
const float2* __restrict__ params, real* __restrict__ bornRadii, real* __restrict__ obcChain) {
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Get summed Born data
real sum = RECIP(0x100000000)*bornSum[index];
// Now calculate Born radius and OBC term.
float offsetRadius = params[index].x;
sum *= 0.5f*offsetRadius;
real sum2 = sum*sum;
real sum3 = sum*sum2;
real tanhSum = tanh(alpha*sum - beta*sum2 + gamma*sum3);
real nonOffsetRadius = offsetRadius + DIELECTRIC_OFFSET;
real radius = RECIP(RECIP(offsetRadius) - tanhSum/nonOffsetRadius);
real chain = offsetRadius*(alpha - 2.0f*beta*sum + 3.0f*gamma*sum2);
chain = (1-tanhSum*tanhSum)*chain / nonOffsetRadius;
bornRadii[index] = radius;
obcChain[index] = chain;
}
}
/**
* Reduce the Born force.
*/
extern "C" __global__ void reduceBornForce(long long* __restrict__ bornForce, real* __restrict__ energyBuffer,
const float2* __restrict__ params, const real* __restrict__ bornRadii, const real* __restrict__ obcChain) {
real energy = 0;
for (unsigned int index = blockIdx.x*blockDim.x+threadIdx.x; index < NUM_ATOMS; index += blockDim.x*gridDim.x) {
// Get summed Born force
real force = RECIP(0x100000000)*bornForce[index];
// Now calculate the actual force
float offsetRadius = params[index].x;
real bornRadius = bornRadii[index];
real r = offsetRadius+DIELECTRIC_OFFSET+PROBE_RADIUS;
real ratio6 = POW((offsetRadius+DIELECTRIC_OFFSET)/bornRadius, 6);
real saTerm = SURFACE_AREA_FACTOR*r*r*ratio6;
force += saTerm/bornRadius;
energy += saTerm;
force *= bornRadius*bornRadius*obcChain[index];
bornForce[index] = (long long) (force*0x100000000);
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy/-6;
}
typedef struct {
real x, y, z;
real q;
float radius, scaledRadius;
real bornSum;
} AtomData1;
/**
* Compute the Born sum.
*/
extern "C" __global__ void computeBornSum(unsigned long long* __restrict__ global_bornSum, const real4* __restrict__ posq, const float2* __restrict__ global_params,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms,
#else
unsigned int numTiles,
#endif
const ushort2* __restrict__ exclusionTiles) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
__shared__ AtomData1 localData[FORCE_WORK_GROUP_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real bornSum = 0;
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
float2 params1 = global_params[atom1];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
localData[threadIdx.x].radius = params1.x;
localData[threadIdx.x].scaledRadius = params1.y;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+j].x-posq1.x, localData[tbx+j].y-posq1.y, localData[tbx+j].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+j].radius, localData[tbx+j].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if ((j != tgx) && (params1.x < rScaledRadiusJ)) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
}
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
float2 tempParams = global_params[j];
localData[threadIdx.x].radius = tempParams.x;
localData[threadIdx.x].scaledRadius = tempParams.y;
localData[threadIdx.x].bornSum = 0.0f;
// Compute the full set of interactions in this tile.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
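// Accumulators are stored as 64-bit fixed point: the real-valued sum is scaled by 2^32,
// truncated to a long long and atomically added, and reduceBornSum later converts it
// back by multiplying with RECIP(0x100000000).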
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&global_bornSum[offset], static_cast<unsigned long long>((long long) (bornSum*0x100000000)));
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&global_bornSum[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].bornSum*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[FORCE_WORK_GROUP_SIZE];
__shared__ volatile int skipTiles[FORCE_WORK_GROUP_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
real bornSum = 0;
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
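// Decode the flattened tile index pos into tile coordinates (x, y) of the upper
// triangle of the block-pair matrix; the check below corrects for floating-point
// roundoff in the SQRT-based inversion.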
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
float2 params1 = global_params[atom1];
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
float2 tempParams = global_params[j];
localData[threadIdx.x].radius = tempParams.x;
localData[threadIdx.x].scaledRadius = tempParams.y;
localData[threadIdx.x].bornSum = 0.0f;
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
real3 delta = make_real3(localData[tbx+tj].x-posq1.x, localData[tbx+tj].y-posq1.y, localData[tbx+tj].z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
int atom2 = atomIndices[tbx+tj];
#ifdef USE_CUTOFF
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS && r2 < CUTOFF_SQUARED) {
#else
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
float2 params2 = make_float2(localData[tbx+tj].radius, localData[tbx+tj].scaledRadius);
real rScaledRadiusJ = r+params2.y;
if (params1.x < rScaledRadiusJ) {
real l_ij = RECIP(max(params1.x, fabs(r-params2.y)));
real u_ij = RECIP(rScaledRadiusJ);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
bornSum += l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params2.y*params2.y*invR)*(l_ij2-u_ij2));
bornSum += (params1.x < params2.y-r ? 2.0f*(RECIP(params1.x)-l_ij) : 0);
}
real rScaledRadiusI = r+params1.y;
if (params2.x < rScaledRadiusI) {
real l_ij = RECIP(max(params2.x, fabs(r-params1.y)));
real u_ij = RECIP(rScaledRadiusI);
real l_ij2 = l_ij*l_ij;
real u_ij2 = u_ij*u_ij;
real ratio = LOG(u_ij * RECIP(l_ij));
real term = l_ij - u_ij + (0.50f*invR*ratio) + 0.25f*(r*(u_ij2-l_ij2) +
(params1.y*params1.y*invR)*(l_ij2-u_ij2));
term += (params2.x < params1.y-r ? 2.0f*(RECIP(params2.x)-l_ij) : 0);
localData[tbx+tj].bornSum += term;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&global_bornSum[atom1], static_cast<unsigned long long>((long long) (bornSum*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS)
atomicAdd(&global_bornSum[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].bornSum*0x100000000)));
}
pos++;
}
}
typedef struct {
real x, y, z;
real q;
real fx, fy, fz, fw;
real bornRadius;
} AtomData2;
/**
* First part of computing the GBSA interaction.
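*
* For every atom pair this kernel evaluates the generalized Born pair term
* PREFACTOR*q1*q2 / sqrt(r^2 + B1*B2*exp(-r^2/(4*B1*B2))), accumulating the pairwise
* forces into forceBuffers, the derivative with respect to each atom's Born radius
* into global_bornForce, and the energy into energyBuffer.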
*/
extern "C" __global__ void computeGBSAForce1(unsigned long long* __restrict__ forceBuffers, unsigned long long* __restrict__ global_bornForce,
real* __restrict__ energyBuffer, const real4* __restrict__ posq, const real* __restrict__ global_bornRadii,
#ifdef USE_CUTOFF
const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms,
#else
unsigned int numTiles,
#endif
const ushort2* __restrict__ exclusionTiles) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE;
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1);
const unsigned int tbx = threadIdx.x - tgx;
real energy = 0;
__shared__ AtomData2 localData[FORCE_WORK_GROUP_SIZE];
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real4 force = make_real4(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
real bornRadius1 = global_bornRadii[atom1];
if (x == y) {
// This tile is on the diagonal.
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
localData[threadIdx.x].bornRadius = bornRadius1;
for (unsigned int j = 0; j < TILE_SIZE; j++) {
if (atom1 < NUM_ATOMS && y*TILE_SIZE+j < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+j].x, localData[tbx+j].y, localData[tbx+j].z, localData[tbx+j].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+j].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
if (atom1 != y*TILE_SIZE+j)
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += 0.5f*tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef USE_CUTOFF
}
#endif
}
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
localData[threadIdx.x].bornRadius = global_bornRadii[j];
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
localData[threadIdx.x].fw = 0.0f;
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
if (atom1 < NUM_ATOMS && y*TILE_SIZE+tj < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
#ifdef USE_CUTOFF
}
#endif
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
atomicAdd(&global_bornForce[offset], static_cast<unsigned long long>((long long) (force.w*0x100000000)));
if (x != y) {
offset = y*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
atomicAdd(&global_bornForce[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fw*0x100000000)));
}
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
unsigned int numTiles = interactionCount[0];
int pos = (int) (warp*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
int end = (int) ((warp+1)*(numTiles > maxTiles ? NUM_BLOCKS*((long long)NUM_BLOCKS+1)/2 : (long)numTiles)/totalWarps);
#else
int pos = (int) (warp*(long long)numTiles/totalWarps);
int end = (int) ((warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
__shared__ int atomIndices[FORCE_WORK_GROUP_SIZE];
__shared__ volatile int skipTiles[FORCE_WORK_GROUP_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
real4 force = make_real4(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
if (numTiles <= maxTiles) {
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= CUTOFF);
}
else
#endif
{
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
}
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
real bornRadius1 = global_bornRadii[atom1];
#ifdef USE_CUTOFF
unsigned int j = (numTiles <= maxTiles ? interactingAtoms[pos*TILE_SIZE+tgx] : y*TILE_SIZE + tgx);
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
if (j < PADDED_NUM_ATOMS) {
real4 tempPosq = posq[j];
localData[threadIdx.x].x = tempPosq.x;
localData[threadIdx.x].y = tempPosq.y;
localData[threadIdx.x].z = tempPosq.z;
localData[threadIdx.x].q = tempPosq.w;
localData[threadIdx.x].bornRadius = global_bornRadii[j];
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
localData[threadIdx.x].fw = 0.0f;
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
if (r2 < CUTOFF_SQUARED) {
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
}
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = atomIndices[tbx+tj];
if (atom1 < NUM_ATOMS && atom2 < NUM_ATOMS) {
real4 posq2 = make_real4(localData[tbx+tj].x, localData[tbx+tj].y, localData[tbx+tj].z, localData[tbx+tj].q);
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
#ifdef USE_CUTOFF
if (r2 < CUTOFF_SQUARED) {
#endif
real invR = RSQRT(r2);
real r = r2*invR;
real bornRadius2 = localData[tbx+tj].bornRadius;
real alpha2_ij = bornRadius1*bornRadius2;
real D_ij = r2*RECIP(4.0f*alpha2_ij);
real expTerm = EXP(-D_ij);
real denominator2 = r2 + alpha2_ij*expTerm;
real denominator = SQRT(denominator2);
real scaledChargeProduct = PREFACTOR*posq1.w*posq2.w;
real tempEnergy = scaledChargeProduct*RECIP(denominator);
real Gpol = tempEnergy*RECIP(denominator2);
real dGpol_dalpha2_ij = -0.5f*Gpol*expTerm*(1.0f+D_ij);
real dEdR = Gpol*(1.0f - 0.25f*expTerm);
force.w += dGpol_dalpha2_ij*bornRadius2;
#ifdef USE_CUTOFF
tempEnergy -= scaledChargeProduct/CUTOFF;
#endif
energy += tempEnergy;
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
localData[tbx+tj].fw += dGpol_dalpha2_ij*bornRadius1;
#ifdef USE_CUTOFF
}
#endif
}
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
atomicAdd(&global_bornForce[atom1], static_cast<unsigned long long>((long long) (force.w*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
atomicAdd(&global_bornForce[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fw*0x100000000)));
}
}
pos++;
}
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
|
023852cd507dc54eaf4f2c30462ee7d69ac19518.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MIT License
*
* Copyright (c) 2022 CSCS, ETH Zurich
* 2022 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Smoothing length update on the GPU
* @author Sebastian Keller <[email protected]>
*/
#include <cmath>
#include "cstone/util/util.hpp"
#include "cstone/tree/definitions.h"
#include "sph/kernels.hpp"
namespace sph
{
template<class Th>
__global__ void updateSmoothingLengthGpuKernel(size_t first, size_t last, unsigned ng0, const unsigned* nc, Th* h)
{
cstone::LocalIndex i = first + blockDim.x * blockIdx.x + threadIdx.x;
if (i >= last) { return; }
h[i] = updateH(ng0, nc[i], h[i]);
}
template<class Th>
void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, Th* h)
{
unsigned numThreads = 256;
unsigned numBlocks = iceil(last - first, 256);
hipLaunchKernelGGL(( updateSmoothingLengthGpuKernel), dim3(numBlocks), dim3(numThreads), 0, 0, first, last, ng0, nc, h);
}
template void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, float* h);
template void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, double* h);
} // namespace sph | 023852cd507dc54eaf4f2c30462ee7d69ac19518.cu | /*
* MIT License
*
* Copyright (c) 2022 CSCS, ETH Zurich
* 2022 University of Basel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*! @file
* @brief Smoothing length update on the GPU
* @author Sebastian Keller <[email protected]>
*/
#include <cmath>
#include "cstone/util/util.hpp"
#include "cstone/tree/definitions.h"
#include "sph/kernels.hpp"
namespace sph
{
template<class Th>
__global__ void updateSmoothingLengthGpuKernel(size_t first, size_t last, unsigned ng0, const unsigned* nc, Th* h)
{
cstone::LocalIndex i = first + blockDim.x * blockIdx.x + threadIdx.x;
if (i >= last) { return; }
h[i] = updateH(ng0, nc[i], h[i]);
}
template<class Th>
void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, Th* h)
{
unsigned numThreads = 256;
unsigned numBlocks = iceil(last - first, 256);
updateSmoothingLengthGpuKernel<<<numBlocks, numThreads>>>(first, last, ng0, nc, h);
}
template void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, float* h);
template void updateSmoothingLengthGpu(size_t first, size_t last, unsigned ng0, const unsigned* nc, double* h);
} // namespace sph |
420e41e401fb9e82555152b917eb175f1a672de8.hip | // !!! This is a file automatically generated by hipify!!!
#include <error.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define DEVICE_NUMBER (0)
// membar implementation similar to https://bigfoot.cs.unc.edu:3000/otternes/cuda_scheduling_examiner/src/master/src/barrier_wait.c
typedef struct {
int *threadsRemaining;
int *sense;
int thread_count;
} barrier_t;
typedef struct {
barrier_t barrier;
int *targetBuffer;
uint64_t *target_realSum;
unsigned int *targetMeasOH;
unsigned int *target_times;
int32_t nof_repetitions;
int buffer_length;
hipStream_t stream;
} kernel_param_t;
typedef struct {
int nofThreads;
int nofBlocks;
int nofKernel;
int32_t nof_repetitions;
int data_size;
int buffer_length;
unsigned int hostMeasOH;
int *hostBuffer;
uint64_t host_realSum;
unsigned int *targetMeasOH;
unsigned int *host_times;
uint64_t *target_realSum;
FILE *fd;
} param_t;
// Prints a message and returns zero if the given value is not hipSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
static int InternalCheckCUDAError(hipError_t result, const char *fn,
const char *file, int line) {
if (result == hipSuccess) return 0;
printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, hipGetErrorString(result));
return -1;
}
static void createSequentialArrayHost(param_t params){
// Link sequentially
for(int i = 0; i < params.buffer_length; i++){
params.hostBuffer[i]=(i+params.nofThreads*params.nofBlocks)%params.buffer_length;
}
}
static __global__ void getMeasurementOverhead(param_t params) {
unsigned int start, stop;
uint64_t sum = 0;
start = clock();
for (int j = 0; j < params.buffer_length; j++){
sum +=j;
}
stop = clock();
*params.targetMeasOH = (stop-start)/params.buffer_length;
*params.target_realSum = sum;
}
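// Sense-reversing spin barrier across all blocks of all launched kernels. The counter and
// sense flag live in mapped host memory; the last thread to arrive resets the counter and
// flips the shared sense, releasing the threads spinning on it.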
static __device__ inline int barrierWait(barrier_t barrier, int* local_sense) {
*local_sense = !(*local_sense);
int value = atomicSub(barrier.threadsRemaining, 1);
if(value==1) {
atomicExch(barrier.threadsRemaining, barrier.thread_count);
*(barrier.sense) = *local_sense;
return 1;
}
while (*(barrier.sense) != *local_sense){
asm volatile("membar.gl;" : : :);
continue;
}
return 1;
}
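// Pointer-chasing benchmark: every thread walks the sequentially linked index buffer,
// times the full traversal with clock(), and reports the average per-element latency
// (minus the measured loop overhead) for each repetition.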
static __global__ void sequentialWalk(kernel_param_t params) {
int current;
unsigned int time_start, time_end, time_acc, oh;
uint64_t sum = 0; // initialized so the cache warm-up accumulation below is well defined
int local_sense = 0;
oh = *params.targetMeasOH;
int tindex = blockDim.x*blockIdx.x*params.nof_repetitions + params.nof_repetitions *threadIdx.x;
int current_start = (blockDim.x*blockIdx.x + threadIdx.x)%params.buffer_length;
// Warm up data cache
for(int i = 0; i < params.buffer_length; i++){
sum += params.targetBuffer[i%params.buffer_length];
}
// Run experiment multiple times. The first two iterations (-2 and -1) warm up the icache and are not recorded
for (int i = -2; i < params.nof_repetitions; i++){
sum = 0;
time_acc = 0;
current = current_start;
barrierWait(params.barrier, &local_sense);
__syncthreads();
time_start = clock();
for(int j = 0; j < params.buffer_length; j++){
current = params.targetBuffer[current];
sum += current;
}
time_end = clock();
time_acc += (time_end - time_start);
__syncthreads();
*params.target_realSum = sum;
// Do not write time for warm up iteration
if (i>=0){
// Write the average per-element access time with the measurement overhead subtracted
params.target_times[tindex+i] = time_acc/params.buffer_length-oh;
}
}
}
static int initializeTest(param_t *params){
//allocate buffer
params->hostBuffer = NULL;
params->hostBuffer = (int *) malloc(params->buffer_length*sizeof(int));
if (!params->hostBuffer) {
perror("Failed allocating host buffer: ");
return -1;
}
createSequentialArrayHost(*params);
// Total size of the timing results (one entry per kernel, block, thread and repetition)
int size_time = params->nof_repetitions \
* params->nofThreads \
* params->nofBlocks \
* params->nofKernel \
* sizeof(unsigned int);
//allocate host times
params->host_times = NULL;
params->host_times = (unsigned int *) malloc(size_time);
if (!params->host_times) {
perror("Failed allocating host_times buffer: ");
return -1;
}
memset(params->host_times,0, size_time);
// Allocate device measOH
if (CheckCUDAError(hipMalloc(¶ms->targetMeasOH, \
sizeof(unsigned int)))) return -1;
if (CheckCUDAError(hipMalloc(¶ms->target_realSum, \
sizeof(uint64_t)))) return -1;
// Get measurement overhead
hipLaunchKernelGGL(( getMeasurementOverhead), dim3(1),dim3(1), 0, 0, *params);
if (CheckCUDAError(hipDeviceSynchronize())) return -1;
return 0;
}
static int runTest(param_t *params){
// Allocate streams for kernels
int size_time = params->nof_repetitions \
* params->nofThreads \
* params->nofBlocks \
* sizeof(unsigned int);
int *threadsRemaining;
int *sense;
if (CheckCUDAError(hipHostMalloc(&threadsRemaining, sizeof(int), hipHostMallocMapped))) return -1;
if (CheckCUDAError(hipHostMalloc(&sense, sizeof(int), hipHostMallocMapped))) return -1;
*threadsRemaining=params->nofThreads*params->nofBlocks*params->nofKernel;
*sense = 0;
kernel_param_t kernelp[params->nofKernel];
for (int i = 0; i < params->nofKernel; ++i){
hipStreamCreate(&kernelp[i].stream);
if (CheckCUDAError(hipMalloc(&kernelp[i].targetBuffer, \
params->buffer_length*sizeof(int)))) return -1;
if (CheckCUDAError(hipMemcpy(kernelp[i].targetBuffer, \
params->hostBuffer, \
params->buffer_length*sizeof(int), \
hipMemcpyHostToDevice))) return -1;
kernelp[i].barrier.threadsRemaining=threadsRemaining;
kernelp[i].barrier.sense=sense;
kernelp[i].barrier.thread_count = params->nofThreads*params->nofBlocks*params->nofKernel;
kernelp[i].buffer_length = params->buffer_length;
kernelp[i].targetMeasOH = params->targetMeasOH;
kernelp[i].nof_repetitions = params->nof_repetitions;
if (CheckCUDAError(hipMalloc(&kernelp[i].target_times, \
size_time))) return -1;
// Allocate device accumulator
if (CheckCUDAError(hipMalloc(&kernelp[i].target_realSum, \
sizeof(uint64_t)))) return -1;
}
for (int i = 0; i < params->nofKernel; ++i){
// Launch kernel
//randomWalkDiffElement<<<params->nofBlocks,params->nofThreads, 0, kernelp[i].stream>>>(kernelp[i]);
hipLaunchKernelGGL(( sequentialWalk), dim3(params->nofBlocks),dim3(params->nofThreads), 0, kernelp[i].stream, kernelp[i]);
}
// Synchronize with device
if (CheckCUDAError(hipDeviceSynchronize())) return -1;
for (int i = 0; i < params->nofKernel; ++i){
hipStreamDestroy(kernelp[i].stream);
// Copyback times
if (CheckCUDAError(hipMemcpy(¶ms->host_times[i*params->nof_repetitions*params->nofThreads*params->nofBlocks], \
kernelp[i].target_times, \
size_time, \
hipMemcpyDeviceToHost))) return -1;
// Copyback sum
params->host_realSum=0;
if (CheckCUDAError(hipMemcpy(¶ms->host_realSum, \
kernelp[i].target_realSum, \
sizeof(uint64_t), \
hipMemcpyDeviceToHost))) return -1;
hipFree(kernelp[i].target_realSum);
hipFree(kernelp[i].target_times);
}
// Copyback target meas oh
params->hostMeasOH=0;
if (CheckCUDAError(hipMemcpy(¶ms->hostMeasOH, \
params->targetMeasOH, \
sizeof(unsigned int), \
hipMemcpyDeviceToHost))) return -1;
return 0;
}
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Write device info
hipDeviceProp_t deviceProp;
if (CheckCUDAError(hipGetDeviceProperties(&deviceProp, DEVICE_NUMBER))) return -1;
int driverVersion = 0;
if (CheckCUDAError(hipDriverGetVersion(&driverVersion))) return -1;
int runtimeVersion = 0;
if (CheckCUDAError(hipRuntimeGetVersion(&runtimeVersion))) return -1;
if (fprintf(params->fd,"\"driverVer\": \"%d\",\n", driverVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"runTimeVer\": \"%d\",\n", runtimeVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"clockRate\": \"%d\",\n", deviceProp.clockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"globalL1CacheSupported\": \"%d\",\n", deviceProp.globalL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"localL1CacheSupported\": \"%d\",\n", deviceProp.localL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"l2CacheSize\": \"%d\",\n", deviceProp.l2CacheSize) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryBusWidth\": \"%d\",\n", deviceProp.memoryBusWidth) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryClockRate\": \"%d\",\n", deviceProp.memoryClockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"multiProcessorCount\": \"%d\",\n", deviceProp.multiProcessorCount) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerBlock\": \"%d\",\n", deviceProp.regsPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerMultiprocessor\": \"%d\",\n", deviceProp.regsPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerBlock\": \"%zu\",\n", deviceProp.sharedMemPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerMultiprocessor\": \"%zu\",\n", deviceProp.sharedMemPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"warpSize\": \"%d\",\n", deviceProp.warpSize) < 0 ) return -1;
hipFuncCache_t config;
if (CheckCUDAError(hipDeviceGetCacheConfig ( &config ) )) return -1;
if (fprintf(params->fd,"\"cacheConfig\": \"%d\",\n", config) < 0 ) return -1;
// Write header
if (fprintf(params->fd,"\"nofThreads\": \"%u\",\n", params->nofThreads) < 0 ) return -1;
if (fprintf(params->fd,"\"nofBlocks\": \"%u\",\n", params->nofBlocks) < 0 ) return -1;
if (fprintf(params->fd,"\"nofKernel\": \"%u\",\n", params->nofKernel) < 0 ) return -1;
if (fprintf(params->fd,"\"nof_repetitions\": \"%d\",\n", params->nof_repetitions) < 0 ) return -1;
if (fprintf(params->fd,"\"data_size\": \"%d\",\n", params->data_size) < 0 ) return -1;
if (fprintf(params->fd,"\"buffer_length\": \"%d\",\n", params->buffer_length) < 0 ) return -1;
if (fprintf(params->fd,"\"real_sum\": \"%llu\",\n", (unsigned long long)params->host_realSum) < 0 ) return -1;
if (fprintf(params->fd,"\"exp_sum\": \"%llu\",\n", ((unsigned long long)(params->buffer_length-1)*(unsigned long long)params->buffer_length)/2) < 0 ) return -1;
if (fprintf(params->fd,"\"measOH\": \"%u\",\n", params->hostMeasOH) < 0 ) return -1;
// Write times
int size_time = params->nof_repetitions \
* params->nofKernel \
* params->nofThreads \
* params->nofBlocks;
if (fprintf(params->fd,"\"times\":[\n") < 0 ) return -1;
for (int i = 0; i < size_time-1; i++){
if (fprintf(params->fd,"\"%u\",\n",params->host_times[i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%u\"]\n}", params->host_times[size_time-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
static int cleanUp(param_t *params){
// Free target buffers
hipFree(params->targetMeasOH);
hipFree(params->target_realSum);
// Free host buffers
free(params->hostBuffer);
free(params->host_times);
return 0;
}
static void PrintUsage(const char *name) {
printf("Usage: %s <#threads> <#blocks> <# kernel> <# of intervals> <size in KB>"
"<output JSON file name>\n", name);
}
int main(int argc, char **argv) {
if (argc != 7) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
// Parse input parameter
int nof_threads = atoi(argv[1]);
if (nof_threads <= 0) {
printf("Min one thread. Got %s threads\n", argv[1]);
return EXIT_FAILURE;
}
int nof_blocks = atoi(argv[2]);
if (nof_blocks <= 0) {
printf("Min 1 block. Got %s blocks\n", argv[2]);
return EXIT_FAILURE;
}
int nof_kernel = atoi(argv[3]);
if (nof_kernel <= 0) {
printf("Min 1 kernel. Got %s blocks\n", argv[2]);
return EXIT_FAILURE;
}
params.nofThreads = nof_threads;
params.nofBlocks = nof_blocks;
params.nofKernel = nof_kernel;
int nof_repetitions = atoi(argv[4]);
if (nof_repetitions <= 0) {
printf("More than 0 repetitions need to be used. Got %s repetitions\n", argv[3]);
return EXIT_FAILURE;
}
int data_size = atoi(argv[5]);
if (data_size <= 0) {
printf("The buffer must be 1 or more KB. Got %s KB\n", argv[4]);
return EXIT_FAILURE;
}
params.nof_repetitions = nof_repetitions;
params.data_size = data_size*1024;
params.buffer_length = data_size*1024/sizeof(int);
params.fd = NULL;
params.fd = fopen(argv[6],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(hipSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(¶ms) < 0) return EXIT_FAILURE;
// Run test
if (runTest(¶ms) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(¶ms) < 0){
perror("Error while writing outpufile: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(¶ms) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
hipDeviceReset();
return 0;
}
| 420e41e401fb9e82555152b917eb175f1a672de8.cu |
#include <error.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#define DEVICE_NUMBER (0)
// membar implementation similar to https://bigfoot.cs.unc.edu:3000/otternes/cuda_scheduling_examiner/src/master/src/barrier_wait.c
typedef struct {
int *threadsRemaining;
int *sense;
int thread_count;
} barrier_t;
typedef struct {
barrier_t barrier;
int *targetBuffer;
uint64_t *target_realSum;
unsigned int *targetMeasOH;
unsigned int *target_times;
int32_t nof_repetitions;
int buffer_length;
cudaStream_t stream;
} kernel_param_t;
typedef struct {
int nofThreads;
int nofBlocks;
int nofKernel;
int32_t nof_repetitions;
int data_size;
int buffer_length;
unsigned int hostMeasOH;
int *hostBuffer;
uint64_t host_realSum;
unsigned int *targetMeasOH;
unsigned int *host_times;
uint64_t *target_realSum;
FILE *fd;
} param_t;
// Prints a message and returns zero if the given value is not cudaSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
static int InternalCheckCUDAError(cudaError_t result, const char *fn,
const char *file, int line) {
if (result == cudaSuccess) return 0;
printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, cudaGetErrorString(result));
return -1;
}
static void createSequentialArrayHost(param_t params){
// Link sequentially
for(int i = 0; i < params.buffer_length; i++){
params.hostBuffer[i]=(i+params.nofThreads*params.nofBlocks)%params.buffer_length;
}
}
static __global__ void getMeasurementOverhead(param_t params) {
unsigned int start, stop;
uint64_t sum = 0;
start = clock();
for (int j = 0; j < params.buffer_length; j++){
sum +=j;
}
stop = clock();
*params.targetMeasOH = (stop-start)/params.buffer_length;
*params.target_realSum = sum;
}
static __device__ inline int barrierWait(barrier_t barrier, int* local_sense) {
*local_sense = !(*local_sense);
int value = atomicSub(barrier.threadsRemaining, 1);
if(value==1) {
atomicExch(barrier.threadsRemaining, barrier.thread_count);
*(barrier.sense) = *local_sense;
return 1;
}
while (*(barrier.sense) != *local_sense){
asm volatile("membar.gl;" : : :);
continue;
}
return 1;
}
static __global__ void sequentialWalk(kernel_param_t params) {
int current;
unsigned int time_start, time_end, time_acc, oh;
uint64_t sum = 0; // initialized so the cache warm-up accumulation below is well defined
int local_sense = 0;
oh = *params.targetMeasOH;
int tindex = blockDim.x*blockIdx.x*params.nof_repetitions + params.nof_repetitions *threadIdx.x;
int current_start = (blockDim.x*blockIdx.x + threadIdx.x)%params.buffer_length;
// Warm up data cache
for(int i = 0; i < params.buffer_length; i++){
sum += params.targetBuffer[i%params.buffer_length];
}
// Run experiment multiple times. The first two iterations (-2 and -1) warm up the icache and are not recorded
for (int i = -2; i < params.nof_repetitions; i++){
sum = 0;
time_acc = 0;
current = current_start;
barrierWait(params.barrier, &local_sense);
__syncthreads();
time_start = clock();
for(int j = 0; j < params.buffer_length; j++){
current = params.targetBuffer[current];
sum += current;
}
time_end = clock();
time_acc += (time_end - time_start);
__syncthreads();
*params.target_realSum = sum;
// Do not write time for warm up iteration
if (i>=0){
// Write the average per-element access time with the measurement overhead subtracted
params.target_times[tindex+i] = time_acc/params.buffer_length-oh;
}
}
}
static int initializeTest(param_t *params){
//allocate buffer
params->hostBuffer = NULL;
params->hostBuffer = (int *) malloc(params->buffer_length*sizeof(int));
if (!params->hostBuffer) {
perror("Failed allocating host buffer: ");
return -1;
}
createSequentialArrayHost(*params);
// Total size of the timing results (one entry per kernel, block, thread and repetition)
int size_time = params->nof_repetitions \
* params->nofThreads \
* params->nofBlocks \
* params->nofKernel \
* sizeof(unsigned int);
//allocate host times
params->host_times = NULL;
params->host_times = (unsigned int *) malloc(size_time);
if (!params->host_times) {
perror("Failed allocating host_times buffer: ");
return -1;
}
memset(params->host_times,0, size_time);
// Allocate device measOH
if (CheckCUDAError(cudaMalloc(¶ms->targetMeasOH, \
sizeof(unsigned int)))) return -1;
if (CheckCUDAError(cudaMalloc(¶ms->target_realSum, \
sizeof(uint64_t)))) return -1;
// Get measurement overhead
getMeasurementOverhead<<<1,1>>>(*params);
if (CheckCUDAError(cudaDeviceSynchronize())) return -1;
return 0;
}
static int runTest(param_t *params){
// Allocate streams for kernels
int size_time = params->nof_repetitions \
* params->nofThreads \
* params->nofBlocks \
* sizeof(unsigned int);
int *threadsRemaining;
int *sense;
if (CheckCUDAError(cudaHostAlloc(&threadsRemaining, sizeof(int), cudaHostAllocMapped))) return -1;
if (CheckCUDAError(cudaHostAlloc(&sense, sizeof(int), cudaHostAllocMapped))) return -1;
*threadsRemaining=params->nofThreads*params->nofBlocks*params->nofKernel;
*sense = 0;
kernel_param_t kernelp[params->nofKernel];
for (int i = 0; i < params->nofKernel; ++i){
cudaStreamCreate(&kernelp[i].stream);
if (CheckCUDAError(cudaMalloc(&kernelp[i].targetBuffer, \
params->buffer_length*sizeof(int)))) return -1;
if (CheckCUDAError(cudaMemcpy(kernelp[i].targetBuffer, \
params->hostBuffer, \
params->buffer_length*sizeof(int), \
cudaMemcpyHostToDevice))) return -1;
kernelp[i].barrier.threadsRemaining=threadsRemaining;
kernelp[i].barrier.sense=sense;
kernelp[i].barrier.thread_count = params->nofThreads*params->nofBlocks*params->nofKernel;
kernelp[i].buffer_length = params->buffer_length;
kernelp[i].targetMeasOH = params->targetMeasOH;
kernelp[i].nof_repetitions = params->nof_repetitions;
if (CheckCUDAError(cudaMalloc(&kernelp[i].target_times, \
size_time))) return -1;
// Allocate device accumulator
if (CheckCUDAError(cudaMalloc(&kernelp[i].target_realSum, \
sizeof(uint64_t)))) return -1;
}
for (int i = 0; i < params->nofKernel; ++i){
// Launch kernel
//randomWalkDiffElement<<<params->nofBlocks,params->nofThreads, 0, kernelp[i].stream>>>(kernelp[i]);
sequentialWalk<<<params->nofBlocks,params->nofThreads, 0, kernelp[i].stream>>>(kernelp[i]);
}
// Synchronize with device
if (CheckCUDAError(cudaDeviceSynchronize())) return -1;
for (int i = 0; i < params->nofKernel; ++i){
cudaStreamDestroy(kernelp[i].stream);
// Copyback times
if (CheckCUDAError(cudaMemcpy(¶ms->host_times[i*params->nof_repetitions*params->nofThreads*params->nofBlocks], \
kernelp[i].target_times, \
size_time, \
cudaMemcpyDeviceToHost))) return -1;
// Copyback sum
params->host_realSum=0;
if (CheckCUDAError(cudaMemcpy(¶ms->host_realSum, \
kernelp[i].target_realSum, \
sizeof(uint64_t), \
cudaMemcpyDeviceToHost))) return -1;
cudaFree(kernelp[i].target_realSum);
cudaFree(kernelp[i].target_times);
}
// Copyback target meas oh
params->hostMeasOH=0;
if (CheckCUDAError(cudaMemcpy(¶ms->hostMeasOH, \
params->targetMeasOH, \
sizeof(unsigned int), \
cudaMemcpyDeviceToHost))) return -1;
return 0;
}
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Write device info
cudaDeviceProp deviceProp;
if (CheckCUDAError(cudaGetDeviceProperties(&deviceProp, DEVICE_NUMBER))) return -1;
int driverVersion = 0;
if (CheckCUDAError(cudaDriverGetVersion(&driverVersion))) return -1;
int runtimeVersion = 0;
if (CheckCUDAError(cudaRuntimeGetVersion(&runtimeVersion))) return -1;
if (fprintf(params->fd,"\"driverVer\": \"%d\",\n", driverVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"runTimeVer\": \"%d\",\n", runtimeVersion) < 0 ) return -1;
if (fprintf(params->fd,"\"clockRate\": \"%d\",\n", deviceProp.clockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"globalL1CacheSupported\": \"%d\",\n", deviceProp.globalL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"localL1CacheSupported\": \"%d\",\n", deviceProp.localL1CacheSupported) < 0 ) return -1;
if (fprintf(params->fd,"\"l2CacheSize\": \"%d\",\n", deviceProp.l2CacheSize) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryBusWidth\": \"%d\",\n", deviceProp.memoryBusWidth) < 0 ) return -1;
if (fprintf(params->fd,"\"memoryClockRate\": \"%d\",\n", deviceProp.memoryClockRate) < 0 ) return -1;
if (fprintf(params->fd,"\"multiProcessorCount\": \"%d\",\n", deviceProp.multiProcessorCount) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerBlock\": \"%d\",\n", deviceProp.regsPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"regsPerMultiprocessor\": \"%d\",\n", deviceProp.regsPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerBlock\": \"%zu\",\n", deviceProp.sharedMemPerBlock) < 0 ) return -1;
if (fprintf(params->fd,"\"sharedMemPerMultiprocessor\": \"%zu\",\n", deviceProp.sharedMemPerMultiprocessor) < 0 ) return -1;
if (fprintf(params->fd,"\"warpSize\": \"%d\",\n", deviceProp.warpSize) < 0 ) return -1;
cudaFuncCache config;
if (CheckCUDAError(cudaDeviceGetCacheConfig ( &config ) )) return -1;
if (fprintf(params->fd,"\"cacheConfig\": \"%d\",\n", config) < 0 ) return -1;
// Write header
if (fprintf(params->fd,"\"nofThreads\": \"%u\",\n", params->nofThreads) < 0 ) return -1;
if (fprintf(params->fd,"\"nofBlocks\": \"%u\",\n", params->nofBlocks) < 0 ) return -1;
if (fprintf(params->fd,"\"nofKernel\": \"%u\",\n", params->nofKernel) < 0 ) return -1;
if (fprintf(params->fd,"\"nof_repetitions\": \"%d\",\n", params->nof_repetitions) < 0 ) return -1;
if (fprintf(params->fd,"\"data_size\": \"%d\",\n", params->data_size) < 0 ) return -1;
if (fprintf(params->fd,"\"buffer_length\": \"%d\",\n", params->buffer_length) < 0 ) return -1;
if (fprintf(params->fd,"\"real_sum\": \"%llu\",\n", (unsigned long long)params->host_realSum) < 0 ) return -1;
if (fprintf(params->fd,"\"exp_sum\": \"%llu\",\n", ((unsigned long long)(params->buffer_length-1)*(unsigned long long)params->buffer_length)/2) < 0 ) return -1;
if (fprintf(params->fd,"\"measOH\": \"%u\",\n", params->hostMeasOH) < 0 ) return -1;
// Write times
int size_time = params->nof_repetitions \
* params->nofKernel \
* params->nofThreads \
* params->nofBlocks;
if (fprintf(params->fd,"\"times\":[\n") < 0 ) return -1;
for (int i = 0; i < size_time-1; i++){
if (fprintf(params->fd,"\"%u\",\n",params->host_times[i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%u\"]\n}", params->host_times[size_time-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
static int cleanUp(param_t *params){
// Free target buffers
cudaFree(params->targetMeasOH);
cudaFree(params->target_realSum);
// Free host buffers
free(params->hostBuffer);
free(params->host_times);
return 0;
}
static void PrintUsage(const char *name) {
printf("Usage: %s <#threads> <#blocks> <# kernel> <# of intervals> <size in KB>"
"<output JSON file name>\n", name);
}
int main(int argc, char **argv) {
if (argc != 7) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
// Parse input parameter
int nof_threads = atoi(argv[1]);
if (nof_threads <= 0) {
printf("Min one thread. Got %s threads\n", argv[1]);
return EXIT_FAILURE;
}
int nof_blocks = atoi(argv[2]);
if (nof_blocks <= 0) {
printf("Min 1 block. Got %s blocks\n", argv[2]);
return EXIT_FAILURE;
}
int nof_kernel = atoi(argv[3]);
if (nof_kernel <= 0) {
printf("Min 1 kernel. Got %s blocks\n", argv[2]);
return EXIT_FAILURE;
}
params.nofThreads = nof_threads;
params.nofBlocks = nof_blocks;
params.nofKernel = nof_kernel;
int nof_repetitions = atoi(argv[4]);
if (nof_repetitions <= 0) {
printf("More than 0 repetitions need to be used. Got %s repetitions\n", argv[3]);
return EXIT_FAILURE;
}
int data_size = atoi(argv[5]);
if (data_size <= 0) {
printf("The buffer must be 1 or more KB. Got %s KB\n", argv[4]);
return EXIT_FAILURE;
}
params.nof_repetitions = nof_repetitions;
params.data_size = data_size*1024;
params.buffer_length = data_size*1024/sizeof(int);
params.fd = NULL;
params.fd = fopen(argv[6],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(cudaSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(¶ms) < 0) return EXIT_FAILURE;
// Run test
if (runTest(¶ms) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(¶ms) < 0){
perror("Error while writing outpufile: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(¶ms) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
cudaDeviceReset();
return 0;
}
|
3ca542fcf7e9089ed97a64e70f9310fa2826fb5a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
} | 3ca542fcf7e9089ed97a64e70f9310fa2826fb5a.cu | #include "includes.h"
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
} |
b555dbfb58a4d426449f1822d3ddfd67ebe9dfad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.hpp"
#include "io_uncomp.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits = 15; // maximum bits in a code
constexpr int max_l_codes = 286; // maximum number of literal/length codes
constexpr int max_d_codes = 30; // maximum number of distance codes
constexpr int fix_l_codes = 288; // number of fixed literal/length codes
constexpr int log2_len_lut = 10;
constexpr int log2_dist_lut = 8;
/**
* @brief Intermediate arrays for building huffman tables
*/
struct scratch_arr {
int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths
int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch)
};
/**
* @brief Huffman LUTs for length and distance codes
*/
struct lut_arr {
int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding
int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2; // 1..5
constexpr int log2_batch_size = 5;
constexpr int batch_count = (1 << log2_batch_count);
constexpr int batch_size = (1 << log2_batch_size);
/**
* @brief Inter-warp communication queue
*/
struct xwarp_s {
int32_t batch_len[batch_count]; //< Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[batch_count * batch_size];
uint8_t symqueue8[batch_count * batch_size * 4];
} u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9; // Must be at least log2_batch_size + 3
constexpr int prefetch_size = (1 << log2_prefetch_size);
/// @brief Prefetcher state
struct prefetch_queue_s {
uint8_t const* cur_p; ///< Prefetch location
int run; ///< prefetcher will exit when run=0
uint8_t pref_data[prefetch_size];
};
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
* @brief Inflate decompressor state
*/
struct inflate_state_s {
// output state
uint8_t* out; ///< output buffer
uint8_t* outbase; ///< start of output buffer
uint8_t* outend; ///< end of output buffer
// Input state
uint8_t const* cur; ///< input buffer
uint8_t const* end; ///< end of input buffer
uint2 bitbuf; ///< bit buffer (64-bit)
uint32_t bitpos; ///< position in bit buffer
int32_t err; ///< Error status
int btype; ///< current block type
int blast; ///< last block
uint32_t stored_blk_len; ///< length of stored (uncompressed) block
uint16_t first_slow_len; ///< first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[max_bits + 1];
int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes
int16_t distcnt[max_bits + 1];
int16_t distsym[max_d_codes];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source,
unsigned int bit_start,
unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
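// Hedged reference sketch (hypothetical name, host-callable, not used by the
// decoder): a plain C++ approximation of the PTX bfe.u32 intrinsic above for the
// argument ranges that appear in this file -- extract num_bits bits of source
// starting at bit_start, zero-extended.
inline uint32_t bfe_reference(uint32_t source, uint32_t bit_start, uint32_t num_bits)
{
  if (bit_start >= 32 || num_bits == 0) return 0;
  uint32_t field = source >> bit_start;
  return (num_bits < 32) ? (field & ((1u << num_bits) - 1u)) : field;
}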
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32) {
auto cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, int16_t const* counts, int16_t const* symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= max_bits; len++) {
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
inflate_state_s* s, int16_t* counts, int16_t* symbols, int16_t const* length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t* offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= max_bits; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= max_bits; len++) {
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0) return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < max_bits; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
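// Hedged worked example (hypothetical helper, host-only, never called): running
// the same counting/offset logic as construct() on a toy set of code lengths
// shows what counts[]/symbols[] end up holding and why left == 0 means the code
// is complete. Lengths {2, 1, 3, 3} describe the canonical code
//   symbol 1 -> 0, symbol 0 -> 10, symbol 2 -> 110, symbol 3 -> 111.
inline int construct_toy_example()
{
  int16_t const length[4] = {2, 1, 3, 3};
  int16_t counts[max_bits + 1] = {};
  int16_t symbols[4] = {};
  int16_t offs[max_bits + 1] = {};
  for (int symbol = 0; symbol < 4; symbol++)
    counts[length[symbol]]++;                       // counts[1]=1, counts[2]=1, counts[3]=2
  int left = 1;                                     // Kraft accounting, as in construct()
  for (int len = 1; len <= max_bits; len++) {
    left = (left << 1) - counts[len];
    if (left < 0) return left;                      // would mean an over-subscribed code
  }
  offs[1] = 0;                                      // offsets into symbols[] per length
  for (int len = 1; len < max_bits; len++)
    offs[len + 1] = offs[len] + counts[len];
  for (int symbol = 0; symbol < 4; symbol++)
    if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
  // symbols[] is now {1, 0, 2, 3}: sorted by length, original order within a length
  return left;                                      // 0 here: the toy code is complete
}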
/// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s* s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t* lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > max_l_codes || ndist > max_d_codes) {
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist) {
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0) return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0) return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
} else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist) return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0) return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
 * of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
int16_t* lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < fix_l_codes; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, fix_l_codes);
// distance table
for (symbol = 0; symbol < max_d_codes; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, max_d_codes);
return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
 * pairs terminated by an end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
 * followed by a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// Size bases and extra-bit counts for length and distance codes (RFC 1951)
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27,
31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129,
193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
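// Hedged worked example (hypothetical helper, host-only, never called): how the
// tables above turn a symbol plus extra bits into an actual length/distance, and
// why the byte-by-byte forward copy described in the notes handles overlap. The
// numbers restate table entries rather than reading the __constant__ arrays.
inline void lz77_copy_example()
{
  // Length symbol 266 -> index 9: base g_lens[9] == 13 with g_lext[9] == 1 extra
  // bit, so it encodes lengths 13..14. Distance symbol 4: base g_dists[4] == 5
  // with g_dext[4] == 1 extra bit, so distances 5..6.
  uint32_t const length   = 13 + 1;                // base 13 plus extra-bit value 1
  uint32_t const distance = 5;                     // base 5 plus extra-bit value 0
  uint8_t buf[64] = {'a', 'b', 'c', 'd', 'e'};     // five bytes already produced
  uint32_t out = 5;
  for (uint32_t i = 0; i < length; ++i, ++out) {
    buf[out] = buf[out - distance];                // reads bytes written earlier in this copy
  }
  // buf now holds "abcdeabcdeabcdeabcd": the 5-byte window replayed to cover 14 bytes
}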
/// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s* s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
auto cur = s->cur;
auto end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
(s->x.batch_len[batch] != 0)) {}
#else
while (s->x.batch_len[batch] != 0) {}
#endif
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos >> 3) >= end) {
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do {
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
} else {
// Slow length path
uint32_t next32r = __brev(next32);
int16_t const* symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = log2_len_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256) {
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256) {
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(uint32_t const*)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
if (dist > 0) {
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
} else {
uint32_t next32r = __brev(next32);
int16_t const* symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = log2_dist_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
} else {
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input
// buffer
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end) {
bitbuf.y = *(uint32_t const*)cur;
cur -= 4;
} else {
bitbuf.y = 0;
cur -= 4;
if (cur > end) {
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256) break;
b[batch_len++] = sym;
} while (batch_len < batch_size - 1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s*)s)->cur = cur;
#endif
if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0) {}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
/**
* @brief Build lookup tables for faster decode
 * LUT format is symbol*32 + length (number of bits to skip in the low 5 bits)
*/
__device__ void init_length_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
int16_t const* cnt = s->lencnt;
int16_t const* symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_len_lut);
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int code = (rbits >> (log2_len_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
sym = symbols[code];
if (sym > 256) {
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
int16_t const* cnt = s->lencnt;
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
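// Hedged sketch (hypothetical helpers, host-only, never called): for a literal,
// the fast-path LUT entry built above is simply (symbol << 5) | bits_to_skip,
// which is why decode_symbols() treats any entry below (0x100 << 5) as a short
// literal and unpacks it with the same shift and mask. Length symbols pack the
// base value, the extra-bit mask and the extra-bit offset into the higher fields.
inline int32_t pack_literal_lut_entry(int symbol, int bits_to_skip)
{
  return (symbol << 5) | bits_to_skip;             // bits 4:0 = bits consumed by this code
}
inline void unpack_literal_lut_entry(int32_t entry, int& symbol, int& bits_to_skip)
{
  bits_to_skip = entry & 0x1f;                     // same mask as decode_symbols()
  symbol       = entry >> 5;                       // literal 0..255
}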
/**
* @brief Build lookup tables for faster decode of distance symbol
 * LUT format is symbol*32 + length (number of bits to skip in the low 5 bits)
*/
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
int16_t const* cnt = s->distcnt;
int16_t const* symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut);
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int code = (rbits >> (log2_dist_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
int16_t const* cnt = s->distcnt;
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
/// @brief WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s* s, int t)
{
uint8_t* out = s->out;
uint8_t const* outend = s->outend;
uint8_t const* outbase = s->outbase;
int batch = 0;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
int batch_len = 0;
if (t == 0) {
while ((batch_len = s->x.batch_len[batch]) == 0) {}
}
batch_len = shuffle(batch_len);
if (batch_len < 0) { break; }
auto const symt = (t < batch_len) ? b[t] : 256;
auto const lit_mask = ballot(symt >= 256);
auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0) { s->x.batch_len[batch] = 0; }
if (t < pos && out + t < outend) { out[t] = symt; }
out += pos;
batch_len -= pos;
while (batch_len > 0) {
int dist, len, symbol;
// Process a non-literal symbol
symbol = shuffle(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32) {
uint8_t const* src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend) { out[i] = b; }
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1)) {
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = shuffle(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend) { out[t] = symbol; }
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (batch_count - 1);
} while (true);
if (t == 0) { s->out = out; }
}
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
__device__ int init_stored(inflate_state_s* s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); }
if (s->cur + (s->bitpos >> 3) >= s->end) {
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff)) {
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end) {
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
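// Hedged worked example (hypothetical helper, host-only, never called): the
// stored-block header validated above is just a little-endian LEN followed by
// its one's complement NLEN, both byte-aligned.
inline bool stored_header_ok_example(uint8_t const* p)
{
  uint32_t len  = p[0] | (p[1] << 8);
  uint32_t nlen = p[2] | (p[3] << 8);
  return len == (nlen ^ 0xffff);                   // e.g. LEN 0x0005 pairs with NLEN 0xfffa
}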
/// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s* s, int t)
{
auto len = s->stored_blk_len;
auto cur = s->cur + s->bitpos / 8;
auto out = s->out;
auto outend = s->outend;
auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16));
// Slow copy until output is 16B aligned
if (slow_bytes) {
for (int i = t; i < slow_bytes; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
auto fast_bytes = len;
if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
fast_bytes &= ~0xf;
auto bitpos = ((int)((size_t)cur % 4)) * 8;
auto cur4 = cur - (bitpos / 8);
if (out < outend) {
// Fast copy 16 bytes at a time
for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
uint4 u;
u.x = *reinterpret_cast<uint32_t const*>(cur4 + i + 0 * 4);
u.y = *reinterpret_cast<uint32_t const*>(cur4 + i + 1 * 4);
u.z = *reinterpret_cast<uint32_t const*>(cur4 + i + 2 * 4);
u.w = *reinterpret_cast<uint32_t const*>(cur4 + i + 3 * 4);
if (bitpos != 0) {
uint32_t v = (bitpos != 0) ? *reinterpret_cast<uint32_t const*>(cur4 + i + 4 * 4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*reinterpret_cast<uint4*>(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0) {
// Reset bitstream to end of block
auto p = cur + len;
auto prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
if (t == 0) {
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
uint8_t const* cur_p = s->pref.cur_p;
uint8_t const* end = s->end;
while (shuffle((t == 0) ? s->pref.run : 0)) {
auto cur_lo = (int32_t)(size_t)cur_p;
int do_pref =
shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
if (do_pref) {
uint8_t const* p = cur_p + 4 * t;
*prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
cur_p += 4 * 32;
__threadfence_block();
__syncwarp();
if (!t) {
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
}
}
#endif // ENABLE_PREFETCH
/**
* @brief Parse GZIP header
* See https://tools.ietf.org/html/rfc1952
*/
__device__ int parse_gzip_header(uint8_t const* src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18) {
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f'8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint8_t flags = src[3];
hdr_len = 10;
if (flags & GZIPHeaderFlag::fextra) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
hdr_len += xlen;
if (hdr_len >= src_size) return -1;
}
if (flags & GZIPHeaderFlag::fname) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fcomment) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len + 8 >= src_size) hdr_len = -1;
}
}
return hdr_len;
}
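// Hedged worked example (hypothetical helper, host-only, never called): per
// RFC 1952 a flag-free member header is exactly 10 bytes, which is what
// parse_gzip_header() returns for it, provided the buffer also has room for
// some deflate data and the 8-byte CRC32/ISIZE footer.
inline int gzip_header_len_example()
{
  // magic 0x1f 0x8b, CM = 8 (deflate), FLG = 0, MTIME, XFL, OS (values arbitrary)
  uint8_t const hdr[10] = {0x1f, 0x8b, 0x08, 0x00, 0, 0, 0, 0, 0x00, 0x03};
  uint32_t sig = (hdr[0] << 16) | (hdr[1] << 8) | hdr[2];   // same signature test as above
  return (sig == 0x1f'8b08 && hdr[3] == 0) ? 10 : -1;       // -> 10
}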
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
 * @param inputs Source buffer information per block
* @param outputs Destination buffer information per block
* @param results Decompression status buffer per block
 * @param parse_hdr If gzip_header_included::YES, the compressed bitstream includes a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
inflate_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
gzip_header_included parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s* state = &state_g;
if (!t) {
auto p = inputs[z].data();
auto src_size = inputs[z].size();
// Parse header if needed
state->err = 0;
if (parse_hdr == gzip_header_included::YES) {
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = outputs[z].data();
state->outbase = state->out;
state->outend = state->out + outputs[z].size();
state->end = p + src_size;
auto const prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
// Main loop decoding blocks
while (!state->err) {
if (!t) {
// Thread0: read last flag, block type and custom huffman tables if any
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else {
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2)) {
// Initializes lookup tables (block wide)
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
// Initialize prefetcher
init_prefetcher(state, t);
#endif
if (t < batch_count) { state->x.batch_len[t] = 0; }
__syncthreads();
// decode data until end-of-block code
if (t < 1 * 32) {
// WARP0: decode variable-length symbols
if (!t) {
// Thread0: decode symbols (single threaded)
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
} else if (t < 2 * 32) {
// WARP1: perform LZ77 using length and distance codes from WARP0
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3 * 32) {
// WARP2: Prefetcher: prefetch data for WARP0
prefetch_warp(state, t & 0x1f);
}
#endif
// else WARP3: idle
} else if (!state->err && state->btype == 0) {
// Uncompressed block (block-wide memcpy)
copy_stored(state, t);
}
if (state->blast) break;
__syncthreads();
}
__syncthreads();
// Output decompression status and length
if (!t) {
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
// Read past the end of the input buffer
state->err = 2;
} else if (state->err == 0 && state->out > state->outend) {
// Output buffer too small
state->err = 1;
}
results[z].bytes_written = state->out - state->outbase;
results[z].status = [&]() {
switch (state->err) {
case 0: return compression_status::SUCCESS;
case 1: return compression_status::OUTPUT_OVERFLOW;
default: return compression_status::FAILURE;
}
}();
results[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes
}
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
 * @param inputs Source buffer information per block
 * @param outputs Destination buffer information per block
*/
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs)
{
__shared__ uint8_t const* volatile src_g;
__shared__ uint8_t* volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
uint8_t const* src;
uint8_t* dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = inputs[z].data();
dst = outputs[z].data();
len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size()));
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) { dst[t] = src[t]; }
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
while (len >= 32) {
auto const* src32 = reinterpret_cast<uint32_t const*>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
reinterpret_cast<uint32_t*>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) { dst[t] = src[t]; }
}
void gpuinflate(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
gzip_header_included parse_hdr,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 128; // Threads per block
if (inputs.size() > 0) {
hipLaunchKernelGGL(( inflate_kernel<block_size>)
, dim3(inputs.size()), dim3(block_size), 0, stream.value(), inputs, outputs, results, parse_hdr);
}
}
void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
rmm::cuda_stream_view stream)
{
if (inputs.size() > 0) {
hipLaunchKernelGGL(( copy_uncompressed_kernel), dim3(inputs.size()), dim3(1024), 0, stream.value(), inputs, outputs);
}
}
} // namespace io
} // namespace cudf
| b555dbfb58a4d426449f1822d3ddfd67ebe9dfad.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @file gpuinflate.cu
Derived from zlib's contrib/puff.c, original copyright notice below
*/
/*
Copyright (C) 2002-2013 Mark Adler, all rights reserved
version 2.3, 21 Jan 2013
This software is provided 'as-is', without any express or implied
warranty. In no event will the author be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
Mark Adler [email protected]
*/
#include "gpuinflate.hpp"
#include "io_uncomp.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
constexpr int max_bits = 15; // maximum bits in a code
constexpr int max_l_codes = 286; // maximum number of literal/length codes
constexpr int max_d_codes = 30; // maximum number of distance codes
constexpr int fix_l_codes = 288; // number of fixed literal/length codes
constexpr int log2_len_lut = 10;
constexpr int log2_dist_lut = 8;
/**
* @brief Intermediate arrays for building huffman tables
*/
struct scratch_arr {
int16_t lengths[max_l_codes + max_d_codes]; ///< descriptor code lengths
int16_t offs[max_bits + 1]; ///< offset in symbol table for each length (scratch)
};
/**
* @brief Huffman LUTs for length and distance codes
*/
struct lut_arr {
int32_t lenlut[1 << log2_len_lut]; ///< LUT for length decoding
int32_t distlut[1 << log2_dist_lut]; ///< LUT for fast distance decoding
};
/// 4 batches of 32 symbols
constexpr int log2_batch_count = 2; // 1..5
constexpr int log2_batch_size = 5;
constexpr int batch_count = (1 << log2_batch_count);
constexpr int batch_size = (1 << log2_batch_size);
/**
* @brief Inter-warp communication queue
*/
struct xwarp_s {
int32_t batch_len[batch_count]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
union {
uint32_t symqueue[batch_count * batch_size];
uint8_t symqueue8[batch_count * batch_size * 4];
} u;
};
#define ENABLE_PREFETCH 1
#if ENABLE_PREFETCH
constexpr int log2_prefetch_size = 9; // Must be at least LOG2_BATCH_SIZE+3
constexpr int prefetch_size = (1 << log2_prefetch_size);
/// @brief Prefetcher state
struct prefetch_queue_s {
uint8_t const* cur_p; ///< Prefetch location
int run; ///< prefetcher will exit when run=0
uint8_t pref_data[prefetch_size];
};
template <typename T>
inline __device__ volatile uint32_t* prefetch_addr32(volatile prefetch_queue_s& q, T* ptr)
{
return reinterpret_cast<volatile uint32_t*>(&q.pref_data[(prefetch_size - 4) & (size_t)(ptr)]);
}
#endif // ENABLE_PREFETCH
/**
* @brief Inflate decompressor state
*/
struct inflate_state_s {
// output state
uint8_t* out; ///< output buffer
uint8_t* outbase; ///< start of output buffer
uint8_t* outend; ///< end of output buffer
// Input state
uint8_t const* cur; ///< input buffer
uint8_t const* end; ///< end of input buffer
uint2 bitbuf; ///< bit buffer (64-bit)
uint32_t bitpos; ///< position in bit buffer
int32_t err; ///< Error status
int btype; ///< current block type
int blast; ///< last block
uint32_t stored_blk_len; ///< length of stored (uncompressed) block
uint16_t first_slow_len; ///< first code not in fast LUT
uint16_t index_slow_len;
uint16_t first_slow_dist;
uint16_t index_slow_dist;
volatile xwarp_s x;
#if ENABLE_PREFETCH
volatile prefetch_queue_s pref;
#endif
int16_t lencnt[max_bits + 1];
int16_t lensym[fix_l_codes]; // Assumes fix_l_codes >= max_l_codes
int16_t distcnt[max_bits + 1];
int16_t distsym[max_d_codes];
union {
scratch_arr scratch;
lut_arr lut;
} u;
};
inline __device__ unsigned int bfe(unsigned int source,
unsigned int bit_start,
unsigned int num_bits)
{
unsigned int bits;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(bits) : "r"(source), "r"(bit_start), "r"(num_bits));
return bits;
};
inline __device__ uint32_t showbits(inflate_state_s* s, uint32_t n)
{
uint32_t next32 = __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
return (next32 & ((1 << n) - 1));
}
inline __device__ uint32_t nextbits32(inflate_state_s* s)
{
return __funnelshift_rc(s->bitbuf.x, s->bitbuf.y, s->bitpos);
}
inline __device__ void skipbits(inflate_state_s* s, uint32_t n)
{
uint32_t bitpos = s->bitpos + n;
if (bitpos >= 32) {
auto cur = s->cur + 8;
s->bitbuf.x = s->bitbuf.y;
s->bitbuf.y = (cur < s->end) ? *reinterpret_cast<uint32_t const*>(cur) : 0;
s->cur = cur - 4;
bitpos &= 0x1f;
}
s->bitpos = bitpos;
}
// TODO: If we require 4-byte alignment of input bitstream & length (padded), reading bits would
// become quite a bit faster
__device__ uint32_t getbits(inflate_state_s* s, uint32_t n)
{
uint32_t v = showbits(s, n);
skipbits(s, n);
return v;
}
/**
* @brief Decode a code from the stream s using huffman table {symbols,counts}.
* Return the symbol or a negative value if there is an error.
* If all of the lengths are zero, i.e. an empty code, or if the code is
* incomplete and an invalid code is received, then -10 is returned after
* reading max_bits bits.
*
* Format notes:
*
* - The codes as stored in the compressed data are bit-reversed relative to
* a simple integer ordering of codes of the same lengths. Hence below the
* bits are pulled from the compressed data one at a time and used to
* build the code value reversed from what is in the stream in order to
* permit simple integer comparisons for decoding. A table-based decoding
* scheme (as used in zlib) does not need to do this reversal.
*
* - The first code for the shortest length is all zeros. Subsequent codes of
* the same length are simply integer increments of the previous code. When
* moving up a length, a zero bit is appended to the code. For a complete
* code, the last code of the longest length will be all ones.
*
* - Incomplete codes are handled by this decoder, since they are permitted
* in the deflate format. See the format notes for fixed() and dynamic().
*/
__device__ int decode(inflate_state_s* s, int16_t const* counts, int16_t const* symbols)
{
unsigned int len; // current number of bits in code
unsigned int code; // len bits being decoded
unsigned int first; // first code of length len
unsigned int count; // number of codes of length len
uint32_t next32r = __brev(nextbits32(s));
first = 0;
for (len = 1; len <= max_bits; len++) {
code = (next32r >> (32 - len)) - first;
count = counts[len];
if (code < count) // if length len, return symbol
{
skipbits(s, len);
return symbols[code];
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
return -10; // ran out of codes
}
/**
* @brief Given the list of code lengths length[0..n-1] representing a canonical
* Huffman code for n symbols, construct the tables required to decode those
* codes. Those tables are the number of codes of each length, and the symbols
* sorted by length, retaining their original order within each length. The
* return value is zero for a complete code set, negative for an over-
* subscribed code set, and positive for an incomplete code set. The tables
* can be used if the return value is zero or positive, but they cannot be used
* if the return value is negative. If the return value is zero, it is not
* possible for decode() using that table to return an error--any stream of
* enough bits will resolve to a symbol. If the return value is positive, then
* it is possible for decode() using that table to return an error for received
* codes past the end of the incomplete lengths.
*
* Not used by decode(), but used for error checking, count[0] is the number
* of the n symbols not in the code. So n - count[0] is the number of
* codes. This is useful for checking for incomplete codes that have more than
* one symbol, which is an error in a dynamic block.
*
* Assumption: for all i in 0..n-1, 0 <= length[i] <= max_bits
* This is assured by the construction of the length arrays in dynamic() and
* fixed() and is not verified by construct().
*
* Format notes:
*
* - Permitted and expected examples of incomplete codes are one of the fixed
* codes and any code with a single symbol which in deflate is coded as one
* bit instead of zero bits. See the format notes for fixed() and dynamic().
*
* - Within a given code length, the symbols are kept in ascending order for
* the code bits definition.
*/
__device__ int construct(
inflate_state_s* s, int16_t* counts, int16_t* symbols, int16_t const* length, int n)
{
int symbol; // current symbol when stepping through length[]
int len; // current length when stepping through counts[]
int left; // number of possible codes left of current length
int16_t* offs = s->u.scratch.offs;
// count number of codes of each length
for (len = 0; len <= max_bits; len++)
counts[len] = 0;
for (symbol = 0; symbol < n; symbol++)
(counts[length[symbol]])++; // assumes lengths are within bounds
if (counts[0] == n) // no codes!
return 0; // complete, but decode() will fail
// check for an over-subscribed or incomplete set of lengths
left = 1; // one possible code of zero length
for (len = 1; len <= max_bits; len++) {
left <<= 1; // one more bit, double codes left
left -= counts[len]; // deduct count from possible codes
if (left < 0) return left; // over-subscribed--return negative
} // left > 0 means incomplete
// generate offsets into symbol table for each length for sorting
offs[1] = 0;
for (len = 1; len < max_bits; len++)
offs[len + 1] = offs[len] + counts[len];
// put symbols in table sorted by length, by symbol order within each length
for (symbol = 0; symbol < n; symbol++)
if (length[symbol] != 0) symbols[offs[length[symbol]]++] = symbol;
// return zero for complete set, positive for incomplete set
return left;
}
/// permutation of code length codes
static const __device__ __constant__ uint8_t g_code_order[19 + 1] = {
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15, 0xff};
/// Dynamic block (custom huffman tables)
__device__ int init_dynamic(inflate_state_s* s)
{
int nlen, ndist, ncode; /* number of lengths in descriptor */
int index; /* index of lengths[] */
int err; /* construct() return value */
int16_t* lengths = s->u.scratch.lengths;
// get number of lengths in each table, check lengths
nlen = getbits(s, 5) + 257;
ndist = getbits(s, 5) + 1;
ncode = getbits(s, 4) + 4;
if (nlen > max_l_codes || ndist > max_d_codes) {
return -3; // bad counts
}
// read code length code lengths (really), missing lengths are zero
for (index = 0; index < ncode; index++)
lengths[g_code_order[index]] = getbits(s, 3);
for (; index < 19; index++)
lengths[g_code_order[index]] = 0;
// build huffman table for code lengths codes (use lencode temporarily)
err = construct(s, s->lencnt, s->lensym, lengths, 19);
if (err != 0) // require complete code set here
return -4;
// read length/literal and distance code length tables
index = 0;
while (index < nlen + ndist) {
int symbol = decode(s, s->lencnt, s->lensym);
if (symbol < 0) return symbol; // invalid symbol
if (symbol < 16) // length in 0..15
lengths[index++] = symbol;
else { // repeat instruction
int len = 0; // last length to repeat, assume repeating zeros
if (symbol == 16) { // repeat last length 3..6 times
if (index == 0) return -5; // no last length!
len = lengths[index - 1]; // last length
symbol = 3 + getbits(s, 2);
} else if (symbol == 17) // repeat zero 3..10 times
symbol = 3 + getbits(s, 3);
else // == 18, repeat zero 11..138 times
symbol = 11 + getbits(s, 7);
if (index + symbol > nlen + ndist) return -6; // too many lengths!
while (symbol--) // repeat last or zero symbol times
lengths[index++] = len;
}
}
// check for end-of-block code -- there better be one!
if (lengths[256] == 0) return -9;
// build huffman table for literal/length codes
err = construct(s, s->lencnt, s->lensym, lengths, nlen);
if (err && (err < 0 || nlen != s->lencnt[0] + s->lencnt[1]))
return -7; // incomplete code ok only for single length 1 code
// build huffman table for distance codes
err = construct(s, s->distcnt, s->distsym, &lengths[nlen], ndist);
if (err && (err < 0 || ndist != s->distcnt[0] + s->distcnt[1]))
return -8; // incomplete code ok only for single length 1 code
return 0;
}
/**
* @brief Initializes a fixed codes block.
*
* Format notes:
*
* - This block type can be useful for compressing small amounts of data for
* which the size of the code descriptions in a dynamic block exceeds the
* benefit of custom codes for that block. For fixed codes, no bits are
* spent on code descriptions. Instead the code lengths for literal/length
* codes and distance codes are fixed. The specific lengths for each symbol
* can be seen in the "for" loops below.
*
* - The literal/length code is complete, but has two symbols that are invalid
* and should result in an error if received. This cannot be implemented
* simply as an incomplete code since those two symbols are in the "middle"
 * of the code. They are eight bits long and the longest literal/length
* code is nine bits. Therefore the code must be constructed with those
* symbols, and the invalid symbols must be detected after decoding.
*
* - The fixed distance codes also have two invalid symbols that should result
* in an error if received. Since all of the distance codes are the same
* length, this can be implemented as an incomplete code. Then the invalid
* codes are detected while decoding.
*/
__device__ int init_fixed(inflate_state_s* s)
{
int16_t* lengths = s->u.scratch.lengths;
int symbol;
// literal/length table
for (symbol = 0; symbol < 144; symbol++)
lengths[symbol] = 8;
for (; symbol < 256; symbol++)
lengths[symbol] = 9;
for (; symbol < 280; symbol++)
lengths[symbol] = 7;
for (; symbol < fix_l_codes; symbol++)
lengths[symbol] = 8;
construct(s, s->lencnt, s->lensym, lengths, fix_l_codes);
// distance table
for (symbol = 0; symbol < max_d_codes; symbol++)
lengths[symbol] = 5;
// build huffman table for distance codes
construct(s, s->distcnt, s->distsym, lengths, max_d_codes);
return 0;
}
/**
* @brief Decode literal/length and distance codes until an end-of-block code.
*
* Format notes:
*
* - Compressed data that is after the block type if fixed or after the code
* description if dynamic is a combination of literals and length/distance
 * pairs terminated by an end-of-block code. Literals are simply Huffman
* coded bytes. A length/distance pair is a coded length followed by a
* coded distance to represent a string that occurs earlier in the
* uncompressed data that occurs again at the current location.
*
* - Literals, lengths, and the end-of-block code are combined into a single
* code of up to 286 symbols. They are 256 literals (0..255), 29 length
* symbols (257..285), and the end-of-block symbol (256).
*
* - There are 256 possible lengths (3..258), and so 29 symbols are not enough
* to represent all of those. Lengths 3..10 and 258 are in fact represented
* by just a length symbol. Lengths 11..257 are represented as a symbol and
* some number of extra bits that are added as an integer to the base length
* of the length symbol. The number of extra bits is determined by the base
* length symbol. These are in the static arrays below, lens[] for the base
* lengths and lext[] for the corresponding number of extra bits.
*
* - The reason that 258 gets its own symbol is that the longest length is used
* often in highly redundant files. Note that 258 can also be coded as the
* base value 227 plus the maximum extra value of 31. While a good deflate
* should never do this, it is not an error, and should be decoded properly.
*
* - If a length is decoded, including its extra bits if any, then it is
 * followed by a distance code. There are up to 30 distance symbols. Again
* there are many more possible distances (1..32768), so extra bits are added
* to a base value represented by the symbol. The distances 1..4 get their
* own symbol, but the rest require extra bits. The base distances and
* corresponding number of extra bits are below in the static arrays dist[]
* and dext[].
*
* - Literal bytes are simply written to the output. A length/distance pair is
* an instruction to copy previously uncompressed bytes to the output. The
* copy is from distance bytes back in the output stream, copying for length
* bytes.
*
* - Distances pointing before the beginning of the output data are not
* permitted.
*
* - Overlapped copies, where the length is greater than the distance, are
* allowed and common. For example, a distance of one and a length of 258
* simply copies the last byte 258 times. A distance of four and a length of
* twelve copies the last four bytes three times. A simple forward copy
* ignoring whether the length is greater than the distance or not implements
* this correctly. You should not use memcpy() since its behavior is not
* defined for overlapped arrays. You should not use memmove() or bcopy()
* since though their behavior -is- defined for overlapping arrays, it is
* defined to do the wrong thing in this case.
*/
/// Size bases and extra-bit counts for length and distance codes (RFC 1951)
static const __device__ __constant__ uint16_t g_lens[29] = { // Size base for length codes 257..285
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27,
31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258};
static const __device__ __constant__ uint16_t
g_lext[29] = { // Extra bits for length codes 257..285
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0};
static const __device__ __constant__ uint16_t
g_dists[30] = { // Offset base for distance codes 0..29
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129,
193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577};
static const __device__ __constant__ uint16_t g_dext[30] = { // Extra bits for distance codes 0..29
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
/// @brief Thread 0 only: decode bitstreams and output symbols into the symbol queue
__device__ void decode_symbols(inflate_state_s* s)
{
uint32_t bitpos = s->bitpos;
uint2 bitbuf = s->bitbuf;
auto cur = s->cur;
auto end = s->end;
int32_t batch = 0;
int32_t sym, batch_len;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
// Wait for the next batch entry to be empty
#if ENABLE_PREFETCH
// Wait for prefetcher to fetch a worst-case of 48 bits per symbol
while ((*(volatile int32_t*)&s->pref.cur_p - (int32_t)(size_t)cur < batch_size * 6) ||
(s->x.batch_len[batch] != 0)) {}
#else
while (s->x.batch_len[batch] != 0) {}
#endif
batch_len = 0;
#if ENABLE_PREFETCH
if (cur + (bitpos >> 3) >= end) {
s->err = 1;
break;
}
#endif
// Inner loop decoding symbols
do {
uint32_t next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
uint32_t len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
if ((uint32_t)sym < (uint32_t)(0x100 << 5)) {
// We can lookup a second symbol if this was a short literal
len = sym & 0x1f;
sym >>= 5;
b[batch_len++] = sym;
next32 >>= len;
bitpos += len;
sym = s->u.lut.lenlut[next32 & ((1 << log2_len_lut) - 1)];
}
if (sym > 0) // short symbol
{
len = sym & 0x1f;
sym = ((sym >> 5) & 0x3ff) + ((next32 >> (sym >> 24)) & ((sym >> 16) & 0x1f));
} else {
// Slow length path
uint32_t next32r = __brev(next32);
int16_t const* symbols = &s->lensym[s->index_slow_len];
unsigned int first = s->first_slow_len;
int lext;
#pragma unroll 1
for (len = log2_len_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->lencnt[len];
if (code < count) // if length len, return symbol
{
sym = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
}
if (sym > 256) {
sym -= 257;
lext = g_lext[sym];
sym = 256 + g_lens[sym] + bfe(next32, len, lext);
len += lext;
}
}
if (sym > 256) {
int dist, dext;
// skipbits(s, len) inlined - no limit check
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
bitbuf.y = (cur < end) ? *(uint32_t const*)cur : 0;
cur -= 4;
#endif
bitpos &= 0x1f;
}
// get distance
next32 = __funnelshift_rc(bitbuf.x, bitbuf.y, bitpos); // nextbits32(s);
dist = s->u.lut.distlut[next32 & ((1 << log2_dist_lut) - 1)];
if (dist > 0) {
len = dist & 0x1f;
dext = bfe(dist, 20, 5);
dist = bfe(dist, 5, 15);
sym |= (dist + bfe(next32, len, dext)) << 16;
len += dext;
} else {
uint32_t next32r = __brev(next32);
int16_t const* symbols = &s->distsym[s->index_slow_dist];
unsigned int first = s->first_slow_dist;
#pragma unroll 1
for (len = log2_dist_lut + 1; len <= max_bits; len++) {
unsigned int code = (next32r >> (32 - len)) - first;
unsigned int count = s->distcnt[len];
if (code < count) // if length len, return symbol
{
dist = symbols[code];
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
if (len > max_bits) {
s->err = -10;
sym = 256;
len = 0;
} else {
dext = g_dext[dist];
sym |= (g_dists[dist] + bfe(next32, len, dext)) << 16;
len += dext;
}
}
}
// skipbits(s, len) inlined with added error check for reading past the end of the input
// buffer
bitpos += len;
if (bitpos >= 32) {
bitbuf.x = bitbuf.y;
#if ENABLE_PREFETCH
bitbuf.y = *prefetch_addr32(s->pref, cur + 8);
cur += 4;
#else
cur += 8;
if (cur < end) {
bitbuf.y = *(uint32_t const*)cur;
cur -= 4;
} else {
bitbuf.y = 0;
cur -= 4;
if (cur > end) {
s->err = 1;
sym = 256;
}
}
#endif
bitpos &= 0x1f;
}
if (sym == 256) break;
b[batch_len++] = sym;
} while (batch_len < batch_size - 1);
s->x.batch_len[batch] = batch_len;
#if ENABLE_PREFETCH
((volatile inflate_state_s*)s)->cur = cur;
#endif
if (batch_len != 0) batch = (batch + 1) & (batch_count - 1);
} while (sym != 256);
while (s->x.batch_len[batch] != 0) {}
s->x.batch_len[batch] = -1;
s->bitbuf = bitbuf;
s->bitpos = bitpos;
#if !ENABLE_PREFETCH
s->cur = cur;
#endif
}
/**
* @brief Build lookup tables for faster decode
 * LUT format is (symbol << 5) | length, i.e. the decoded symbol in the upper bits and the code length in the low 5 bits
*/
__device__ void init_length_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.lenlut;
for (uint32_t bits = t; bits < (1 << log2_len_lut); bits += blockDim.x) {
int16_t const* cnt = s->lencnt;
int16_t const* symbols = s->lensym;
int sym = -10 << 5;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_len_lut);
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int code = (rbits >> (log2_len_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
sym = symbols[code];
if (sym > 256) {
int lext = g_lext[sym - 257];
sym = (256 + g_lens[sym - 257]) | (((1 << lext) - 1) << (16 - 5)) | (len << (24 - 5));
len += lext;
}
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
int16_t const* cnt = s->lencnt;
for (unsigned int len = 1; len <= log2_len_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_len = first;
s->index_slow_len = index;
}
}
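// Illustrative sketch (hypothetical helper, nothing in this file calls it): unpacking a
// literal entry of the length LUT built above. The low 5 bits hold the number of code bits
// consumed and the remaining bits hold the decoded symbol; length codes additionally pack
// their extra-bit mask and bit position in the upper bits, as consumed in decode_symbols().
__device__ inline void unpack_literal_lut_entry(int32_t entry, int& symbol, int& nbits)
{
  nbits  = entry & 0x1f;
  symbol = entry >> 5;
}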
/**
 * @brief Build lookup tables for faster decode of distance symbols
 * LUT format is (base_distance | (extra_bit_count << 15)) << 5 | length, i.e. the code length in the low 5 bits
*/
__device__ void init_distance_lut(inflate_state_s* s, int t)
{
int32_t* lut = s->u.lut.distlut;
for (uint32_t bits = t; bits < (1 << log2_dist_lut); bits += blockDim.x) {
int16_t const* cnt = s->distcnt;
int16_t const* symbols = s->distsym;
int sym = 0;
unsigned int first = 0;
unsigned int rbits = __brev(bits) >> (32 - log2_dist_lut);
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int code = (rbits >> (log2_dist_lut - len)) - first;
unsigned int count = cnt[len];
if (code < count) {
int dist = symbols[code];
int dext = g_dext[dist];
sym = g_dists[dist] | (dext << 15);
sym = (sym << 5) | len;
break;
}
symbols += count; // else update for next length
first += count;
first <<= 1;
}
lut[bits] = sym;
}
if (!t) {
unsigned int first = 0;
unsigned int index = 0;
int16_t const* cnt = s->distcnt;
for (unsigned int len = 1; len <= log2_dist_lut; len++) {
unsigned int count = cnt[len];
index += count;
first += count;
first <<= 1;
}
s->first_slow_dist = first;
s->index_slow_dist = index;
}
}
/// @brief WARP1: process symbols and output uncompressed stream
__device__ void process_symbols(inflate_state_s* s, int t)
{
uint8_t* out = s->out;
uint8_t const* outend = s->outend;
uint8_t const* outbase = s->outbase;
int batch = 0;
do {
volatile uint32_t* b = &s->x.u.symqueue[batch * batch_size];
int batch_len = 0;
if (t == 0) {
while ((batch_len = s->x.batch_len[batch]) == 0) {}
}
batch_len = shuffle(batch_len);
if (batch_len < 0) { break; }
auto const symt = (t < batch_len) ? b[t] : 256;
auto const lit_mask = ballot(symt >= 256);
auto pos = min((__ffs(lit_mask) - 1) & 0xff, 32);
if (t == 0) { s->x.batch_len[batch] = 0; }
if (t < pos && out + t < outend) { out[t] = symt; }
out += pos;
batch_len -= pos;
while (batch_len > 0) {
int dist, len, symbol;
// Process a non-literal symbol
symbol = shuffle(symt, pos);
len = max((symbol & 0xffff) - 256, 0); // max should be unnecessary, but just in case
dist = symbol >> 16;
for (int i = t; i < len; i += 32) {
uint8_t const* src = out + ((i >= dist) ? (i % dist) : i) - dist;
uint8_t b = (src < outbase) ? 0 : *src;
if (out + i < outend) { out[i] = b; }
}
out += len;
pos++;
batch_len--;
// Process subsequent literals, if any
if (!((lit_mask >> pos) & 1)) {
len = min((__ffs(lit_mask >> pos) - 1) & 0xff, batch_len);
symbol = shuffle(symt, (pos + t) & 0x1f);
if (t < len && out + t < outend) { out[t] = symbol; }
out += len;
pos += len;
batch_len -= len;
}
}
batch = (batch + 1) & (batch_count - 1);
} while (true);
if (t == 0) { s->out = out; }
}
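// Illustrative sketch (hypothetical helper, nothing in this file calls it): the sequential
// equivalent of the warp-parallel match copy in process_symbols() above. When dist < len the
// source range overlaps the bytes being written, which is why the parallel loop indexes the
// source as i % dist; a byte-at-a-time copy handles the overlap naturally. Unlike the kernel,
// this sketch performs no output bounds checks.
__device__ inline void lz77_copy_scalar(uint8_t* out, int dist, int len)
{
  for (int i = 0; i < len; ++i) {
    out[i] = out[i - dist];
  }
}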
/**
* @brief Initializes a stored block.
*
* Format notes:
*
* - After the two-bit stored block type (00), the stored block length and
* stored bytes are byte-aligned for fast copying. Therefore any leftover
* bits in the byte that has the last bit of the type, as many as seven, are
* discarded. The value of the discarded bits are not defined and should not
* be checked against any expectation.
*
* - The second inverted copy of the stored block length does not have to be
* checked, but it's probably a good idea to do so anyway.
*
* - A stored block can have zero length. This is sometimes used to byte-align
* subsets of the compressed data for random access or partial recovery.
*/
__device__ int init_stored(inflate_state_s* s)
{
uint32_t len, nlen; // length of stored block
// Byte align
if (s->bitpos & 7) { skipbits(s, 8 - (s->bitpos & 7)); }
if (s->cur + (s->bitpos >> 3) >= s->end) {
return 2; // Not enough input
}
// get length and check against its one's complement
len = getbits(s, 16);
nlen = getbits(s, 16);
if (len != (nlen ^ 0xffff)) {
return -2; // didn't match complement!
}
if (s->cur + (s->bitpos >> 3) + len > s->end) {
return 2; // Not enough input
}
s->stored_blk_len = len;
// done with a valid stored block
return 0;
}
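// Illustrative sketch (hypothetical helper, nothing in this file calls it): the LEN/NLEN
// check described above, applied to the four bytes that follow the byte-aligned block type.
// For example, the bytes {0x05, 0x00, 0xfa, 0xff} encode LEN = 5 and NLEN = 0xfffa, and
// 5 == (0xfffa ^ 0xffff), so the block header is valid.
__device__ inline bool stored_len_fields_ok(uint8_t const* p)
{
  uint32_t len  = p[0] | (p[1] << 8);
  uint32_t nlen = p[2] | (p[3] << 8);
  return len == (nlen ^ 0xffff);
}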
/// Copy bytes from stored block to destination
__device__ void copy_stored(inflate_state_s* s, int t)
{
auto len = s->stored_blk_len;
auto cur = s->cur + s->bitpos / 8;
auto out = s->out;
auto outend = s->outend;
auto const slow_bytes = min(len, (int)((16 - reinterpret_cast<size_t>(out)) % 16));
// Slow copy until output is 16B aligned
if (slow_bytes) {
for (int i = t; i < slow_bytes; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
cur += slow_bytes;
out += slow_bytes;
len -= slow_bytes;
}
auto fast_bytes = len;
if (out < outend) { fast_bytes = (int)min((size_t)fast_bytes, (outend - out)); }
fast_bytes &= ~0xf;
auto bitpos = ((int)((size_t)cur % 4)) * 8;
auto cur4 = cur - (bitpos / 8);
if (out < outend) {
// Fast copy 16 bytes at a time
for (int i = t * 16; i < fast_bytes; i += blockDim.x * 16) {
uint4 u;
u.x = *reinterpret_cast<uint32_t const*>(cur4 + i + 0 * 4);
u.y = *reinterpret_cast<uint32_t const*>(cur4 + i + 1 * 4);
u.z = *reinterpret_cast<uint32_t const*>(cur4 + i + 2 * 4);
u.w = *reinterpret_cast<uint32_t const*>(cur4 + i + 3 * 4);
if (bitpos != 0) {
uint32_t v = (bitpos != 0) ? *reinterpret_cast<uint32_t const*>(cur4 + i + 4 * 4) : 0;
u.x = __funnelshift_rc(u.x, u.y, bitpos);
u.y = __funnelshift_rc(u.y, u.z, bitpos);
u.z = __funnelshift_rc(u.z, u.w, bitpos);
u.w = __funnelshift_rc(u.w, v, bitpos);
}
*reinterpret_cast<uint4*>(out + i) = u;
}
}
cur += fast_bytes;
out += fast_bytes;
len -= fast_bytes;
// Slow copy for remaining bytes
for (int i = t; i < len; i += blockDim.x) {
if (out + i < outend) {
out[i] = cur[i]; // Input range has already been validated in init_stored()
}
}
out += len;
__syncthreads();
if (t == 0) {
// Reset bitstream to end of block
auto p = cur + len;
auto prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
s->cur = p;
s->bitbuf.x = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
s->bitbuf.y = (p < s->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
s->bitpos = prefix_bytes * 8;
s->out = out;
}
}
#if ENABLE_PREFETCH
__device__ void init_prefetcher(inflate_state_s* s, int t)
{
if (t == 0) {
s->pref.cur_p = s->cur;
s->pref.run = 1;
}
}
__device__ void prefetch_warp(volatile inflate_state_s* s, int t)
{
uint8_t const* cur_p = s->pref.cur_p;
uint8_t const* end = s->end;
while (shuffle((t == 0) ? s->pref.run : 0)) {
auto cur_lo = (int32_t)(size_t)cur_p;
int do_pref =
shuffle((t == 0) ? (cur_lo - *(volatile int32_t*)&s->cur < prefetch_size - 32 * 4 - 4) : 0);
if (do_pref) {
uint8_t const* p = cur_p + 4 * t;
*prefetch_addr32(s->pref, p) = (p < end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
cur_p += 4 * 32;
__threadfence_block();
__syncwarp();
if (!t) {
s->pref.cur_p = cur_p;
__threadfence_block();
}
}
}
}
#endif // ENABLE_PREFETCH
/**
* @brief Parse GZIP header
* See https://tools.ietf.org/html/rfc1952
*/
__device__ int parse_gzip_header(uint8_t const* src, size_t src_size)
{
int hdr_len = -1;
if (src_size >= 18) {
uint32_t sig = (src[0] << 16) | (src[1] << 8) | src[2];
if (sig == 0x1f'8b08) // 24-bit GZIP inflate signature {0x1f, 0x8b, 0x08}
{
uint8_t flags = src[3];
hdr_len = 10;
if (flags & GZIPHeaderFlag::fextra) // Extra fields present
{
int xlen = src[hdr_len] | (src[hdr_len + 1] << 8);
hdr_len += xlen;
if (hdr_len >= src_size) return -1;
}
if (flags & GZIPHeaderFlag::fname) // Original file name present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fcomment) // Comment present
{
// Skip zero-terminated string
do {
if (hdr_len >= src_size) return -1;
} while (src[hdr_len++] != 0);
}
if (flags & GZIPHeaderFlag::fhcrc) // Header CRC present
{
hdr_len += 2;
}
if (hdr_len + 8 >= src_size) hdr_len = -1;
}
}
return hdr_len;
}
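// Illustrative sketch (hypothetical constant, nothing in this file uses it): the smallest
// GZIP member header per RFC 1952 -- magic bytes 0x1f 0x8b, deflate method 0x08, no flags,
// zero mtime, default XFL, OS = unknown (0xff). For an input starting with these ten bytes,
// parse_gzip_header() above is expected to return 10, provided src_size also covers at least
// one byte of deflate data plus the 8-byte footer (i.e. src_size > 18).
constexpr uint8_t minimal_gzip_header[10] = {
  0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff};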
/**
* @brief INFLATE decompression kernel
*
* blockDim {block_size,1,1}
*
* @tparam block_size Thread block dimension for this call
 * @param inputs Source buffer information per block
* @param outputs Destination buffer information per block
* @param results Decompression status buffer per block
 * @param parse_hdr Set to YES if each compressed bitstream begins with a GZIP header
*/
template <int block_size>
__global__ void __launch_bounds__(block_size)
inflate_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
gzip_header_included parse_hdr)
{
__shared__ __align__(16) inflate_state_s state_g;
int t = threadIdx.x;
int z = blockIdx.x;
inflate_state_s* state = &state_g;
if (!t) {
auto p = inputs[z].data();
auto src_size = inputs[z].size();
// Parse header if needed
state->err = 0;
if (parse_hdr == gzip_header_included::YES) {
int hdr_len = parse_gzip_header(p, src_size);
src_size = (src_size >= 8) ? src_size - 8 : 0; // ignore footer
if (hdr_len >= 0) {
p += hdr_len;
src_size -= hdr_len;
} else {
state->err = hdr_len;
}
}
// Initialize shared state
state->out = outputs[z].data();
state->outbase = state->out;
state->outend = state->out + outputs[z].size();
state->end = p + src_size;
auto const prefix_bytes = (uint32_t)(((size_t)p) & 3);
p -= prefix_bytes;
state->cur = p;
state->bitbuf.x = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
p += 4;
state->bitbuf.y = (p < state->end) ? *reinterpret_cast<uint32_t const*>(p) : 0;
state->bitpos = prefix_bytes * 8;
}
__syncthreads();
// Main loop decoding blocks
while (!state->err) {
if (!t) {
// Thread0: read last flag, block type and custom huffman tables if any
if (state->cur + (state->bitpos >> 3) >= state->end)
state->err = 2;
else {
state->blast = getbits(state, 1);
state->btype = getbits(state, 2);
if (state->btype == 0)
state->err = init_stored(state);
else if (state->btype == 1)
state->err = init_fixed(state);
else if (state->btype == 2)
state->err = init_dynamic(state);
else
state->err = -1; // Invalid block
}
}
__syncthreads();
if (!state->err && (state->btype == 1 || state->btype == 2)) {
// Initializes lookup tables (block wide)
init_length_lut(state, t);
init_distance_lut(state, t);
#if ENABLE_PREFETCH
// Initialize prefetcher
init_prefetcher(state, t);
#endif
if (t < batch_count) { state->x.batch_len[t] = 0; }
__syncthreads();
// decode data until end-of-block code
if (t < 1 * 32) {
// WARP0: decode variable-length symbols
if (!t) {
// Thread0: decode symbols (single threaded)
decode_symbols(state);
#if ENABLE_PREFETCH
state->pref.run = 0;
#endif
}
} else if (t < 2 * 32) {
// WARP1: perform LZ77 using length and distance codes from WARP0
process_symbols(state, t & 0x1f);
}
#if ENABLE_PREFETCH
else if (t < 3 * 32) {
// WARP2: Prefetcher: prefetch data for WARP0
prefetch_warp(state, t & 0x1f);
}
#endif
// else WARP3: idle
} else if (!state->err && state->btype == 0) {
// Uncompressed block (block-wide memcpy)
copy_stored(state, t);
}
if (state->blast) break;
__syncthreads();
}
__syncthreads();
// Output decompression status and length
if (!t) {
if (state->err == 0 && state->cur + ((state->bitpos + 7) >> 3) > state->end) {
// Read past the end of the input buffer
state->err = 2;
} else if (state->err == 0 && state->out > state->outend) {
// Output buffer too small
state->err = 1;
}
results[z].bytes_written = state->out - state->outbase;
results[z].status = [&]() {
switch (state->err) {
case 0: return compression_status::SUCCESS;
case 1: return compression_status::OUTPUT_OVERFLOW;
default: return compression_status::FAILURE;
}
}();
results[z].reserved = (int)(state->end - state->cur); // Here mainly for debug purposes
}
}
/**
* @brief Copy a group of buffers
*
* blockDim {1024,1,1}
*
 * @param inputs Source buffer information per block
 * @param outputs Destination buffer information per block
*/
__global__ void __launch_bounds__(1024)
copy_uncompressed_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs)
{
__shared__ uint8_t const* volatile src_g;
__shared__ uint8_t* volatile dst_g;
__shared__ uint32_t volatile copy_len_g;
uint32_t t = threadIdx.x;
uint32_t z = blockIdx.x;
uint8_t const* src;
uint8_t* dst;
uint32_t len, src_align_bytes, src_align_bits, dst_align_bytes;
if (!t) {
src = inputs[z].data();
dst = outputs[z].data();
len = static_cast<uint32_t>(min(inputs[z].size(), outputs[z].size()));
src_g = src;
dst_g = dst;
copy_len_g = len;
}
__syncthreads();
src = src_g;
dst = dst_g;
len = copy_len_g;
// Align output to 32-bit
dst_align_bytes = 3 & -reinterpret_cast<intptr_t>(dst);
if (dst_align_bytes != 0) {
uint32_t align_len = min(dst_align_bytes, len);
if (t < align_len) { dst[t] = src[t]; }
src += align_len;
dst += align_len;
len -= align_len;
}
src_align_bytes = (uint32_t)(3 & reinterpret_cast<uintptr_t>(src));
src_align_bits = src_align_bytes << 3;
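  // The source may not be 4-byte aligned: the loop below reads aligned 32-bit words and
  // funnel-shifts adjacent words by src_align_bits to reassemble the unaligned byte stream.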
while (len >= 32) {
auto const* src32 = reinterpret_cast<uint32_t const*>(src - src_align_bytes);
uint32_t copy_cnt = min(len >> 2, 1024);
if (t < copy_cnt) {
uint32_t v = src32[t];
if (src_align_bits != 0) { v = __funnelshift_r(v, src32[t + 1], src_align_bits); }
reinterpret_cast<uint32_t*>(dst)[t] = v;
}
src += copy_cnt * 4;
dst += copy_cnt * 4;
len -= copy_cnt * 4;
}
if (t < len) { dst[t] = src[t]; }
}
void gpuinflate(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
gzip_header_included parse_hdr,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 128; // Threads per block
if (inputs.size() > 0) {
inflate_kernel<block_size>
<<<inputs.size(), block_size, 0, stream.value()>>>(inputs, outputs, results, parse_hdr);
}
}
void gpu_copy_uncompressed_blocks(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
rmm::cuda_stream_view stream)
{
if (inputs.size() > 0) {
copy_uncompressed_kernel<<<inputs.size(), 1024, 0, stream.value()>>>(inputs, outputs);
}
}
} // namespace io
} // namespace cudf
|
contraction_csr_sum.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph_contracting_visitor.hxx>
namespace nvgraph
{
//------------------------- Graph Contraction: ----------------------
//
CsrGraph<int>* contract_graph_csr_sum(CsrGraph<int>& graph,
int* pV, size_t n,
hipStream_t stream,
const int& VCombine,
const int& VReduce,
const int& ECombine,
const int& EReduce)
{
return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream,
static_cast<SemiRingFunctorTypes>(VCombine),
static_cast<SemiRingFunctorTypes>(VReduce),
static_cast<SemiRingFunctorTypes>(ECombine),
static_cast<SemiRingFunctorTypes>(EReduce));
}
}
| contraction_csr_sum.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <graph_contracting_visitor.hxx>
namespace nvgraph
{
//------------------------- Graph Contraction: ----------------------
//
CsrGraph<int>* contract_graph_csr_sum(CsrGraph<int>& graph,
int* pV, size_t n,
cudaStream_t stream,
const int& VCombine,
const int& VReduce,
const int& ECombine,
const int& EReduce)
{
return contract_from_aggregates_t<int, double, SemiRingFctrSelector<Sum, double>::FctrType >(graph, pV, n, stream,
static_cast<SemiRingFunctorTypes>(VCombine),
static_cast<SemiRingFunctorTypes>(VReduce),
static_cast<SemiRingFunctorTypes>(ECombine),
static_cast<SemiRingFunctorTypes>(EReduce));
}
}
|
7429bf8f9727adafcc060e91958750b03dee4f60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudamat_kernels.cuh"
#include "float.h"
template<int NUM_THREADS>
__device__ void reduceToMax(float* sdata, unsigned int tid){
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMax32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]);
}
}
template __device__ void reduceToMax<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
template<int NUM_THREADS>
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = mySum + smem[tid + 16];
smem[tid] = mySum = mySum + smem[tid + 8];
smem[tid] = mySum = mySum + smem[tid + 4];
smem[tid] = mySum = mySum + smem[tid + 2];
smem[tid] = mySum = mySum + smem[tid + 1];
}
}
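// Illustrative sketch (hypothetical kernel, nothing in this file launches it): how the
// block-wide reductions above are typically driven. Each thread loads one element into
// shared memory, the helper reduces in place, and thread 0 ends up with the block result
// in sdata[0]. Assumes a launch with 256 threads per block and gridDim.x * 256 input elements.
__global__ void kMaxExampleSketch(float* data, float* block_max) {
    __shared__ float sdata[256];
    const unsigned int tid = threadIdx.x;
    sdata[tid] = data[blockIdx.x * 256 + tid];
    reduceToMax<256>(sdata, tid); // block maximum is left in sdata[0]
    if (tid == 0) block_max[blockIdx.x] = sdata[0];
}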
/*
* tanh is predefined in CUDA.
__device__ inline float tanh(float x) {
return (1.0f - __expf(-2.0f * x)) / (1.0f + __expf(-2.0f * x));
}
*/
__device__ inline float relu(float x) {
return ((x > 0) ? x : 0);
}
__device__ inline float deriv_of_relu(float y) {
return ((y > 0) ? 1 : 0);
}
__device__ inline float sigmoid(float x) {
return 1.0f / (1.0f + __expf(-x));
}
__device__ inline float deriv_of_sigmoid(float y) {
return y * (1 - y);
}
__device__ inline float deriv_of_tanh(float y) {
return 1 - y*y;
}
template __device__ void reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
/* ------------------------- Random number generation ------------------------- */
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
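// Illustrative sketch (hypothetical helper, nothing in this file calls it): a single step of
// the multiply-with-carry generator used by the random-number kernels in this file. The low
// 32 bits of rndWord hold the current value and the high 32 bits hold the carry; LOW_BITS and
// HIGH_BITS are the macros already used above.
__device__ inline unsigned long long mwc_step(unsigned long long rndWord, unsigned int rndMult) {
    return rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}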
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
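// Illustrative sketch (hypothetical helper, nothing in this file calls it): the Box-Muller
// transform used by kRandomGaussian above. Two independent uniforms in (0, 1] are mapped to
// two independent standard normal samples; PI is the constant already used by these kernels.
__device__ inline void box_muller(float u1, float u2, float &n1, float &n2) {
    float R = sqrtf(-2 * __logf(u1));
    float T = 2 * PI * u2;
    n1 = R * __cosf(T);
    n2 = R * __sinf(T);
}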
__global__ void kRandomGaussianDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] *= 1 + scale * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] *= 1 + scale * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob) ? (scale * gData[i]) : val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
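    // The +1 column of padding above avoids shared-memory bank conflicts when the tile is
    // read back column-wise for the transposed write below.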
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
__global__ void kTransposeBig(float *odata, float *idata, int height, int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int r, c;
for (unsigned int i = idx; i < width * height; i += numThreads) {
r = i % width;
c = i / width;
odata[i] = idata[height * r + c];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i];
}
__global__ void kLessThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] <= mat2[i];
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val;
}
__global__ void kLessThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] <= val;
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i];
}
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val;
}
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val;
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val:mat[i];
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val ? val:mat[i];
}
__global__ void kUpperBoundModScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val : (mat[i] < -val ? -val : mat[i]);
}
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
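    // data/indptr/indices hold the m x k sparse matrix in CSR form: row r owns the nonzeros
    // data[indptr[r] .. indptr[r+1]-1] and their column indices indices[indptr[r] .. indptr[r+1]-1].
    // dense_data is a k x n dense matrix and target an m x n matrix, both stored column-major.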
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] ? copysignf(1., mat[i]) : 0;
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __cosf(mat[i]);
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sigmoid(mat[i]);
}
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __logf(mat[i] + tiny);
}
__global__ void kSquashRelu(float* mat, float* target, unsigned int len, float lambda) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 2 / (1 + __expf(-lambda * mat[i])) - 1;
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __expf(mat[i]);
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrt(mat[i]);
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]);
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = -mat[i] * __logf(p[i] + tiny);
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. / mat[i];
}
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddToEachPixel(float* mat1, float* mat2, float* tgtMat, float mult, unsigned int width, unsigned int height, unsigned int num_pix) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat1[i] + mult * mat2[i % height + height * (i / (height * num_pix))];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = a[i] + ((b[i] > 0) ? mult : ((b[i] < 0) ? -mult : 0));
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
} else {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scale_targets * dest[i] + a[i] * b[i];
}
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
// target[i] < 0 means don't care.
__global__ void kLogisticGrad(float* mat, float* targets, float* out_grad, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
out_grad[i] = (targets[i] < 0) ? 0 : (mat[i] - targets[i]);
}
}
__global__ void kLogisticCorrectNormalized(float* mat, float* targets, float* out, unsigned int height, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < height) {
float correct = 0;
float total = 0;
float p, t;
for (int i = idx; i < width * height; i += height) {
p = mat[i];
t = targets[i];
correct += (t < 0) ? 0 : (((t >= 0.5 && p >= 0.5) || (t < 0.5 && p < 0.5)) ? 1: 0);
total += (t < 0) ? 0 : 1;
__syncthreads();
}
out[idx] = (total > 0) ? (correct / total) : 0;
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]);
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
} else {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = scale_targets * dest[i] + alpha * mat[i];
}
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
}
__global__ void kShuffleColumns(float* source, float* target, float* indices, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp1, temp2;
unsigned int column, row, pos1, pos2;
for (unsigned int i = idx; i < height * ((width+1) / 2); i += numThreads) {
column = 2 * (i / height);
row = i % height;
if (column + 1 >= width) {
pos1 = height * (int)indices[column] + row;
target[pos1] = source[pos1];
} else {
pos1 = height * (int)indices[column] + row;
pos2 = height * (int)indices[column + 1] + row;
temp1 = source[pos1];
temp2 = source[pos2];
target[pos2] = temp1;
target[pos1] = temp2;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = targetRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
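// Gradient of softmax + cross-entropy, column-major: each column of mat is the predicted
// distribution for one case and labels gives its correct class, so target = mat - one_hot(label).
// kSoftMaxGradRowMajor below is the same with cases along rows.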
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i / height] == i % height ? 1 : 0);
}
}
__global__ void kSoftMaxGradRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i % height] == i / height ? 1 : 0);
}
}
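// Quadratic hinge loss gradient, one thread per case (row): every wrong class gets
// max(0, margin + score - correct_score) and the correct class gets minus their sum.
// kHingeLinearRowMajor below uses a 0/1 gradient instead of the linear ramp.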
__global__ void kHingeQuadraticRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? diff : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kHingeLinearRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? 1 : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
}
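// Per-case cross-entropy: target[case] = -log(probability of the labelled class + tiny).
// Column-major (one case per column); the RowMajor variant below indexes cases along rows.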
__global__ void kSoftMaxCrossEntropy(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
target[i] = -__logf(mat[height * i + (int)labels[i]] + tiny);
}
}
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
}
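// One block per case: block-level argmax over 32-entry shared buffers, then target is 1 if
// the argmax matches the label and 0 otherwise. The RowMajor and CLS variants below do the
// same for row-major cases and for labels looked up through an index array.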
__global__ void kSoftMaxCorrect(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = (cur_argmax == (int)labels[column]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == (int)labels[row]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == labels[(int)indices[row]]) ? 1 : 0;
}
}
}
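// Numerically stable column-wise softmax: subtract the column max, exponentiate, and
// normalize by the column sum. A minimal host-side launch sketch (an assumption, not part
// of this file): one block per column, exactly 32 threads and 32 floats of dynamic shared
// memory, since reduceToMax32/reduceToSumLocal32 reduce exactly 32 shared values, e.g.
//   kSoftMax<<<grid, 32, 32 * sizeof(float)>>>(mat, target, width, height);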
__global__ void kSoftMax(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val += __expf(cur_data[i]-cur_max);
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
float *cur_target = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] = __expf(cur_data[i]-cur_max) / norm ;
}
}
}
__global__ void kSoftMaxOverwrite(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] = __expf(cur_data[i]-cur_max);
val += cur_data[i];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] /= norm;
}
}
}
__global__ void kSoftMaxRowMajor(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_data[i * height] = __expf(cur_data[i * height]-cur_max);
val += cur_data[i * height];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_data[i * height] /= norm;
}
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] += 1;
}
}
}
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
      target_data[i] = 0; // zero this column of the target; writing target[i] would only ever touch the first column
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] = 1;
}
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = cur_argmax;
}
}
}
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
__global__ void kSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
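// Each block sums its own contiguous chunk (the first left_over blocks take one extra
// element) and writes a single partial sum to target[block]; the per-block partials still
// need to be combined by the caller.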
__global__ void kSumAll(float* mat, float* target, unsigned int len, unsigned int len_per_block, unsigned int left_over) {
extern __shared__ float sum_vals[];
float cur_sum = 0;
int block_id = blockIdx.x;
mat += block_id * len_per_block + (block_id < left_over ? block_id : left_over);
int l = len_per_block + (block_id < left_over ? 1 : 0);
__syncthreads();
for (unsigned int i = threadIdx.x; i < l; i += blockDim.x) {
cur_sum += mat[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[block_id] = sum_vals[0];
}
__global__ void kSqSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[row] = p * target[row] + mult * sum_vals[0];
}
}
// Works well when number of rows is large.
__global__ void kSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
if (row < height) {
float sum = 0;
float *data = mat + row;
    for (unsigned int i = 0; i < width; i++) sum += data[i*height];
    // each thread accumulates its own row and no shared memory is used, so no block synchronization is needed
    target[row] = p * target[row] + mult * sum;
}
}
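// Rescales each column of mat into target so its L2 norm equals norm when constraint == 1,
// and otherwise only shrinks columns whose norm exceeds norm. kNormalizeColumnwise and
// kNormLimitRowwise below are the mean-subtraction and row-wise analogues.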
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0] / height;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] - cur_sum;
}
__syncthreads();
}
}
__global__ void kNormLimitRowwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data[i * height] * cur_sum;
}
__syncthreads();
}
}
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < target_width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width)? source[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width2)? source[i] + mult * mat[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kAccumulateColumns(float* mat, float* indices, float* target, int mat_width, int target_width, int height, float mult, int avg){
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int column = threadIdx.x;
if (row < height && column < target_width) {
float cur_sum = 0.0;
unsigned int count = 0;
for (unsigned int i = 0; i < mat_width; i ++) {
count += ((int)indices[i] == column) ? 1 : 0 ;
cur_sum += ((int)indices[i] == column) ? mat[row + i * height] : 0 ;
}
target[row + height * column] = mult * cur_sum / ((avg == 1 && count > 0) ? count : 1);
}
}
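// Crops one patch_width x patch_height patch per output image: indices picks the source
// image and width_offset/height_offset give the per-image crop origin. The patch layout
// puts the image index fastest: patches[image + num_images*(col + patch_width*(row + patch_height*color))].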
__global__ void kExtractPatches(float* images, float* patches, float* indices, float* width_offset, float* height_offset, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
const unsigned long idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long numThreads = blockDim.x * gridDim.x;
const unsigned long total_pixels = patch_width * patch_height * num_colors * num_images;
unsigned long ind, pos;
unsigned long image_id, dest_row, dest_col, color, source_row, source_col;
for (unsigned long i = idx; i < total_pixels; i += numThreads) {
ind = i;
image_id = ind % num_images; ind /= num_images;
dest_col = ind % patch_width; ind /= patch_width;
dest_row = ind % patch_height; ind /= patch_height;
color = ind % num_colors;
source_row = int(height_offset[image_id]) + dest_row;
source_col = int(width_offset[image_id]) + dest_col;
pos = img_width * img_height * num_colors * (int)indices[image_id] + img_width * img_height * color + img_width * source_row + source_col;
patches[i] = images[pos];
}
}
__global__ void kExtractPatches2(float* images, float* patches, float* width_offset, float* height_offset, float* flip, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
int image_id = blockIdx.z % num_images;
int color = blockIdx.z / num_images;
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long dest_index = image_id + num_images * (dest_col + patch_width * (dest_row + patch_height * color));
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
patches[dest_index] = images[source_index];
}
}
__global__ void kRectifyBoundingBox(
float* boxes, float* width_offset, float* height_offset, float* flip,
int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
}
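// kAdagrad keeps history[i] = delta + sqrt(accumulated squared gradients) (the stored value
// is un-offset by delta before each update); kRMSProp keeps an exponentially decayed
// root-mean-square of the gradients with decay factor.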
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
}
__global__ void kRMSProp(float *history, float *grad, float factor, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
history[i] = sqrt(factor * history[i] * history[i] + (1-factor) * grad[i] * grad[i]);
}
}
__global__ void kBoundingBoxLogisticGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const int color = blockIdx.z;
/*
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
*/
const int image_id = threadIdx.x;
const int col = blockIdx.x;
const int row = blockIdx.y;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
}
unsigned long i = image_id + size * (col + width * (row + height * color));
__syncthreads();
if (col < width && row < height && image_id < size && color < depth) {
if (num_bboxes > 0) {
grad[i] = (num_bboxes_of_this_depth_inside > 0) ? (mat[i] - 1) : 0;
} else {
grad[i] = (num_bboxes_of_this_depth > 0) ? mat[i] : 0;
}
}
}
__global__ void kLogisticCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target, float cutoff) {
const int color = blockIdx.z;
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
unsigned long i = image_id + size * (col + width * (row + height * color));
if (num_bboxes > 0) {
target[i] = (num_bboxes_of_this_depth_inside > 0 && mat[i] >= cutoff) ? 1 : 0;
} else {
target[i] = (num_bboxes_of_this_depth > 0 && mat[i] < cutoff) ? 1 : 0;
}
}
}
__global__ void kBoundingBoxSoftMaxGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const unsigned int len = width * height * depth * size;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int ind, image_id, source_depth, x1, y1, x2, y2, start,
end, src_image_id, num_bboxes, num_bboxes_of_this_depth, box_id, inside;
float source_x, source_y;
for (unsigned int i = idx; i < len; i += numThreads) {
ind = i;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
source_depth = ind % depth;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
num_bboxes_of_this_depth = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (inside == 1 && label[box_id] == source_depth) ? 1: 0;
}
grad[i] = mat[i] - ((num_bboxes > 0) ? ((float)num_bboxes_of_this_depth / num_bboxes) : (source_depth == 0 ? 1:0));
}
}
__global__ void kSoftMaxCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target) {
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int num_pixels = size * width * height;
if (row < num_pixels) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < depth; i += blockDim.x) {
val = cur_data[i * num_pixels];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
int ind, image_id, src_image_id, x1, y1, x2, y2, start,
end, num_bboxes, correct, box_id, inside;
float source_x, source_y;
ind = row;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
correct = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
correct += (inside == 1 && cur_argmax == label[box_id]) ? 1 : 0;
}
target[row] = (num_bboxes > 0) ? ((correct > 0) ? 1 : 0) : ((cur_argmax == 0) ? 1: 0);
}
}
}
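// Fused element-wise LSTM forward pass with diagonal (peephole) cell connections. s_in and
// s_out pack the [h, c, i, f, a, o] blocks, each numcases * num_lstms wide; w_diag holds the
// three peephole vectors (i, f, o) and b the four gate biases. The matching backward pass
// (kLSTMBprop) and weight-gradient kernel (kLSTMOutp) follow.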
__global__ void kLSTMFprop(float *s_in, float* s_out, float* w_diag, float* b, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *h_out = s_out,
*c_out = s_out + numEls,
*i_out = s_out + 2 * numEls,
*f_out = s_out + 3 * numEls,
*a_out = s_out + 4 * numEls,
*o_out = s_out + 5 * numEls;
float *c_in = s_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float *b_i = b,
*b_f = b + num_lstms,
*b_a = b + 2 * num_lstms,
*b_o = b + 3 * num_lstms;
float i, f, a, o, c, h;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
i = i_out[p];
f = f_out[p];
a = a_out[p];
o = o_out[p];
c = init ? 0 : c_in[p];
i = sigmoid(i + c * w_i[j] + b_i[j]);
f = sigmoid(f + c * w_f[j] + b_f[j]);
a = use_relu ? relu(a + b_a[j]) : tanh(a + b_a[j]);
c = c * f + i * a;
o = sigmoid(o + c * w_o[j] + b_o[j]);
h = o * (use_relu ? c : tanh(c)); // relu(c) = c, because c is always +ve here.
__syncthreads();
i_out[p] = i;
f_out[p] = f;
a_out[p] = a;
o_out[p] = o;
c_out[p] = c;
h_out[p] = h;
}
}
}
__global__ void kLSTMBprop(float *s_in, float* s_out, float* d_in, float* d_out, float* w_diag, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *s_c_out = s_out + numEls,
*s_i_out = s_out + 2 * numEls,
*s_f_out = s_out + 3 * numEls,
*s_a_out = s_out + 4 * numEls,
*s_o_out = s_out + 5 * numEls;
float *s_c_in = s_in + 1 * numEls;
float *d_h_out = d_out,
*d_c_out = d_out + numEls,
*d_i_out = d_out + 2 * numEls,
*d_f_out = d_out + 3 * numEls,
*d_a_out = d_out + 4 * numEls,
*d_o_out = d_out + 5 * numEls;
float *d_c_in = d_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float i, f, a, o, c,
grad_i, grad_f, grad_a, grad_o, grad_c, grad_h,
c_old, tanhc;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
grad_h = d_h_out[p];
grad_c = d_c_out[p];
i = s_i_out[p];
f = s_f_out[p];
a = s_a_out[p];
o = s_o_out[p];
c = s_c_out[p];
c_old = init ? 0 : s_c_in[p];
tanhc = use_relu ? c : tanh(c);
grad_o = grad_h * tanhc * deriv_of_sigmoid(o);
grad_c += grad_o * w_o[j] + grad_h * o * (use_relu ? deriv_of_relu(tanhc) : deriv_of_tanh(tanhc));
grad_a = grad_c * i * (use_relu ? deriv_of_relu(a) : deriv_of_tanh(a));
grad_i = grad_c * a * deriv_of_sigmoid(i);
grad_f = grad_c * c_old * deriv_of_sigmoid(f);
grad_c = grad_c * f + grad_f * w_f[j] + grad_i * w_i[j];
__syncthreads();
d_i_out[p] = grad_i;
d_f_out[p] = grad_f;
d_o_out[p] = grad_o;
d_a_out[p] = grad_a;
if (!init) d_c_in[p] = grad_c;
}
}
}
__global__ void kLSTMOutp(float* s_in, float* s_out, float* d_out, float* dw_diag, float* db, int numcases, int num_lstms, bool init) {
extern __shared__ float sum_vals[];
const int lstm_id = gridDim.x * blockIdx.y + blockIdx.x;
if (lstm_id < num_lstms) {
float* d_i = d_out + numcases * (num_lstms * 2 + lstm_id);
float* d_f = d_out + numcases * (num_lstms * 3 + lstm_id);
float* d_a = d_out + numcases * (num_lstms * 4 + lstm_id);
float* d_o = d_out + numcases * (num_lstms * 5 + lstm_id);
float* s_c = s_out + numcases * (num_lstms * 1 + lstm_id);
float* s_c_old = s_in + numcases * (num_lstms * 1 + lstm_id);
float dwi = 0, dwf = 0, dwo = 0, dbi = 0, dbf = 0, dba = 0, dbo = 0;
float c_old, grad_i, grad_f, grad_a, grad_o;
for (unsigned int i = threadIdx.x; i < numcases; i += blockDim.x) {
c_old = init ? 0 : s_c_old[i];
grad_i = d_i[i];
grad_f = d_f[i];
grad_a = d_a[i];
grad_o = d_o[i];
dwi += c_old * grad_i;
dwf += c_old * grad_f;
dwo += s_c[i] * grad_o;
dbi += grad_i;
dbf += grad_f;
dba += grad_a;
dbo += grad_o;
}
    sum_vals[threadIdx.x] = dwi; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id] += sum_vals[0];
    sum_vals[threadIdx.x] = dwf; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms] += sum_vals[0];
    sum_vals[threadIdx.x] = dwo; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms * 2] += sum_vals[0];
    sum_vals[threadIdx.x] = dbi; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id] += sum_vals[0];
    sum_vals[threadIdx.x] = dbf; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms] += sum_vals[0];
    sum_vals[threadIdx.x] = dba; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 2] += sum_vals[0];
    sum_vals[threadIdx.x] = dbo; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 3] += sum_vals[0];
}
}
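// Batch-norm backward pass, one feature per column: backpropagates through gamma * (x - mu) / sigma,
// removes the per-column mean of the result, and accumulates into target scaled by scale_targets.
// kBNGrad below computes the gamma and beta gradients.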
__global__ void kBNBprop(float* d, float* x, float* gamma, float* mu, float* sigma,
float* target, unsigned int width, unsigned int height, float scale_targets) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
float gamma_val = gamma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
float *cur_target = &target[column * height] ;
float cur_sum = 0, cur_sum2 = 0, val;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += (cur_x[i] - mu_val) * cur_d[i];
}
sum_vals[threadIdx.x] = cur_sum / ((height - 1) * sigma_val * sigma_val);
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = gamma_val * (cur_d[i] - (cur_x[i] - mu_val) * cur_sum) / sigma_val;
cur_sum2 += val;
cur_target[i] = scale_targets * cur_target[i] + val;
}
sum_vals[threadIdx.x] = cur_sum2 / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] -= cur_sum;
}
__syncthreads();
}
}
__global__ void kBNGrad(float* d, float* x, float* mu, float* sigma,
float* dgamma, float* dbeta, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
    float z, dval, sum_gamma = 0, sum_beta = 0; // local renamed so it does not shadow the pointer parameter d
    for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
      z = (cur_x[i] - mu_val) / sigma_val;
      dval = cur_d[i];
      sum_gamma += z * dval;
      sum_beta += dval;
    }
sum_vals[threadIdx.x] = sum_gamma; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dgamma[column] = sum_vals[0];
__syncthreads();
sum_vals[threadIdx.x] = sum_beta; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dbeta[column] = sum_vals[0];
__syncthreads();
}
}
| 7429bf8f9727adafcc060e91958750b03dee4f60.cu | #include "cudamat_kernels.cuh"
#include "float.h"
template<int NUM_THREADS>
__device__ void reduceToMax(float* sdata, unsigned int tid){
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); }
if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); }
if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); }
if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); }
if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); }
if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); }
}
}
}
__device__ void reduceToMax32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]);
smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]);
}
}
template __device__ void reduceToMax<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
template<int NUM_THREADS>
__device__ void reduceToSumLocal(float* sdata, unsigned int tid)
{
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
// do reduction in shared mem
if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (NUM_THREADS == 32){
if (tid < 16)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
else
{
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
}
}
__device__ void reduceToSumLocal32(float* sdata, unsigned int tid) {
//Synchronize threads to share shared memory data
__syncthreads();
float mySum = sdata[tid];
if (tid < 16) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile float* smem = sdata;
smem[tid] = mySum = mySum + smem[tid + 16];
smem[tid] = mySum = mySum + smem[tid + 8];
smem[tid] = mySum = mySum + smem[tid + 4];
smem[tid] = mySum = mySum + smem[tid + 2];
smem[tid] = mySum = mySum + smem[tid + 1];
}
}
/*
* tanh is predefined in CUDA.
__device__ inline float tanh(float x) {
  return (1.0f - __expf(-2.0f * x)) / (1.0f + __expf(-2.0f * x));
}
*/
__device__ inline float relu(float x) {
return ((x > 0) ? x : 0);
}
__device__ inline float deriv_of_relu(float y) {
return ((y > 0) ? 1 : 0);
}
__device__ inline float sigmoid(float x) {
return 1.0f / (1.0f + __expf(-x));
}
__device__ inline float deriv_of_sigmoid(float y) {
return y * (1 - y);
}
__device__ inline float deriv_of_tanh(float y) {
return 1 - y*y;
}
template __device__ void reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(float* sdata, unsigned int tid);
/* ------------------------- Random number generation ------------------------- */
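// Each of the NUM_RND_STREAMS streams advances a multiply-with-carry generator:
// new_word = mult * low32(word) + high32(word), with the carry kept in the high 32 bits.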
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomGaussianDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] *= 1 + scale * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] *= 1 + scale * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kRandomDropout(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements, float dropprob, float val, float scale) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) > dropprob) ? (scale * gData[i]) : val;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulli(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < gData[i] ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSampleBernoulliTanh(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = ((__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f) < (1.0 + gData[i]) / 2.0 ? 1:0;
}
rndWords[idx] = rndWord;
}
__global__ void kSamplePoisson(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
target[i] = gData[i];
}
rndWords[idx] = rndWord;
}
__global__ void kSampleGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
target[i] = gData[i] + mult * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
target[i + NUM_RND_STREAMS] = gData[i + NUM_RND_STREAMS] + mult * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbEnergy(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = gData[i] - __logf( - __logf(rnd));
}
rndWords[idx] = rndWord;
}
__global__ void kPerturbProb(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd;
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
target[i] = - gData[i] / __logf(rnd);
}
rndWords[idx] = rndWord;
}
/* ------------------------- Data copying ------------------------- */
/*
Copy row slice from source to target. There is a block for every 32x32 chunk being copied.
*/
__global__ void kGetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int target_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * target_height + row - start] = source[cur_col * height + row];
}
}
__global__ void kSetRowSlice(float* source, float* target, int start, int end, int width, int height) {
const int row = start + blockIdx.x * 32 + threadIdx.x;
const int start_col = blockIdx.y * 32;
const int end_col = (start_col + 32 < width) ? start_col + 32: width;
const int source_height = end - start;
if (row < end) {
for (int cur_col = start_col; cur_col < end_col; cur_col++)
target[cur_col * height + row] = source[cur_col * source_height + row - start];
//source[cur_col * height + row - start] = target[cur_col * target_height + row];
}
}
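// Tiled transpose through shared memory: COPY_BLOCK_SIZE x COPY_BLOCK_SIZE tiles with one
// column of padding so the transposed reads do not all hit the same shared-memory bank.
// kTransposeBig below is a simple strided element-by-element variant.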
__global__ void kTranspose(float *odata, float *idata, int width, int height) {
__shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x;
unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < width) && (yIndex < height)) {
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x;
yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y;
if((xIndex < height) && (yIndex < width)) {
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
__global__ void kTransposeBig(float *odata, float *idata, int height, int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int r, c;
for (unsigned int i = idx; i < width * height; i += numThreads) {
r = i % width;
c = i / width;
odata[i] = idata[height * r + c];
}
}
/* ------------------------- Mathematical operations ------------------------- */
__global__ void kLessThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i];
}
__global__ void kLessThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] <= mat2[i];
}
__global__ void kLessThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val;
}
__global__ void kLessThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] <= val;
}
__global__ void kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i];
}
__global__ void kGreaterThanEq(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] >= mat2[i];
}
__global__ void kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val;
}
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val;
}
__global__ void kUpperBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] > mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kLowerBound(float* mat1, float* mat2, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat1[i] < mat2[i] ? mat2[i] : mat1[i];
}
__global__ void kUpperBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val:mat[i];
}
__global__ void kLowerBoundScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] < val ? val:mat[i];
}
__global__ void kUpperBoundModScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] > val ? val : (mat[i] < -val ? -val : mat[i]);
}
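// CSR sparse (m x k) times dense (k x n, column-major) product, one thread per output
// element: target = alpha * A * B + beta * target, stored column-major (m x n).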
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < m && col < n) {
const int start = indptr[row];
const int end = indptr[row + 1];
float sum = 0.f;
for (int i = start; i < end; i++) {
sum += data[i] * dense_data[col * k + indices[i]];
}
const int pos = col * m + row;
target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]);
}
}
__global__ void kSign(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] ? copysignf(1., mat[i]) : 0;
}
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
__global__ void kApplyCos(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __cosf(mat[i]);
}
__global__ void kApplySigmoid(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sigmoid(mat[i]);
}
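// tanh(x) evaluated as 1 - 2 / (exp(2x) + 1) with the fast __expf intrinsic.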
__global__ void kApplyTanh(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i, exp2x;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
exp2x = __expf(2 * mat_i);
target[i] = 1 - 2 / (exp2x + 1);
}
}
__global__ void kApplyAbs(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0));
}
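// Softplus log(1 + exp(x)); for x > 0 it is rewritten as x + log(1 + exp(-x)) to avoid overflow.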
__global__ void kApplyLog1PlusExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float mat_i;
for (unsigned int i = idx; i < len; i += numThreads) {
mat_i = mat[i];
if (mat_i > 0)
target[i] = (__logf(1 + __expf(-mat_i)) + mat_i);
else
target[i] = __logf(1 + __expf(mat_i));
}
}
__global__ void kLog(float* mat, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __logf(mat[i] + tiny);
}
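// Squashing nonlinearity 2 * sigmoid(lambda * x) - 1, i.e. tanh(lambda * x / 2).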
__global__ void kSquashRelu(float* mat, float* target, unsigned int len, float lambda) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 2 / (1 + __expf(-lambda * mat[i])) - 1;
}
__global__ void kExp(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __expf(mat[i]);
}
__global__ void kCeil(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = ceil(mat[i]);
}
__global__ void kFloor(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = floor(mat[i]);
}
__global__ void kSqrt(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = sqrt(mat[i]);
}
__global__ void kPow(float* mat, float pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow);
}
__global__ void kPowMatrix(float* mat, float* pow, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = powf(mat[i], pow[i]);
}
__global__ void kCrossEntropy(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = -mat[i] * __logf(p[i] + tiny);
}
__global__ void kCrossEntropyBernoulli(float* mat, float* p, float* target, unsigned int len, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = -mat[i] * __logf(p[i] + tiny) - (1 - mat[i]) * __logf(1 - p[i] + tiny);
}
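// Marks each prediction p[i] as correct (1) or not (0) against the binary label mat[i] at the given cutoff.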
__global__ void kCorrectPreds(float* mat, float* p, float* target, unsigned int len, float cutoff) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads)
target[i] = mat[i] * (p[i] >= cutoff) + (1 - mat[i]) * (p[i] < cutoff);
}
__global__ void kReciprocal(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. / mat[i];
}
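// The row/column broadcast kernels in this section assume column-major storage with `height` rows
// and `width` columns: for a flat index i, i % height is the row and i / height is the column.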
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i % height];
}
}
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
}
__global__ void kAddDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + vec[i];
}
}
__global__ void kMultDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * val;
}
}
__global__ void kMultDiagonal(float* mat, float* vec, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] * vec[i];
}
}
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + vec[i / height];
}
}
__global__ void kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i % height];
}
}
__global__ void kAddToEachPixel(float* mat1, float* mat2, float* tgtMat, float mult, unsigned int width, unsigned int height, unsigned int num_pix) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat1[i] + mult * mat2[i % height + height * (i / (height * num_pix))];
}
}
__global__ void kAddRowMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + mult * vec[i / height];
}
}
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % height];
}
}
__global__ void kDivByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i / height];
}
}
__global__ void kDivByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] / vec[i % height];
}
}
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / height];
}
}
__global__ void kAddMultSign(float* a, float* b, unsigned int numEls, float mult) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = a[i] + ((b[i] > 0) ? mult : ((b[i] < 0) ? -mult : 0));
}
}
__global__ void kAdd(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + b[i];
}
}
__global__ void kSubtract(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] - b[i];
}
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
} else {
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scale_targets * dest[i] + a[i] * b[i];
}
}
}
__global__ void kCosDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = -a[i] * __sinf(b[i]);
}
}
__global__ void kSinDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * __cosf(b[i]);
}
}
__global__ void kLogisticDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i] * (1.0 - b[i]);
}
}
// target[i] < 0 means don't care.
__global__ void kLogisticGrad(float* mat, float* targets, float* out_grad, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
out_grad[i] = (targets[i] < 0) ? 0 : (mat[i] - targets[i]);
}
}
__global__ void kLogisticCorrectNormalized(float* mat, float* targets, float* out, unsigned int height, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < height) {
float correct = 0;
float total = 0;
float p, t;
for (int i = idx; i < width * height; i += height) {
p = mat[i];
t = targets[i];
correct += (t < 0) ? 0 : (((t >= 0.5 && p >= 0.5) || (t < 0.5 && p < 0.5)) ? 1: 0);
total += (t < 0) ? 0 : 1;
}
out[idx] = (total > 0) ? (correct / total) : 0;
}
}
__global__ void kTanhDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1.0 + b[i]) * (1.0 - b[i]);
}
}
__global__ void kRectifiedLinearDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (b[i] > 0 ? 1 : 0);
}
}
__global__ void kRectifiedLinearSmoothDeriv(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * (1 - __expf(-b[i]));
}
}
__global__ void kMultScalar(float* mat, float alpha, float* dest, unsigned int len, float scale_targets) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
if (scale_targets == 0) {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha * mat[i];
}
} else {
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = scale_targets * dest[i] + alpha * mat[i];
}
}
}
__global__ void kAssignScalar(float* dest, float alpha, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = alpha;
}
}
__global__ void kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
dest[i] = mat[i] / alpha;
}
}
__global__ void kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] + alpha;
}
}
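// Gathers rows of a row-major (nSourceRows x nCols) matrix into target, 32 target rows per block.
// Indices are passed as floats; negative indices count from the end, and rows with out-of-range
// indices are filled with NaN (the 1.0/0.0 - 1.0/0.0 expression below).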
__global__ void kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){
__shared__ int sourceRowIndices[32];
const int startTargetRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startTargetRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
sourceRowIndices[tid] = int(indices[startTargetRowI + tid]);
if (sourceRowIndices[tid]<0)
sourceRowIndices[tid] += nSourceRows;
if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows)
sourceRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i];
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI];
}
}
__global__ void kSwapColumns(float* source, float* target, float* indices1, float* indices2, int cols, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp;
unsigned int column, row, source_pos, target_pos;
for (unsigned int i = idx; i < height * cols; i += numThreads) {
column = i / height;
row = i % height;
source_pos = height * (int)indices1[column] + row;
target_pos = height * (int)indices2[column] + row;
temp = source[source_pos];
source[source_pos] = target[target_pos];
target[target_pos] = temp;
}
}
__global__ void kShuffleColumns(float* source, float* target, float* indices, int width, int height){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
float temp1, temp2;
unsigned int column, row, pos1, pos2;
for (unsigned int i = idx; i < height * ((width+1) / 2); i += numThreads) {
column = 2 * (i / height);
row = i % height;
if (column + 1 >= width) {
pos1 = height * (int)indices[column] + row;
target[pos1] = source[pos1];
} else {
pos1 = height * (int)indices[column] + row;
pos2 = height * (int)indices[column + 1] + row;
temp1 = source[pos1];
temp2 = source[pos2];
target[pos2] = temp1;
target[pos1] = temp2;
}
}
}
__global__ void kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){
__shared__ int targetRowIndices[32];
const int startSourceRowI = blockIdx.x * 32;
const int tid = threadIdx.x;
const int localNRowIs = min(32, nRowIs-startSourceRowI);
// cooperatively load 32 row indices
if (tid < localNRowIs){
targetRowIndices[tid] = int(indices[startSourceRowI + tid]);
if (targetRowIndices[tid]<0)
targetRowIndices[tid] += nTargetRows;
if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows)
targetRowIndices[tid] = -1;
}
__syncthreads();
// copy 32 rows
for (int i=0; i<localNRowIs; i++){
const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i];
if (targetRowI == -1) continue; // out-of-range target indices are skipped
for (int colI=tid; colI<nCols; colI+=32)
target[targetRowI * nCols + colI] = source[sourceRowI * nCols + colI];
}
}
__global__ void kBlockify(float* source, float* target, int numdims, int blocksize) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
const int off = blockIdx.x * numdims;
for (unsigned int target_ind = idx; target_ind < numdims; target_ind += numThreads) {
const int block = target_ind / blocksize;
target[off + target_ind] = source[off + block * blocksize];
}
}
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels) {
const unsigned int idx = threadIdx.x;
const unsigned int numThreads = blockDim.x;
int target_x, target_y;
int pad = (source_w - target_w)/2;
int target_tile_size = target_w * target_w;
int source_tile_size = source_w * source_w;
int off_x = off_x_arr[blockIdx.x];
int off_y = off_y_arr[blockIdx.x];
int target_off = blockIdx.x * target_tile_size;
int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
target_x = target_ind / target_w;
target_y = target_ind - target_x * target_w;
for (unsigned int ch = 0; ch < num_channels; ch += 1) {
target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
}
}
}
__global__ void kSoftMaxGrad(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i / height] == i % height ? 1 : 0);
}
}
__global__ void kSoftMaxGradRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - ((int)labels[i % height] == i / height ? 1 : 0);
}
}
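// Quadratic hinge loss gradient, row-major layout, one thread per example: each wrong class gets
// max(0, margin + score - correct_score) and the correct class gets minus the sum of those terms.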
__global__ void kHingeQuadraticRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? diff : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kHingeLinearRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float margin) {
int image_id = blockIdx.x * blockDim.x + threadIdx.x;
if (image_id < height) {
mat += image_id;
target += image_id;
const int correct_label = (int)labels[image_id];
const float correct_label_score = mat[correct_label * height];
float sum = 0;
for (unsigned int i = 0; i < width; i++) {
float diff = margin + mat[i*height] - correct_label_score;
float grad = (diff > 0) ? 1 : 0;
target[i*height] = (i == correct_label) ? 0 : grad;
sum += (i == correct_label) ? 0 : grad;
}
target[correct_label * height] = -sum;
}
}
__global__ void kSoftMaxGradCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
target[i] = mat[i] - (labels[(int)indices[i % height]] == i / height ? 1 : 0);
}
}
__global__ void kSoftMaxCrossEntropy(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
target[i] = -__logf(mat[height * i + (int)labels[i]] + tiny);
}
}
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
}
__global__ void kSoftMaxCorrect(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = (cur_argmax == (int)labels[column]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == (int)labels[row]) ? 1 : 0;
}
}
}
__global__ void kSoftMaxCorrectCLS(float* mat, int* labels, float* indices, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[row] = (cur_argmax == labels[(int)indices[row]]) ? 1 : 0;
}
}
}
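// Column-wise softmax with the usual max-subtraction for numerical stability: one block per column,
// shared-memory reductions for the max and the normalizer.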
__global__ void kSoftMax(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val += __expf(cur_data[i]-cur_max);
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
float *cur_target = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] = __expf(cur_data[i]-cur_max) / norm ;
}
}
}
__global__ void kSoftMaxOverwrite(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] = __expf(cur_data[i]-cur_max);
val += cur_data[i];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_data[i] /= norm;
}
}
}
__global__ void kSoftMaxRowMajor(float* mat, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float *cur_data = &mat[row] ;
max_vals[threadIdx.x]=-FLT_MAX;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
val = cur_data[i * height];
if (val > cur_max) {
cur_max = val;
}
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
cur_max = max_vals[0] ;
__syncthreads();
val = 0;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_data[i * height] = __expf(cur_data[i * height]-cur_max);
val += cur_data[i * height];
}
max_vals[threadIdx.x] = val;
reduceToSumLocal32(max_vals, threadIdx.x);
__syncthreads();
float norm = max_vals[0] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_data[i * height] /= norm;
}
}
}
__global__ void kChooseMaxAndAccumulate(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] += 1;
}
}
}
__global__ void kChooseMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
target_data[i] = 0; // clear this block's target column before marking its argmax
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target_data[cur_argmax] = 1;
}
}
}
__global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float max_vals[] ;
float cur_max = -FLT_MAX;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) cur_max = val;
}
max_vals[threadIdx.x] = cur_max;
reduceToMax32(max_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = max_vals[0];
}
}
__global__ void kArgMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = cur_data[i];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
target[column] = cur_argmax;
}
}
}
__global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
__global__ void kSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[column] = p * target[column] + mult * sum_vals[0];
}
}
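// Each block sums a contiguous chunk of len_per_block elements (the first left_over blocks take one
// extra) and writes its partial sum to target[blockIdx.x].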
__global__ void kSumAll(float* mat, float* target, unsigned int len, unsigned int len_per_block, unsigned int left_over) {
extern __shared__ float sum_vals[];
float cur_sum = 0;
int block_id = blockIdx.x;
mat += block_id * len_per_block + (block_id < left_over ? block_id : left_over);
int l = len_per_block + (block_id < left_over ? 1 : 0);
__syncthreads();
for (unsigned int i = threadIdx.x; i < l; i += blockDim.x) {
cur_sum += mat[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal<NUM_VECTOR_OP_THREADS_PER_BLOCK>(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[block_id] = sum_vals[0];
}
__global__ void kSqSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
if (threadIdx.x == 0) target[row] = p * target[row] + mult * sum_vals[0];
}
}
// Works well when number of rows is large.
__global__ void kSumRowwise(float* mat, float* target, unsigned int width, unsigned int height, float mult, float p) {
const int row = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
if (row < height) {
float sum = 0;
float *data = mat + row;
for (unsigned int i = 0; i < width; i++) sum += data[i*height];
target[row] = p * target[row] + mult * sum;
}
}
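// Rescales each column so its L2 norm does not exceed `norm`; with constraint == 1 the norm is
// forced to exactly `norm`.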
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i] * cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] * cur_sum;
}
__syncthreads();
}
}
__global__ void kNormalizeColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float cur_sum = 0;
float *cur_data = &mat[column * height] ;
float *target_data = &target[column * height] ;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += cur_data[i];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0] / height;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
target_data[i] = cur_data[i] - cur_sum;
}
__syncthreads();
}
}
__global__ void kNormLimitRowwise(float* mat, float* target, float norm, unsigned int width, unsigned int height, int constraint) {
extern __shared__ float sum_vals[];
const int row = gridDim.x * blockIdx.y + blockIdx.x;
if (row < height) {
float cur_sum = 0;
float *cur_data = &mat[row] ;
float *target_data = &target[row] ;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
cur_sum += cur_data[i * height] * cur_data[i * height];
}
sum_vals[threadIdx.x] = cur_sum;
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sqrt(sum_vals[0]);
cur_sum = (constraint == 1 || cur_sum > norm) ? (norm / cur_sum) : 1;
for (unsigned int i = threadIdx.x; i < width; i += blockDim.x) {
target_data[i * height] = cur_data[i * height] * cur_sum;
}
__syncthreads();
}
}
__global__ void kExpand(float* source, float* indices, float* target, int height, int width, int target_width){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < target_width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width)? source[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kExpandAndAdd(float* source, float* mat, float* indices, float* target, int width, int height, float mult, int width2){
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width*height; i += numThreads) {
const int pos = height * (int)indices[i / height] + i % height;
target[i] = (pos < height * width2)? source[i] + mult * mat[pos] : 1.0/0.0 - 1.0/0.0;
}
}
__global__ void kAccumulateColumns(float* mat, float* indices, float* target, int mat_width, int target_width, int height, float mult, int avg){
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int column = threadIdx.x;
if (row < height && column < target_width) {
float cur_sum = 0.0;
unsigned int count = 0;
for (unsigned int i = 0; i < mat_width; i ++) {
count += ((int)indices[i] == column) ? 1 : 0 ;
cur_sum += ((int)indices[i] == column) ? mat[row + i * height] : 0 ;
}
target[row + height * column] = mult * cur_sum / ((avg == 1 && count > 0) ? count : 1);
}
}
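// Crops patch_width x patch_height patches (all colors) from the images selected by `indices` at
// per-image (width_offset, height_offset); images are stored planar per color, row-major, while the
// patch output uses image as the fastest-varying dimension, then column, row and color.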
__global__ void kExtractPatches(float* images, float* patches, float* indices, float* width_offset, float* height_offset, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
const unsigned long idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned long numThreads = blockDim.x * gridDim.x;
const unsigned long total_pixels = patch_width * patch_height * num_colors * num_images;
unsigned long ind, pos;
unsigned long image_id, dest_row, dest_col, color, source_row, source_col;
for (unsigned long i = idx; i < total_pixels; i += numThreads) {
ind = i;
image_id = ind % num_images; ind /= num_images;
dest_col = ind % patch_width; ind /= patch_width;
dest_row = ind % patch_height; ind /= patch_height;
color = ind % num_colors;
source_row = int(height_offset[image_id]) + dest_row;
source_col = int(width_offset[image_id]) + dest_col;
pos = img_width * img_height * num_colors * (int)indices[image_id] + img_width * img_height * color + img_width * source_row + source_col;
patches[i] = images[pos];
}
}
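// Patch extraction variant that also mirrors the crop horizontally when flip[image] > 0.5;
// one 2-D thread tile per (image, color) plane selected via blockIdx.z.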
__global__ void kExtractPatches2(float* images, float* patches, float* width_offset, float* height_offset, float* flip, int num_images, int img_width, int img_height, int patch_width, int patch_height, int num_colors) {
int image_id = blockIdx.z % num_images;
int color = blockIdx.z / num_images;
int dest_col = blockIdx.x * blockDim.x + threadIdx.x;
int dest_row = blockIdx.y * blockDim.y + threadIdx.y;
if (dest_col < patch_width && dest_row < patch_height) {
int source_row = int(height_offset[image_id]) + dest_row;
int source_col = int(width_offset[image_id]) + dest_col;
source_col = (flip[image_id] > 0.5) ? (img_width - source_col - 1) : source_col;
unsigned long dest_index = image_id + num_images * (dest_col + patch_width * (dest_row + patch_height * color));
unsigned long source_index = source_col + img_width * (source_row + img_height * (color + num_colors * image_id));
patches[dest_index] = images[source_index];
}
}
__global__ void kRectifyBoundingBox(
float* boxes, float* width_offset, float* height_offset, float* flip,
int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
}
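// Adagrad-style accumulator: history stores delta + sqrt(sum of squared gradients), so each call
// folds grad[i]^2 into the running root-sum-of-squares.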
__global__ void kAdagrad(float *history, float *grad, float delta, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
float curr_norm = history[i] - delta;
history[i] = delta + sqrt(curr_norm * curr_norm + grad[i] * grad[i]);
}
}
__global__ void kRMSProp(float *history, float *grad, float factor, int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) {
history[i] = sqrt(factor * history[i] * history[i] + (1-factor) * grad[i] * grad[i]);
}
}
__global__ void kBoundingBoxLogisticGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const int color = blockIdx.z;
/*
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
*/
const int image_id = threadIdx.x;
const int col = blockIdx.x;
const int row = blockIdx.y;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
}
unsigned long i = image_id + size * (col + width * (row + height * color));
__syncthreads();
if (col < width && row < height && image_id < size && color < depth) {
if (num_bboxes > 0) {
grad[i] = (num_bboxes_of_this_depth_inside > 0) ? (mat[i] - 1) : 0;
} else {
grad[i] = (num_bboxes_of_this_depth > 0) ? mat[i] : 0;
}
}
}
__global__ void kLogisticCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target, float cutoff) {
const int color = blockIdx.z;
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height && image_id < size && color < depth) {
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
unsigned long i = image_id + size * (col + width * (row + height * color));
if (num_bboxes > 0) {
target[i] = (num_bboxes_of_this_depth_inside > 0 && mat[i] >= cutoff) ? 1 : 0;
} else {
target[i] = (num_bboxes_of_this_depth > 0 && mat[i] < cutoff) ? 1 : 0;
}
}
}
__global__ void kBoundingBoxSoftMaxGrad(
float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset,
int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const unsigned int len = width * height * depth * size;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
int ind, image_id, source_depth, x1, y1, x2, y2, start,
end, src_image_id, num_bboxes, num_bboxes_of_this_depth, box_id, inside;
float source_x, source_y;
for (unsigned int i = idx; i < len; i += numThreads) {
ind = i;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
source_depth = ind % depth;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
num_bboxes_of_this_depth = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (inside == 1 && label[box_id] == source_depth) ? 1: 0;
}
grad[i] = mat[i] - ((num_bboxes > 0) ? ((float)num_bboxes_of_this_depth / num_bboxes) : (source_depth == 0 ? 1:0));
}
}
__global__ void kSoftMaxCorrectBoundingBox(
float* mat, int* bbox, int* label, int* seg, float* indices,
float* width_offset, float* height_offset, int size, int width, int height,
int depth, float scale_width, float scale_height, float* target) {
const int row = gridDim.x * blockIdx.y + blockIdx.x;
const int num_pixels = size * width * height;
if (row < num_pixels) {
__shared__ float max_vals[32];
__shared__ unsigned int max_val_args[32];
float cur_max = -FLT_MAX;
unsigned int cur_argmax = 0;
float val = 0;
float *cur_data = &mat[row] ;
for (unsigned int i = threadIdx.x; i < depth; i += blockDim.x) {
val = cur_data[i * num_pixels];
if (val > cur_max) {
cur_max = val;
cur_argmax = i;
}
}
max_vals[threadIdx.x] = cur_max;
max_val_args[threadIdx.x] = cur_argmax;
__syncthreads();
if (threadIdx.x == 0) {
cur_max = -FLT_MAX;
cur_argmax = 0;
for (unsigned int i = 0; i < blockDim.x; i++)
if (max_vals[i] > cur_max) {
cur_max = max_vals[i];
cur_argmax = max_val_args[i];
}
int ind, image_id, src_image_id, x1, y1, x2, y2, start,
end, num_bboxes, correct, box_id, inside;
float source_x, source_y;
ind = row;
image_id = ind % size; ind /= size;
source_x = scale_width * (ind % width); ind /= width;
source_y = scale_height * (ind % height); ind /= height;
src_image_id = (int)indices[image_id];
start = seg[src_image_id];
end = seg[src_image_id + 1];
num_bboxes = 0;
correct = 0;
for (box_id = start; box_id < end; box_id++) {
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (source_x >= x1 && source_x <= x2 && source_y >= y1 && source_y <= y2) ? 1:0;
num_bboxes += inside;
correct += (inside == 1 && cur_argmax == label[box_id]) ? 1 : 0;
}
target[row] = (num_bboxes > 0) ? ((correct > 0) ? 1 : 0) : ((cur_argmax == 0) ? 1: 0);
}
}
}
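// Fused LSTM forward pass with diagonal (peephole) cell-to-gate weights. s_out packs six
// numcases x num_lstms slabs [h, c, i, f, a, o]; w_diag holds the i/f/o peephole weights, b the
// i/f/a/o biases; init treats the previous cell state as zero, and use_relu swaps tanh for ReLU.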
__global__ void kLSTMFprop(float *s_in, float* s_out, float* w_diag, float* b, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *h_out = s_out,
*c_out = s_out + numEls,
*i_out = s_out + 2 * numEls,
*f_out = s_out + 3 * numEls,
*a_out = s_out + 4 * numEls,
*o_out = s_out + 5 * numEls;
float *c_in = s_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float *b_i = b,
*b_f = b + num_lstms,
*b_a = b + 2 * num_lstms,
*b_o = b + 3 * num_lstms;
float i, f, a, o, c, h;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
i = i_out[p];
f = f_out[p];
a = a_out[p];
o = o_out[p];
c = init ? 0 : c_in[p];
i = sigmoid(i + c * w_i[j] + b_i[j]);
f = sigmoid(f + c * w_f[j] + b_f[j]);
a = use_relu ? relu(a + b_a[j]) : tanh(a + b_a[j]);
c = c * f + i * a;
o = sigmoid(o + c * w_o[j] + b_o[j]);
h = o * (use_relu ? c : tanh(c)); // relu(c) = c, because c is always +ve here.
__syncthreads();
i_out[p] = i;
f_out[p] = f;
a_out[p] = a;
o_out[p] = o;
c_out[p] = c;
h_out[p] = h;
}
}
}
__global__ void kLSTMBprop(float *s_in, float* s_out, float* d_in, float* d_out, float* w_diag, int numcases, int num_lstms, bool init, bool use_relu) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
int numEls = numcases * num_lstms;
if (idx < numEls) {
const unsigned int numThreads = blockDim.x * gridDim.x;
float *s_c_out = s_out + numEls,
*s_i_out = s_out + 2 * numEls,
*s_f_out = s_out + 3 * numEls,
*s_a_out = s_out + 4 * numEls,
*s_o_out = s_out + 5 * numEls;
float *s_c_in = s_in + 1 * numEls;
float *d_h_out = d_out,
*d_c_out = d_out + numEls,
*d_i_out = d_out + 2 * numEls,
*d_f_out = d_out + 3 * numEls,
*d_a_out = d_out + 4 * numEls,
*d_o_out = d_out + 5 * numEls;
float *d_c_in = d_in + 1 * numEls;
float *w_i = w_diag,
*w_f = w_diag + num_lstms,
*w_o = w_diag + 2 * num_lstms;
float i, f, a, o, c,
grad_i, grad_f, grad_a, grad_o, grad_c, grad_h,
c_old, tanhc;
for (unsigned int p = idx; p < numEls; p += numThreads) {
int j = p / numcases;
grad_h = d_h_out[p];
grad_c = d_c_out[p];
i = s_i_out[p];
f = s_f_out[p];
a = s_a_out[p];
o = s_o_out[p];
c = s_c_out[p];
c_old = init ? 0 : s_c_in[p];
tanhc = use_relu ? c : tanh(c);
grad_o = grad_h * tanhc * deriv_of_sigmoid(o);
grad_c += grad_o * w_o[j] + grad_h * o * (use_relu ? deriv_of_relu(tanhc) : deriv_of_tanh(tanhc));
grad_a = grad_c * i * (use_relu ? deriv_of_relu(a) : deriv_of_tanh(a));
grad_i = grad_c * a * deriv_of_sigmoid(i);
grad_f = grad_c * c_old * deriv_of_sigmoid(f);
grad_c = grad_c * f + grad_f * w_f[j] + grad_i * w_i[j];
__syncthreads();
d_i_out[p] = grad_i;
d_f_out[p] = grad_f;
d_o_out[p] = grad_o;
d_a_out[p] = grad_a;
if (!init) d_c_in[p] = grad_c;
}
}
}
__global__ void kLSTMOutp(float* s_in, float* s_out, float* d_out, float* dw_diag, float* db, int numcases, int num_lstms, bool init) {
extern __shared__ float sum_vals[];
const int lstm_id = gridDim.x * blockIdx.y + blockIdx.x;
if (lstm_id < num_lstms) {
float* d_i = d_out + numcases * (num_lstms * 2 + lstm_id);
float* d_f = d_out + numcases * (num_lstms * 3 + lstm_id);
float* d_a = d_out + numcases * (num_lstms * 4 + lstm_id);
float* d_o = d_out + numcases * (num_lstms * 5 + lstm_id);
float* s_c = s_out + numcases * (num_lstms * 1 + lstm_id);
float* s_c_old = s_in + numcases * (num_lstms * 1 + lstm_id);
float dwi = 0, dwf = 0, dwo = 0, dbi = 0, dbf = 0, dba = 0, dbo = 0;
float c_old, grad_i, grad_f, grad_a, grad_o;
for (unsigned int i = threadIdx.x; i < numcases; i += blockDim.x) {
c_old = init ? 0 : s_c_old[i];
grad_i = d_i[i];
grad_f = d_f[i];
grad_a = d_a[i];
grad_o = d_o[i];
dwi += c_old * grad_i;
dwf += c_old * grad_f;
dwo += s_c[i] * grad_o;
dbi += grad_i;
dbf += grad_f;
dba += grad_a;
dbo += grad_o;
}
sum_vals[threadIdx.x] = dwi;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id] += sum_vals[0];
sum_vals[threadIdx.x] = dwf;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms] += sum_vals[0];
sum_vals[threadIdx.x] = dwo;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) dw_diag[lstm_id + num_lstms * 2] += sum_vals[0];
sum_vals[threadIdx.x] = dbi;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id] += sum_vals[0];
sum_vals[threadIdx.x] = dbf;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms] += sum_vals[0];
sum_vals[threadIdx.x] = dba;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 2] += sum_vals[0];
sum_vals[threadIdx.x] = dbo;reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) db[lstm_id + num_lstms * 3] += sum_vals[0];
}
}
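// Backprop through batch normalization, one feature column per block: given upstream gradients d,
// inputs x and per-column mu/sigma/gamma, accumulates the input gradient into target (scaled by
// scale_targets), including the mean- and variance-dependent correction terms.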
__global__ void kBNBprop(float* d, float* x, float* gamma, float* mu, float* sigma,
float* target, unsigned int width, unsigned int height, float scale_targets) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
float gamma_val = gamma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
float *cur_target = &target[column * height] ;
float cur_sum = 0, cur_sum2 = 0, val;
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_sum += (cur_x[i] - mu_val) * cur_d[i];
}
sum_vals[threadIdx.x] = cur_sum / ((height - 1) * sigma_val * sigma_val);
reduceToSumLocal32(sum_vals, threadIdx.x);
__syncthreads();
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
val = gamma_val * (cur_d[i] - (cur_x[i] - mu_val) * cur_sum) / sigma_val;
cur_sum2 += val;
cur_target[i] = scale_targets * cur_target[i] + val;
}
sum_vals[threadIdx.x] = cur_sum2 / height;
reduceToSumLocal32(sum_vals, threadIdx.x);
cur_sum = sum_vals[0];
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
cur_target[i] -= cur_sum;
}
__syncthreads();
}
}
__global__ void kBNGrad(float* d, float* x, float* mu, float* sigma,
float* dgamma, float* dbeta, unsigned int width, unsigned int height) {
extern __shared__ float sum_vals[];
const int column = gridDim.x * blockIdx.y + blockIdx.x;
if (column < width) {
float mu_val = mu[column];
float sigma_val = sigma[column];
__syncthreads();
float *cur_x = &x[column * height] ;
float *cur_d = &d[column * height] ;
float z, dval, sum_gamma = 0, sum_beta = 0; // local 'dval' avoids shadowing the pointer argument d
for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) {
z = (cur_x[i] - mu_val) / sigma_val;
dval = cur_d[i];
sum_gamma += z * dval;
sum_beta += dval;
}
sum_vals[threadIdx.x] = sum_gamma; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dgamma[column] = sum_vals[0];
__syncthreads();
sum_vals[threadIdx.x] = sum_beta; reduceToSumLocal32(sum_vals, threadIdx.x); __syncthreads();
if (threadIdx.x == 0) dbeta[column] = sum_vals[0];
__syncthreads();
}
}
|
193334096bd1b2ba8db9ac067c187aa8a175834c.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/find.h>
#include <iostream>
#include <utility>
using namespace std;
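/*
 * Reads N and an NxN grid of 0/1 values from stdin and checks every row, column
 * and diagonal (both directions) on the device. If any such line contains more
 * than one 1, print() reports "NO" followed by the (row, col) coordinates of
 * the first two offending cells; otherwise main() prints "YES".
 */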
struct saxpy
{
int *N;
saxpy(int a) {
hipHostMalloc(&N, sizeof(int), 0);
*N = a;
}
__host__ __device__
bool operator() (const int a) {
return a%(*N) == 0;
}
};
__host__ __device__
int ordinal(const int N, const int x, const int y)
{
return (N*x)+y;
}
void print(int *begin, int *end, int start, int diff, const int N)
{
int *first = thrust::find(thrust::device, begin, end, 1);
int *second = thrust::find(thrust::device, first+1, end, 1);
int a = first - begin;
int b = second - begin;
int ordA = start + diff * a;
int ordB = start + diff * b;
cout << "NO" << endl;
cout << ordA/N << " " << ordA % N << endl;
cout << ordB/N << " " << ordB % N << endl;
}
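/*
 * evaluate() scans every row, column and diagonal. Rows are contiguous and are
 * counted directly; columns and diagonals are gathered into the compact buffer
 * C with thrust::copy_if, using the index sequence in B as a stencil and the
 * saxpy predicate to keep every N-th, (N-1)-th or (N+1)-th element of the range.
 */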
int evaluate(int *A, int *B, int *C, const int N)
{
int *begin, *end;
// row
for (int i=0; i<N; ++i) {
begin = A+ordinal(N, i, 0);
end = A+1+ordinal(N,i,N-1);
if (thrust::count(thrust::device, begin, end, 1) > 1) {
print(begin, end, begin-A, 1, N);
return 0;
}
}
// column
thrust::sequence(thrust::device, B, B+N*N);
for (int i=0; i<N; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,N-1,i);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N, N);
return 0;
}
}
// diagonal 1
for (int i=1; i<N; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,i,0);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N-1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N-1, N);
return 0;
}
}
for (int i=1; i<N-1; ++i) {
begin = A+ordinal(N,i,N-1);
end = A+1+ordinal(N,N-1,i);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N-1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N-1, N);
return 0;
}
}
//diagonal 2
for (int i=0; i<N-1; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,N-i-1,N-1);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N+1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N+1, N);
return 0;
}
}
for (int i=1; i<N-1; ++i) {
begin = A+ordinal(N,i,0);
end = A+1+ordinal(N,N-1,N-i-1);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N+1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N+1, N);
return 0;
}
}
return 1;
}
int main(int argc, char const *argv[])
{
int N;
cin >> N;
int *cA = new int[N*N];
for (int i=0; i<N*N; ++i) {
cin >> cA[i];
}
int *A, *B, *C;
hipMalloc(&A, sizeof(int)*N*N);
hipMalloc(&B, sizeof(int)*N*N);
hipMalloc(&C, sizeof(int)*N);
hipMemcpy(A, cA, sizeof(int)*N*N, hipMemcpyHostToDevice);
if (evaluate(A, B, C, N)) {
cout << "YES" << endl;
}
hipFree(A);
hipFree(B);
hipFree(C);
delete[] cA;
return 0;
} | 193334096bd1b2ba8db9ac067c187aa8a175834c.cu | #include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/find.h>
#include <iostream>
#include <utility>
using namespace std;
struct saxpy
{
int *N;
saxpy(int a) {
cudaHostAlloc(&N, sizeof(int), 0);
*N = a;
}
__host__ __device__
bool operator() (const int a) {
return a%(*N) == 0;
}
};
__host__ __device__
int ordinal(const int N, const int x, const int y)
{
return (N*x)+y;
}
void print(int *begin, int *end, int start, int diff, const int N)
{
int *first = thrust::find(thrust::device, begin, end, 1);
int *second = thrust::find(thrust::device, first+1, end, 1);
int a = first - begin;
int b = second - begin;
int ordA = start + diff * a;
int ordB = start + diff * b;
cout << "NO" << endl;
cout << ordA/N << " " << ordA % N << endl;
cout << ordB/N << " " << ordB % N << endl;
}
int evaluate(int *A, int *B, int *C, const int N)
{
int *begin, *end;
// row
for (int i=0; i<N; ++i) {
begin = A+ordinal(N, i, 0);
end = A+1+ordinal(N,i,N-1);
if (thrust::count(thrust::device, begin, end, 1) > 1) {
print(begin, end, begin-A, 1, N);
return 0;
}
}
// column
thrust::sequence(thrust::device, B, B+N*N);
for (int i=0; i<N; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,N-1,i);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N, N);
return 0;
}
}
// diagonal 1
for (int i=1; i<N; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,i,0);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N-1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N-1, N);
return 0;
}
}
for (int i=1; i<N-1; ++i) {
begin = A+ordinal(N,i,N-1);
end = A+1+ordinal(N,N-1,i);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N-1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N-1, N);
return 0;
}
}
//diagonal 2
for (int i=0; i<N-1; ++i) {
begin = A+ordinal(N,0,i);
end = A+1+ordinal(N,N-i-1,N-1);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N+1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N+1, N);
return 0;
}
}
for (int i=1; i<N-1; ++i) {
begin = A+ordinal(N,i,0);
end = A+1+ordinal(N,N-1,N-i-1);
int *r_end = thrust::copy_if(thrust::device, begin, end, B, C, saxpy(N+1));
if (thrust::count(thrust::device, C, r_end, 1) > 1) {
print(C, r_end, begin-A, N+1, N);
return 0;
}
}
return 1;
}
int main(int argc, char const *argv[])
{
int N;
cin >> N;
int *cA = new int[N*N];
for (int i=0; i<N*N; ++i) {
cin >> cA[i];
}
int *A, *B, *C;
cudaMalloc(&A, sizeof(int)*N*N);
cudaMalloc(&B, sizeof(int)*N*N);
cudaMalloc(&C, sizeof(int)*N);
cudaMemcpy(A, cA, sizeof(int)*N*N, cudaMemcpyHostToDevice);
if (evaluate(A, B, C, N)) {
cout << "YES" << endl;
}
cudaFree(A);
cudaFree(B);
cudaFree(C);
delete[] cA;
return 0;
} |
b4b5749dd6027bb8856a7ff2ee642f042fb317e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VectorSubW12_KERNEL_H_
#define _VectorSubW12_KERNEL_H_
#include "CUDA_Kernels.h"
__global__ void vectorSub08_kernel(unsigned char *d_src, float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = x0 + x;
int _y = y0 + y;
int gid_local = y*imageW + x;
if (gid_local > 20)
d_src_f[gid_local] = (float)d_src[gid_local];
else
d_src_f[gid_local] = 0;
if ((_x < bg_w) && (_y < bg_h)) {
int gid = _y*bg_w + _x;
d_sub[gid_local] = d_src_f[gid_local] - d_BG[gid];
if (d_sub[gid_local] < 0) d_sub[gid_local] = 0;
}
}
}
}
// Device code
extern "C" void vectorSub08_kernelGPU(
unsigned char *d_src,
float *d_src_f,
float *d_BG,
float *d_sub,
int x0, int y0,
int w, int h,
int bg_w, int bg_h
)
{
	// Launch the vectorSub08 kernel
dim3 blocks(32, 32);
dim3 threads(32, 32);
vectorSub08_kernel << <blocks, threads >> >(d_src, d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h);
}
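/*
 * casting_u12_f32_Kernel: unpacks pairs of packed 12-bit samples (3 bytes per
 * pair) into two float outputs. Its launch in vectorSub16_kernelGPU below is
 * commented out; the active path in vectorSub16_kernel instead shifts each
 * 16-bit sample right by 4 bits.
 */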
__global__ void casting_u12_f32_Kernel(float *d_Dst, unsigned short *d_Src, int imageW, int imageH)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
if (x % 2 == 0) {
int gid = y*imageW + x;
int gid2 = (int)(gid * 3 / 2);
unsigned char * pSource = (unsigned char*)d_Src + gid2;
unsigned char * b0 = pSource;
unsigned char * b1 = pSource + 1;
unsigned char * b2 = pSource + 2;
d_Dst[gid] = (float)(((unsigned short)(*b0) << 4) | (*b1 & 0x0F));
d_Dst[gid + 1] = (float)(((unsigned short)(*b2) << 4) | (*b1 & 0xF0));
}
}
}
}
__global__ void vectorSub_kernel(float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = x0 + x;
int _y = y0 + y;
int gid_local = y*imageW + x;
if ((_x < bg_w) && (_y < bg_h)) {
int gid = _y*bg_w + _x;
d_sub[gid_local] = d_src_f[gid_local] - (float)d_BG[gid];
if (d_sub[gid_local] < 0) d_sub[gid_local] = 0;
}
}
}
}
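/*
 * vectorSub16_kernel: rotates the 16-bit source frame by 90 degrees, converts
 * each sample to float with a right shift by 4 bits, subtracts the (x0, y0)
 * window of the background, clamps the result to [0, max_intensity_set], and
 * records the background values it used in d_save.
 */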
__global__ void vectorSub16_kernel(unsigned short * d_src, float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h, float max_intensity_set, float *d_save)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = y;
int _y = imageW - 1 - x;
int gid_local = y*imageW + x;
int gid_local2 = _y*imageH + _x; // conversion
d_src_f[gid_local2] = (float)(d_src[gid_local] >> 4);
if ((_x < bg_w) && (_y < bg_h)) {
int gid = (_y + y0)*bg_w + (_x + x0);
d_sub[gid_local2] = d_src_f[gid_local2] - d_BG[gid];
if (d_sub[gid_local2] > max_intensity_set) d_sub[gid_local2] = max_intensity_set;
if (d_sub[gid_local2] < 0) d_sub[gid_local2] = 0;
d_save[gid_local2] = d_BG[gid];
}
else
d_save[gid_local2] = 0;
}
}
}
// Device code
extern "C" void vectorSub16_kernelGPU(
unsigned short *d_src,
float *d_src_f,
float *d_BG,
float *d_sub,
int x0, int y0,
int w, int h,
int bg_w, int bg_h,
float max_intensity_set,
float *d_save
)
{
	// Launch the vectorSub16 kernel
dim3 blocks(20, 20);
dim3 threads(32, 32);
vectorSub16_kernel << <blocks, threads >> >(d_src, d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h, max_intensity_set, d_save);
//casting_u12_f32_Kernel << <blocks, threads >> >(d_src_f, d_src, w, h);
//vectorSub_kernel << <blocks, threads >> >(d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h);
}
#endif | b4b5749dd6027bb8856a7ff2ee642f042fb317e0.cu | #ifndef _VectorSubW12_KERNEL_H_
#define _VectorSubW12_KERNEL_H_
#include "CUDA_Kernels.h"
__global__ void vectorSub08_kernel(unsigned char *d_src, float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = x0 + x;
int _y = y0 + y;
int gid_local = y*imageW + x;
if (gid_local > 20)
d_src_f[gid_local] = (float)d_src[gid_local];
else
d_src_f[gid_local] = 0;
if ((_x < bg_w) && (_y < bg_h)) {
int gid = _y*bg_w + _x;
d_sub[gid_local] = d_src_f[gid_local] - d_BG[gid];
if (d_sub[gid_local] < 0) d_sub[gid_local] = 0;
}
}
}
}
// Device code
extern "C" void vectorSub08_kernelGPU(
unsigned char *d_src,
float *d_src_f,
float *d_BG,
float *d_sub,
int x0, int y0,
int w, int h,
int bg_w, int bg_h
)
{
	// Launch the vectorSub08 kernel
dim3 blocks(32, 32);
dim3 threads(32, 32);
vectorSub08_kernel << <blocks, threads >> >(d_src, d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h);
}
__global__ void casting_u12_f32_Kernel(float *d_Dst, unsigned short *d_Src, int imageW, int imageH)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
if (x % 2 == 0) {
int gid = y*imageW + x;
int gid2 = (int)(gid * 3 / 2);
unsigned char * pSource = (unsigned char*)d_Src + gid2;
unsigned char * b0 = pSource;
unsigned char * b1 = pSource + 1;
unsigned char * b2 = pSource + 2;
d_Dst[gid] = (float)(((unsigned short)(*b0) << 4) | (*b1 & 0x0F));
d_Dst[gid + 1] = (float)(((unsigned short)(*b2) << 4) | (*b1 & 0xF0));
}
}
}
}
__global__ void vectorSub_kernel(float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = x0 + x;
int _y = y0 + y;
int gid_local = y*imageW + x;
if ((_x < bg_w) && (_y < bg_h)) {
int gid = _y*bg_w + _x;
d_sub[gid_local] = d_src_f[gid_local] - (float)d_BG[gid];
if (d_sub[gid_local] < 0) d_sub[gid_local] = 0;
}
}
}
}
__global__ void vectorSub16_kernel(unsigned short * d_src, float *d_src_f, float *d_BG, float *d_sub,
int x0, int y0, int imageW, int imageH, int bg_w, int bg_h, float max_intensity_set, float *d_save)
{
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
for (int y = idx_y; y < imageH; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < imageW; x += blockDim.x * gridDim.x) {
int _x = y;
int _y = imageW - 1 - x;
int gid_local = y*imageW + x;
int gid_local2 = _y*imageH + _x; // conversion
d_src_f[gid_local2] = (float)(d_src[gid_local] >> 4);
if ((_x < bg_w) && (_y < bg_h)) {
int gid = (_y + y0)*bg_w + (_x + x0);
d_sub[gid_local2] = d_src_f[gid_local2] - d_BG[gid];
if (d_sub[gid_local2] > max_intensity_set) d_sub[gid_local2] = max_intensity_set;
if (d_sub[gid_local2] < 0) d_sub[gid_local2] = 0;
d_save[gid_local2] = d_BG[gid];
}
else
d_save[gid_local2] = 0;
}
}
}
// Device code
extern "C" void vectorSub16_kernelGPU(
unsigned short *d_src,
float *d_src_f,
float *d_BG,
float *d_sub,
int x0, int y0,
int w, int h,
int bg_w, int bg_h,
float max_intensity_set,
float *d_save
)
{
	// Launch the vectorSub16 kernel
dim3 blocks(20, 20);
dim3 threads(32, 32);
vectorSub16_kernel << <blocks, threads >> >(d_src, d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h, max_intensity_set, d_save);
//casting_u12_f32_Kernel << <blocks, threads >> >(d_src_f, d_src, w, h);
//vectorSub_kernel << <blocks, threads >> >(d_src_f, d_BG, d_sub, x0, y0, w, h, bg_w, bg_h);
}
#endif |
86c5b7bcd106255681e0b0d1a9ca6e40febb2ae5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_
#define _VECTOR_DOT_PRODUCT_KERNEL_H_
#define BLOCK_SIZE 1024
#define GRID_SIZE 1024
/* Edit this function to complete the functionality of dot product on the GPU using atomics.
You may add other kernel functions as you deem necessary.
*/
__device__ void lock(int *mutex);
__device__ void unlock(int *mutex);
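/* Strategy: each thread accumulates a grid-strided partial sum of a[i] * b[i]
   in registers, the block reduces those partials in shared memory, and thread 0
   of every block adds its block total into result[0] under the spin-lock mutex. */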
__global__ void vector_dot_product_kernel(int num_elements, float* a, float* b, float* result, int *mutex)
{
__shared__ float sum[BLOCK_SIZE];
float thread_sum = 0.0;
int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i = tid;
int slice = blockDim.x * gridDim.x;
while(i < num_elements){
thread_sum += a[i] * b[i];
i += slice;
}
sum[threadIdx.x] = thread_sum;
__syncthreads();
    for(int stride = blockDim.x/2; stride > 0; stride /= 2){
        if(tx < stride)
            sum[tx] += sum[tx+stride];
        __syncthreads();
    }
if(threadIdx.x == 0) {
lock(mutex);
result[0] += sum[0];
unlock(mutex);
}
}
__device__ void lock(int *mutex){
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock(int *mutex){
atomicExch(mutex, 0);
}
#endif // #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_
| 86c5b7bcd106255681e0b0d1a9ca6e40febb2ae5.cu | #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_
#define _VECTOR_DOT_PRODUCT_KERNEL_H_
#define BLOCK_SIZE 1024
#define GRID_SIZE 1024
/* Edit this function to complete the functionality of dot product on the GPU using atomics.
You may add other kernel functions as you deem necessary.
*/
__device__ void lock(int *mutex);
__device__ void unlock(int *mutex);
__global__ void vector_dot_product_kernel(int num_elements, float* a, float* b, float* result, int *mutex)
{
__shared__ float sum[BLOCK_SIZE];
float thread_sum = 0.0;
int tx = threadIdx.x;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int i = tid;
int slice = blockDim.x * gridDim.x;
while(i < num_elements){
thread_sum += a[i] * b[i];
i += slice;
}
sum[threadIdx.x] = thread_sum;
__syncthreads();
    for(int stride = blockDim.x/2; stride > 0; stride /= 2){
        if(tx < stride)
            sum[tx] += sum[tx+stride];
        __syncthreads();
    }
if(threadIdx.x == 0) {
lock(mutex);
result[0] += sum[0];
unlock(mutex);
}
}
__device__ void lock(int *mutex){
while(atomicCAS(mutex, 0, 1) != 0);
}
__device__ void unlock(int *mutex){
atomicExch(mutex, 0);
}
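/*
 * Illustrative host-side sketch of how this kernel could be driven. The names
 * a_h, b_h (already-populated host arrays) and num_elements are placeholders:
 *
 *   float *a_d, *b_d, *result_d;
 *   int *mutex_d;
 *   cudaMalloc(&a_d, num_elements * sizeof(float));
 *   cudaMalloc(&b_d, num_elements * sizeof(float));
 *   cudaMalloc(&result_d, sizeof(float));
 *   cudaMalloc(&mutex_d, sizeof(int));
 *   cudaMemcpy(a_d, a_h, num_elements * sizeof(float), cudaMemcpyHostToDevice);
 *   cudaMemcpy(b_d, b_h, num_elements * sizeof(float), cudaMemcpyHostToDevice);
 *   cudaMemset(result_d, 0, sizeof(float));
 *   cudaMemset(mutex_d, 0, sizeof(int));
 *   vector_dot_product_kernel<<<GRID_SIZE, BLOCK_SIZE>>>(num_elements, a_d, b_d, result_d, mutex_d);
 *   float dot;
 *   cudaMemcpy(&dot, result_d, sizeof(float), cudaMemcpyDeviceToHost);
 */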
#endif // #ifndef _VECTOR_DOT_PRODUCT_KERNEL_H_
|
811ea75ef494fc05467d0564a327f1105a4bf0bc.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 811ea75ef494fc05467d0564a327f1105a4bf0bc.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
5738d7536ff7f36b3d87d3e8e49cddad9f37ffc3.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHBlas.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPBlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Sdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
double result;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Ddot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy)
{
#if TORCH_HIP_VERSION >= 8000
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
at::Half result;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDotEx_v2(handle, n,
x, HIP_R_16F, incx,
y, HIP_R_16F, incy,
&result, HIP_R_16F,
HIP_R_32F));
return result;
}
THError("Cublas_Hdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0.0;
#elif HIP_VERSION >= 210
if (n == 1) {
incx = 1;
incy = 1;
}
at::Half result;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
hipblasSetStream(handle, at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream());
THCublasCheck(rocblas_hdot(handle, n,
reinterpret_cast<rocblas_half*>(x), incx,
reinterpret_cast<rocblas_half*>(y), incy,
reinterpret_cast<rocblas_half*>(&result)));
return result;
#else
THError("Cublas_Hdot requires CUDA 8.0+");
return 0.0;
#endif
}
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
at::cuda::blas::gemv<float>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
}
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
at::cuda::blas::gemv<double>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if TORCH_HIP_VERSION < 9200 && TORCH_HIP_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if TORCH_HIP_VERSION < 8000
# define HIP_R_16F HIPBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if TORCH_HIP_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__
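// Strided-batched GEMM over batchCount matrices: half-precision storage with
// float accumulation (f32 compute type) on both the rocBLAS and cuBLAS paths.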
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, HIP_R_16F, (int)lda, strideA,
b, HIP_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // __HIP_PLATFORM_HCC__
}
#endif // TORCH_HIP_VERSION or __HIP_PLATFORM_HCC__
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
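// Note: on ROCm the pointer-array batched GEMMs below forward to the
// strided-batched path, so they assume the batch entries are laid out
// contiguously with a fixed stride derived from lda/ldb/ldc.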
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if TORCH_HIP_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
| 5738d7536ff7f36b3d87d3e8e49cddad9f37ffc3.cu | #include <THC/THCBlas.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDABlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
float THCudaBlas_Sdot(THCState *state, int64_t n, float *x, int64_t incx, float *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
float result;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Sdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
double THCudaBlas_Ddot(THCState *state, int64_t n, double *x, int64_t incx, double *y, int64_t incy)
{
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
int i_n = (int)n;
int i_incx = (int)incx;
int i_incy = (int)incy;
double result;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDdot(handle, i_n, x, i_incx, y, i_incy, &result));
return result;
}
THError("Cublas_Ddot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0;
}
at::Half THCudaBlas_Hdot(THCState *state, int64_t n, at::Half *x, int64_t incx, at::Half *y, int64_t incy)
{
#if CUDA_VERSION >= 8000
if (n == 1) {
incx = 1;
incy = 1;
}
if ((n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX)) {
at::Half result;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDotEx(handle, n,
x, CUDA_R_16F, incx,
y, CUDA_R_16F, incy,
&result, CUDA_R_16F,
CUDA_R_32F));
return result;
}
THError("Cublas_Hdot only supports n, incx and incy "
"up to signed integer limits: %d", INT_MAX);
return 0.0;
#elif HIP_VERSION >= 210
if (n == 1) {
incx = 1;
incy = 1;
}
at::Half result;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
cublasSetStream(handle, at::cuda::getCurrentCUDAStream().stream());
THCublasCheck(rocblas_hdot(handle, n,
reinterpret_cast<rocblas_half*>(x), incx,
reinterpret_cast<rocblas_half*>(y), incy,
reinterpret_cast<rocblas_half*>(&result)));
return result;
#else
THError("Cublas_Hdot requires CUDA 8.0+");
return 0.0;
#endif
}
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sgemv(THCState *state, char trans, int64_t m, int64_t n, float alpha, float *a, int64_t lda, float *x, int64_t incx, float beta, float *y, int64_t incy)
{
at::cuda::blas::gemv<float>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
}
void THCudaBlas_Dgemv(THCState *state, char trans, int64_t m, int64_t n, double alpha, double *a, int64_t lda, double *x, int64_t incx, double beta, double *y, int64_t incy)
{
at::cuda::blas::gemv<double>(trans, m, n, alpha, a, lda, x, incx, beta, y, incy);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
checkCuda90Bug((int)m, (int)n, (int)k);
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if CUDA_VERSION >= 9010 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA,
b, CUDA_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // __HIP_PLATFORM_HCC__
}
#endif // CUDA_VERSION or __HIP_PLATFORM_HCC__
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
#if CUDA_VERSION >= 8000 || defined __HIP_PLATFORM_HCC__
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
#endif
|
92e0c0915050cbdeae9ef3d62bf4f6e6d5190fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <rocblas.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <ops/specials_cuda.h>
#include <numeric>
#include "../MmulHelper.h"
namespace sd {
//////////////////////////////////////////////////////////////////////////////
// MXK x KxN = MxN -> actual sequence of axes doesn't matter
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemm(const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis,
const int cMaxis, const int cNaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast<T3*>(vC);
__shared__ int K, *coords;
__shared__ bool betaPresent;
__shared__ sd::LongType cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<sd::LongType*>(aShapeInfo))[aKaxis];
betaPresent = beta;
totalThreads = gridDim.x * blockDim.x;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank)
auto bCoords = aCoords + 2;
auto cCoords = bCoords + 2;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// evaluate A coordinates
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (sd::Unsigned j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if (betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
hipStream_t* stream, const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis,
const int cNaxis, const double alpha, const double beta) {
hipLaunchKernelGGL(( usualCudaGemm<T1, T2, T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream,
vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
////////////////////////////////////////////////////////////////////////
// MXN x N = M -> actual sequence of {M,N} axes doesn't matter
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemv(const void* vA, const sd::LongType* aShapeInfo, const void* vX,
const sd::LongType* xShapeInfo, void* vY, const sd::LongType* yShapeInfo,
const int incx, const int incy, const int aMaxis, const double alpha,
const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* X = reinterpret_cast<const T2*>(vX);
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ int M, N;
__shared__ bool betaPresent;
__shared__ sd::LongType cLen, totalThreads, aNstride, aMstride;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
N = shape::length(xShapeInfo);
M = shape::length(yShapeInfo);
aMstride = shape::stride(aShapeInfo)[aMaxis];
aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0];
totalThreads = gridDim.x * blockDim.x;
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < M; i += totalThreads) {
// evaluate offsets
auto aOffset = i * aMstride;
auto xOffset = 0;
T3 val = A[aOffset] * X[xOffset]; // first iteration
for (sd::Unsigned j = 1; j < N; ++j) { // rest iterations
aOffset += aNstride;
xOffset += incx;
val = val + A[aOffset] * X[xOffset];
}
auto yOffset = i * incy;
if (betaPresent)
Y[yOffset] = alphaZ * val + betaZ * Y[yOffset];
else
Y[yOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, hipStream_t* stream, const void* vA,
const sd::LongType* aShapeInfo, const void* vX, const sd::LongType* xShapeInfo, void* vY,
const sd::LongType* yShapeInfo, const int incx, const int incy, const int aMaxis,
const double alpha, const double beta) {
hipLaunchKernelGGL(( usualCudaGemv<T1, T2, T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 512, *stream,
vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta);
}
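////////////////////////////////////////////////////////////////////////
// In usualCudaGemv above each participating thread i computes one output element,
//     Y[i*incy] = alphaZ * sum_{j=0..N-1} A[i*aMstride + j*aNstride] * X[j*incx] + betaZ * Y[i*incy],
// walking the i-th row of A with stride aNstride (the beta term is skipped when
// beta == 0); the kernel body relies only on statically declared __shared__ variables.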
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaDot(const sd::LongType length, const double alpha, const void* vX,
const sd::LongType incx, const void* vY, const sd::LongType incy, const double beta,
void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ unsigned char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if (tid == 0) {
T3 sum = 0;
for (sd::LongType i = 0; i < length; ++i) sum = sum + pairwiseMul[i];
if (beta)
*Z = (T3)alpha * sum + (T3)beta * *Z;
else
*Z = (T3)alpha * sum;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualDot(const dim3& blocksPerGrid, const dim3& threadsPerBlock, hipStream_t* stream,
const sd::LongType length, const double alpha, const void* vX, const sd::LongType incx,
const void* vY, const sd::LongType incy, const double beta, void* vZ) {
hipLaunchKernelGGL(( usualCudaDot<T1, T2, T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), length * sizeof(T3) + 128, *stream,
length, alpha, vX, incx, vY, incy, beta, vZ);
}
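////////////////////////////////////////////////////////////////////////
// usualCudaDot above stores every pairwise product X[i*incx] * Y[i*incy] in the
// block's dynamic shared memory (hence the length * sizeof(T3) + 128 bytes requested
// by the launcher) and lets global thread 0 accumulate them, producing
//     Z = alpha * sum_{i=0..length-1} X[i*incx] * Y[i*incy] + beta * Z
// with the beta term skipped when beta == 0, so all products must reside in a
// single block for the reduction to see them.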
//////////////////////////////////////////////////////////////////////////////
// MXK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta,
const char outOrder) {
if (A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if (B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
const auto M = A->sizeAt(0);
const auto K = A->sizeAt(1);
const auto N = B->sizeAt(1);
if (C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if (B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if (C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if (C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if (C == nullptr)
C = new NDArray(outOrder, {M, N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()),
A->getContext());
if (C->isEmpty()) return C;
const int major = Environment::getInstance().capabilities()[AffinityManager::currentDeviceId()].first();
const auto aType = A->dataType();
const auto bType = B->dataType();
const auto cType = C->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
const bool typeDouble = ABC && aType == DataType::DOUBLE;
const bool typeFloat = ABC && aType == DataType::FLOAT32;
const bool typeHalf = ABC && aType == DataType::HALF && major >= 6;
const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6;
const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<hipblasHandle_t*>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
if (!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) {
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 6 + 128; // 6 = aRank + bRank + cRank
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream,
// A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(),
// C->special(), 0, 1, 0, 1, 0, 1, alpha, beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm,
(blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(),
A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(),
C->specialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
} else {
std::vector<NDArray*> toDelete;
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aKcont = K == 1 || A->strideAt(1) == 1;
bool bKcont = K == 1 || B->strideAt(0) == 1;
bool bNcont = N == 1 || B->strideAt(1) == 1;
bool cMcont = M == 1 || C->strideAt(0) == 1;
bool cNcont = N == 1 || C->strideAt(1) == 1;
if (!aMcont && !aKcont) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
aMcont = true;
}
if (!bKcont && !bNcont) {
pB = new NDArray(B->dup('f'));
toDelete.push_back(pB);
bKcont = true;
}
if (!cMcont) {
pC = new NDArray(C->dup('f'));
toDelete.push_back(pC);
cMcont = true;
}
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1);
const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if (typeDouble) {
status = hipblasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->specialBuffer(), lda,
(double*)pB->specialBuffer(), ldb, &beta, (double*)pC->specialBuffer(), ldc);
} else if (typeFloat) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->specialBuffer(), lda,
(float*)pB->specialBuffer(), ldb, &betaF, (float*)pC->specialBuffer(), ldc);
} else if (typeHalf) {
float16 alphaH(alpha), betaH(beta);
status = hipblasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->specialBuffer(), lda,
(__half*)pB->specialBuffer(), ldb, &betaH.data, (__half*)pC->specialBuffer(), ldc);
} else if (typeIntFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), HIP_R_8I, lda,
pB->specialBuffer(), HIP_R_8I, ldb, &betaF, pC->specialBuffer(), HIP_R_32F, ldc);
} else if (typeHalfFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), HIP_R_16F, lda,
pB->specialBuffer(), HIP_R_16F, ldb, &betaF, pC->specialBuffer(), HIP_R_32F, ldc);
}
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
NDArray::registerSpecialUse({pC}, {pA, pB});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
if (C != pC) C->assign(pC);
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
}
return C;
}
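//////////////////////////////////////////////////////////////////////////////
// A minimal usage sketch of mmulMxM (illustrative only; it assumes
// sd::LaunchContext::defaultContext() is available in this build, everything else
// is taken from the code above):
//
// NDArray a('c', {3, 4}, DataType::FLOAT32, LaunchContext::defaultContext()); // M=3, K=4
// NDArray b('c', {4, 5}, DataType::FLOAT32, LaunchContext::defaultContext()); // K=4, N=5
// NDArray c('f', {3, 5}, DataType::FLOAT32, LaunchContext::defaultContext()); // M=3, N=5
// // c = 1.0*a*b + 0.0*c; since all three arrays are FLOAT32 the call is routed
// // to the hipblasSgemm branch above
// MmulHelper::mmulMxM(&a, &b, &c, 1.0, 0.0, 'f');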
////////////////////////////////////////////////////////////////////////////
// MXN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, sd::NDArray* Y, const double alpha, const double beta,
const char outOrder) {
int xLenDim, yLenDim(0);
if (A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if (!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if (Y != nullptr && !shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if (X->lengthOf() != N) throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if (Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if (Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()),
A->getContext());
if (Y->isEmpty()) return Y;
const int incx = X->strideAt(xLenDim);
const int incy = Y->strideAt(yLenDim);
const auto aType = A->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
const bool typeDouble = AXY && aType == DataType::DOUBLE;
const bool typeFloat = AXY && aType == DataType::FLOAT32;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<hipblasHandle_t*>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
if (!typeDouble && !typeFloat) {
const int threadsPerBlock = SD_MAX_NUM_THREADS;
const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({Y}, {A, X});
// BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream,
// A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(),
// Y->special(), incx, incy, 0, alpha, beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(
xType, usualGemv,
(blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(),
X->specialShapeInfo(), Y->specialBuffer(), Y->specialShapeInfo(), incx, incy, 0, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({Y}, {A, X});
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
} else {
NDArray* pA(const_cast<NDArray*>(A));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aNcont = N == 1 || A->strideAt(1) == 1;
if (!aMcont && !aNcont) {
pA = new NDArray(A->dup('f'));
aMcont = true;
}
const bool transA = !aMcont;
const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if (typeDouble) {
status = hipblasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->specialBuffer(),
lda, (double*)X->specialBuffer(), incx, &beta, (double*)Y->specialBuffer(), incy);
} else if (typeFloat) {
float alphaF(alpha), betaF(beta);
status = hipblasSgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alphaF, (float*)pA->specialBuffer(),
lda, (float*)X->specialBuffer(), incx, &betaF, (float*)Y->specialBuffer(), incy);
}
if (status != HIPBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if (pA != A) delete pA;
}
return Y;
}
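//////////////////////////////////////////////////////////////////////////////
// A minimal usage sketch of mmulMxV (illustrative only; same assumption about
// LaunchContext::defaultContext() as in the mmulMxM sketch above):
//
// NDArray a('c', {3, 4}, DataType::DOUBLE, LaunchContext::defaultContext()); // M=3, N=4
// NDArray x('c', {4}, DataType::DOUBLE, LaunchContext::defaultContext()); // vector of length N
// NDArray y('c', {3}, DataType::DOUBLE, LaunchContext::defaultContext()); // vector of length M
// // y = 1.0*a*x + 0.0*y; both operands are DOUBLE, so the call is routed to the
// // hipblasDgemv branch above
// MmulHelper::mmulMxV(&a, &x, &y, 1.0, 0.0, 'c');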
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, sd::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if (!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if (!shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if (Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if (Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if (Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const sd::LongType incx = X->strideAt(xLenDim);
const sd::LongType incy = Y->strideAt(yLenDim);
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if (!X->isActualOnDeviceSide()) X->syncToDevice();
if (!Y->isActualOnDeviceSide()) Y->syncToDevice();
if (!Z->isActualOnDeviceSide()) Z->syncToDevice();
hipStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512) threadsPerBlock.x = math::sd_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
// BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha,
// X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES,
// SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot,
(blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx,
Y->specialBuffer(), incy, beta, Z->specialBuffer()),
SD_NUMERIC_TYPES)
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
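//////////////////////////////////////////////////////////////////////////////
// A minimal usage sketch of dot (illustrative only; same assumption about
// LaunchContext::defaultContext() as above):
//
// NDArray x('c', {5}, DataType::FLOAT32, LaunchContext::defaultContext());
// NDArray y('c', {5}, DataType::FLOAT32, LaunchContext::defaultContext());
// // z is allocated inside dot() as a scalar of the pairwise result type and holds
// // 1.0 * sum_i x[i]*y[i] + 0.0, i.e. the plain dot product; the caller owns z
// auto z = MmulHelper::dot(&x, &y, nullptr, 1.0, 0.0);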
//////////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
template <typename T1, typename T2, typename T3>
static SD_KERNEL void batchedCudaGemm(const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis,
const int cMaxis, const int cNaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast<T3*>(vC);
__shared__ bool betaPresent;
__shared__ int aRank, bRank, cRank, K, *coords;
__shared__ sd::LongType cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<sd::LongType*>(aShapeInfo))[aKaxis];
totalThreads = gridDim.x * blockDim.x;
aRank = shape::rank(aShapeInfo);
bRank = shape::rank(bShapeInfo);
cRank = shape::rank(cShapeInfo);
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank);
auto bCoords = aCoords + aRank;
auto cCoords = bCoords + bRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// calculate index of current batch
sd::LongType batchInd;
if (cBatchDims != nullptr) batchInd = shape::coords2index(cShapeInfo, cBatchDims, cRank - 2, cCoords);
// evaluate A coordinates
if (aBatchDims != nullptr) shape::index2coords(batchInd, aShapeInfo, aBatchDims, aRank - 2, aCoords);
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
if (bBatchDims != nullptr) shape::index2coords(batchInd, bShapeInfo, bBatchDims, bRank - 2, bCoords);
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (sd::Unsigned j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if (betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
hipStream_t* stream, const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis,
const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis,
const int cNaxis, const double alpha, const double beta) {
hipLaunchKernelGGL(( batchedCudaGemm<T1, T2, T3>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream,
vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis,
bNaxis, cMaxis, cNaxis, alpha, beta);
}
///////////////////////////////////////////////////////////////////
NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if (aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if (bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank) {
for (int i = 0; i < aRank - 2; ++i)
if (A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error(
"MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if (A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error(
"MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if (C != nullptr) {
if (!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
} else
C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()),
A->getContext());
if (C->isEmpty()) return C;
const int cRank = C->rankOf();
const int aMaxis(aRank - 2), aKaxis(aRank - 1), bKaxis(bRank - 2), bNaxis(bRank - 1), cMaxis(cRank - 2),
cNaxis(cRank - 1);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 8;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * (aRank + bRank + cRank) + 128;
PointersManager manager(A->getContext(), "MmulHelper::mmulNxN");
const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr);
if (aRank > 2)
aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int)));
if (bRank > 2)
bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int)));
if (cRank > 2)
cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int)));
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock,
// A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(),
// B->specialShapeInfo(), C->specialBuffer(), C->special(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha,
// beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(
A->dataType(), batchedGemm,
(blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->specialBuffer(),
A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(),
aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
manager.synchronize();
return C;
}
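///////////////////////////////////////////////////////////////////
// A minimal usage sketch of mmulNxN (illustrative only; same assumption about
// LaunchContext::defaultContext() as above). A rank-2 operand broadcasts against
// a batched one, e.g. [bS,M,K] x [K,N] = [bS,M,N]:
//
// NDArray a('c', {2, 3, 4}, DataType::FLOAT32, LaunchContext::defaultContext()); // bS=2, M=3, K=4
// NDArray b('c', {4, 5}, DataType::FLOAT32, LaunchContext::defaultContext()); // K=4, N=5
// // c is allocated inside mmulNxN() with shape {2, 3, 5}; the caller owns c
// auto c = MmulHelper::mmulNxN(&a, &b, nullptr, 1.0, 0.0, 'c');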
/*
//////////////////////////////////////////////////////////////////////////////
// MXN x N = M
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA,
const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ sd::LongType strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
T3 val = 0;
if (row < M)
for (int i = 0; i < N; i++)
val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool
transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx,
const double beta, void* vY, const int incy) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx,
beta, vY, incy);
}
*/
/*
//////////////////////////////////////////////////////////////////////////////
MXK x KxN = MxN
C array must be in f order
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const
double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc)
{
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ sd::LongType strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
T3 val = 0;
if (row < M && col < N)
for (int i = 0; i < K; i++)
val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const bool
transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda,
const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda,
vB, ldb, beta, vC, ldc);
}
*/
//////////////////////////////////////////////////////////////////////////
/*
NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix
multiplication !");
}
else {
C = new NDArray(outOrder, cExpectedShape, B->dataType());
}
// multiplication
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
const sd::LongType numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->shapeInfo(), dimsToExclude);
std::vector<sd::LongType> idxRanges(2 * C->rankOf());
// #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
for(sd::LongType i = 0; i < numOfSubArrs; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, C->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*C)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*A)(idxRanges);
mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
}
else if(bRank > aRank) {
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
}
else {
NDArray aSubArr = (*A)(idxRanges);
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
}
}
return C;
}
*/
//////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
/*
NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix
multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, B->dataType());
const int cRank = C->rankOf();
const auto M = A->sizeAt(-2);
const auto K = A->sizeAt(-1);
const auto N = B->sizeAt(-1);
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
bool aMcont = M == 1 || A->strideAt(-2) == 1;
bool aKcont = K == 1 || A->strideAt(-1) == 1;
bool bKcont = K == 1 || B->strideAt(-2) == 1;
bool bNcont = N == 1 || B->strideAt(-1) == 1;
bool cMcont = M == 1 || C->strideAt(-2) == 1;
bool cNcont = N == 1 || C->strideAt(-1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('c'));
toDelete.push_back(pA);
aKcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('c'));
toDelete.push_back(pB);
bNcont = true;
}
std::vector<int> permut(cRank);
if(!cMcont) {
std::iota(permut.begin(), permut.end(), 0);
permut[cRank - 2] = cRank - 1;
permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M]
auto Cpermut = C->permute(permut);
pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext());
pC->assign(Cpermut);
toDelete.push_back(pC);
cMcont = true;
}
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
bool badTypes = false;
hipDataType cudaType, cudaAType, cudaBType, cudaCType;
if(ABC && aType == DataType::HALF) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_16F;
}
else if(ABC && aType == DataType::FLOAT32) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_32F;
}
else if(ABC && aType == DataType::DOUBLE) {
cudaType = cudaAType = cudaBType = cudaCType = HIP_R_64F;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) {
cudaType = cudaCType = HIP_R_32F;
cudaAType = cudaBType = HIP_R_8I;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) {
cudaType = cudaCType = HIP_R_32F;
cudaAType = cudaBType = HIP_R_16F;
}
else
badTypes = true;
const int bS = pC->lengthOf() / (M*N);
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1});
NDArray::prepareSpecialUse({pC}, {pA, pB});
if(!badTypes) {
std::vector<sd::LongType> subArrOffsets(bS);
std::vector<sd::LongType> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays
have rank = 2
std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS);
if(aRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pA->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(),
subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) aSubArrs[i] = aRank == 2 ?
pA->specialBuffer() : pA->specialBuffer() + subArrOffsets[i] * pA->sizeOfT();
if(bRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pB->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(),
subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) bSubArrs[i] = bRank == 2 ?
pB->specialBuffer() : pB->specialBuffer() + subArrOffsets[i] * pB->sizeOfT();
shape::calcSubArrsShapeInfoAndOffsets(pC->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(),
subArrShapeInfo.data(), subArrOffsets.data()); for (int i = 0; i < bS; ++i) cSubArrs[i] = pC->specialBuffer() +
subArrOffsets[i] * pC->sizeOfT();
PointersManager manager(A->getContext(), "mmulNxN");
const void** aSubArrsCuda = reinterpret_cast<const void **>(manager.replicatePointer(aSubArrs.data(),
aSubArrs.size() * sizeof(void*))); const void** bSubArrsCuda = reinterpret_cast<const void
**>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*))); void** cSubArrsCuda =
reinterpret_cast< void **>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*)));
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1);
const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1);
const hipblasOperation_t transAblas = transA ? HIPBLAS_OP_T : HIPBLAS_OP_N;
const hipblasOperation_t transBblas = transB ? HIPBLAS_OP_T : HIPBLAS_OP_N;
union Coeff {__half _h; float _f; double _d; };
Coeff uAlpha, uBeta;
if(cudaType == HIP_R_16F) {
uAlpha._h = alpha;
uBeta._h = beta;
}
else if(cudaType == HIP_R_32F) {
uAlpha._f = alpha;
uBeta._f = beta;
}
else if(cudaType == HIP_R_64F) {
uAlpha._d = alpha;
uBeta._d = beta;
}
auto handle = reinterpret_cast<hipblasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = hipblasSetStream(*handle, *stream);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
status = hipblasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda,
bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, HIPBLAS_GEMM_DEFAULT);
if (status != HIPBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
auto cudaResult = hipStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult);
}
else {
std::vector<sd::LongType> idxRanges(2 * pC->rankOf());
for(sd::LongType i = 0; i < bS; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, pC->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*pC)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*pA)(idxRanges);
mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering());
}
else if(bRank > aRank) {
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering());
}
else {
NDArray aSubArr = (*pA)(idxRanges);
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering());
}
}
}
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C != pC)
C->assign(pC->permute(permut));
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
*/
// BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t
// *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void*
// vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), SD_NUMERIC_TYPES,
// SD_NUMERIC_TYPES, SD_FLOAT_TYPES); BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const
// dim3 &threadsPerBlock, hipStream_t *stream, const bool transA, const int M, const int N, const double alpha, const
// void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy),
// SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES); BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3
// &blocksPerGrid, const dim3 &threadsPerBlock, hipStream_t *stream, const sd::LongType length, const double alpha,
// const void* vX, const sd::LongType incx, const void* vY, const sd::LongType incy, const double beta, void* vZ),
// SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
} // namespace sd
| 92e0c0915050cbdeae9ef3d62bf4f6e6d5190fa2.cu | /*
* ******************************************************************************
* *
* *
* * This program and the accompanying materials are made available under the
* * terms of the Apache License, Version 2.0 which is available at
* * https://www.apache.org/licenses/LICENSE-2.0.
* *
* * See the NOTICE file distributed with this work for additional
* * information regarding copyright ownership.
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* * License for the specific language governing permissions and limitations
* * under the License.
* *
* * SPDX-License-Identifier: Apache-2.0
* *****************************************************************************
*/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <cublas_v2.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <ops/specials_cuda.h>
#include <numeric>
#include "../MmulHelper.h"
namespace sd {
//////////////////////////////////////////////////////////////////////////////
// MXK x KxN = MxN -> actual sequence of axes doesn't matter
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemm(const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis,
const int cMaxis, const int cNaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast<T3*>(vC);
__shared__ int K, *coords;
__shared__ bool betaPresent;
__shared__ sd::LongType cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<sd::LongType*>(aShapeInfo))[aKaxis];
betaPresent = beta;
totalThreads = gridDim.x * blockDim.x;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * 6; // 6 = (aRank + bRank + cRank)
auto bCoords = aCoords + 2;
auto cCoords = bCoords + 2;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// evaluate A coordinates
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (sd::Unsigned j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if (betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
cudaStream_t* stream, const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis,
const int cNaxis, const double alpha, const double beta) {
usualCudaGemm<T1, T2, T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta);
}
////////////////////////////////////////////////////////////////////////
// MXN x N = M -> actual sequence of {M,N} axes doesn't matter
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemv(const void* vA, const sd::LongType* aShapeInfo, const void* vX,
const sd::LongType* xShapeInfo, void* vY, const sd::LongType* yShapeInfo,
const int incx, const int incy, const int aMaxis, const double alpha,
const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* X = reinterpret_cast<const T2*>(vX);
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ int M, N;
__shared__ bool betaPresent;
__shared__ sd::LongType cLen, totalThreads, aNstride, aMstride;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
N = shape::length(xShapeInfo);
M = shape::length(yShapeInfo);
aMstride = shape::stride(aShapeInfo)[aMaxis];
aNstride = shape::stride(aShapeInfo)[aMaxis == 0 ? 1 : 0];
totalThreads = gridDim.x * blockDim.x;
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < M; i += totalThreads) {
// evaluate offsets
auto aOffset = i * aMstride;
auto xOffset = 0;
T3 val = A[aOffset] * X[xOffset]; // first iteration
for (sd::Unsigned j = 1; j < N; ++j) { // rest iterations
aOffset += aNstride;
xOffset += incx;
val = val + A[aOffset] * X[xOffset];
}
auto yOffset = i * incy;
if (betaPresent)
Y[yOffset] = alphaZ * val + betaZ * Y[yOffset];
else
Y[yOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemv(const int blocksPerGrid, const int threadsPerBlock, cudaStream_t* stream, const void* vA,
const sd::LongType* aShapeInfo, const void* vX, const sd::LongType* xShapeInfo, void* vY,
const sd::LongType* yShapeInfo, const int incx, const int incy, const int aMaxis,
const double alpha, const double beta) {
usualCudaGemv<T1, T2, T3><<<blocksPerGrid, threadsPerBlock, 512, *stream>>>(
vA, aShapeInfo, vX, xShapeInfo, vY, yShapeInfo, incx, incy, aMaxis, alpha, beta);
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaDot(const sd::LongType length, const double alpha, const void* vX,
const sd::LongType incx, const void* vY, const sd::LongType incy, const double beta,
void* vZ) {
T1* X = reinterpret_cast<T1*>(const_cast<void*>(vX));
T2* Y = reinterpret_cast<T2*>(const_cast<void*>(vY));
T3* Z = reinterpret_cast<T3*>(vZ);
extern __shared__ unsigned char shmem[];
auto pairwiseMul = reinterpret_cast<T3*>(shmem);
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < length) pairwiseMul[tid] = X[tid * incx] * Y[tid * incy];
__syncthreads();
if (tid == 0) {
T3 sum = 0;
for (sd::LongType i = 0; i < length; ++i) sum = sum + pairwiseMul[i];
if (beta)
*Z = (T3)alpha * sum + (T3)beta * *Z;
else
*Z = (T3)alpha * sum;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualDot(const dim3& blocksPerGrid, const dim3& threadsPerBlock, cudaStream_t* stream,
const sd::LongType length, const double alpha, const void* vX, const sd::LongType incx,
const void* vY, const sd::LongType incy, const double beta, void* vZ) {
usualCudaDot<T1, T2, T3><<<blocksPerGrid, threadsPerBlock, length * sizeof(T3) + 128, *stream>>>(
length, alpha, vX, incx, vY, incy, beta, vZ);
}
//////////////////////////////////////////////////////////////////////////////
// MXK x KxN = MxN
NDArray* MmulHelper::mmulMxM(const NDArray* A, const NDArray* B, NDArray* C, double alpha, double beta,
const char outOrder) {
if (A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of A array is not equal 2 !");
if (B->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of B array is not equal 2 !");
const auto M = A->sizeAt(0);
const auto K = A->sizeAt(1);
const auto N = B->sizeAt(1);
if (C != nullptr && C->rankOf() != 2)
throw std::runtime_error("MmulHelper::mmulMxM cuda: rank of C array is not equal 2 !");
if (B->sizeAt(0) != K) throw std::runtime_error("MmulHelper::mmulMxM cuda: B array has wrong number of rows !");
if (C != nullptr && C->sizeAt(0) != M)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of rows !");
if (C != nullptr && C->sizeAt(1) != N)
throw std::runtime_error("MmulHelper::mmulMxM cuda: C array has wrong number of columns !");
if (C == nullptr)
C = new NDArray(outOrder, {M, N}, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()),
A->getContext());
if (C->isEmpty()) return C;
const int major = Environment::getInstance().capabilities()[AffinityManager::currentDeviceId()].first();
const auto aType = A->dataType();
const auto bType = B->dataType();
const auto cType = C->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
const bool typeDouble = ABC && aType == DataType::DOUBLE;
const bool typeFloat = ABC && aType == DataType::FLOAT32;
const bool typeHalf = ABC && aType == DataType::HALF && major >= 6;
const bool typeIntFloat = AB && aType == DataType::INT8 && cType == DataType::FLOAT32 && major >= 6;
const bool typeHalfFloat = AB && aType == DataType::HALF && cType == DataType::FLOAT32 && major >= 6;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<cublasHandle_t*>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
if (!typeDouble && !typeFloat && !typeHalf && !typeIntFloat && !typeHalfFloat) {
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * 6 + 128; // 6 = aRank + bRank + cRank
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(aType, bType, cType, usualGemm, (blocksPerGrid, threadsPerBlock, sharedMem, stream,
// A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(),
// C->special(), 0, 1, 0, 1, 0, 1, alpha, beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(aType, usualGemm,
(blocksPerGrid, threadsPerBlock, sharedMem, stream, A->specialBuffer(),
A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(),
C->specialShapeInfo(), 0, 1, 0, 1, 0, 1, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
} else {
std::vector<NDArray*> toDelete;
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aKcont = K == 1 || A->strideAt(1) == 1;
bool bKcont = K == 1 || B->strideAt(0) == 1;
bool bNcont = N == 1 || B->strideAt(1) == 1;
bool cMcont = M == 1 || C->strideAt(0) == 1;
bool cNcont = N == 1 || C->strideAt(1) == 1;
if (!aMcont && !aKcont) {
pA = new NDArray(A->dup('f'));
toDelete.push_back(pA);
aMcont = true;
}
if (!bKcont && !bNcont) {
pB = new NDArray(B->dup('f'));
toDelete.push_back(pB);
bKcont = true;
}
if (!cMcont) {
pC = new NDArray(C->dup('f'));
toDelete.push_back(pC);
cMcont = true;
}
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(0) : pB->strideAt(1);
const int ldc = (cMcont && cNcont) ? M : pC->strideAt(1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
NDArray::prepareSpecialUse({pC}, {pA, pB});
// choose appropriate cuda gemm api depending on data types
if (typeDouble) {
status = cublasDgemm(*handle, transAblas, transBblas, M, N, K, &alpha, (double*)pA->specialBuffer(), lda,
(double*)pB->specialBuffer(), ldb, &beta, (double*)pC->specialBuffer(), ldc);
} else if (typeFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemm(*handle, transAblas, transBblas, M, N, K, &alphaF, (float*)pA->specialBuffer(), lda,
(float*)pB->specialBuffer(), ldb, &betaF, (float*)pC->specialBuffer(), ldc);
} else if (typeHalf) {
float16 alphaH(alpha), betaH(beta);
status = cublasHgemm(*handle, transAblas, transBblas, M, N, K, &alphaH.data, (__half*)pA->specialBuffer(), lda,
(__half*)pB->specialBuffer(), ldb, &betaH.data, (__half*)pC->specialBuffer(), ldc);
} else if (typeIntFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), CUDA_R_8I, lda,
pB->specialBuffer(), CUDA_R_8I, ldb, &betaF, pC->specialBuffer(), CUDA_R_32F, ldc);
} else if (typeHalfFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemmEx(*handle, transAblas, transBblas, M, N, K, &alphaF, pA->specialBuffer(), CUDA_R_16F, lda,
pB->specialBuffer(), CUDA_R_16F, ldb, &betaF, pC->specialBuffer(), CUDA_R_32F, ldc);
}
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", status);
NDArray::registerSpecialUse({pC}, {pA, pB});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxM cuda failed !", cudaResult);
if (C != pC) C->assign(pC);
for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i];
}
return C;
}
////////////////////////////////////////////////////////////////////////////
// MXN x N = M
NDArray* MmulHelper::mmulMxV(const NDArray* A, const NDArray* X, sd::NDArray* Y, const double alpha, const double beta,
const char outOrder) {
int xLenDim, yLenDim(0);
if (A->rankOf() != 2) throw std::runtime_error("MmulHelper::mmulMxV cuda: rank of A array is not equal 2 !");
if (!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: X array must be vector !");
const auto M = A->sizeAt(0);
const auto N = A->sizeAt(1);
if (Y != nullptr && !shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array must be vector !");
if (X->lengthOf() != N) throw std::runtime_error("MmulHelper::mmulMxV cuda: X vector has wrong length !");
if (Y != nullptr && Y->lengthOf() != M)
throw std::runtime_error("MmulHelper::mmulMxV cuda: Y array has wrong length !");
if (Y == nullptr)
Y = new NDArray(outOrder, {M}, DataTypeUtils::pickPairwiseResultType(A->dataType(), X->dataType()),
A->getContext());
if (Y->isEmpty()) return Y;
const int incx = X->strideAt(xLenDim);
const int incy = Y->strideAt(yLenDim);
const auto aType = A->dataType();
const auto xType = X->dataType();
const auto yType = Y->dataType();
const bool AX(aType == xType), AY(aType == yType), AXY(AX && AY);
const bool typeDouble = AXY && aType == DataType::DOUBLE;
const bool typeFloat = AXY && aType == DataType::FLOAT32;
std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex());
auto handle = reinterpret_cast<cublasHandle_t*>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
if (!typeDouble && !typeFloat) {
const int threadsPerBlock = SD_MAX_NUM_THREADS;
const int blocksPerGrid = (M + threadsPerBlock - 1) / threadsPerBlock;
NDArray::prepareSpecialUse({Y}, {A, X});
// BUILD_TRIPLE_SELECTOR(aType, xType, yType, usualGemv, (blocksPerGrid, threadsPerBlock, stream,
// A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(), X->specialShapeInfo(), Y->specialBuffer(),
// Y->special(), incx, incy, 0, alpha, beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(
xType, usualGemv,
(blocksPerGrid, threadsPerBlock, stream, A->specialBuffer(), A->specialShapeInfo(), X->specialBuffer(),
X->specialShapeInfo(), Y->specialBuffer(), Y->specialShapeInfo(), incx, incy, 0, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({Y}, {A, X});
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
} else {
NDArray* pA(const_cast<NDArray*>(A));
bool aMcont = M == 1 || A->strideAt(0) == 1;
bool aNcont = N == 1 || A->strideAt(1) == 1;
if (!aMcont && !aNcont) {
pA = new NDArray(A->dup('f'));
aMcont = true;
}
const bool transA = !aMcont;
const int lda = (aMcont && aNcont) ? M : transA ? pA->strideAt(0) : pA->strideAt(1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
NDArray::prepareSpecialUse({Y}, {pA, X});
// choose appropriate cuda gemm api depending on data types
if (typeDouble) {
status = cublasDgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alpha, (double*)pA->specialBuffer(),
lda, (double*)X->specialBuffer(), incx, &beta, (double*)Y->specialBuffer(), incy);
} else if (typeFloat) {
float alphaF(alpha), betaF(beta);
status = cublasSgemv(*handle, transAblas, transA ? N : M, transA ? M : N, &alphaF, (float*)pA->specialBuffer(),
lda, (float*)X->specialBuffer(), incx, &betaF, (float*)Y->specialBuffer(), incy);
}
if (status != CUBLAS_STATUS_SUCCESS) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::mmulMxV cuda failed !", cudaResult);
NDArray::registerSpecialUse({Y}, {pA, X});
if (pA != A) delete pA;
}
return Y;
}
////////////////////////////////////////////////////////////////////////////
// (X * Y) = Z[0]
NDArray* MmulHelper::dot(const NDArray* X, const NDArray* Y, sd::NDArray* Z, const double alpha, const double beta) {
int xLenDim(0), yLenDim(0);
if (!shape::isCommonVector(X->shapeInfo(), xLenDim))
throw std::runtime_error("MmulHelper::dot cuda: X array must be vector !");
if (!shape::isCommonVector(Y->shapeInfo(), yLenDim))
throw std::runtime_error("MmulHelper::dot cuda: Y array must be vector !");
if (Z != nullptr && !Z->isScalar()) throw std::runtime_error("MmulHelper::dot cuda: Z array must be scalar !");
const auto length = X->lengthOf();
if (Y->lengthOf() != length)
throw std::runtime_error("MmulHelper::dot cuda: lengths of input vectors are different !");
if (Z == nullptr)
Z = new NDArray(DataTypeUtils::pickPairwiseResultType(X->dataType(), Y->dataType()), X->getContext());
const sd::LongType incx = X->strideAt(xLenDim);
const sd::LongType incy = Y->strideAt(yLenDim);
const auto xType = X->dataType();
const auto yType = Y->dataType();
const auto zType = Z->dataType();
if (!X->isActualOnDeviceSide()) X->syncToDevice();
if (!Y->isActualOnDeviceSide()) Y->syncToDevice();
if (!Z->isActualOnDeviceSide()) Z->syncToDevice();
cudaStream_t* stream = X->getContext()->getCudaStream();
dim3 threadsPerBlock(512);
dim3 blocksPerGrid(1);
if (length > 512) threadsPerBlock.x = math::sd_ceil<double, int>(static_cast<double>(length) / 512);
NDArray::prepareSpecialUse({Z}, {X, Y});
// BUILD_TRIPLE_SELECTOR(xType, yType, zType, usualDot, (blocksPerGrid, threadsPerBlock, stream, length, alpha,
// X->specialBuffer(), incx, Y->specialBuffer(), incy, beta, Z->specialBuffer()), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES,
// SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(xType, usualDot,
(blocksPerGrid, threadsPerBlock, stream, length, alpha, X->specialBuffer(), incx,
Y->specialBuffer(), incy, beta, Z->specialBuffer()),
SD_NUMERIC_TYPES)
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0) throw cuda_exception::build("MmulHelper::dot cuda failed !", cudaResult);
NDArray::registerSpecialUse({Z}, {X, Y});
return Z;
}
//////////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
template <typename T1, typename T2, typename T3>
static SD_KERNEL void batchedCudaGemm(const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims,
const int aMaxis, const int aKaxis, const int bKaxis, const int bNaxis,
const int cMaxis, const int cNaxis, const double alpha, const double beta) {
const T1* A = reinterpret_cast<const T1*>(vA);
const T2* B = reinterpret_cast<const T2*>(vB);
T3* C = reinterpret_cast<T3*>(vC);
__shared__ bool betaPresent;
__shared__ int aRank, bRank, cRank, K, *coords;
__shared__ sd::LongType cLen, totalThreads;
__shared__ T3 alphaZ, betaZ;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int*>(shmem);
cLen = shape::length(cShapeInfo);
K = shape::shapeOf(const_cast<sd::LongType*>(aShapeInfo))[aKaxis];
totalThreads = gridDim.x * blockDim.x;
aRank = shape::rank(aShapeInfo);
bRank = shape::rank(bShapeInfo);
cRank = shape::rank(cShapeInfo);
betaPresent = beta;
alphaZ = alpha;
betaZ = beta;
}
__syncthreads();
auto aCoords = coords + threadIdx.x * (aRank + bRank + cRank);
auto bCoords = aCoords + aRank;
auto cCoords = bCoords + bRank;
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (sd::LongType i = tid; i < cLen; i += totalThreads) {
// evaluate C coordinates
shape::index2coords(i, cShapeInfo, cCoords);
// calculate index of current batch
sd::LongType batchInd;
if (cBatchDims != nullptr) batchInd = shape::coords2index(cShapeInfo, cBatchDims, cRank - 2, cCoords);
// evaluate A coordinates
if (aBatchDims != nullptr) shape::index2coords(batchInd, aShapeInfo, aBatchDims, aRank - 2, aCoords);
aCoords[aMaxis] = cCoords[cMaxis];
aCoords[aKaxis] = 0;
// evaluate B coordinates
if (bBatchDims != nullptr) shape::index2coords(batchInd, bShapeInfo, bBatchDims, bRank - 2, bCoords);
bCoords[bKaxis] = 0;
bCoords[bNaxis] = cCoords[cNaxis];
auto aOffset = shape::getOffset(aShapeInfo, aCoords);
auto bOffset = shape::getOffset(bShapeInfo, bCoords);
T3 val = A[aOffset] * B[bOffset]; // first iteration
for (sd::Unsigned j = 1; j < K; ++j) { // rest iterations
aOffset += shape::stride(aShapeInfo)[aKaxis];
bOffset += shape::stride(bShapeInfo)[bKaxis];
val = val + A[aOffset] * B[bOffset];
}
auto cOffset = shape::getOffset(cShapeInfo, cCoords);
if (betaPresent)
C[cOffset] = alphaZ * val + betaZ * C[cOffset];
else
C[cOffset] = alphaZ * val;
}
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void batchedGemm(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
cudaStream_t* stream, const void* vA, const sd::LongType* aShapeInfo, const void* vB,
const sd::LongType* bShapeInfo, void* vC, const sd::LongType* cShapeInfo,
const int* aBatchDims, const int* bBatchDims, const int* cBatchDims, const int aMaxis,
const int aKaxis, const int bKaxis, const int bNaxis, const int cMaxis,
const int cNaxis, const double alpha, const double beta) {
batchedCudaGemm<T1, T2, T3><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(
vA, aShapeInfo, vB, bShapeInfo, vC, cShapeInfo, aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis,
bNaxis, cMaxis, cNaxis, alpha, beta);
}
///////////////////////////////////////////////////////////////////
NDArray* MmulHelper::mmulNxN(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if (aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if (bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank) {
for (int i = 0; i < aRank - 2; ++i)
if (A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error(
"MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
}
if (A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error(
"MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if (C != nullptr) {
if (!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix multiplication !");
} else
C = new NDArray(outOrder, cExpectedShape, DataTypeUtils::pickPairwiseResultType(A->dataType(), B->dataType()),
A->getContext());
if (C->isEmpty()) return C;
const int cRank = C->rankOf();
const int aMaxis(aRank - 2), aKaxis(aRank - 1), bKaxis(bRank - 2), bNaxis(bRank - 1), cMaxis(cRank - 2),
cNaxis(cRank - 1);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 8;
const int blocksPerGrid = (C->lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * (aRank + bRank + cRank) + 128;
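  // dynamic shared memory holds the per-thread coordinate scratch: (aRank + bRank + cRank) ints per thread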
PointersManager manager(A->getContext(), "MmulHelper::mmulNxN");
const int *aBatchDims(nullptr), *bBatchDims(nullptr), *cBatchDims(nullptr);
if (aRank > 2)
aBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(aRank, {aMaxis, aKaxis}).data(), (aRank - 2) * sizeof(int)));
if (bRank > 2)
bBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(bRank, {bKaxis, bNaxis}).data(), (bRank - 2) * sizeof(int)));
if (cRank > 2)
cBatchDims = reinterpret_cast<int*>(manager.replicatePointer(
ShapeUtils::evalDimsToExclude(cRank, {cMaxis, cNaxis}).data(), (cRank - 2) * sizeof(int)));
NDArray::prepareSpecialUse({C}, {A, B});
// BUILD_TRIPLE_SELECTOR(A->dataType(), b->dataType(), C->dataType(), batchedGemm, (blocksPerGrid, threadsPerBlock,
// A->getContext()->getCudaStream(), A->specialBuffer(), A->specialShapeInfo(), B->specialBuffer(),
// B->specialShapeInfo(), C->specialBuffer(), C->special(), aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha,
// beta), SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
BUILD_SINGLE_SELECTOR_THRICE(
A->dataType(), batchedGemm,
(blocksPerGrid, threadsPerBlock, sharedMem, A->getContext()->getCudaStream(), A->specialBuffer(),
A->specialShapeInfo(), B->specialBuffer(), B->specialShapeInfo(), C->specialBuffer(), C->specialShapeInfo(),
aBatchDims, bBatchDims, cBatchDims, aMaxis, aKaxis, bKaxis, bNaxis, cMaxis, cNaxis, alpha, beta),
SD_NUMERIC_TYPES)
NDArray::registerSpecialUse({C}, {A, B});
manager.synchronize();
return C;
}
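//////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only; assumes the usual NDArrayFactory::create helper and a default
// CUDA LaunchContext). Shapes follow the [bS,M,K] x [K,N] = [bS,M,N] convention used above.
//   auto A = NDArrayFactory::create<float>('c', {4, 3, 5});            // bS=4, M=3, K=5
//   auto B = NDArrayFactory::create<float>('c', {5, 2});               // K=5, N=2
//   NDArray* C = MmulHelper::mmulNxN(&A, &B, nullptr, 1.0, 0.0, 'c');  // C has shape [4,3,2]
//   delete C;                                                          // caller owns C when none was supplied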
/*
//////////////////////////////////////////////////////////////////////////////
// MXN x N = M
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemv(const bool transA, const int M, const int N, const double alpha, const void* vA,
const int lda, const void* vX, const int incx, const double beta, void* vY, const int incy) {
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* X = reinterpret_cast<T2*>(const_cast<void*>(vX));
T3* Y = reinterpret_cast<T3*>(vY);
__shared__ T3 alphaZ, betaZ;
__shared__ sd::LongType strideArow, strideAcol;
const int row = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
}
__syncthreads();
T3 val = 0;
if (row < M)
for (int i = 0; i < N; i++)
val = val + A[row * strideArow + i * strideAcol] * X[i * incx];
Y[row * incy] = alphaZ * val + betaZ * Y[row * incy];
}
////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemv(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool
transA, const int M, const int N, const double alpha, const void* vA, const int lda, const void* vX, const int incx,
const double beta, void* vY, const int incy) {
usualCudaGemv<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, M, N, alpha, vA, lda, vX, incx,
beta, vY, incy);
}
*/
/*
//////////////////////////////////////////////////////////////////////////////
MXK x KxN = MxN
C array must be in f order
template <typename T1, typename T2, typename T3>
static SD_KERNEL void usualCudaGemm(const bool transA, const bool transB, const int M, const int N, const int K, const
double alpha, const void* vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc)
{
T1* A = reinterpret_cast<T1*>(const_cast<void*>(vA));
T2* B = reinterpret_cast<T2*>(const_cast<void*>(vB));
T3* C = reinterpret_cast<T3*>(vC);
__shared__ T3 alphaZ, betaZ;
__shared__ sd::LongType strideArow, strideAcol, strideBrow, strideBcol;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row == 0 && col == 0) {
alphaZ = alpha;
betaZ = beta;
if(transA) { strideArow = lda; strideAcol = 1; } else { strideArow = 1; strideAcol = lda; }
if(transB) { strideBrow = ldb; strideBcol = 1; } else { strideBrow = 1; strideBcol = ldb; }
}
__syncthreads();
T3 val = 0;
if (row < M && col < N)
for (int i = 0; i < K; i++)
val = val + A[row * strideArow + i * strideAcol] * B[i * strideBrow + col * strideBcol];
C[row + col * ldc] = alphaZ * val + betaZ * C[row + col * ldc];
}
//////////////////////////////////////////////////////////////////////////////
template <typename T1, typename T2, typename T3>
SD_HOST static void usualGemm(const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const bool
transA, const bool transB, const int M, const int N, const int K, const double alpha, const void* vA, const int lda,
const void* vB, const int ldb, const double beta, void* vC, const int ldc) {
usualCudaGemm<T1,T2,T3><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(transA, transB, M, N, K, alpha, vA, lda,
vB, ldb, beta, vC, ldc);
}
*/
//////////////////////////////////////////////////////////////////////////
/*
NDArray* MmulHelper::mmulNxNold1(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix
multiplication !");
}
else {
C = new NDArray(outOrder, cExpectedShape, B->dataType());
}
// multiplication
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(C->rankOf(), {-2, -1});
const sd::LongType numOfSubArrs = ShapeUtils::getNumOfSubArrs(C->shapeInfo(), dimsToExclude);
std::vector<sd::LongType> idxRanges(2 * C->rankOf());
// #pragma omp parallel for schedule(guided) firstprivate(idxRanges)
for(sd::LongType i = 0; i < numOfSubArrs; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, C->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*C)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*A)(idxRanges);
mmulMxM(&aSubArr, B, &cSubArr, 1., 0., outOrder);
}
else if(bRank > aRank) {
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(A, &bSubArr, &cSubArr, 1., 0, outOrder);
}
else {
NDArray aSubArr = (*A)(idxRanges);
NDArray bSubArr = (*B)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., outOrder);
}
}
return C;
}
*/
//////////////////////////////////////////////////////////////////////////
// [bS,M,K] x [bS,K,N] = [bS,M,N]
// [bS,M,K] x [K,N] = [bS,M,N]
// [M,K] x [bS,K,N] = [bS,M,N]
// bS could stand for several axes
/*
NDArray* MmulHelper::mmulNxNold2(const NDArray* A, const NDArray* B, NDArray* C, const double alpha, const double beta,
const char outOrder) {
const int aRank = A->rankOf();
const int bRank = B->rankOf();
// input ranks validation
if(aRank > bRank && bRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of B array should be equal 2 !");
else if(bRank > aRank && aRank != 2)
throw std::runtime_error("MmulHelper::mmulNxN: rank of A array should be equal 2 !");
else if (aRank == bRank ) {
for(int i = 0; i < aRank - 2; ++i)
if(A->sizeAt(i) != B->sizeAt(i))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
}
if(A->sizeAt(-1) != B->sizeAt(-2))
throw std::runtime_error("MmulHelper::mmulNxN: shapes of A and B arrays are not suitable for matrix
multiplication !");
// validation of C array
std::vector<sd::LongType> cExpectedShape = aRank > bRank ? A->getShapeAsVector() : B->getShapeAsVector();
cExpectedShape[cExpectedShape.size() - 2] = A->sizeAt(-2);
cExpectedShape[cExpectedShape.size() - 1] = B->sizeAt(-1);
if(C != nullptr ) {
if(!C->isSameShape(cExpectedShape))
throw std::runtime_error("MmulHelper::mmulNxN: shape of C array is not suitable for AxB matrix
multiplication !");
}
else
C = new NDArray(outOrder, cExpectedShape, B->dataType());
const int cRank = C->rankOf();
const auto M = A->sizeAt(-2);
const auto K = A->sizeAt(-1);
const auto N = B->sizeAt(-1);
NDArray *pA(const_cast<NDArray*>(A)), *pB(const_cast<NDArray*>(B)), *pC(const_cast<NDArray*>(C));
std::vector<NDArray*> toDelete;
bool aMcont = M == 1 || A->strideAt(-2) == 1;
bool aKcont = K == 1 || A->strideAt(-1) == 1;
bool bKcont = K == 1 || B->strideAt(-2) == 1;
bool bNcont = N == 1 || B->strideAt(-1) == 1;
bool cMcont = M == 1 || C->strideAt(-2) == 1;
bool cNcont = N == 1 || C->strideAt(-1) == 1;
if(!aMcont && !aKcont) {
pA = new NDArray(A->dup('c'));
toDelete.push_back(pA);
aKcont = true;
}
if(!bKcont && !bNcont) {
pB = new NDArray(B->dup('c'));
toDelete.push_back(pB);
bNcont = true;
}
std::vector<int> permut(cRank);
if(!cMcont) {
std::iota(permut.begin(), permut.end(), 0);
permut[cRank - 2] = cRank - 1;
permut[cRank - 1] = cRank - 2; // swap two last dimensions [..., M,N] -> [..., N,M]
auto Cpermut = C->permute(permut);
pC = new NDArray('c', Cpermut.getShapeAsVector(), Cpermut.dataType(), A->getContext());
pC->assign(Cpermut);
toDelete.push_back(pC);
cMcont = true;
}
const auto aType = pA->dataType();
const auto bType = pB->dataType();
const auto cType = pC->dataType();
const bool AB(aType == bType), AC(aType == cType), ABC(AB && AC);
bool badTypes = false;
cudaDataType_t cudaType, cudaAType, cudaBType, cudaCType;
if(ABC && aType == DataType::HALF) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_16F;
}
else if(ABC && aType == DataType::FLOAT32) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_32F;
}
else if(ABC && aType == DataType::DOUBLE) {
cudaType = cudaAType = cudaBType = cudaCType = CUDA_R_64F;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::INT8) {
cudaType = cudaCType = CUDA_R_32F;
cudaAType = cudaBType = CUDA_R_8I;
}
else if(AB && cType == DataType::FLOAT32 && aType == DataType::HALF) {
cudaType = cudaCType = CUDA_R_32F;
cudaAType = cudaBType = CUDA_R_16F;
}
else
badTypes = true;
const int bS = pC->lengthOf() / (M*N);
const std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(cRank, {-2, -1});
NDArray::prepareSpecialUse({pC}, {pA, pB});
if(!badTypes) {
std::vector<sd::LongType> subArrOffsets(bS);
std::vector<sd::LongType> subArrShapeInfo(shape::shapeInfoLength(2)); // all sub-arrays have rank = 2
std::vector<void*> aSubArrs(bS), bSubArrs(bS), cSubArrs(bS);
if(aRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pA->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
aSubArrs[i] = aRank == 2 ? pA->specialBuffer() : pA->specialBuffer() + subArrOffsets[i] * pA->sizeOfT();
if(bRank > 2)
shape::calcSubArrsShapeInfoAndOffsets(pB->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
bSubArrs[i] = bRank == 2 ? pB->specialBuffer() : pB->specialBuffer() + subArrOffsets[i] * pB->sizeOfT();
shape::calcSubArrsShapeInfoAndOffsets(pC->shapeInfo(), bS, dimsToExclude.size(), dimsToExclude.data(), subArrShapeInfo.data(), subArrOffsets.data());
for (int i = 0; i < bS; ++i)
cSubArrs[i] = pC->specialBuffer() + subArrOffsets[i] * pC->sizeOfT();
PointersManager manager(A->getContext(), "mmulNxN");
const void** aSubArrsCuda = reinterpret_cast<const void**>(manager.replicatePointer(aSubArrs.data(), aSubArrs.size() * sizeof(void*)));
const void** bSubArrsCuda = reinterpret_cast<const void**>(manager.replicatePointer(bSubArrs.data(), bSubArrs.size() * sizeof(void*)));
void** cSubArrsCuda = reinterpret_cast<void**>(manager.replicatePointer(cSubArrs.data(), cSubArrs.size() * sizeof(void*)));
const bool transA = !aMcont;
const bool transB = !bKcont;
const int lda = (aMcont && aKcont) ? M : transA ? pA->strideAt(-2) : pA->strideAt(-1);
const int ldb = (bKcont && bNcont) ? K : transB ? pB->strideAt(-2) : pB->strideAt(-1);
const int ldc = (cMcont && cNcont) ? M : C != pC ? pC->strideAt(-2) : pC->strideAt(-1);
const cublasOperation_t transAblas = transA ? CUBLAS_OP_T : CUBLAS_OP_N;
const cublasOperation_t transBblas = transB ? CUBLAS_OP_T : CUBLAS_OP_N;
union Coeff {__half _h; float _f; double _d; };
Coeff uAlpha, uBeta;
if(cudaType == CUDA_R_16F) {
uAlpha._h = alpha;
uBeta._h = beta;
}
else if(cudaType == CUDA_R_32F) {
uAlpha._f = alpha;
uBeta._f = beta;
}
else if(cudaType == CUDA_R_64F) {
uAlpha._d = alpha;
uBeta._d = beta;
}
auto handle = reinterpret_cast<cublasHandle_t *>(A->getContext()->getCublasHandle());
auto stream = A->getContext()->getCudaStream();
auto status = cublasSetStream_v2(*handle, *stream);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
status = cublasGemmBatchedEx(*handle, transAblas, transBblas, M, N, K, &uAlpha, aSubArrsCuda, cudaAType, lda,
bSubArrsCuda, cudaBType, ldb, &uBeta, cSubArrsCuda, cudaCType, ldc, bS, cudaType, CUBLAS_GEMM_DEFAULT);
if (status != CUBLAS_STATUS_SUCCESS)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", status);
auto cudaResult = cudaStreamSynchronize(*stream);
if (cudaResult != 0)
throw cuda_exception::build("MmulHelper::mmulNxN cuda failed !", cudaResult);
}
else {
std::vector<sd::LongType> idxRanges(2 * pC->rankOf());
for(sd::LongType i = 0; i < bS; ++i) {
ShapeUtils::evalIdxRangesForSubArr(i, pC->shapeInfo(), dimsToExclude, idxRanges.data());
NDArray cSubArr = (*pC)(idxRanges);
if(aRank > bRank) {
NDArray aSubArr = (*pA)(idxRanges);
mmulMxM(&aSubArr, pB, &cSubArr, 1., 0., pC->ordering());
}
else if(bRank > aRank) {
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(pA, &bSubArr, &cSubArr, 1., 0, pC->ordering());
}
else {
NDArray aSubArr = (*pA)(idxRanges);
NDArray bSubArr = (*pB)(idxRanges);
mmulMxM(&aSubArr, &bSubArr, &cSubArr, 1., 0., pC->ordering());
}
}
}
NDArray::registerSpecialUse({pC}, {pA, pB});
if(C != pC)
C->assign(pC->permute(permut));
for(int i = toDelete.size() - 1; i >= 0; --i)
delete toDelete[i];
return C;
}
*/
// BUILD_TRIPLE_TEMPLATE(template void usualGemm, (const dim3 &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t
// *stream, const bool transA, const bool transB, const int M, const int N, const int K, const double alpha, const void*
// vA, const int lda, const void* vB, const int ldb, const double beta, void* vC, const int ldc), SD_NUMERIC_TYPES,
// SD_NUMERIC_TYPES, SD_FLOAT_TYPES); BUILD_TRIPLE_TEMPLATE(template void usualGemv, (const dim3 &blocksPerGrid, const
// dim3 &threadsPerBlock, cudaStream_t *stream, const bool transA, const int M, const int N, const double alpha, const
// void* vA, const int lda, const void* vB, const int incx, const double beta, void* vC, const int incy),
// SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES); BUILD_TRIPLE_TEMPLATE(template void usualDot, (const dim3
// &blocksPerGrid, const dim3 &threadsPerBlock, cudaStream_t *stream, const sd::LongType length, const double alpha,
// const void* vX, const sd::LongType incx, const void* vY, const sd::LongType incy, const double beta, void* vZ),
// SD_NUMERIC_TYPES, SD_NUMERIC_TYPES, SD_FLOAT_TYPES);
} // namespace sd
|
3bb8643e0b45ac362e85f1248d1de02b4e442ad2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Author: Dai-Ni Hsieh ([email protected])
// Date : 11/17/2020
#include <cmath>
#include "besselk.h"
#include "polybesselk.h"
#include "matvec.h"
#include "constants.h"
void setBesselkCoefficients()
{
hipMemcpyToSymbol(c_P01Vec, P01Vec, sizeof(double) * (P01Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q01Vec, Q01Vec, sizeof(double) * (Q01Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_P02Vec, P02Vec, sizeof(double) * (P02Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q02Vec, Q02Vec, sizeof(double) * (Q02Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_P03Vec, P03Vec, sizeof(double) * (P03Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q03Vec, Q03Vec, sizeof(double) * (Q03Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_P11Vec, P11Vec, sizeof(double) * (P11Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q11Vec, Q11Vec, sizeof(double) * (Q11Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_P12Vec, P12Vec, sizeof(double) * (P12Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q12Vec, Q12Vec, sizeof(double) * (Q12Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_P13Vec, P13Vec, sizeof(double) * (P13Deg + 1), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_Q13Vec, Q13Vec, sizeof(double) * (Q13Deg + 1), 0, hipMemcpyHostToDevice);
return;
}
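// Gradient kernels: each dq* kernel assigns one thread per output landmark and accumulates
//   dqK = sum over the other point set of lr * dK/dq,
// where lr = l_i . r_j (plus l_j . r_i in the single-point-set variants), and K is either the
// Gaussian kernel exp(-|q_i - q_j|^2 / width^2) or a Matern kernel of order 1-4 whose
// derivative is evaluated through the p0Fcn/p1Fcn Bessel-K helpers.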
__global__ void dqGaussian(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijSqu = eucnormSqu(qijVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiGaussian(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijSqu = eucnormSqu(qijVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjGaussian(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijSqu = eucnormSqu(qjiVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern1(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern1(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern1(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern2(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern2(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern2(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern3(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern3(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern3(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern4(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern4(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern4(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
void dqKernel(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
int knlOrder, double knlWidth, int lmkNum)
{
// order 0 to 4: Matern kernel of order 0 to 4
// order -1: Gaussian kernel
setBesselkCoefficients();
double knlWidthSqu = knlWidth * knlWidth;
int blkNum = (lmkNum - 1) / BLKDIM + 1;
switch ( knlOrder )
{
case -1:
hipLaunchKernelGGL(( dqGaussian) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
// Matern0 is not differentiable
case 1:
hipLaunchKernelGGL(( dqMatern1) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 2:
hipLaunchKernelGGL(( dqMatern2) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 3:
hipLaunchKernelGGL(( dqMatern3) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 4:
hipLaunchKernelGGL(( dqMatern4) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
}
return;
}
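// Launch sketch (illustrative only; buffer names are hypothetical): all four matrices hold
// lmkNum two-dimensional vectors, already resident on the device, in the layout expected by
// getVector/setVector from matvec.h.
//   double *d_dqK, *d_lmk, *d_lft, *d_rgt;
//   hipMalloc(&d_dqK, sizeof(double) * lmkNum * 2);   // ...and likewise for the other three buffers
//   dqKernel(d_dqK, d_lmk, d_lft, d_rgt, /*knlOrder=*/2, /*knlWidth=*/0.2, lmkNum);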
void dqKernel(double *d_dqiKMat, double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int lmkiNum, int lmkjNum)
{
// order 0 to 4: Matern kernel of order 0 to 4
// order -1: Gaussian kernel
setBesselkCoefficients();
double knlWidthSqu = knlWidth * knlWidth;
int blkiNum = (lmkiNum - 1) / BLKDIM + 1;
int blkjNum = (lmkjNum - 1) / BLKDIM + 1;
switch ( knlOrder )
{
case -1:
hipLaunchKernelGGL(( dqiGaussian) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
hipLaunchKernelGGL(( dqjGaussian) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
// Matern0 is not differentiable
case 1:
hipLaunchKernelGGL(( dqiMatern1) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
hipLaunchKernelGGL(( dqjMatern1) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 2:
hipLaunchKernelGGL(( dqiMatern2) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
hipLaunchKernelGGL(( dqjMatern2) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 3:
hipLaunchKernelGGL(( dqiMatern3) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
hipLaunchKernelGGL(( dqjMatern3) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 4:
hipLaunchKernelGGL(( dqiMatern4) , dim3(blkiNum), dim3(BLKDIM), 0, 0, d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
hipLaunchKernelGGL(( dqjMatern4) , dim3(blkjNum), dim3(BLKDIM), 0, 0, d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
}
return;
}
| 3bb8643e0b45ac362e85f1248d1de02b4e442ad2.cu | // Author: Dai-Ni Hsieh ([email protected])
// Date : 11/17/2020
#include <cmath>
#include "besselk.h"
#include "polybesselk.h"
#include "matvec.h"
#include "constants.h"
void setBesselkCoefficients()
{
cudaMemcpyToSymbol(c_P01Vec, P01Vec, sizeof(double) * (P01Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q01Vec, Q01Vec, sizeof(double) * (Q01Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_P02Vec, P02Vec, sizeof(double) * (P02Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q02Vec, Q02Vec, sizeof(double) * (Q02Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_P03Vec, P03Vec, sizeof(double) * (P03Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q03Vec, Q03Vec, sizeof(double) * (Q03Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_P11Vec, P11Vec, sizeof(double) * (P11Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q11Vec, Q11Vec, sizeof(double) * (Q11Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_P12Vec, P12Vec, sizeof(double) * (P12Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q12Vec, Q12Vec, sizeof(double) * (Q12Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_P13Vec, P13Vec, sizeof(double) * (P13Deg + 1), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_Q13Vec, Q13Vec, sizeof(double) * (Q13Deg + 1), 0, cudaMemcpyHostToDevice);
return;
}
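// setBesselkCoefficients() above stages the P/Q coefficient tables for the Bessel-K helpers in
// device symbol memory; the dq* kernels below then compute, one thread per output landmark,
// the gradient of the kernel pairing sum for the Gaussian kernel exp(-|q_i - q_j|^2 / width^2)
// or a Matern kernel of order 1-4 (via the p0Fcn/p1Fcn helpers).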
__global__ void dqGaussian(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijSqu = eucnormSqu(qijVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiGaussian(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijSqu = eucnormSqu(qijVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjGaussian(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijSqu = eucnormSqu(qjiVec) / knlWidthSqu;
double dqKVal = -2.0 / knlWidthSqu * exp(-dijSqu);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern1(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern1(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern1(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double p1Val;
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (2.0 * knlWidthSqu) * p1Val;
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern2(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern2(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern2(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (8.0 * knlWidthSqu) * (p0Val + 2.0 * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern3(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern3(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern3(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (48.0 * knlWidthSqu) * (4.0 * p0Val + (8.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
__global__ void dqMatern4(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec, riVec;
getVector(qiVec, d_lmkMat, lmkiIdx, lmkNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkNum);
getVector(riVec, d_rgtMat, lmkiIdx, lmkNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkNum; ++lmkjIdx )
{
vector qjVec, ljVec, rjVec;
getVector(qjVec, d_lmkMat, lmkjIdx, lmkNum);
getVector(ljVec, d_lftMat, lmkjIdx, lmkNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec) + dotProduct(ljVec, riVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqKMat, dqKVec, lmkiIdx, lmkNum);
}
return;
}
__global__ void dqiMatern4(double *d_dqiKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkiIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkiIdx < lmkiNum )
{
vector dqKVec = {0.0, 0.0};
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
for ( int lmkjIdx = 0; lmkjIdx < lmkjNum; ++lmkjIdx )
{
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
vector qijVec;
vectorSubtract(qijVec, qiVec, qjVec);
double dijVal = eucnorm(qijVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qijVec.x;
dqKVec.y += lrVal * dqKVal * qijVec.y;
}
setVector(d_dqiKMat, dqKVec, lmkiIdx, lmkiNum);
}
return;
}
__global__ void dqjMatern4(double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat,
double knlWidth, double knlWidthSqu, int lmkiNum, int lmkjNum)
{
int lmkjIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( lmkjIdx < lmkjNum )
{
vector dqKVec = {0.0, 0.0};
vector qjVec, rjVec;
getVector(qjVec, d_lmkjMat, lmkjIdx, lmkjNum);
getVector(rjVec, d_rgtMat, lmkjIdx, lmkjNum);
for ( int lmkiIdx = 0; lmkiIdx < lmkiNum; ++lmkiIdx )
{
vector qiVec, liVec;
getVector(qiVec, d_lmkiMat, lmkiIdx, lmkiNum);
getVector(liVec, d_lftMat, lmkiIdx, lmkiNum);
vector qjiVec;
vectorSubtract(qjiVec, qjVec, qiVec);
double dijVal = eucnorm(qjiVec) / knlWidth;
double dijSqu = dijVal * dijVal;
double p0Val, p1Val;
p0Fcn(p0Val, dijVal);
p1Fcn(p1Val, dijVal);
double dqKVal = -1.0 / (384.0 * knlWidthSqu)
* ((24.0 + dijSqu) * p0Val + 8.0 * (6.0 + dijSqu) * p1Val);
double lrVal = dotProduct(liVec, rjVec);
dqKVec.x += lrVal * dqKVal * qjiVec.x;
dqKVec.y += lrVal * dqKVal * qjiVec.y;
}
setVector(d_dqjKMat, dqKVec, lmkjIdx, lmkjNum);
}
return;
}
void dqKernel(double *d_dqKMat, double *d_lmkMat, double *d_lftMat, double *d_rgtMat,
int knlOrder, double knlWidth, int lmkNum)
{
// order 0 to 4: Matern kernel of order 0 to 4
// order -1: Gaussian kernel
setBesselkCoefficients();
double knlWidthSqu = knlWidth * knlWidth;
int blkNum = (lmkNum - 1) / BLKDIM + 1;
switch ( knlOrder )
{
case -1:
dqGaussian <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
// Matern0 is not differentiable
case 1:
dqMatern1 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 2:
dqMatern2 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 3:
dqMatern3 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
case 4:
dqMatern4 <<<blkNum, BLKDIM>>> (d_dqKMat, d_lmkMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkNum);
break;
}
return;
}
void dqKernel(double *d_dqiKMat, double *d_dqjKMat, double *d_lmkiMat, double *d_lmkjMat,
double *d_lftMat, double *d_rgtMat, int knlOrder, double knlWidth, int lmkiNum, int lmkjNum)
{
   // order 1 to 4: Matern kernel of order 1 to 4 (order 0 has no case here)
   // order -1: Gaussian kernel
setBesselkCoefficients();
double knlWidthSqu = knlWidth * knlWidth;
int blkiNum = (lmkiNum - 1) / BLKDIM + 1;
int blkjNum = (lmkjNum - 1) / BLKDIM + 1;
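   // Two grids: the dqi* kernels parallelize over the lmkiNum points, the dqj* kernels over the lmkjNum points.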
switch ( knlOrder )
{
case -1:
dqiGaussian <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
dqjGaussian <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
// Matern0 is not differentiable
case 1:
dqiMatern1 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
dqjMatern1 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 2:
dqiMatern2 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
dqjMatern2 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 3:
dqiMatern3 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
dqjMatern3 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
case 4:
dqiMatern4 <<<blkiNum, BLKDIM>>> (d_dqiKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
dqjMatern4 <<<blkjNum, BLKDIM>>> (d_dqjKMat, d_lmkiMat, d_lmkjMat, d_lftMat, d_rgtMat,
knlWidth, knlWidthSqu, lmkiNum, lmkjNum);
break;
}
return;
}
|
17cef5ad52d10ae149a9946e1001f333031e62df.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "blend_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *outSrc = NULL;
hipMalloc(&outSrc, XSIZE*YSIZE*sizeof(float));
const float *inSrc = NULL;
hipMalloc(&inSrc, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
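// Pad the problem size up to a multiple of the block shape so the grid divides it evenly.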
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(blend_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, outSrc, inSrc);
hipDeviceSynchronize();
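// 10 untimed warm-up launches, then 1000 timed launches of blend_kernel.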
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(blend_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, outSrc, inSrc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(blend_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, outSrc, inSrc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 17cef5ad52d10ae149a9946e1001f333031e62df.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "blend_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *outSrc = NULL;
cudaMalloc(&outSrc, XSIZE*YSIZE*sizeof(float));
const float *inSrc = NULL;
cudaMalloc(&inSrc, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
blend_kernel<<<gridBlock,threadBlock>>>(outSrc,inSrc);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
blend_kernel<<<gridBlock,threadBlock>>>(outSrc,inSrc);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
blend_kernel<<<gridBlock,threadBlock>>>(outSrc,inSrc);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4a844edba1ebfe1c65e6fd40284260f8f944e019.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iterator>
#include <fstream>
#include<iostream>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include<cuda.h>
#include<time.h>
#define N 50
#define M 10
#define SHARED_DATA 10*4
using namespace std;
static const int ArraySize = 500000000;
static const int BlockSize = 1024;
static const int GridSize = 24;
static const int arraySize = 1000000000;
__global__ void parallelSum(const int *input, int arraySize, int *output) {
int index = threadIdx.x + blockIdx.x*BlockSize;
const int gridSize = BlockSize*gridDim.x;
int parallelsum = 0;
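    // Grid-stride loop: each thread first accumulates its strided share of the input in a register.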
for (int i = index; i < arraySize; i += gridSize)
parallelsum += input[i];
__shared__ int data[BlockSize];
data[threadIdx.x] = parallelsum;
__syncthreads();
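    // Tree reduction in shared memory; thread 0 writes one partial sum per block to output[blockIdx.x].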
for (int size = BlockSize/2; size>0; size/=2) {
if (threadIdx.x<size)
data[threadIdx.x] += data[threadIdx.x+size];
__syncthreads();
}
if (threadIdx.x == 0)
output[blockIdx.x] = data[0];
}
__global__ void sum(int *input, int *output)
{
__shared__ int data[SHARED_DATA];
int index = threadIdx.x+blockDim.x*blockIdx.x;
data[threadIdx.x] = input[index];
int i=64;
__syncthreads();
while(i!=0)
{
if(index+i<N && threadIdx.x<i)
data[threadIdx.x] += data[threadIdx.x+i];
i/=2;
__syncthreads();
}
if(threadIdx.x == 0)
output[blockIdx.x] = data[0];
}
int main() {
int size=N*sizeof(int);
clock_t startLoad50,startMove50,startSum50;
clock_t endLoad50,endMove50,endSum50;
startLoad50=clock();
std::ifstream file_handler("/mirror/data/50Numbers.txt");
std::vector<int> input1;
int number;
while (file_handler>>number) {
input1.push_back(number);
}
endLoad50=clock();
int *output1;
output1=(int*)malloc(size);
int *d_input1, *d_sum1; //Device variable Declaration
hipMalloc((void **)&d_input1, size);
hipMalloc((void **)&d_sum1, size);
startMove50=clock();
hipMemcpy(d_input1, input1.data(), size,hipMemcpyHostToDevice);
endMove50=clock();
hipMemcpy(d_sum1, output1, size, hipMemcpyHostToDevice);
//Launch Kernel
startSum50=clock();
sum<<<(N+M-1)/M, M>>>(d_input1, d_sum1);
sum<<<1, N>>>(d_input1, d_sum1);
endSum50=clock();
//Copy Device Memory to Host Memory
hipMemcpy(output1, d_sum1, sizeof(int), hipMemcpyDeviceToHost);
printf("50Numbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoad50-startLoad50)/ CLOCKS_PER_SEC);
printf("Time consumption to move the file from main memory to device memory:%f secs \n", (double) (endMove50-startMove50)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSum50-startSum50)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output1[0]);
printf("\n");
free(output1);
//Free Device Memory
hipFree(&d_input1);
hipFree(&d_sum1);
clock_t startLoadHalfBillion,startMoveHalfBillion,startSumHalfBillion;
clock_t endLoadHalfBillion,endMoveHalfBillion,endSumHalfBillion;
startLoadHalfBillion=clock();
std::ifstream file_handler1("/mirror/data/halfBillionNumbers.txt");
std::vector<int> input2;
int number1;
while (file_handler1>>number1) {
input2.push_back(number1);
}
endLoadHalfBillion=clock();
int* d_input2,*d_output2;
int output2;
hipMalloc((void**)&d_input2, ArraySize*sizeof(int));
hipMalloc((void**)&d_output2, sizeof(int)*GridSize);
startMoveHalfBillion=clock();
hipMemcpy(d_input2, input2.data(), ArraySize*sizeof(int), hipMemcpyHostToDevice);
endMoveHalfBillion=clock();
startSumHalfBillion=clock();
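// Two-stage reduction: one partial sum per block, then a single block reduces the GridSize partials into output[0].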
hipLaunchKernelGGL(( parallelSum), dim3(GridSize), dim3(BlockSize), 0, 0, d_input2, ArraySize, d_output2);
hipLaunchKernelGGL(( parallelSum), dim3(1), dim3(BlockSize), 0, 0, d_output2, GridSize, d_output2);
endSumHalfBillion=clock();
hipDeviceSynchronize();
hipMemcpy(&output2, d_output2, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_input2);
hipFree(d_output2);
printf("halfBillionNumbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoadHalfBillion-startLoadHalfBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to move the file from main memory to device memory:%f secs\n", (double) (endMoveHalfBillion-startMoveHalfBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSumHalfBillion-startSumHalfBillion)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output2);
printf("\n");
clock_t startLoadBillion,startMoveBillion,startSumBillion;
clock_t endLoadBillion,endMoveBillion,endSumBillion;
startLoadBillion=clock();
std::ifstream file_handler2("/mirror/data/1billionNumbers.txt");
std::vector<int> input3;
int number2;
while (file_handler2>>number2) {
input3.push_back(number2);
}
endLoadBillion=clock();
int* d_input3,*d_output3;
int output3;
hipMalloc((void**)&d_input3, arraySize*sizeof(int));
hipMalloc((void**)&d_output3, sizeof(int)*GridSize);
startMoveBillion=clock();
hipMemcpy(d_input3, input3.data(), arraySize*sizeof(int), hipMemcpyHostToDevice);
endMoveBillion=clock();
startSumBillion=clock();
hipLaunchKernelGGL(( parallelSum), dim3(GridSize), dim3(BlockSize), 0, 0, d_input3, arraySize, d_output3);
hipLaunchKernelGGL(( parallelSum), dim3(1), dim3(BlockSize), 0, 0, d_output3, GridSize, d_output3);
endSumBillion=clock();
hipDeviceSynchronize();
hipMemcpy(&output3, d_output3, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_input3);
hipFree(d_output3);
printf("1billionNumbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoadBillion-startLoadBillion)/ CLOCKS_PER_SEC );
printf("Time consumption to move the file from main memory to device memory:%f secs\n", (double) (endMoveBillion-startMoveBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSumBillion-startSumBillion)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output3);
printf("\n");
return 0;
}
| 4a844edba1ebfe1c65e6fd40284260f8f944e019.cu | #include <vector>
#include <iterator>
#include <fstream>
#include<iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <numeric>
#include<cuda.h>
#include<time.h>
#define N 50
#define M 10
#define SHARED_DATA 10*4
using namespace std;
static const int ArraySize = 500000000;
static const int BlockSize = 1024;
static const int GridSize = 24;
static const int arraySize = 1000000000;
__global__ void parallelSum(const int *input, int arraySize, int *output) {
int index = threadIdx.x + blockIdx.x*BlockSize;
const int gridSize = BlockSize*gridDim.x;
int parallelsum = 0;
for (int i = index; i < arraySize; i += gridSize)
parallelsum += input[i];
__shared__ int data[BlockSize];
data[threadIdx.x] = parallelsum;
__syncthreads();
for (int size = BlockSize/2; size>0; size/=2) {
if (threadIdx.x<size)
data[threadIdx.x] += data[threadIdx.x+size];
__syncthreads();
}
if (threadIdx.x == 0)
output[blockIdx.x] = data[0];
}
__global__ void sum(int *input, int *output)
{
__shared__ int data[SHARED_DATA];
int index = threadIdx.x+blockDim.x*blockIdx.x;
data[threadIdx.x] = input[index];
int i=64;
__syncthreads();
while(i!=0)
{
if(index+i<N && threadIdx.x<i)
data[threadIdx.x] += data[threadIdx.x+i];
i/=2;
__syncthreads();
}
if(threadIdx.x == 0)
output[blockIdx.x] = data[0];
}
int main() {
int size=N*sizeof(int);
clock_t startLoad50,startMove50,startSum50;
clock_t endLoad50,endMove50,endSum50;
startLoad50=clock();
std::ifstream file_handler("/mirror/data/50Numbers.txt");
std::vector<int> input1;
int number;
while (file_handler>>number) {
input1.push_back(number);
}
endLoad50=clock();
int *output1;
output1=(int*)malloc(size);
int *d_input1, *d_sum1; //Device variable Declaration
cudaMalloc((void **)&d_input1, size);
cudaMalloc((void **)&d_sum1, size);
startMove50=clock();
cudaMemcpy(d_input1, input1.data(), size,cudaMemcpyHostToDevice);
endMove50=clock();
cudaMemcpy(d_sum1, output1, size, cudaMemcpyHostToDevice);
//Launch Kernel
startSum50=clock();
sum<<<(N+M-1)/M, M>>>(d_input1, d_sum1);
sum<<<1, N>>>(d_input1, d_sum1);
endSum50=clock();
//Copy Device Memory to Host Memory
cudaMemcpy(output1, d_sum1, sizeof(int), cudaMemcpyDeviceToHost);
printf("50Numbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoad50-startLoad50)/ CLOCKS_PER_SEC);
printf("Time consumption to move the file from main memory to device memory:%f secs \n", (double) (endMove50-startMove50)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSum50-startSum50)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output1[0]);
printf("\n");
free(output1);
//Free Device Memory
cudaFree(&d_input1);
cudaFree(&d_sum1);
clock_t startLoadHalfBillion,startMoveHalfBillion,startSumHalfBillion;
clock_t endLoadHalfBillion,endMoveHalfBillion,endSumHalfBillion;
startLoadHalfBillion=clock();
std::ifstream file_handler1("/mirror/data/halfBillionNumbers.txt");
std::vector<int> input2;
int number1;
while (file_handler1>>number1) {
input2.push_back(number1);
}
endLoadHalfBillion=clock();
int* d_input2,*d_output2;
int output2;
cudaMalloc((void**)&d_input2, ArraySize*sizeof(int));
cudaMalloc((void**)&d_output2, sizeof(int)*GridSize);
startMoveHalfBillion=clock();
cudaMemcpy(d_input2, input2.data(), ArraySize*sizeof(int), cudaMemcpyHostToDevice);
endMoveHalfBillion=clock();
startSumHalfBillion=clock();
parallelSum<<<GridSize, BlockSize>>>(d_input2, ArraySize, d_output2);
parallelSum<<<1, BlockSize>>>(d_output2, GridSize, d_output2);
endSumHalfBillion=clock();
cudaDeviceSynchronize();
cudaMemcpy(&output2, d_output2, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_input2);
cudaFree(d_output2);
printf("halfBillionNumbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoadHalfBillion-startLoadHalfBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to move the file from main memory to device memory:%f secs\n", (double) (endMoveHalfBillion-startMoveHalfBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSumHalfBillion-startSumHalfBillion)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output2);
printf("\n");
clock_t startLoadBillion,startMoveBillion,startSumBillion;
clock_t endLoadBillion,endMoveBillion,endSumBillion;
startLoadBillion=clock();
std::ifstream file_handler2("/mirror/data/1billionNumbers.txt");
std::vector<int> input3;
int number2;
while (file_handler2>>number2) {
input3.push_back(number2);
}
endLoadBillion=clock();
int* d_input3,*d_output3;
int output3;
cudaMalloc((void**)&d_input3, arraySize*sizeof(int));
cudaMalloc((void**)&d_output3, sizeof(int)*GridSize);
startMoveBillion=clock();
cudaMemcpy(d_input3, input3.data(), arraySize*sizeof(int), cudaMemcpyHostToDevice);
endMoveBillion=clock();
startSumBillion=clock();
parallelSum<<<GridSize, BlockSize>>>(d_input3, arraySize, d_output3);
parallelSum<<<1, BlockSize>>>(d_output3, GridSize, d_output3);
endSumBillion=clock();
cudaDeviceSynchronize();
cudaMemcpy(&output3, d_output3, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_input3);
cudaFree(d_output3);
printf("1billionNumbers:\n");
printf("Time consumption to load the File:%f secs\n", (double) (endLoadBillion-startLoadBillion)/ CLOCKS_PER_SEC );
printf("Time consumption to move the file from main memory to device memory:%f secs\n", (double) (endMoveBillion-startMoveBillion)/ CLOCKS_PER_SEC);
printf("Time consumption to sum the file:%f secs\n", (double) (endSumBillion-startSumBillion)/ CLOCKS_PER_SEC);
printf("Sum is:%d\n",output3);
printf("\n");
return 0;
}
|
87716c2e8740e3680c13310ba80038c111525c96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// kernel
__global__ void kernel_vec_add(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
extern "C"
void ivk_krnl_vec_add(const float *a, const float *b, float *c, int n)
{
dim3 block_size = dim3(256, 1, 1);
dim3 grid_size = dim3((n + block_size.x - 1) / block_size.x, 1, 1);
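    // Round the grid up so every element gets a thread; the kernel bounds-checks i < N.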
hipLaunchKernelGGL(( kernel_vec_add), dim3(grid_size), dim3(block_size), 0, 0, a, b, c, n);
}
| 87716c2e8740e3680c13310ba80038c111525c96.cu | // kernel
__global__ void kernel_vec_add(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
extern "C"
void ivk_krnl_vec_add(const float *a, const float *b, float *c, int n)
{
dim3 block_size = dim3(256, 1, 1);
dim3 grid_size = dim3((n + block_size.x - 1) / block_size.x, 1, 1);
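    // Round the grid up so every element gets a thread; the kernel bounds-checks i < N.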
kernel_vec_add<<<grid_size, block_size>>>(a, b, c, n);
}
|
191d3ddb458f1983d9d9a5c48d0a11b95789a5a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* sample.cu
*
* Created on: Jan 17, 2015
* Author: Adam Kosiorek
*/
#include "calc_alignment.h"
__global__ void calc_alignment_GPU_impl(int* x, int* y, int* z, int nX, int nY,
int nZ, int* cube) {
}
Cube calc_alignment_GPU(const Sequence& seq) {
std::vector<int*> data(seq.size());
int totalSize = 1;
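    // totalSize is the product of the sequence lengths, i.e. the number of cells in the alignment cube.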
for (int i = 0; i < data.size(); ++i) {
hipMalloc((void**) &data[i], seq[i].size() * sizeof(int));
hipMemcpy(data[i], &seq[i][0], seq[i].size() * sizeof(int),
hipMemcpyHostToDevice);
totalSize *= seq[i].size();
}
int* cube = NULL;
hipMalloc((void**) &cube, totalSize * sizeof(int));
hipLaunchKernelGGL(( calc_alignment_GPU_impl), dim3(1), dim3(1), 0, 0, data[0], data[1], data[2], seq[0].size(), seq[1].size(), seq[2].size(), cube);
Cube output(seq[0].size(),
std::vector<std::vector<int> >(seq[1].size(),
std::vector<int>(seq[2].size())));
for (int x = 0; x < seq[0].size(); ++x) {
for (int y = 0; y < seq[1].size(); ++y) {
hipMemcpy(&output[x][y][0], cube, seq[2].size() * sizeof(int),
hipMemcpyDeviceToHost);
cube += seq[2].size();
}
}
return output;
}
| 191d3ddb458f1983d9d9a5c48d0a11b95789a5a1.cu | /*
* sample.cu
*
* Created on: Jan 17, 2015
* Author: Adam Kosiorek
*/
#include "calc_alignment.h"
__global__ void calc_alignment_GPU_impl(int* x, int* y, int* z, int nX, int nY,
int nZ, int* cube) {
}
Cube calc_alignment_GPU(const Sequence& seq) {
std::vector<int*> data(seq.size());
int totalSize = 1;
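    // totalSize is the product of the sequence lengths, i.e. the number of cells in the alignment cube.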
for (int i = 0; i < data.size(); ++i) {
cudaMalloc((void**) &data[i], seq[i].size() * sizeof(int));
cudaMemcpy(data[i], &seq[i][0], seq[i].size() * sizeof(int),
cudaMemcpyHostToDevice);
totalSize *= seq[i].size();
}
int* cube = NULL;
cudaMalloc((void**) &cube, totalSize * sizeof(int));
calc_alignment_GPU_impl<<<1, 1>>>(data[0], data[1], data[2], seq[0].size(), seq[1].size(), seq[2].size(), cube);
Cube output(seq[0].size(),
std::vector<std::vector<int> >(seq[1].size(),
std::vector<int>(seq[2].size())));
for (int x = 0; x < seq[0].size(); ++x) {
for (int y = 0; y < seq[1].size(); ++y) {
cudaMemcpy(&output[x][y][0], cube, seq[2].size() * sizeof(int),
cudaMemcpyDeviceToHost);
cube += seq[2].size();
}
}
return output;
}
|
f385bcbb9eb4715b2234273e10ed1c01433ae1b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_types.h"
#include "cuda_utils.h"
#include "sparse_dataset.h"
void initMatDescriptors( DeviceDataset *d )
{
//Train
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spTrain.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spTrain.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spTrain.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
//Test
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spTest.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spTest.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spTest.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
}
void initMatDescriptorsForSampling( DeviceDataset *d ) {
//SubSampling - Hessian
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spHessianSample.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spHessianSample.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spHessianSample.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
//gradient
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spGradientSample.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spGradientSample.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spGradientSample.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
}
void initMatDescriptorsForSparseSampling( DeviceDataset *d ) {
//SubSampling - Hessian
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spSampledHessianTrain.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spSampledHessianTrain.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spSampledHessianTrain.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
//gradient
cusparseCheckError ( hipsparseCreateMatDescr(&(d->spSampledGradientTrain.descr)) );
cusparseCheckError ( hipsparseSetMatIndexBase(d->spSampledGradientTrain.descr, HIPSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( hipsparseSetMatType(d->spSampledGradientTrain.descr, HIPSPARSE_MATRIX_TYPE_GENERAL) );
}
void convertGradientSampleToCSR (SparseDataset *spGradientSample, int sampleSize, int cols, real *devPtr) {
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
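// Sort the COO triplets by row (gathering values through the permutation P), then compress the row indices to CSR.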
//Sampled Dataset Here.
cusparseCheckError(
hipsparseXcoosort_bufferSizeExt(
cusparseHandle, sampleSize, cols, spGradientSample->nnz,
spGradientSample->rowPtr, spGradientSample->colPtr, &pBufferSizeInBytes ) );
cusparseCheckError(
hipsparseCreateIdentityPermutation( cusparseHandle, spGradientSample->nnz, spGradientSample->P) );
cusparseCheckError(
hipsparseXcoosortByRow( cusparseHandle, sampleSize, cols, spGradientSample->nnz,
spGradientSample->rowPtr, spGradientSample->colPtr, spGradientSample->P, pBuffer ) );
cusparseCheckError(
hipsparseDgthr( cusparseHandle, spGradientSample->nnz, spGradientSample->valPtr,
spGradientSample->sortedVals, spGradientSample->P, HIPSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
hipsparseXcoo2csr( cusparseHandle, spGradientSample->rowPtr, spGradientSample->nnz, sampleSize,
spGradientSample->rowCsrPtr, HIPSPARSE_INDEX_BASE_ZERO )
);
//fprintf( stderr, "Converting gradient to CSR .... \n");
}
void convertHessianSampleToCSR (SparseDataset *spHessianSample, int sampleSize, int cols, real *devPtr) {
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
//Sampled Dataset Here.
cusparseCheckError(
hipsparseXcoosort_bufferSizeExt(
cusparseHandle, sampleSize, cols, spHessianSample->nnz,
spHessianSample->rowPtr, spHessianSample->colPtr, &pBufferSizeInBytes ) );
cusparseCheckError(
hipsparseCreateIdentityPermutation( cusparseHandle, spHessianSample->nnz, spHessianSample->P) );
cusparseCheckError(
hipsparseXcoosortByRow( cusparseHandle, sampleSize, cols, spHessianSample->nnz,
spHessianSample->rowPtr, spHessianSample->colPtr, spHessianSample->P, pBuffer ) );
cusparseCheckError(
hipsparseDgthr( cusparseHandle, spHessianSample->nnz, spHessianSample->valPtr,
spHessianSample->sortedVals, spHessianSample->P, HIPSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
hipsparseXcoo2csr( cusparseHandle, spHessianSample->rowPtr, spHessianSample->nnz, sampleSize,
spHessianSample->rowCsrPtr, HIPSPARSE_INDEX_BASE_ZERO )
);
//fprintf( stderr, "Converting hessian to CSR .... \n");
}
void convertToCSR( DeviceDataset *d, real *devPtr )
{
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
//Train Dataset Here.
cusparseCheckError(
hipsparseXcoosort_bufferSizeExt(
cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.rowPtr, d->spTrain.colPtr, &pBufferSizeInBytes ) );
fprintf( stderr, "Memory needed to sort coo data --> %d \n", pBufferSizeInBytes );
cusparseCheckError(
hipsparseCreateIdentityPermutation( cusparseHandle, d->spTrain.nnz, d->spTrain.P) );
cusparseCheckError(
hipsparseXcoosortByRow( cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.rowPtr, d->spTrain.colPtr, d->spTrain.P, pBuffer ) );
cusparseCheckError(
hipsparseDgthr( cusparseHandle, d->spTrain.nnz, d->spTrain.valPtr,
d->spTrain.sortedVals, d->spTrain.P, HIPSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
hipsparseXcoo2csr( cusparseHandle, d->spTrain.rowPtr, d->spTrain.nnz, d->rows,
d->spTrain.rowCsrPtr, HIPSPARSE_INDEX_BASE_ZERO )
);
//Test Dataset here.
cusparseCheckError(
hipsparseXcoosort_bufferSizeExt(
cusparseHandle, d->rows, d->cols, d->spTest.nnz,
d->spTest.rowPtr, d->spTest.colPtr, &pBufferSizeInBytes ) );
fprintf( stderr, "Memory needed to sort coo data --> %d \n", pBufferSizeInBytes );
cusparseCheckError(
hipsparseCreateIdentityPermutation( cusparseHandle, d->spTest.nnz, d->spTest.P) );
cusparseCheckError(
hipsparseXcoosortByRow( cusparseHandle, d->rows, d->cols, d->spTest.nnz,
d->spTest.rowPtr, d->spTest.colPtr, d->spTest.P, pBuffer ) );
cusparseCheckError(
hipsparseDgthr( cusparseHandle, d->spTest.nnz, d->spTest.valPtr,
d->spTest.sortedVals, d->spTest.P, HIPSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
hipsparseXcoo2csr( cusparseHandle, d->spTest.rowPtr, d->spTest.nnz, d->rows,
d->spTest.rowCsrPtr, HIPSPARSE_INDEX_BASE_ZERO )
);
/*
cusparseCheckError(
hipsparseXcoo2csr( cusparseHandle, d->spTest.rowPtr, d->spTest.nnz, d->testSize,
d->spTest.rowCsrPtr, HIPSPARSE_INDEX_BASE_ZERO )
);
//convert the csr matrix to csc matrix here.
cusparseCheckError(
hipsparseDcsr2csc( cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.valPtr, d->spTrain.rowCsrPtr, d->spTrain.colPtr,
d->spTrain.cscValPtr, d->spTrain.cscRowPtr, d->spTrain.cscColPtr,
HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO ) );
*/
}
| f385bcbb9eb4715b2234273e10ed1c01433ae1b4.cu |
#include "cuda_types.h"
#include "cuda_utils.h"
#include "sparse_dataset.h"
void initMatDescriptors( DeviceDataset *d )
{
//Train
cusparseCheckError ( cusparseCreateMatDescr(&(d->spTrain.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spTrain.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spTrain.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
//Test
cusparseCheckError ( cusparseCreateMatDescr(&(d->spTest.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spTest.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spTest.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
}
void initMatDescriptorsForSampling( DeviceDataset *d ) {
//SubSampling - Hessian
cusparseCheckError ( cusparseCreateMatDescr(&(d->spHessianSample.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spHessianSample.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spHessianSample.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
//gradient
cusparseCheckError ( cusparseCreateMatDescr(&(d->spGradientSample.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spGradientSample.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spGradientSample.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
}
void initMatDescriptorsForSparseSampling( DeviceDataset *d ) {
//SubSampling - Hessian
cusparseCheckError ( cusparseCreateMatDescr(&(d->spSampledHessianTrain.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spSampledHessianTrain.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spSampledHessianTrain.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
//gradient
cusparseCheckError ( cusparseCreateMatDescr(&(d->spSampledGradientTrain.descr)) );
cusparseCheckError ( cusparseSetMatIndexBase(d->spSampledGradientTrain.descr, CUSPARSE_INDEX_BASE_ZERO) );
cusparseCheckError ( cusparseSetMatType(d->spSampledGradientTrain.descr, CUSPARSE_MATRIX_TYPE_GENERAL) );
}
void convertGradientSampleToCSR (SparseDataset *spGradientSample, int sampleSize, int cols, real *devPtr) {
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
//Sampled Dataset Here.
cusparseCheckError(
cusparseXcoosort_bufferSizeExt(
cusparseHandle, sampleSize, cols, spGradientSample->nnz,
spGradientSample->rowPtr, spGradientSample->colPtr, &pBufferSizeInBytes ) );
cusparseCheckError(
cusparseCreateIdentityPermutation( cusparseHandle, spGradientSample->nnz, spGradientSample->P) );
cusparseCheckError(
cusparseXcoosortByRow( cusparseHandle, sampleSize, cols, spGradientSample->nnz,
spGradientSample->rowPtr, spGradientSample->colPtr, spGradientSample->P, pBuffer ) );
cusparseCheckError(
cusparseDgthr( cusparseHandle, spGradientSample->nnz, spGradientSample->valPtr,
spGradientSample->sortedVals, spGradientSample->P, CUSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
cusparseXcoo2csr( cusparseHandle, spGradientSample->rowPtr, spGradientSample->nnz, sampleSize,
spGradientSample->rowCsrPtr, CUSPARSE_INDEX_BASE_ZERO )
);
//fprintf( stderr, "Converting gradient to CSR .... \n");
}
void convertHessianSampleToCSR (SparseDataset *spHessianSample, int sampleSize, int cols, real *devPtr) {
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
//Sampled Dataset Here.
cusparseCheckError(
cusparseXcoosort_bufferSizeExt(
cusparseHandle, sampleSize, cols, spHessianSample->nnz,
spHessianSample->rowPtr, spHessianSample->colPtr, &pBufferSizeInBytes ) );
cusparseCheckError(
cusparseCreateIdentityPermutation( cusparseHandle, spHessianSample->nnz, spHessianSample->P) );
cusparseCheckError(
cusparseXcoosortByRow( cusparseHandle, sampleSize, cols, spHessianSample->nnz,
spHessianSample->rowPtr, spHessianSample->colPtr, spHessianSample->P, pBuffer ) );
cusparseCheckError(
cusparseDgthr( cusparseHandle, spHessianSample->nnz, spHessianSample->valPtr,
spHessianSample->sortedVals, spHessianSample->P, CUSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
cusparseXcoo2csr( cusparseHandle, spHessianSample->rowPtr, spHessianSample->nnz, sampleSize,
spHessianSample->rowCsrPtr, CUSPARSE_INDEX_BASE_ZERO )
);
//fprintf( stderr, "Converting hessian to CSR .... \n");
}
void convertToCSR( DeviceDataset *d, real *devPtr )
{
//make sure that the data is sorted here.
size_t pBufferSizeInBytes = 0;
void* pBuffer = (void *)devPtr;
//Train Dataset Here.
cusparseCheckError(
cusparseXcoosort_bufferSizeExt(
cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.rowPtr, d->spTrain.colPtr, &pBufferSizeInBytes ) );
fprintf( stderr, "Memory needed to sort coo data --> %d \n", pBufferSizeInBytes );
cusparseCheckError(
cusparseCreateIdentityPermutation( cusparseHandle, d->spTrain.nnz, d->spTrain.P) );
cusparseCheckError(
cusparseXcoosortByRow( cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.rowPtr, d->spTrain.colPtr, d->spTrain.P, pBuffer ) );
cusparseCheckError(
cusparseDgthr( cusparseHandle, d->spTrain.nnz, d->spTrain.valPtr,
d->spTrain.sortedVals, d->spTrain.P, CUSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
cusparseXcoo2csr( cusparseHandle, d->spTrain.rowPtr, d->spTrain.nnz, d->rows,
d->spTrain.rowCsrPtr, CUSPARSE_INDEX_BASE_ZERO )
);
//Test Dataset here.
cusparseCheckError(
cusparseXcoosort_bufferSizeExt(
cusparseHandle, d->rows, d->cols, d->spTest.nnz,
d->spTest.rowPtr, d->spTest.colPtr, &pBufferSizeInBytes ) );
fprintf( stderr, "Memory needed to sort coo data --> %d \n", pBufferSizeInBytes );
cusparseCheckError(
cusparseCreateIdentityPermutation( cusparseHandle, d->spTest.nnz, d->spTest.P) );
cusparseCheckError(
cusparseXcoosortByRow( cusparseHandle, d->rows, d->cols, d->spTest.nnz,
d->spTest.rowPtr, d->spTest.colPtr, d->spTest.P, pBuffer ) );
cusparseCheckError(
cusparseDgthr( cusparseHandle, d->spTest.nnz, d->spTest.valPtr,
d->spTest.sortedVals, d->spTest.P, CUSPARSE_INDEX_BASE_ZERO ) );
//convert to csr format.
cusparseCheckError(
cusparseXcoo2csr( cusparseHandle, d->spTest.rowPtr, d->spTest.nnz, d->rows,
d->spTest.rowCsrPtr, CUSPARSE_INDEX_BASE_ZERO )
);
/*
cusparseCheckError(
cusparseXcoo2csr( cusparseHandle, d->spTest.rowPtr, d->spTest.nnz, d->testSize,
d->spTest.rowCsrPtr, CUSPARSE_INDEX_BASE_ZERO )
);
//convert the csr matrix to csc matrix here.
cusparseCheckError(
cusparseDcsr2csc( cusparseHandle, d->rows, d->cols, d->spTrain.nnz,
d->spTrain.valPtr, d->spTrain.rowCsrPtr, d->spTrain.colPtr,
d->spTrain.cscValPtr, d->spTrain.cscRowPtr, d->spTrain.cscColPtr,
CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO ) );
*/
}
|
0ce911fcdf4259c565fa092f0c17f97fdabe34b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define N 1024
#define T 256 // max threads per block
#include <stdio.h>
__global__ void vecAdd (int *a, int *b, int *c);
void printArray(int a[], int b[], int c[]);
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
clock_t t;
double time_taken;
FILE *fp;
fp = fopen ("output.txt","a");
// initialize a and b with real values (NOT SHOWN)
int size = N * sizeof(int);
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = i/2;
}
t = clock();
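// The timed region covers device allocation, both host-to-device copies, the kernel launch, the device-to-host copy, and the frees.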
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c, size);
hipMemcpy(dev_a, a, size,hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(vecAdd, dim3((N + T - 1) / T), dim3(T), 0, 0, dev_a, dev_b, dev_c); // integer ceiling of N/T (avoids the truncating (int)ceil(N/T))
hipMemcpy(c, dev_c, size,hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
t = clock() - t;
time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("Vector addition with 256 threads per block and padding for array of length %d took %lf seconds to execute \n", N, time_taken);
fprintf (fp, "%d %lf\n", N, time_taken);
// printArray(a,b,c);
fclose(fp);
exit (0);
}
__global__ void vecAdd (int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
void printArray(int a[], int b[], int c[]) {
printf("Array a:\n");
for(int i = 0; i < N; i++){
printf("%d ", a[i]);
}
printf("\n\nArray b:\n");
for(int i = 0; i < N; i++){
printf("%d ", b[i]);
}
printf("\n\nArray c:\n");
for(int i = 0; i < N; i++){
printf("%d ", c[i]);
}
printf("\n");
}
| 0ce911fcdf4259c565fa092f0c17f97fdabe34b6.cu | #define N 1024
#define T 256 // max threads per block
#include <stdio.h>
__global__ void vecAdd (int *a, int *b, int *c);
void printArray(int a[], int b[], int c[]);
int main() {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
clock_t t;
double time_taken;
FILE *fp;
fp = fopen ("output.txt","a");
// initialize a and b with real values (NOT SHOWN)
int size = N * sizeof(int);
for (int i = 0; i < N; i++) {
a[i] = i;
b[i] = i/2;
}
t = clock();
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_c, size);
cudaMemcpy(dev_a, a, size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size,cudaMemcpyHostToDevice);
vecAdd<<<(N + T - 1) / T, T>>>(dev_a, dev_b, dev_c); // integer ceiling of N/T (avoids the truncating (int)ceil(N/T))
cudaMemcpy(c, dev_c, size,cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
t = clock() - t;
time_taken = ((double)t)/CLOCKS_PER_SEC;
printf("Vector addition with 256 threads per block and padding for array of length %d took %lf seconds to execute \n", N, time_taken);
fprintf (fp, "%d %lf\n", N, time_taken);
// printArray(a,b,c);
fclose(fp);
exit (0);
}
__global__ void vecAdd (int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) {
c[i] = a[i] + b[i];
}
}
void printArray(int a[], int b[], int c[]) {
printf("Array a:\n");
for(int i = 0; i < N; i++){
printf("%d ", a[i]);
}
printf("\n\nArray b:\n");
for(int i = 0; i < N; i++){
printf("%d ", b[i]);
}
printf("\n\nArray c:\n");
for(int i = 0; i < N; i++){
printf("%d ", c[i]);
}
printf("\n");
}
|
b06d1fb272083ded1719652719a9701013dfd84b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#define MINUS_LOG_THRESHOLD -18.42
#define SOFTMAX_THREADS 128
__global__ void cunn_SoftMax_updateOutput_kernel(float *output, float *input, int nframe, int dim)
{
__shared__ float buffer[SOFTMAX_THREADS+1];
int k = blockIdx.x;
float *input_k = input + k*dim;
float *output_k = output + k*dim;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
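  // Pass 1: block-wide maximum of the row for numerical stability; each thread scans a strided slice of the dim inputs.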
// max?
buffer[threadIdx.x] = -FLT_MAX;
for (int i=i_start; i<i_end; i+=i_step)
{
float z = input_k[i];
if(buffer[threadIdx.x] < z)
buffer[threadIdx.x] = z;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float max_k = -FLT_MAX;
for (int i=0; i<blockDim.x; i++)
{
if(max_k < buffer[i])
max_k = buffer[i];
}
buffer[SOFTMAX_THREADS] = max_k;
}
__syncthreads();
// sum?
float max_k = buffer[SOFTMAX_THREADS];
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
float z = __expf(input_k[i]-max_k);
buffer[threadIdx.x] += z;
output_k[i] = z;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[SOFTMAX_THREADS] = sum_k;
}
__syncthreads();
// softmax
float sum_k = buffer[SOFTMAX_THREADS];
for (int i=i_start; i<i_end; i+=i_step)
output_k[i] = output_k[i] / sum_k;
}
__global__ void cunn_SoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int nframe, int dim)
{
__shared__ float buffer[SOFTMAX_THREADS];
int k = blockIdx.x;
float *gradInput_k = gradInput + k*dim;
float *output_k = output + k*dim;
float *gradOutput_k = gradOutput + k*dim;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
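  // Softmax backward: gradInput_i = y_i * (gradOutput_i - sum_j gradOutput_j * y_j), where y is the forward output; the dot product is reduced in shared memory.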
// sum?
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += gradOutput_k[i] * output_k[i];
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
float sum_k = buffer[0];
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
}
static int cunn_SoftMax_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, output, input);
if(input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
1, input->size[0]);
}
else if(input->nDimension == 2)
{
dim3 blocks(input->size[0]);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
input->size[0], input->size[1]);
}
else if(input->nDimension == 3)
{
dim3 blocks(input->size[0]*input->size[1]);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateOutput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
input->size[0]*input->size[1], input->size[2]);
}
else
THError("vector or matrix expected");
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCudaTensor_free(state, input);
return 1;
}
struct softmaxupdateGradInput_functor
{
float value;
softmaxupdateGradInput_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& output, const float& gradOutput) const
{
return gradOutput - exp(output)*value;
}
};
static int cunn_SoftMax_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
output = THCudaTensor_newContiguous(state, output);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, output);
if(gradInput->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
1, gradInput->size[0]);
}
else if(gradInput->nDimension == 2)
{
dim3 blocks(gradInput->size[0]);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
gradInput->size[0], gradInput->size[1]);
}
else if(gradInput->nDimension == 3)
{
dim3 blocks(gradInput->size[0]*gradInput->size[1]);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),dim3(threads),
0, THCState_getCurrentStream(state), THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
gradInput->size[0]*gradInput->size[1], gradInput->size[2]);
}
else
THError("vector or matrix expected");
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, output);
return 1;
}
static const struct luaL_Reg cunn_SoftMax__ [] = {
{"SoftMax_updateOutput", cunn_SoftMax_updateOutput},
{"SoftMax_updateGradInput", cunn_SoftMax_updateGradInput},
{NULL, NULL}
};
void cunn_SoftMax_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SoftMax__, "nn");
lua_pop(L,1);
}
| b06d1fb272083ded1719652719a9701013dfd84b.cu | #include "utils.h"
#define MINUS_LOG_THRESHOLD -18.42
#define SOFTMAX_THREADS 128
__global__ void cunn_SoftMax_updateOutput_kernel(float *output, float *input, int nframe, int dim)
{
__shared__ float buffer[SOFTMAX_THREADS+1];
int k = blockIdx.x;
float *input_k = input + k*dim;
float *output_k = output + k*dim;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
// max?
buffer[threadIdx.x] = -FLT_MAX;
for (int i=i_start; i<i_end; i+=i_step)
{
float z = input_k[i];
if(buffer[threadIdx.x] < z)
buffer[threadIdx.x] = z;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float max_k = -FLT_MAX;
for (int i=0; i<blockDim.x; i++)
{
if(max_k < buffer[i])
max_k = buffer[i];
}
buffer[SOFTMAX_THREADS] = max_k;
}
__syncthreads();
// sum?
float max_k = buffer[SOFTMAX_THREADS];
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step) {
float z = __expf(input_k[i]-max_k);
buffer[threadIdx.x] += z;
output_k[i] = z;
}
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[SOFTMAX_THREADS] = sum_k;
}
__syncthreads();
// softmax
float sum_k = buffer[SOFTMAX_THREADS];
for (int i=i_start; i<i_end; i+=i_step)
output_k[i] = output_k[i] / sum_k;
}
__global__ void cunn_SoftMax_updateGradInput_kernel(float *gradInput, float *output, float *gradOutput, int nframe, int dim)
{
__shared__ float buffer[SOFTMAX_THREADS];
int k = blockIdx.x;
float *gradInput_k = gradInput + k*dim;
float *output_k = output + k*dim;
float *gradOutput_k = gradOutput + k*dim;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
// sum?
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += gradOutput_k[i] * output_k[i];
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
float sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
float sum_k = buffer[0];
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
}
static int cunn_SoftMax_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, output, input);
if(input->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateOutput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
1, input->size[0]);
}
else if(input->nDimension == 2)
{
dim3 blocks(input->size[0]);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateOutput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
input->size[0], input->size[1]);
}
else if(input->nDimension == 3)
{
dim3 blocks(input->size[0]*input->size[1]);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateOutput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
input->size[0]*input->size[1], input->size[2]);
}
else
THError("vector or matrix expected");
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCudaTensor_free(state, input);
return 1;
}
struct softmaxupdateGradInput_functor
{
float value;
softmaxupdateGradInput_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& output, const float& gradOutput) const
{
return gradOutput - exp(output)*value;
}
};
static int cunn_SoftMax_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *gradOutput = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
output = THCudaTensor_newContiguous(state, output);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, output);
if(gradInput->nDimension == 1)
{
dim3 blocks(1);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateGradInput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
1, gradInput->size[0]);
}
else if(gradInput->nDimension == 2)
{
dim3 blocks(gradInput->size[0]);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateGradInput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
gradInput->size[0], gradInput->size[1]);
}
else if(gradInput->nDimension == 3)
{
dim3 blocks(gradInput->size[0]*gradInput->size[1]);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateGradInput_kernel<<<blocks,threads,
0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
gradInput->size[0]*gradInput->size[1], gradInput->size[2]);
}
else
THError("vector or matrix expected");
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, output);
return 1;
}
static const struct luaL_Reg cunn_SoftMax__ [] = {
{"SoftMax_updateOutput", cunn_SoftMax_updateOutput},
{"SoftMax_updateGradInput", cunn_SoftMax_updateGradInput},
{NULL, NULL}
};
void cunn_SoftMax_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SoftMax__, "nn");
lua_pop(L,1);
}
|
c39298115f8cb297bc7b1e7489ad0f9b562b1c0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
#define TKK_NUM 19200
#define BTT_NUM (TKK_NUM/TK_NUM)
#define NUM_CHAN (TK_NUM * BTT_NUM)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
void BeamFirSetup(float *weight, float *buffer, int n);
void InputGenerate(float *input, int n);
__global__ void d_BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size, int *thread, int index);
void BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size);
int main(){
int i, j;
float **h_coarse_weight, **h_coarse_buffer;
float **d_coarse_weight, **d_coarse_buffer;
float **h_inputs, **h_predec, **h_postdec;
float **d_inputs, **d_predec, **d_postdec;
float **hh_postdec;
int *d_len[BTT_NUM];
int *d_num_thread;
int num_thread[NUM_CHAN];
int num_size[BTT_NUM];
int pos_task[BTT_NUM][TK_NUM];
int *pos_task_dev[BTT_NUM];
int len[BTT_NUM][TK_NUM];
FILE *f;
double start_timer, end_timer;
hipSetDevice(0);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
f = fopen("rand4.txt", "r");
for(i = 0; i < NUM_CHAN; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < BTT_NUM; i++){
num_size[i] = 0;
//printf("num_size:%d\n", num_size[i]);
}
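// len[i][j] is each task's sample count; pos_task[i][j] is the running prefix sum of the previous tasks' lengths, used as that task's base offset into the packed buffers.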
for(i = 0; i < BTT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j] * 16)*
(num_thread[i*TK_NUM+j] * 16);
len[i][j] = (num_thread[i*TK_NUM+j] * 16)*
(num_thread[i*TK_NUM+j] * 16);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1] * 16)*
(num_thread[i*TK_NUM+j-1] * 16);
}
}
for(i = 0; i < NUM_CHAN; i++)
num_thread[i] *= 32;
d_coarse_weight = (float**)malloc(BTT_NUM * sizeof(float *));
d_coarse_buffer = (float**)malloc(BTT_NUM * sizeof(float *));
h_coarse_weight = (float**)malloc(BTT_NUM * sizeof(float *));
h_coarse_buffer = (float**)malloc(BTT_NUM * sizeof(float *));
h_inputs = (float**)malloc(BTT_NUM * sizeof(float *));
h_predec = (float**)malloc(BTT_NUM * sizeof(float *));
h_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
d_inputs = (float**)malloc(BTT_NUM * sizeof(float *));
d_predec = (float**)malloc(BTT_NUM * sizeof(float *));
d_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
hh_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(hipHostMalloc(&h_inputs[i], 2*num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_postdec[i], 2*num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_coarse_weight[i], 2*num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipHostMalloc(&h_coarse_buffer[i], 2*num_size[i]*sizeof(float), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&d_inputs[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(hipMalloc(&d_predec[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(hipMalloc(&d_postdec[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(hipMalloc(&d_coarse_weight[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(hipMalloc(&d_coarse_buffer[i], 2* num_size[i]*sizeof(float)));
h_predec[i] = (float*)malloc(2*num_size[i]*sizeof(float));
hh_postdec[i] = (float*)malloc(2*num_size[i]*sizeof(float));
checkCudaErrors(hipMalloc(&d_len[i], TK_NUM*sizeof(int)));
checkCudaErrors(hipMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
}
checkCudaErrors(hipMalloc(&d_num_thread, NUM_CHAN*sizeof(int)));
printf("Inputs are generating\n");
// init data
for(i = 0; i < BTT_NUM; i++){
BeamFirSetup(h_coarse_weight[i], h_coarse_buffer[i], num_size[i]);
InputGenerate(h_inputs[i], num_size[i]);
}
// input transfer
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(hipMemcpy(d_inputs[i], h_inputs[i], 2*num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_coarse_weight[i], h_coarse_weight[i], 2*num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_coarse_buffer[i], h_coarse_buffer[i], 2*num_size[i]*sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_len[i], len[i], TK_NUM*sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(d_num_thread, num_thread, NUM_CHAN*sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipDeviceSynchronize());
printf("GPU program is running\n");
// task running
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
hipLaunchKernelGGL(( d_BeamFirFilter), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, d_len[i],
d_coarse_weight[i], d_coarse_buffer[i],
d_inputs[i], d_predec[i], pos_task_dev[i], d_num_thread, i);
}
checkCudaErrors(hipDeviceSynchronize());
for(i = 0; i < BTT_NUM; i++){
hipLaunchKernelGGL(( d_BeamFirFilter), dim3(TK_NUM), dim3(TDK_NUM), 0, 0, d_len[i],
d_coarse_weight[i], d_coarse_buffer[i],
d_predec[i], d_postdec[i], pos_task_dev[i], d_num_thread, i);
}
checkCudaErrors(hipDeviceSynchronize());
end_timer = my_timer();
printf("Beamformer CUDA static fusion elapsed Time: %lf sec.\n", end_timer - start_timer);
// copy back
start_timer = my_timer();
for (i = 0; i < BTT_NUM; i++) {
checkCudaErrors(hipMemcpyAsync(h_postdec[i], d_postdec[i], 2*num_size[i]*sizeof(float), hipMemcpyDeviceToHost));
}
checkCudaErrors(hipDeviceSynchronize());
#if 0
//host task running
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
BeamFirFilter(len[i],
h_coarse_weight[i], h_coarse_buffer[i],
h_inputs[i], h_predec[i], pos_task[i]);
}
for(i = 0; i < BTT_NUM; i++){
BeamFirFilter(len[i],
h_coarse_weight[i], h_coarse_buffer[i],
h_predec[i], hh_postdec[i], pos_task[i]);
}
end_timer = my_timer();
printf("CPU exec. time:%lf\n", end_timer - start_timer);
//verifiy
for(i = 0; i < 1; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_postdec[i][j] - hh_postdec[i][j]) > 0.1){
printf("Error:%f, %f, %d, %d\n", h_postdec[i][j], hh_postdec[i][j], i, j);
break;
}
}
}
#endif
//free mem
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(hipHostFree(h_inputs[i]));
checkCudaErrors(hipHostFree(h_postdec[i]));
checkCudaErrors(hipHostFree(h_coarse_weight[i]));
checkCudaErrors(hipHostFree(h_coarse_buffer[i]));
checkCudaErrors(hipFree(d_inputs[i]));
checkCudaErrors(hipFree(d_predec[i]));
checkCudaErrors(hipFree(d_postdec[i]));
checkCudaErrors(hipFree(d_coarse_weight[i]));
checkCudaErrors(hipFree(d_coarse_buffer[i]));
free(h_predec[i]);
free(hh_postdec[i]);
checkCudaErrors(hipFree(d_len[i]));
checkCudaErrors(hipFree(pos_task_dev[i]));
}
free(d_coarse_weight);
free(d_coarse_buffer);
free(h_coarse_weight);
free(h_coarse_buffer);
free(h_inputs);
free(h_predec);
free(h_postdec);
free(d_inputs);
free(d_predec);
free(d_postdec);
free(hh_postdec);
checkCudaErrors(hipFree(d_num_thread));
return 0;
}
void BeamFirSetup(float *weight, float *buffer, int n){
int i;
for(i = 0; i < n; i++){
int idx = i + 1;
//weight[i*2] = sin(idx) / ((float)idx);
//weight[i*2+1] = cos(idx) / ((float)idx);
weight[i*2] = 0.001;
weight[i*2+1] = 0.002;
buffer[i*2] = 0.0;
buffer[i*2+1] = 0.0;
}
}
void InputGenerate(float *input, int n){
int i;
for(i = 0; i < n; i++){
//input[2*i] = sqrt(i);
//input[2*i+1] = sqrt(i) + 1;
input[2*i] = 0.01;
input[2*i+1] = 0.02;
}
}
void BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size)
{
/* Filters len[t] interleaved (re,im) samples for each of the TK_NUM
* tasks; in/out/buffer/weight for task t are addressed at offset size[t]. */
int i, j, t;
int modPos;
int mask, mask2;
for(t = 0; t < TK_NUM; t++){
mask = len[t] - 1;
mask2 = 2 * len[t] - 1;
for(j = 0; j < len[t]; j++){
float real_curr = 0;
float imag_curr = 0;
modPos = 2*(len[t] - 1 - (j & mask));
buffer[modPos + size[t]] = in[j * 2 + size[t]];
buffer[modPos+1 + size[t]] = in[j * 2 + 1 + size[t]];
/* Profiling says: this is the single inner loop that matters! */
for (i = 0; i < 2*len[t]; i+=2) {
float rd = buffer[modPos+size[t]];
float id = buffer[modPos+1+size[t]];
float rw = weight[i+size[t]];
float iw = weight[i+1+size[t]];
float rci = rd * rw + id * iw;
/* sign error? this is consistent with StreamIt --dzm */
float ici = id * rw + rd * iw;
#if 1
real_curr += rci;
imag_curr += ici;
#endif
modPos = (modPos + 2) & mask2;
}
out[j * 2+size[t]] = real_curr;
out[j * 2 + 1+size[t]] = imag_curr;
}
}
}
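/* Editor's note (illustration, not part of the original benchmark): the
* "(modPos + 2) & mask2" update above is a branch-free ring-buffer wrap.
* It equals a true modulo only when the ring length is a power of two,
* e.g. for a 4-tap delay line stored as 8 interleaved floats:
*
* int wrap_pow2(int pos) { return (pos + 2) & (8 - 1); } // (6+2)&7 == 0
*/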
__global__ void d_BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size, int *thread, int index)
{
/* One task per block: block bk filters len[bk] interleaved (re,im)
* samples using td = thread[index*TK_NUM+bk] active threads; arrays are
* addressed at offset size[bk]. */
int tid = threadIdx.x;
int i, j;
int modPos;
int mask, mask2;
int bk = blockIdx.x;
int td;
td = thread[index*TK_NUM+bk];
mask = len[bk] - 1;
mask2 = 2 * len[bk] - 1;
//for(k = 0; k < TD_NUM; k++){
if(tid < td){
for(j = 0; j < (len[bk]/td); j++){
float real_curr = 0;
float imag_curr = 0;
modPos = 2*(len[bk] - 1 - ((j*td+tid) & mask));
buffer[modPos + size[bk]] = in[(j*td+tid) * 2 + size[bk]];
buffer[modPos+1 + size[bk]] = in[(j*td+tid)* 2 + 1 + size[bk]];
/* Profiling says: this is the single inner loop that matters! */
for (i = 0; i < 2*len[bk]; i+=2) {
float rd = buffer[modPos + size[bk]];
float id = buffer[modPos+1 + size[bk]];
float rw = weight[i + size[bk]];
float iw = weight[i+1 + size[bk]];
float rci = rd * rw + id * iw;
/* sign error? this is consistent with StreamIt --dzm */
float ici = id * rw + rd * iw;
real_curr += rci;
imag_curr += ici;
modPos = (modPos + 2) & mask2;
}
//out[(j*td+tid) * 2 + size[bk]] = bk;
//out[(j*td+tid) * 2 + 1 + size[bk]] = 1.0;
out[(j*td+tid) * 2 + size[bk]] = real_curr;
out[(j*td+tid) * 2 + 1 + size[bk]] = imag_curr;
}
}
}
| c39298115f8cb297bc7b1e7489ad0f9b562b1c0d.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "../../common/para.h"
#define TKK_NUM 19200
#define BTT_NUM (TKK_NUM/TK_NUM)
#define NUM_CHAN (TK_NUM * BTT_NUM)
double my_timer()
{
struct timeval time;
double _ret_val_0;
gettimeofday(( & time), 0);
_ret_val_0=(time.tv_sec+(time.tv_usec/1000000.0));
return _ret_val_0;
}
void BeamFirSetup(float *weight, float *buffer, int n);
void InputGenerate(float *input, int n);
__global__ void d_BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size, int *thread, int index);
void BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size);
int main(){
int i, j;
float **h_coarse_weight, **h_coarse_buffer;
float **d_coarse_weight, **d_coarse_buffer;
float **h_inputs, **h_predec, **h_postdec;
float **d_inputs, **d_predec, **d_postdec;
float **hh_postdec;
int *d_len[BTT_NUM];
int *d_num_thread;
int num_thread[NUM_CHAN];
int num_size[BTT_NUM];
int pos_task[BTT_NUM][TK_NUM];
int *pos_task_dev[BTT_NUM];
int len[BTT_NUM][TK_NUM];
FILE *f;
double start_timer, end_timer;
cudaSetDevice(0);
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
f = fopen("rand4.txt", "r");
for(i = 0; i < NUM_CHAN; i++)
fscanf(f, "%1d", &num_thread[i]);
fclose(f);
for(i = 0; i < BTT_NUM; i++){
num_size[i] = 0;
//printf("num_size:%d\n", num_size[i]);
}
for(i = 0; i < BTT_NUM; i++){
for(j = 0; j < TK_NUM; j++){
num_size[i] += (num_thread[i*TK_NUM+j] * 16)*
(num_thread[i*TK_NUM+j] * 16);
len[i][j] = (num_thread[i*TK_NUM+j] * 16)*
(num_thread[i*TK_NUM+j] * 16);
pos_task[i][j] = 0;
if(j > 0) pos_task[i][j] += pos_task[i][j-1] + (num_thread[i*TK_NUM+j-1] * 16)*
(num_thread[i*TK_NUM+j-1] * 16);
}
}
for(i = 0; i < NUM_CHAN; i++)
num_thread[i] *= 32;
d_coarse_weight = (float**)malloc(BTT_NUM * sizeof(float *));
d_coarse_buffer = (float**)malloc(BTT_NUM * sizeof(float *));
h_coarse_weight = (float**)malloc(BTT_NUM * sizeof(float *));
h_coarse_buffer = (float**)malloc(BTT_NUM * sizeof(float *));
h_inputs = (float**)malloc(BTT_NUM * sizeof(float *));
h_predec = (float**)malloc(BTT_NUM * sizeof(float *));
h_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
d_inputs = (float**)malloc(BTT_NUM * sizeof(float *));
d_predec = (float**)malloc(BTT_NUM * sizeof(float *));
d_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
hh_postdec = (float**)malloc(BTT_NUM * sizeof(float *));
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(cudaHostAlloc(&h_inputs[i], 2*num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_postdec[i], 2*num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_coarse_weight[i], 2*num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&h_coarse_buffer[i], 2*num_size[i]*sizeof(float), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&d_inputs[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_predec[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_postdec[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_coarse_weight[i], 2* num_size[i]*sizeof(float)));
checkCudaErrors(cudaMalloc(&d_coarse_buffer[i], 2* num_size[i]*sizeof(float)));
h_predec[i] = (float*)malloc(2*num_size[i]*sizeof(float));
hh_postdec[i] = (float*)malloc(2*num_size[i]*sizeof(float));
checkCudaErrors(cudaMalloc(&d_len[i], TK_NUM*sizeof(int)));
checkCudaErrors(cudaMalloc(&pos_task_dev[i], TK_NUM*sizeof(int)));
}
checkCudaErrors(cudaMalloc(&d_num_thread, NUM_CHAN*sizeof(int)));
printf("Inputs are generating\n");
// init data
for(i = 0; i < BTT_NUM; i++){
BeamFirSetup(h_coarse_weight[i], h_coarse_buffer[i], num_size[i]);
InputGenerate(h_inputs[i], num_size[i]);
}
// input transfer
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(cudaMemcpy(d_inputs[i], h_inputs[i], 2*num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_coarse_weight[i], h_coarse_weight[i], 2*num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_coarse_buffer[i], h_coarse_buffer[i], 2*num_size[i]*sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_len[i], len[i], TK_NUM*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(pos_task_dev[i], pos_task[i], TK_NUM*sizeof(int), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(d_num_thread, num_thread, NUM_CHAN*sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaDeviceSynchronize());
printf("GPU program is running\n");
// task running
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
d_BeamFirFilter<<<TK_NUM, TDK_NUM>>>(d_len[i],
d_coarse_weight[i], d_coarse_buffer[i],
d_inputs[i], d_predec[i], pos_task_dev[i], d_num_thread, i);
}
checkCudaErrors(cudaDeviceSynchronize());
for(i = 0; i < BTT_NUM; i++){
d_BeamFirFilter<<<TK_NUM, TDK_NUM>>>(d_len[i],
d_coarse_weight[i], d_coarse_buffer[i],
d_predec[i], d_postdec[i], pos_task_dev[i], d_num_thread, i);
}
checkCudaErrors(cudaDeviceSynchronize());
end_timer = my_timer();
printf("Beamformer CUDA static fusion elapsed Time: %lf sec.\n", end_timer - start_timer);
// copy back
start_timer = my_timer();
for (i = 0; i < BTT_NUM; i++) {
checkCudaErrors(cudaMemcpyAsync(h_postdec[i], d_postdec[i], 2*num_size[i]*sizeof(float), cudaMemcpyDeviceToHost));
}
checkCudaErrors(cudaDeviceSynchronize());
#if 0
//host task running
start_timer = my_timer();
for(i = 0; i < BTT_NUM; i++){
BeamFirFilter(len[i],
h_coarse_weight[i], h_coarse_buffer[i],
h_inputs[i], h_predec[i], pos_task[i]);
}
for(i = 0; i < BTT_NUM; i++){
BeamFirFilter(len[i],
h_coarse_weight[i], h_coarse_buffer[i],
h_predec[i], hh_postdec[i], pos_task[i]);
}
end_timer = my_timer();
printf("CPU exec. time:%lf\n", end_timer - start_timer);
//verify
for(i = 0; i < 1; i++){
for(j = 0; j < num_size[i]; j++){
if(abs(h_postdec[i][j] - hh_postdec[i][j]) > 0.1){
printf("Error:%f, %f, %d, %d\n", h_postdec[i][j], hh_postdec[i][j], i, j);
break;
}
}
}
#endif
//free mem
for(i = 0; i < BTT_NUM; i++){
checkCudaErrors(cudaFreeHost(h_inputs[i]));
checkCudaErrors(cudaFreeHost(h_postdec[i]));
checkCudaErrors(cudaFreeHost(h_coarse_weight[i]));
checkCudaErrors(cudaFreeHost(h_coarse_buffer[i]));
checkCudaErrors(cudaFree(d_inputs[i]));
checkCudaErrors(cudaFree(d_predec[i]));
checkCudaErrors(cudaFree(d_postdec[i]));
checkCudaErrors(cudaFree(d_coarse_weight[i]));
checkCudaErrors(cudaFree(d_coarse_buffer[i]));
free(h_predec[i]);
free(hh_postdec[i]);
checkCudaErrors(cudaFree(d_len[i]));
checkCudaErrors(cudaFree(pos_task_dev[i]));
}
free(d_coarse_weight);
free(d_coarse_buffer);
free(h_coarse_weight);
free(h_coarse_buffer);
free(h_inputs);
free(h_predec);
free(h_postdec);
free(d_inputs);
free(d_predec);
free(d_postdec);
free(hh_postdec);
checkCudaErrors(cudaFree(d_num_thread));
return 0;
}
void BeamFirSetup(float *weight, float *buffer, int n){
int i;
for(i = 0; i < n; i++){
int idx = i + 1;
//weight[i*2] = sin(idx) / ((float)idx);
//weight[i*2+1] = cos(idx) / ((float)idx);
weight[i*2] = 0.001;
weight[i*2+1] = 0.002;
buffer[i*2] = 0.0;
buffer[i*2+1] = 0.0;
}
}
void InputGenerate(float *input, int n){
int i;
for(i = 0; i < n; i++){
//input[2*i] = sqrt(i);
//input[2*i+1] = sqrt(i) + 1;
input[2*i] = 0.01;
input[2*i+1] = 0.02;
}
}
void BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size)
{
/* Filters len[t] interleaved (re,im) samples for each of the TK_NUM
* tasks; in/out/buffer/weight for task t are addressed at offset size[t]. */
int i, j, t;
int modPos;
int mask, mask2;
for(t = 0; t < TK_NUM; t++){
mask = len[t] - 1;
mask2 = 2 * len[t] - 1;
for(j = 0; j < len[t]; j++){
float real_curr = 0;
float imag_curr = 0;
modPos = 2*(len[t] - 1 - (j & mask));
buffer[modPos + size[t]] = in[j * 2 + size[t]];
buffer[modPos+1 + size[t]] = in[j * 2 + 1 + size[t]];
/* Profiling says: this is the single inner loop that matters! */
for (i = 0; i < 2*len[t]; i+=2) {
float rd = buffer[modPos+size[t]];
float id = buffer[modPos+1+size[t]];
float rw = weight[i+size[t]];
float iw = weight[i+1+size[t]];
float rci = rd * rw + id * iw;
/* sign error? this is consistent with StreamIt --dzm */
float ici = id * rw + rd * iw;
#if 1
real_curr += rci;
imag_curr += ici;
#endif
modPos = (modPos + 2) & mask2;
}
out[j * 2+size[t]] = real_curr;
out[j * 2 + 1+size[t]] = imag_curr;
}
}
}
__global__ void d_BeamFirFilter(int *len,
float *weight, float *buffer,
float *in, float *out, int *size, int *thread, int index)
{
/* One task per block: block bk filters len[bk] interleaved (re,im)
* samples using td = thread[index*TK_NUM+bk] active threads; arrays are
* addressed at offset size[bk]. */
int tid = threadIdx.x;
int i, j;
int modPos;
int mask, mask2;
int bk = blockIdx.x;
int td;
td = thread[index*TK_NUM+bk];
mask = len[bk] - 1;
mask2 = 2 * len[bk] - 1;
//for(k = 0; k < TD_NUM; k++){
if(tid < td){
for(j = 0; j < (len[bk]/td); j++){
float real_curr = 0;
float imag_curr = 0;
modPos = 2*(len[bk] - 1 - ((j*td+tid) & mask));
buffer[modPos + size[bk]] = in[(j*td+tid) * 2 + size[bk]];
buffer[modPos+1 + size[bk]] = in[(j*td+tid)* 2 + 1 + size[bk]];
/* Profiling says: this is the single inner loop that matters! */
for (i = 0; i < 2*len[bk]; i+=2) {
float rd = buffer[modPos + size[bk]];
float id = buffer[modPos+1 + size[bk]];
float rw = weight[i + size[bk]];
float iw = weight[i+1 + size[bk]];
float rci = rd * rw + id * iw;
/* sign error? this is consistent with StreamIt --dzm */
float ici = id * rw + rd * iw;
real_curr += rci;
imag_curr += ici;
modPos = (modPos + 2) & mask2;
}
//out[(j*td+tid) * 2 + size[bk]] = bk;
//out[(j*td+tid) * 2 + 1 + size[bk]] = 1.0;
out[(j*td+tid) * 2 + size[bk]] = real_curr;
out[(j*td+tid) * 2 + 1 + size[bk]] = imag_curr;
}
}
}
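/* Editor's note (sketch of the launch/index scheme above, assuming
* len[bk] is divisible by td): block bk owns one task, only its first td
* threads work, and thread tid processes the strided sample set
* { tid, tid + td, tid + 2*td, ... }:
*
* for (int j = 0; j < len[bk] / td; ++j) {
* int sample = j * td + tid; // disjoint across the block's threads
* }
*/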
|
a2d09bc594c9c22019776fa136d1e6f5e2bf70a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "caffe2/core/THCCachingAllocator_gpu.h"
#include "hipcub/hipcub.hpp"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/asan.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
C10_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
C10_DEFINE_int(
caffe2_cub_bin_growth,
8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
C10_DEFINE_int(
caffe2_cub_min_bin,
3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_bin,
10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_managed_mb,
10 * 1024,
"If using cub as the memory allocators, sets the maximum amount "
"of memory managed in gigabytes");
C10_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
C10_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
C10_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace at {
REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext);
} // namespace at
namespace caffe2 {
// Generic implementation - CUDA will handle the right function to call for us
void CUDAContext::CopyBytesAsync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// TODO: verify that the CUDA handles copy from device to device correctly
// even without SetDevice()
// TODO: verify whether source or dest device should be a priority in picking
// the stream
// NB: right now the cross-device copy logic is invoked only in the contexts
// when surrounding code explicitly manages data dependencies and sets up
// events, so it's fine. In order to make it a standalone function proper
// synchronization between stream is required
int gpu_id = 0;
if (dst_device.type() == DeviceType::CUDA) {
gpu_id = dst_device.index();
} else if (src_device.type() == DeviceType::CUDA) {
gpu_id = src_device.index();
} else {
LOG(FATAL) << "shouldn't be called with non-cuda device";
}
CUDA_ENFORCE(hipMemcpyAsync(
dst,
src,
nbytes,
hipMemcpyDefault,
CUDAContext::getCudaObjects().GetStream(gpu_id)));
}
void CUDAContext::CopyBytesSync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// This emulates Caffe2 original behavior where sync copy doesn't change the
// device. It's probably better for clarity to switch to the target device
// explicitly here, but in the worst case CUDA would sync for us.
// TODO: change it to DeviceGuard
CUDAContext context(-1); // take current device
CUDA_ENFORCE(hipMemcpyAsync(
dst, src, nbytes, hipMemcpyDefault, context.cuda_stream()));
// destructor of context synchronizes
}
// For the CPU context, we also allow a (probably expensive) function
// to copy the data from a cuda context. Inside the function, we create
// a temporary CUDAContext object to carry out the copy. From the caller's
// side, these functions are synchronous with respect to the host, similar
// to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call.
template <>
inline void CPUContext::CopyBytes<CUDAContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(src));
context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);
}
template <>
inline void CPUContext::CopyBytes<CPUContext, CUDAContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(dst));
context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
}
} // namespace caffe2
namespace caffe2 {
ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() {
static thread_local ThreadLocalCUDAObjects cuda_objects_;
return cuda_objects_;
}
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Meyers singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
std::unique_ptr<THCCachingAllocator> g_thc_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
for (int i = 0; i < NumCudaDevices(); ++i) {
DeviceGuard g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = ::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for hipDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(hipDeviceEnablePeerAccess(j, 0));
}
}
}
#ifdef CAFFE2_USE_CUDNN
// Check that the versions of cuDNN that were compiled and linked against are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
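// Editor's note (worked example; the group size assumes
// CAFFE2_CUDA_MAX_PEER_SIZE == 8): with 12 visible GPUs, device 5 falls in
// peer group 0 and enables access to devices 0..7 (minus itself), while
// device 9 falls in group 1 and only peers with devices 8..11.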
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
g_thc_allocator.reset(new THCCachingAllocator());
} else {
CAFFE_THROW(
"Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool);
}
}
static PinnedCPUAllocator g_pinned_cpu_alloc;
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(&g_pinned_cpu_alloc);
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
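// Editor's note (illustrative alternative, not part of Caffe2): on the
// host side the same run-once guarantee could be written with
// std::call_once; the atomic<char> above is used instead because of the
// nvcc bool limitation mentioned in the comment:
//
// static std::once_flag cuda_init_flag;
// std::call_once(cuda_init_flag, [] {
// Caffe2InitializeCuda();
// Caffe2SetCUDAMemoryPool();
// Caffe2UsePinnedCPUAllocator();
// });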
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline int RectifyGPUID(const int gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_device_id() ? RectifyGPUID(option.device_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), PROTO_CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
struct DefaultCUDAAllocator final : public at::Allocator {
DefaultCUDAAllocator() {}
~DefaultCUDAAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(hipMalloc(&ptr, nbytes));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::THC:
CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is
// exiting anyway, we will not need to worry about memory leak, so we
// basically ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
CUDA_ENFORCE(g_thc_allocator->Free(ptr));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
};
static DefaultCUDAAllocator g_cuda_alloc;
REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc);
} // namespace caffe2
namespace at {
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CPU,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CPU,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
} // namespace at
| a2d09bc594c9c22019776fa136d1e6f5e2bf70a5.cu | #include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include "caffe2/core/THCCachingAllocator_gpu.h"
#include "cub/util_allocator.cuh"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/asan.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
C10_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
C10_DEFINE_int(
caffe2_cub_bin_growth,
8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
C10_DEFINE_int(
caffe2_cub_min_bin,
3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_bin,
10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_managed_mb,
10 * 1024,
"If using cub as the memory allocators, sets the maximum amount "
"of memory managed in gigabytes");
C10_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
C10_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
C10_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace at {
REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext);
} // namespace at
namespace caffe2 {
// Generic implementation - CUDA will handle the right function to call for us
void CUDAContext::CopyBytesAsync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// TODO: verify that the CUDA handles copy from device to device correctly
// even without SetDevice()
// TODO: verify whether source or dest device should be a priority in picking
// the stream
// NB: right now the cross-device copy logic is invoked only in the contexts
// when surrounding code explicitly manages data dependencies and sets up
// events, so it's fine. In order to make it a standalone function proper
// synchronization between stream is required
int gpu_id = 0;
if (dst_device.type() == DeviceType::CUDA) {
gpu_id = dst_device.index();
} else if (src_device.type() == DeviceType::CUDA) {
gpu_id = src_device.index();
} else {
LOG(FATAL) << "shouldn't be called with non-cuda device";
}
CUDA_ENFORCE(cudaMemcpyAsync(
dst,
src,
nbytes,
cudaMemcpyDefault,
CUDAContext::getCudaObjects().GetStream(gpu_id)));
}
void CUDAContext::CopyBytesSync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// This emulates Caffe2 original behavior where sync copy doesn't change the
// device. It's probably better for clarity to switch to the target device
// explicitly here, but in the worst case CUDA would sync for us.
// TODO: change it to DeviceGuard
CUDAContext context(-1); // take current device
CUDA_ENFORCE(cudaMemcpyAsync(
dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream()));
// destructor of context synchronizes
}
// For the CPU context, we also allow a (probably expensive) function
// to copy the data from a cuda context. Inside the function, we create
// a temporary CUDAContext object to carry out the copy. From the caller's
// side, these functions are synchronous with respect to the host, similar
// to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call.
template <>
inline void CPUContext::CopyBytes<CUDAContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(src));
context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);
}
template <>
inline void CPUContext::CopyBytes<CPUContext, CUDAContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(dst));
context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
}
} // namespace caffe2
namespace caffe2 {
ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() {
static thread_local ThreadLocalCUDAObjects cuda_objects_;
return cuda_objects_;
}
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Meyers singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
std::unique_ptr<THCCachingAllocator> g_thc_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(CAFFE2_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
CAFFE2_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
CAFFE2_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile the caffe binary.");
for (int i = 0; i < NumCudaDevices(); ++i) {
DeviceGuard g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = std::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for cudaDeviceEnablePeerAccess that should always be
// zero currently.
CUDA_ENFORCE(cudaDeviceEnablePeerAccess(j, 0));
}
}
}
#ifdef CAFFE2_USE_CUDNN
// Check that the versions of cuDNN that were compiled and linked against are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
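// Editor's note (worked example based on the flag defaults above; bin
// sizing follows cub::CachingDeviceAllocator's geometric scheme): with
// bin_growth = 8, min_bin = 3 and max_bin = 10, cached block sizes run
// from 8^3 = 512 bytes up to 8^10 = 1 GiB, and a request is rounded up to
// the nearest bin before being served from the cache.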
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
g_thc_allocator.reset(new THCCachingAllocator());
} else {
CAFFE_THROW(
"Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool);
}
}
static PinnedCPUAllocator g_pinned_cpu_alloc;
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if CAFFE2_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
SetCPUAllocator(&g_pinned_cpu_alloc);
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline int RectifyGPUID(const int gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(const int gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_device_id() ? RectifyGPUID(option.device_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), PROTO_CUDA);
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
LOG(INFO) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
LOG(INFO) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
struct DefaultCUDAAllocator final : public at::Allocator {
DefaultCUDAAllocator() {}
~DefaultCUDAAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
CUDA_ENFORCE(cudaMalloc(&ptr, nbytes));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::CUB:
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::THC:
CUDA_ENFORCE(g_thc_allocator->Alloc(&ptr, nbytes, 0 /* stream */));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is
// exiting anyway, we will not need to worry about memory leak, so we
// basically ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
CUDA_ENFORCE(g_thc_allocator->Free(ptr));
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
};
static DefaultCUDAAllocator g_cuda_alloc;
REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc);
} // namespace caffe2
namespace at {
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CPU,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CPU,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
} // namespace at
|
a03543ae07972be03be95cb3c9c9cb2706c613d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include "simple_derivation.cuh"
#include "lsystem.cuh"
const int NUM_THREADS = 16;
const int N = 100;
__global__
void hello(char *result) {
int id = threadIdx.x;// + blockIdx.x * 64;
hiprandState_t state;
hiprand_init(1234, id, 0, &state);
String model = Literal("X", 0, 124.0f, 0.0f);
vector<Action> actions = getActions(model);
int index = hiprand_uniform(&state) * actions.size();
model = actions[index].apply(model);
for (int i = 0; i < model.length(); ++i) {
printf("%s ", model[i].name);
}
}
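// Editor's note: hiprand_uniform()/curand_uniform() return a value in
// (0.0, 1.0], so "uniform * actions.size()" can occasionally evaluate to
// exactly actions.size(). A bounds-safe variant (sketch only, not applied
// above):
//
// int index = (int)(hiprand_uniform(&state) * actions.size());
// if (index >= actions.size()) index = actions.size() - 1;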
void cudaMain() {
int size = sizeof(char) * NUM_THREADS * N;
char* hResult = (char*)malloc(size);
char* dResult;
hipMalloc((void**)&dResult, size);
hipMemcpy(dResult, hResult, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( hello), dim3(1), dim3(NUM_THREADS), 0, 0, dResult);
hipMemcpy(hResult, dResult, size, hipMemcpyDeviceToHost);
hipFree(dResult);
for (int i = 0; i < NUM_THREADS; ++i) {
for (int j = 0; j < N; ++j) {
if (hResult[i * N + j] == 0) break;
printf("%c, ", hResult[i * N + j]);
}
printf("\n");
}
} | a03543ae07972be03be95cb3c9c9cb2706c613d1.cu | #include <stdio.h>
#include <curand_kernel.h>
#include "simple_derivation.cuh"
#include "lsystem.cuh"
const int NUM_THREADS = 16;
const int N = 100;
__global__
void hello(char *result) {
int id = threadIdx.x;// + blockIdx.x * 64;
curandState state;
curand_init(1234, id, 0, &state);
String model = Literal("X", 0, 124.0f, 0.0f);
vector<Action> actions = getActions(model);
int index = curand_uniform(&state) * actions.size();
model = actions[index].apply(model);
for (int i = 0; i < model.length(); ++i) {
printf("%s ", model[i].name);
}
}
void cudaMain() {
int size = sizeof(char) * NUM_THREADS * N;
char* hResult = (char*)malloc(size);
char* dResult;
cudaMalloc((void**)&dResult, size);
cudaMemcpy(dResult, hResult, size, cudaMemcpyHostToDevice);
hello<<<1, NUM_THREADS>>>(dResult);
cudaMemcpy(hResult, dResult, size, cudaMemcpyDeviceToHost);
cudaFree(dResult);
for (int i = 0; i < NUM_THREADS; ++i) {
for (int j = 0; j < N; ++j) {
if (hResult[i * N + j] == 0) break;
printf("%c, ", hResult[i * N + j]);
}
printf("\n");
}
} |
4fca7dc2fdf1ed1670622bcfcb246e4bcf2da431.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2016, National University of Defense Technology
// Author: Xuhao Chen <[email protected]>
#define BFS_VARIANT "linear_base"
#include "bfs.h"
#include "worklistc.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "timer.h"
__global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 in_queue, Worklist2 out_queue) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int src;
if(in_queue.pop_id(tid, src)) {
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//DistT new_dist = dist[src] + 1;
if ((dist[dst] == MYINFINITY) && (atomicCAS(&dist[dst], MYINFINITY, dist[src]+1)==MYINFINITY)) {
//if (dist[dst] == MYINFINITY) {//Not visited
// dist[dst] = new_dist;
assert(out_queue.push(dst));
}
}
}
}
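// Editor's note (the visit idiom above in isolation; DistT is assumed to
// be a 32-bit type accepted by atomicCAS and dist[] initialized to
// MYINFINITY):
//
// __device__ bool try_visit(DistT *dist, int v, DistT new_d) {
// return atomicCAS(&dist[v], MYINFINITY, new_d) == MYINFINITY;
// }
//
// Only the first thread to claim a vertex pushes it, so each vertex enters
// the next frontier at most once.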
__global__ void insert(int source, Worklist2 queue) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id == 0) queue.push(source);
return;
}
void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) {
//print_device_info(0);
DistT zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), hipMemcpyHostToDevice));
DistT * d_dist;
CUDA_SAFE_CALL(hipMalloc((void **)&d_dist, m * sizeof(DistT)));
CUDA_SAFE_CALL(hipMemcpy(d_dist, h_dist, m * sizeof(DistT), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(&d_dist[source], &zero, sizeof(zero), hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipDeviceSynchronize());
Worklist2 queue1(m), queue2(m);
Worklist2 *in_frontier = &queue1, *out_frontier = &queue2;
int iter = 0;
int nitems = 1;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
printf("Launching CUDA BFS solver (%d threads/CTA) ...\n", nthreads);
printf("row_offsets[0] %d) ...\n", h_row_offsets[0]);
printf("row_offsets[1] %d) ...\n", h_row_offsets[1]);
Timer t;
t.Start();
hipLaunchKernelGGL(( insert), dim3(1), dim3(nthreads), 0, 0, source, *in_frontier);
nitems = in_frontier->nitems();
printf("nitems_1 is %d) ...\n", nitems);
do {
++ iter;
nblocks = (nitems - 1) / nthreads + 1;
//printf("iteration %d: frontier_size = %d\n", iter, nitems);
hipLaunchKernelGGL(( bfs_kernel) , dim3(nblocks), dim3(nthreads), 0, 0, m, d_row_offsets, d_column_indices, d_dist, *in_frontier, *out_frontier);
CudaTest("solving failed");
nitems = out_frontier->nitems();
Worklist2 *tmp = in_frontier;
in_frontier = out_frontier;
out_frontier = tmp;
out_frontier->reset();
printf("nitems is %d) ...\n", nitems);
} while (nitems > 0);
CUDA_SAFE_CALL(hipDeviceSynchronize());
t.Stop();
printf("\titerations = %d.\n", iter);
printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(hipMemcpy(h_dist, d_dist, m * sizeof(DistT), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(d_row_offsets));
CUDA_SAFE_CALL(hipFree(d_column_indices));
CUDA_SAFE_CALL(hipFree(d_dist));
return;
}
| 4fca7dc2fdf1ed1670622bcfcb246e4bcf2da431.cu | // Copyright 2016, National University of Defense Technology
// Author: Xuhao Chen <[email protected]>
#define BFS_VARIANT "linear_base"
#include "bfs.h"
#include "worklistc.h"
#include "cuda_launch_config.hpp"
#include "cutil_subset.h"
#include "timer.h"
__global__ void bfs_kernel(int m, int *row_offsets, int *column_indices, DistT *dist, Worklist2 in_queue, Worklist2 out_queue) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int src;
if(in_queue.pop_id(tid, src)) {
int row_begin = row_offsets[src];
int row_end = row_offsets[src + 1];
for (int offset = row_begin; offset < row_end; ++ offset) {
int dst = column_indices[offset];
//DistT new_dist = dist[src] + 1;
if ((dist[dst] == MYINFINITY) && (atomicCAS(&dist[dst], MYINFINITY, dist[src]+1)==MYINFINITY)) {
//if (dist[dst] == MYINFINITY) {//Not visited
// dist[dst] = new_dist;
assert(out_queue.push(dst));
}
}
}
}
__global__ void insert(int source, Worklist2 queue) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if(id == 0) queue.push(source);
return;
}
void BFSSolver(int m, int nnz, int source, int *in_row_offsets, int *in_column_indices, int *h_row_offsets, int *h_column_indices, int *h_degree, DistT *h_dist) {
//print_device_info(0);
DistT zero = 0;
int *d_row_offsets, *d_column_indices;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, h_row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, h_column_indices, nnz * sizeof(int), cudaMemcpyHostToDevice));
DistT * d_dist;
CUDA_SAFE_CALL(cudaMalloc((void **)&d_dist, m * sizeof(DistT)));
CUDA_SAFE_CALL(cudaMemcpy(d_dist, h_dist, m * sizeof(DistT), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(&d_dist[source], &zero, sizeof(zero), cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
Worklist2 queue1(m), queue2(m);
Worklist2 *in_frontier = &queue1, *out_frontier = &queue2;
int iter = 0;
int nitems = 1;
int nthreads = BLOCK_SIZE;
int nblocks = (m - 1) / nthreads + 1;
printf("Launching CUDA BFS solver (%d threads/CTA) ...\n", nthreads);
printf("row_offsets[0] %d) ...\n", h_row_offsets[0]);
printf("row_offsets[1] %d) ...\n", h_row_offsets[1]);
Timer t;
t.Start();
insert<<<1, nthreads>>>(source, *in_frontier);
nitems = in_frontier->nitems();
printf("nitems_1 is %d) ...\n", nitems);
do {
++ iter;
nblocks = (nitems - 1) / nthreads + 1;
//printf("iteration %d: frontier_size = %d\n", iter, nitems);
bfs_kernel <<<nblocks, nthreads>>> (m, d_row_offsets, d_column_indices, d_dist, *in_frontier, *out_frontier);
CudaTest("solving failed");
nitems = out_frontier->nitems();
Worklist2 *tmp = in_frontier;
in_frontier = out_frontier;
out_frontier = tmp;
out_frontier->reset();
printf("nitems is %d) ...\n", nitems);
} while (nitems > 0);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
t.Stop();
printf("\titerations = %d.\n", iter);
printf("\truntime [%s] = %f ms.\n", BFS_VARIANT, t.Millisecs());
CUDA_SAFE_CALL(cudaMemcpy(h_dist, d_dist, m * sizeof(DistT), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(d_row_offsets));
CUDA_SAFE_CALL(cudaFree(d_column_indices));
CUDA_SAFE_CALL(cudaFree(d_dist));
return;
}
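// Editor's note (sketch of the swap step above, identifiers are the
// benchmark's own): each level consumes *in_frontier and fills
// *out_frontier; swapping the two pointers and resetting the new output
// list avoids any per-level allocation:
//
// std::swap(in_frontier, out_frontier);
// out_frontier->reset();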
|
854648a5d48979c0332f18422a32626741fce071.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file quantized_conv.cu
* \brief
* \author Ziheng Jiang, Jun Wu
*/
#include "../nn/convolution-inl.h"
#include "./quantization_utils.h"
#include "../tensor/matrix_op-inl.h"
namespace mxnet {
namespace op {
// value + bias_value * (range1 / limit_range1) * (limit_range2 / range2)
struct QuantizedBiasAddKernel {
MSHADOW_XINLINE static void Map(int i,
size_t bias_size,
int32_t* out,
const int8_t* bias,
const float* min_out,
const float* max_out,
const float* min_bias,
const float* max_bias,
const size_t spatial_size) {
using mshadow::red::limits::MaxValue;
using mshadow::red::limits::MinValue;
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<int32_t>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<int8_t>());
const size_t channel_id = (i / spatial_size) % bias_size;
out[i] = (out[i] * float_for_one_out_quant + bias[channel_id] * float_for_one_bias_quant) /
float_for_one_out_quant;
}
};
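// Illustrative arithmetic for the kernel above (example ranges assumed, not taken from the
// original source): with min_out/max_out = +/-100.0f quantized to int32 and
// min_bias/max_bias = +/-1.0f quantized to int8,
//   float_for_one_out_quant  = 100.0f / 2147483647  ~= 4.66e-8
//   float_for_one_bias_quant = 1.0f   / 127         ~= 7.87e-3
//   out[i] = (out[i] * 4.66e-8 + bias[channel_id] * 7.87e-3) / 4.66e-8
// i.e. both operands are dequantized to real values, summed, and the sum is requantized back
// into the int32 output scale.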
#if MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
STATIC_ASSERT_CUDNN_VERSION_GE(6000);
template <typename SrcType, typename DstType, typename CmpType>
class QuantizedCuDNNConvOp {
public:
QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
void Init(const ConvolutionParam& param,
const OpContext& ctx,
const mxnet::ShapeVector& in_shape,
const mxnet::ShapeVector& out_shape) {
param_ = param;
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "QuantizedConvOp only supports NCHW for now";
}
if (param_.stride.ndim() == 0U)
param_.stride = mshadow::Shape2(1, 1);
if (param_.dilate.ndim() == 0U)
param_.dilate = mshadow::Shape2(1, 1);
if (param_.pad.ndim() == 0U)
param_.pad = mshadow::Shape2(0, 0);
N = 0, H = 2, W = 3, C = 1;
src_type_ = mshadow::DataType<SrcType>::kCudnnFlag;
dst_type_ = mshadow::DataType<DstType>::kCudnnFlag;
cmp_type_ = mshadow::DataType<CmpType>::kCudnnFlag;
algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
format_ = CUDNN_TENSOR_NHWC;
InitDescriptors(in_shape, out_shape);
GetTempSize(ctx);
}
~QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));
}
void Forward(const OpContext& ctx,
const std::vector<TBlob>& in_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& out_data) {
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
using namespace mshadow;
CHECK_EQ(in_data.size(), param_.no_bias ? 6U : 9U);
CHECK_EQ(out_data.size(), 3U);
Stream<gpu>* s = ctx.get_stream<gpu>();
CHECK_EQ(s->dnn_handle_ownership_, Stream<gpu>::OwnHandle);
const TBlob& data = in_data[0];
const TBlob& filter = in_data[1];
const TBlob& out = out_data[0];
const mxnet::TShape& dshape = data.shape_;
const mxnet::TShape& fshape = filter.shape_;
const mxnet::TShape& oshape = out.shape_;
// allocate workspace
const int dev_id = ctx.run_ctx.ctx.dev_id;
const int dev_mask = gpu::kDevMask;
if (!param_.layout.has_value() || param_.layout.value() == mshadow::kNCHW) {
const size_t data_size = dshape.Size();
const size_t weight_size = fshape.Size();
const size_t output_size = oshape.Size();
size_t total_temp_bytes = (workspace_ + data_size + weight_size) * sizeof(SrcType) +
output_size * (sizeof(DstType) + sizeof(int32_t));
Tensor<gpu, 1, char> temp_space =
ctx.requested[0].get_space_typed<gpu, 1, char>(mshadow::Shape1(total_temp_bytes), s);
char* temp_dptr = temp_space.dptr_;
TBlob data_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({dshape[N], dshape[H], dshape[W], dshape[C]}),
dev_mask,
DataType<SrcType>::kFlag,
dev_id);
temp_dptr += data_size * sizeof(SrcType);
TBlob filter_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({fshape[N], fshape[H], fshape[W], fshape[C]}),
dev_mask,
DataType<SrcType>::kFlag,
dev_id);
temp_dptr += weight_size * sizeof(SrcType);
// input: [NCHW] => [NHWC](batch, in_height, in_width, in_channels)
// filter: [NCHW] => [NHWC](out_channels, filter_height, filter_width, in_channels)
TransposeImpl<gpu>(ctx.run_ctx, data, data_, mxnet::TShape({N, H, W, C}));
TransposeImpl<gpu>(ctx.run_ctx, filter, filter_, mxnet::TShape({N, H, W, C}));
TBlob out_(reinterpret_cast<DstType*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask,
DataType<DstType>::kFlag,
dev_id);
temp_dptr += output_size * sizeof(DstType);
TBlob out_tcast(reinterpret_cast<int32_t*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask,
DataType<int32_t>::kFlag,
dev_id);
temp_dptr += output_size * sizeof(int32_t);
// input: [NHWC](batch, in_height, in_width, in_channels)
// filter: [HWNC](out_channels, filter_height, filter_width, in_channels)
// output: [NHWC](batch, out_height, out_width, out_channels)
CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
&alpha_,
data_desc_,
data_.dptr_,
filter_desc_,
filter_.dptr_,
conv_desc_,
algo_,
temp_dptr,
workspace_byte_,
&beta_,
out_desc_,
out_.dptr_));
Tensor<gpu, 1, DstType> out_tensor = out_.FlatTo1D<gpu, DstType>(s);
Tensor<gpu, 1, int32_t> out_tcast_tensor = out_tcast.FlatTo1D<gpu, int32_t>(s);
Assign(out_tcast_tensor, kWriteTo, mshadow::expr::tcast<int32_t>(out_tensor));
// output: [NHWC](batch, out_height, out_width, out_channels) => [NCHW]
TransposeImpl<gpu>(ctx.run_ctx, out_tcast, out, mxnet::TShape({0, 3, 1, 2}));
} else {
LOG(FATAL) << "quantized_conv only supports NCHW for now";
}
// calculate the min/max range for out_data as it's a multiplication
// of in_data[0] and in_data[1]. Need to rescale the min/max range of out_data
// based on the min/max ranges of in_data[0] and in_data[1].
const size_t num_inputs = param_.no_bias ? 2 : 3;
mxnet_op::Kernel<QuantizationRangeForS8S8MultiplicationStruct, gpu>::Launch(
s,
1,
out_data[1].dptr<float>(),
out_data[2].dptr<float>(),
in_data[num_inputs].dptr<float>(),
in_data[num_inputs + 1].dptr<float>(),
in_data[num_inputs + 2].dptr<float>(),
in_data[num_inputs + 3].dptr<float>());
if (!param_.no_bias) {
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "quantized_conv only supports NCHW when there is a bias";
}
const TBlob& bias = in_data[2];
mxnet_op::Kernel<QuantizedBiasAddKernel, gpu>::Launch(s,
out.Size(),
bias.Size(),
out.dptr<int32_t>(),
bias.dptr<int8_t>(),
out_data[1].dptr<float>(),
out_data[2].dptr<float>(),
in_data[7].dptr<float>(),
in_data[8].dptr<float>(),
oshape[2] * oshape[3]);
}
}
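// Editorial sketch of the data movement in Forward() above (shapes are assumed for
// illustration, not taken from the original source): for an int8 input of NCHW shape
// (32, 3, 224, 224) and a 3x3 filter with 64 output channels,
//   data:   NCHW (32, 3, 224, 224) --TransposeImpl({N,H,W,C})--> NHWC (32, 224, 224, 3)
//   conv:   cudnnConvolutionForward runs in NHWC and writes a float NHWC result
//   cast:   the float result is copied into an int32 buffer (out_tcast)
//   output: NHWC --TransposeImpl({0,3,1,2})--> NCHW, e.g. (32, 64, 222, 222)
// which is why the workspace holds the transposed input, the transposed filter, the float
// output and its int32 copy on top of the cuDNN workspace itself.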
void InitDescriptors(const mxnet::ShapeVector& in_shape, const mxnet::ShapeVector& out_shape) {
const mxnet::TShape& dshape = in_shape[0];
const mxnet::TShape& kshape = in_shape[1];
const mxnet::TShape& oshape = out_shape[0];
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc_,
param_.pad[0],
param_.pad[1],
param_.stride[0],
param_.stride[1],
1,
1,
CUDNN_CROSS_CORRELATION,
cmp_type_));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
data_desc_, format_, src_type_, dshape[N], dshape[C], dshape[H], dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc_, format_, dst_type_, oshape[N], oshape[C], oshape[H], oshape[W]));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filter_desc_, src_type_, format_, kshape[N], kshape[C], kshape[H], kshape[W]));
}
void GetTempSize(const OpContext& ctx) {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
s->dnn_handle_, data_desc_, filter_desc_, conv_desc_, out_desc_, algo_, &workspace_byte_));
workspace_ = workspace_byte_ / sizeof(SrcType) + 1;
}
private:
ConvolutionParam param_;
size_t workspace_;
size_t workspace_byte_;
cudnnDataType_t src_type_;
cudnnDataType_t dst_type_;
cudnnDataType_t cmp_type_;
cudnnTensorFormat_t format_;
cudnnConvolutionDescriptor_t conv_desc_;
cudnnTensorDescriptor_t data_desc_;
cudnnFilterDescriptor_t filter_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnConvolutionFwdAlgo_t algo_;
uint32_t N, H, W, C;
float alpha_ = 1.0f;
float beta_ = 0.0f;
}; // class QuantizedCuDNNConvOp
#endif // MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
void QuantizedConvForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedConvForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
typedef QuantizedCuDNNConvOp<int8_t, float, int32_t> QuantizedConvOpInt8;
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedConvOpInt8 op;
#else
static MX_THREAD_LOCAL QuantizedConvOpInt8 op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, ctx, {inputs[0].shape_, inputs[1].shape_}, {outputs[0].shape_});
op.Forward(ctx, inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedConvForward<gpu> only supports cudnnConvolutionForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && TORCH_HIP_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_conv)
.set_attr<FCompute>("FCompute<gpu>", QuantizedConvForwardGPU);
} // namespace op
} // namespace mxnet
| 854648a5d48979c0332f18422a32626741fce071.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file quantized_conv.cu
* \brief
* \author Ziheng Jiang, Jun Wu
*/
#include "../nn/convolution-inl.h"
#include "./quantization_utils.h"
#include "../tensor/matrix_op-inl.h"
namespace mxnet {
namespace op {
// value + bias_value * (range1 / limit_range1) * (limit_range2 / range2)
struct QuantizedBiasAddKernel {
MSHADOW_XINLINE static void Map(int i,
size_t bias_size,
int32_t* out,
const int8_t* bias,
const float* min_out,
const float* max_out,
const float* min_bias,
const float* max_bias,
const size_t spatial_size) {
using mshadow::red::limits::MaxValue;
using mshadow::red::limits::MinValue;
float float_for_one_out_quant =
MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<int32_t>());
float float_for_one_bias_quant =
MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<int8_t>());
const size_t channel_id = (i / spatial_size) % bias_size;
out[i] = (out[i] * float_for_one_out_quant + bias[channel_id] * float_for_one_bias_quant) /
float_for_one_out_quant;
}
};
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
STATIC_ASSERT_CUDNN_VERSION_GE(6000);
template <typename SrcType, typename DstType, typename CmpType>
class QuantizedCuDNNConvOp {
public:
QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc_));
CUDNN_CALL(cudnnCreateFilterDescriptor(&filter_desc_));
}
void Init(const ConvolutionParam& param,
const OpContext& ctx,
const mxnet::ShapeVector& in_shape,
const mxnet::ShapeVector& out_shape) {
param_ = param;
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "QuantizedConvOp only supports NCHW for now";
}
if (param_.stride.ndim() == 0U)
param_.stride = mshadow::Shape2(1, 1);
if (param_.dilate.ndim() == 0U)
param_.dilate = mshadow::Shape2(1, 1);
if (param_.pad.ndim() == 0U)
param_.pad = mshadow::Shape2(0, 0);
N = 0, H = 2, W = 3, C = 1;
src_type_ = mshadow::DataType<SrcType>::kCudnnFlag;
dst_type_ = mshadow::DataType<DstType>::kCudnnFlag;
cmp_type_ = mshadow::DataType<CmpType>::kCudnnFlag;
algo_ = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
format_ = CUDNN_TENSOR_NHWC;
InitDescriptors(in_shape, out_shape);
GetTempSize(ctx);
}
~QuantizedCuDNNConvOp() {
CUDNN_CALL(cudnnDestroyFilterDescriptor(filter_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(out_desc_));
CUDNN_CALL(cudnnDestroyConvolutionDescriptor(conv_desc_));
}
void Forward(const OpContext& ctx,
const std::vector<TBlob>& in_data,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& out_data) {
CHECK_EQ(param_.kernel.ndim(), 2U)
<< "QuantizedCuDNNConvOp only supports 2D convolution for now";
using namespace mshadow;
CHECK_EQ(in_data.size(), param_.no_bias ? 6U : 9U);
CHECK_EQ(out_data.size(), 3U);
Stream<gpu>* s = ctx.get_stream<gpu>();
CHECK_EQ(s->dnn_handle_ownership_, Stream<gpu>::OwnHandle);
const TBlob& data = in_data[0];
const TBlob& filter = in_data[1];
const TBlob& out = out_data[0];
const mxnet::TShape& dshape = data.shape_;
const mxnet::TShape& fshape = filter.shape_;
const mxnet::TShape& oshape = out.shape_;
// allocate workspace
const int dev_id = ctx.run_ctx.ctx.dev_id;
const int dev_mask = gpu::kDevMask;
if (!param_.layout.has_value() || param_.layout.value() == mshadow::kNCHW) {
const size_t data_size = dshape.Size();
const size_t weight_size = fshape.Size();
const size_t output_size = oshape.Size();
size_t total_temp_bytes = (workspace_ + data_size + weight_size) * sizeof(SrcType) +
output_size * (sizeof(DstType) + sizeof(int32_t));
Tensor<gpu, 1, char> temp_space =
ctx.requested[0].get_space_typed<gpu, 1, char>(mshadow::Shape1(total_temp_bytes), s);
char* temp_dptr = temp_space.dptr_;
TBlob data_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({dshape[N], dshape[H], dshape[W], dshape[C]}),
dev_mask,
DataType<SrcType>::kFlag,
dev_id);
temp_dptr += data_size * sizeof(SrcType);
TBlob filter_(reinterpret_cast<SrcType*>(temp_dptr),
mxnet::TShape({fshape[N], fshape[H], fshape[W], fshape[C]}),
dev_mask,
DataType<SrcType>::kFlag,
dev_id);
temp_dptr += weight_size * sizeof(SrcType);
// input: [NCHW] => [NHWC](batch, in_height, in_width, in_channels)
// filter: [NCHW] => [NHWC](out_channels, filter_height, filter_width, in_channels)
TransposeImpl<gpu>(ctx.run_ctx, data, data_, mxnet::TShape({N, H, W, C}));
TransposeImpl<gpu>(ctx.run_ctx, filter, filter_, mxnet::TShape({N, H, W, C}));
TBlob out_(reinterpret_cast<DstType*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask,
DataType<DstType>::kFlag,
dev_id);
temp_dptr += output_size * sizeof(DstType);
TBlob out_tcast(reinterpret_cast<int32_t*>(temp_dptr),
mxnet::TShape({oshape[N], oshape[H], oshape[W], oshape[C]}),
dev_mask,
DataType<int32_t>::kFlag,
dev_id);
temp_dptr += output_size * sizeof(int32_t);
// input: [NHWC](batch, in_height, in_width, in_channels)
// filter: [HWNC](out_channels, filter_height, filter_width, in_channels)
// output: [NHWC](batch, out_height, out_width, out_channels)
CUDNN_CALL(cudnnConvolutionForward(s->dnn_handle_,
&alpha_,
data_desc_,
data_.dptr_,
filter_desc_,
filter_.dptr_,
conv_desc_,
algo_,
temp_dptr,
workspace_byte_,
&beta_,
out_desc_,
out_.dptr_));
Tensor<gpu, 1, DstType> out_tensor = out_.FlatTo1D<gpu, DstType>(s);
Tensor<gpu, 1, int32_t> out_tcast_tensor = out_tcast.FlatTo1D<gpu, int32_t>(s);
Assign(out_tcast_tensor, kWriteTo, mshadow::expr::tcast<int32_t>(out_tensor));
// output: [NHWC](batch, out_height, out_width, out_channels) => [NCHW]
TransposeImpl<gpu>(ctx.run_ctx, out_tcast, out, mxnet::TShape({0, 3, 1, 2}));
} else {
LOG(FATAL) << "quantized_conv only supports NCHW for now";
}
// calculate the min/max range for out_data as it's a multiplication
// of in_data[0] and in_data[1]. Need to rescale the min/max range of out_data
// based on the min/max ranges of in_data[0] and in_data[1].
const size_t num_inputs = param_.no_bias ? 2 : 3;
mxnet_op::Kernel<QuantizationRangeForS8S8MultiplicationStruct, gpu>::Launch(
s,
1,
out_data[1].dptr<float>(),
out_data[2].dptr<float>(),
in_data[num_inputs].dptr<float>(),
in_data[num_inputs + 1].dptr<float>(),
in_data[num_inputs + 2].dptr<float>(),
in_data[num_inputs + 3].dptr<float>());
if (!param_.no_bias) {
if (param_.layout.has_value()) {
CHECK_EQ(param_.layout.value(), mshadow::kNCHW)
<< "quantized_conv only supports NCHW when there is a bias";
}
const TBlob& bias = in_data[2];
mxnet_op::Kernel<QuantizedBiasAddKernel, gpu>::Launch(s,
out.Size(),
bias.Size(),
out.dptr<int32_t>(),
bias.dptr<int8_t>(),
out_data[1].dptr<float>(),
out_data[2].dptr<float>(),
in_data[7].dptr<float>(),
in_data[8].dptr<float>(),
oshape[2] * oshape[3]);
}
}
void InitDescriptors(const mxnet::ShapeVector& in_shape, const mxnet::ShapeVector& out_shape) {
const mxnet::TShape& dshape = in_shape[0];
const mxnet::TShape& kshape = in_shape[1];
const mxnet::TShape& oshape = out_shape[0];
CUDNN_CALL(cudnnSetConvolution2dDescriptor(conv_desc_,
param_.pad[0],
param_.pad[1],
param_.stride[0],
param_.stride[1],
1,
1,
CUDNN_CROSS_CORRELATION,
cmp_type_));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
data_desc_, format_, src_type_, dshape[N], dshape[C], dshape[H], dshape[W]));
CUDNN_CALL(cudnnSetTensor4dDescriptor(
out_desc_, format_, dst_type_, oshape[N], oshape[C], oshape[H], oshape[W]));
CUDNN_CALL(cudnnSetFilter4dDescriptor(
filter_desc_, src_type_, format_, kshape[N], kshape[C], kshape[H], kshape[W]));
}
void GetTempSize(const OpContext& ctx) {
mshadow::Stream<gpu>* s = ctx.get_stream<gpu>();
CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize(
s->dnn_handle_, data_desc_, filter_desc_, conv_desc_, out_desc_, algo_, &workspace_byte_));
workspace_ = workspace_byte_ / sizeof(SrcType) + 1;
}
private:
ConvolutionParam param_;
size_t workspace_;
size_t workspace_byte_;
cudnnDataType_t src_type_;
cudnnDataType_t dst_type_;
cudnnDataType_t cmp_type_;
cudnnTensorFormat_t format_;
cudnnConvolutionDescriptor_t conv_desc_;
cudnnTensorDescriptor_t data_desc_;
cudnnFilterDescriptor_t filter_desc_;
cudnnTensorDescriptor_t out_desc_;
cudnnConvolutionFwdAlgo_t algo_;
uint32_t N, H, W, C;
float alpha_ = 1.0f;
float beta_ = 0.0f;
}; // class QuantizedCuDNNConvOp
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
void QuantizedConvForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
CHECK_EQ(param.kernel.ndim(), 2U)
<< "QuantizedConvForward<gpu> only supports 2D convolution for now";
#if MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
typedef QuantizedCuDNNConvOp<int8_t, float, int32_t> QuantizedConvOpInt8;
#if DMLC_CXX11_THREAD_LOCAL
static thread_local QuantizedConvOpInt8 op;
#else
static MX_THREAD_LOCAL QuantizedConvOpInt8 op;
#endif // DMLC_CXX11_THREAD_LOCAL
op.Init(param, ctx, {inputs[0].shape_, inputs[1].shape_}, {outputs[0].shape_});
op.Forward(ctx, inputs, req, outputs);
#else
LOG(FATAL) << "QuantizedConvForward<gpu> only supports cudnnConvolutionForward "
"with CUDNN >= 6.0 and CUDA >= 8.0";
#endif // MXNET_USE_CUDNN == 1 && CUDA_VERSION >= 8000
}
NNVM_REGISTER_OP(_contrib_quantized_conv)
.set_attr<FCompute>("FCompute<gpu>", QuantizedConvForwardGPU);
} // namespace op
} // namespace mxnet
|
7682290e130aa86d85bc3a9a9fbe72e6b7a5581e.hip | // !!! This is a file automatically generated by hipify!!!
/* -------------------------------------------------------------------------------
* Tomocam Copyright (c) 2018
*
* The Regents of the University of California, through Lawrence Berkeley
*National Laboratory (subject to receipt of any required approvals from the
*U.S. Dept. of Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this software,
* please contact Berkeley Lab's Innovation & Partnerships Office at
*[email protected].
*
* NOTICE. This Software was developed under funding from the U.S. Department of
* Energy and the U.S. Government consequently retains certain rights. As such,
*the U.S. Government has been granted for itself and others acting on its
*behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
*to reproduce, distribute copies to the public, prepare derivative works, and
* perform publicly and display publicly, and to permit other to do so.
*---------------------------------------------------------------------------------
*/
#include <hip/hip_runtime.h>
#include "dev_array.h"
#include "utils.cuh"
namespace tomocam {
template <typename T>
__global__ void _add_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] + b[idx];
}
template <typename T>
__global__ void _subtract_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] - b[idx];
}
template <typename T>
__global__ void _multiply_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] * b[idx];
}
template <typename T>
__global__ void _divide_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] / b[idx];
}
template <typename T>
__global__ void _scale_vecs(DeviceArray<T> a, T b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] * b;
}
template <typename T>
__global__ void _shift_vecs(DeviceArray<T> a, T b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] + b;
}
template <typename T>
void add_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _add_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
template <typename T>
void subtract_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _subtract_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
template <typename T>
void multiply_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _multiply_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
template <typename T>
void divide_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _divide_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
template <typename T>
void scale_vectors(DeviceArray<T> a, T b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _scale_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
template <typename T>
void shift_vectors(DeviceArray<T> a, T b, DeviceArray<T> res, hipStream_t stream) {
Grid grid(a.size());
hipLaunchKernelGGL(( _shift_vecs) , dim3(grid.blocks()), dim3(grid.threads()), 0, stream, a, b, res);
}
} // namespace
| 7682290e130aa86d85bc3a9a9fbe72e6b7a5581e.cu | /* -------------------------------------------------------------------------------
* Tomocam Copyright (c) 2018
*
* The Regents of the University of California, through Lawrence Berkeley
*National Laboratory (subject to receipt of any required approvals from the
*U.S. Dept. of Energy). All rights reserved.
*
* If you have questions about your rights to use or distribute this software,
* please contact Berkeley Lab's Innovation & Partnerships Office at
*[email protected].
*
* NOTICE. This Software was developed under funding from the U.S. Department of
* Energy and the U.S. Government consequently retains certain rights. As such,
*the U.S. Government has been granted for itself and others acting on its
*behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
*to reproduce, distribute copies to the public, prepare derivative works, and
* perform publicly and display publicly, and to permit other to do so.
*---------------------------------------------------------------------------------
*/
#include <cuda.h>
#include "dev_array.h"
#include "utils.cuh"
namespace tomocam {
template <typename T>
__global__ void _add_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] + b[idx];
}
template <typename T>
__global__ void _subtract_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] - b[idx];
}
template <typename T>
__global__ void _multiply_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] * b[idx];
}
template <typename T>
__global__ void _divide_vecs(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] / b[idx];
}
template <typename T>
__global__ void _scale_vecs(DeviceArray<T> a, T b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] * b;
}
template <typename T>
__global__ void _shift_vecs(DeviceArray<T> a, T b, DeviceArray<T> res) {
int idx = Index1D();
if (idx < a.size()) res[idx] = a[idx] + b;
}
template <typename T>
void add_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_add_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
template <typename T>
void subtract_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_subtract_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
template <typename T>
void multiply_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_multiply_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
template <typename T>
void divide_vectors(DeviceArray<T> a, DeviceArray<T> b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_divide_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
template <typename T>
void scale_vectors(DeviceArray<T> a, T b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_scale_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
template <typename T>
void shift_vectors(DeviceArray<T> a, T b, DeviceArray<T> res, cudaStream_t stream) {
Grid grid(a.size());
_shift_vecs <<<grid.blocks(), grid.threads(), 0, stream>>> (a, b, res);
}
} // namespace
|
a41d335ffb706e903c73ecdd793f11a681e79a3e.hip | // !!! This is a file automatically generated by hipify!!!
/*
Running without arguments is equivalent to 1000 iterations with the
5 celestial objects declared in the golden_bodies array.
$ nbody.exe 1000 5
The output of this shows the energy before and after the simulation,
and should be:
double:
-0.169075164
-0.169087605
float:
-0.169075206
-0.169086471
*/
#include <hip/hip_runtime.h>
#include <cuda_occupancy.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <algorithm>
#define GPUTEST 1
using type = float;
const type pi{ 3.141592653589793 };
const type solar_mass{ 4 * pi * pi };
const type days_per_year{ 365.24 };
int blockSize;
int mingridSize;
int gridSize;
type *outData;
std::ofstream file;
template <typename T>
struct planet {
T x, y, z;
T vx, vy, vz;
T mass;
};
struct planet<type> golden_bodies[5] = {
{ /* sun */
0, 0, 0, 0, 0, 0, solar_mass
},
{ /* jupiter */
4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01,
1.66007664274403694e-03 * days_per_year,
7.69901118419740425e-03 * days_per_year,
-6.90460016972063023e-05 * days_per_year,
9.54791938424326609e-04 * solar_mass
},
{ /* saturn */
8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01,
-2.76742510726862411e-03 * days_per_year,
4.99852801234917238e-03 * days_per_year,
2.30417297573763929e-05 * days_per_year,
2.85885980666130812e-04 * solar_mass
},
{ /* uranus */
1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01,
2.96460137564761618e-03 * days_per_year,
2.37847173959480950e-03 * days_per_year,
-2.96589568540237556e-05 * days_per_year,
4.36624404335156298e-05 * solar_mass
},
{ /* neptune */
1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01,
2.68067772490389322e-03 * days_per_year,
1.62824170038242295e-03 * days_per_year,
-9.51592254519715870e-05 * days_per_year,
5.15138902046611451e-05 * solar_mass
}
};
const type DT{ 1e-2 };
const type RECIP_DT{ static_cast<type>(1.0) / DT };
// velocity and position update kernel
template <typename T>
__global__ void adv_Update_GPU(int nbodies, planet<T> *bodies)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nbodies)
{
planet<T> &b1 = bodies[i];
for (int j = i + 1; j < nbodies; j++)
{
planet<T> &b2 = bodies[j];
T dx = b1.x - b2.x;
T dy = b1.y - b2.y;
T dz = b1.z - b2.z;
T inv_distance = 1.0 / sqrt(dx * dx + dy * dy + dz * dz);
T mag = inv_distance * inv_distance * inv_distance;
b1.vx -= dx * b2.mass * mag;
b1.vy -= dy * b2.mass * mag;
b1.vz -= dz * b2.mass * mag;
b2.vx += dx * b1.mass * mag;
b2.vy += dy * b1.mass * mag;
b2.vz += dz * b1.mass * mag;
}
b1.x += b1.vx;
b1.y += b1.vy;
b1.z += b1.vz;
}
}
template <typename T>
__global__ void scale_bodies_GPU(int nbodies, planet<T> *bodies, T scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nbodies)
{
bodies[i].mass *= scale*scale;
bodies[i].vx *= scale;
bodies[i].vy *= scale;
bodies[i].vz *= scale;
}
}
//template <typename T>
//__global__ void energy_GPU(int nbodies, T *addReduc, T *subReduc, planet<T> *bodies)
//{
// extern __shared__ T e[];
//
// //T e = 0.0;
// unsigned int threadID = threadIdx.x;
//
// unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
//
//
//
// if (i < nbodies)
// {
// planet<T> &b = bodies[i];
// e[threadID] = 0.5 * b.mass * (b.vx * b.vx + b.vy * b.vy + b.vz * b.vz);
// }
//
//
// for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
// {
// if (threadID < stride)
// {
// e[threadID] += e[threadID + stride];
// }
//
// }
// if (threadID == 0)
// {
// addReduc[blockIdx.x] = e[0];
// }
//
//
// e[threadID] = 0;
//
// if (i < nbodies)
// {
// for (int iter = i + 1; iter < nbodies; iter++){
// planet<T> &b = bodies[i];
// planet<T> &b2 = bodies[iter];
// T dx = b.x - b2.x;
// T dy = b.y - b2.y;
// T dz = b.z - b2.z;
// T distance = sqrt(dx * dx + dy * dy + dz * dz);
// T var = ((b.mass * b2.mass) / distance);
// e[threadID] += var;
// }
// }
//
// __syncthreads();
//
// for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
// {
// if (threadID < stride)
// {
// e[threadID] += e[threadID + stride];
// }
// __syncthreads();
// }
//
// if (threadID == 0)
// {
// subReduc[blockIdx.x] = e[0];
// }
//}
// forward declaration: reduction_offset_mom is defined below but called from offset_momentum_GPU
template <typename T>
__device__ void reduction_offset_mom(int nbodies, planet<T> *bodies, T *outData, int step);
template <typename T>
__global__ void offset_momentum_GPU(planet<T> *bodies, T *outData, int nbodies, int d_gridSize, T solarMass)
{
T px = 0.0, py = 0.0, pz = 0.0;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int doCount = 0;
reduction_offset_mom(nbodies, bodies, outData, 0);
if (doCount < 3)
{
if (i > 0 && i < d_gridSize)
{
outData[0] += outData[i];
}
if (doCount == 0)
px = outData[0];
else if (doCount == 1)
py = outData[0];
else if (doCount == 2)
pz = outData[0];
doCount++;
}
bodies[0].vx = -px / solarMass;
bodies[0].vy = -py / solarMass;
bodies[0].vz = -pz / solarMass;
}
template <typename T>
__device__ void reduction_offset_mom(int nbodies, planet<T> *bodies, T *outData, int step)
{
extern __shared__ T sharedData[];
// each thread loads one element from global to shared mem
unsigned int threadID = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (step == 0)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vx * bodies[i].mass;
}
else if (step == 1)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vy * bodies[i].mass;
}
else if (step == 2)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vz * bodies[i].mass;
}
// do reduction in shared mem (synchronize after each stride so all partial sums are visible)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadID < s) {
sharedData[threadID] += sharedData[threadID + s];
}
__syncthreads();
}
// write result for this block to global mem
if (threadID == 0)
outData[blockIdx.x] = sharedData[0];
}
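// Illustrative trace of the tree reduction above, assuming blockDim.x = 8 and s = sharedData:
//   stride 4: s[0]+=s[4]  s[1]+=s[5]  s[2]+=s[6]  s[3]+=s[7]
//   stride 2: s[0]+=s[2]  s[1]+=s[3]
//   stride 1: s[0]+=s[1]
// After the final stride, thread 0 holds the block-wide partial sum and publishes it to
// outData[blockIdx.x]; the per-block partials still have to be combined by the caller.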
template <typename T>
void advance(int nbodies, planet<T> *bodies)
{
int i, j;
for (i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
for (j = i + 1; j < nbodies; j++) {
planet<T> &b2 = bodies[j];
T dx = b.x - b2.x;
T dy = b.y - b2.y;
T dz = b.z - b2.z;
T inv_distance = 1.0 / sqrt(dx * dx + dy * dy + dz * dz);
T mag = inv_distance * inv_distance * inv_distance;
b.vx -= dx * b2.mass * mag;
b.vy -= dy * b2.mass * mag;
b.vz -= dz * b2.mass * mag;
b2.vx += dx * b.mass * mag;
b2.vy += dy * b.mass * mag;
b2.vz += dz * b.mass * mag;
}
}
for (i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
b.x += b.vx;
b.y += b.vy;
b.z += b.vz;
}
}
template <typename T>
T energy(int nbodies, planet<T> *bodies)
{
T e = 0.0;
for (int i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
e += 0.5 * b.mass * (b.vx * b.vx + b.vy * b.vy + b.vz * b.vz);
for (int j = i + 1; j < nbodies; j++) {
planet<T> &b2 = bodies[j];
T dx = b.x - b2.x;
T dy = b.y - b2.y;
T dz = b.z - b2.z;
T distance = sqrt(dx * dx + dy * dy + dz * dz);
e -= (b.mass * b2.mass) / distance;
}
}
return e;
}
template <typename T>
void offset_momentum(int nbodies, planet<T> *bodies)
{
T px = 0.0, py = 0.0, pz = 0.0;
for (int i = 0; i < nbodies; ++i) {
px += bodies[i].vx * bodies[i].mass;
py += bodies[i].vy * bodies[i].mass;
pz += bodies[i].vz * bodies[i].mass;
}
bodies[0].vx = -px / solar_mass;
bodies[0].vy = -py / solar_mass;
bodies[0].vz = -pz / solar_mass;
}
/*
* Rescale certain properties of bodies. That allows doing
* consequential advance()'s as if dt were equal to 1.0.
*
* When all advances done, rescale bodies back to obtain correct energy.
*/
template <typename T>
void scale_bodies(int nbodies, planet<T> *bodies, T scale)
{
for (int i = 0; i < nbodies; ++i) {
bodies[i].mass *= scale*scale;
bodies[i].vx *= scale;
bodies[i].vy *= scale;
bodies[i].vz *= scale;
}
}
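// Editorial note on the rescaling trick documented above: advance() integrates with an
// implicit time step of 1.0 (vx -= dx*m*mag; x += vx). Substituting v' = v*dt and
// m' = m*dt*dt reproduces an explicit step of size dt, because mag depends only on positions:
//   v'_new = v' - dx*m2'*mag = dt*(v - dt*dx*m2*mag) = dt*v_new
//   x_new  = x + v'_new      = x + dt*v_new
// Scaling back with RECIP_DT = 1/dt afterwards restores physical masses and velocities so
// that energy() is evaluated in the original units.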
template <typename T>
void init_random_bodies(int nbodies, planet<T> *bodies)
{
for (int i = 0; i < nbodies; ++i) {
bodies[i].x = (T)rand() / RAND_MAX;
bodies[i].y = (T)rand() / RAND_MAX;
bodies[i].z = (T)rand() / RAND_MAX;
bodies[i].vx = (T)rand() / RAND_MAX;
bodies[i].vy = (T)rand() / RAND_MAX;
bodies[i].vz = (T)rand() / RAND_MAX;
bodies[i].mass = (T)rand() / RAND_MAX;
}
}
int isPowerOfTwo(unsigned int x)
{
return ((x != 0) && !(x & (x - 1)));
}
template <typename T>
void callOffSet(int nbodies, planet<T> *Gbodies)
{
T *d_Array;
hipMalloc(&d_Array, gridSize*sizeof(T));
offset_momentum_GPU << <gridSize, blockSize, nbodies * sizeof(T) >> >(Gbodies, d_Array, nbodies, gridSize, solar_mass); //need to fix outData
hipFree(d_Array);
}
//template <typename T>
//T callEnergy(int nbodies, planet<T> *Gbodies)
//{
//
// T *h_addArray = new T[gridSize];
// T *h_subArray = new T[gridSize];
//
// T *d_addArray; hipMalloc((void**)&d_addArray, gridSize * sizeof(T));
// T *d_subArray; hipMalloc((void**)&d_subArray, gridSize * sizeof(T));
//
// energy_GPU << <gridSize, blockSize, nbodies * sizeof(T) >> >(nbodies, d_addArray, d_subArray, Gbodies);
// hipMemcpy(h_addArray, d_addArray, gridSize * sizeof(T), hipMemcpyDeviceToHost);
// hipMemcpy(h_subArray, d_subArray, gridSize * sizeof(T), hipMemcpyDeviceToHost);
//
// for (int i = 1; i < gridSize; i++){
// h_addArray[0] += h_addArray[i];
// h_subArray[0] += h_subArray[i];
// }
//
// T e = h_addArray[0] - h_subArray[0];
//
// return e;
// }
void writeToFile(int numIter, int numBodies, double timeToMomentum, /*float energy1,*/ double timeToScale, double timeToAdvance, double timeToScale2, /*float energy2,*/ double total)
{
file << numIter << "," << numBodies << "," << timeToScale << ',' << timeToScale2 << "," << timeToAdvance << "," << timeToMomentum << "," <</* energy1 << ',' << energy2 << ',' <<*/ total << "\n";
}
template <typename T>
void gpuLoops(int niters, int nbodies)
{
type e1, e2;
auto t1 = std::chrono::steady_clock::now();
auto t2 = std::chrono::steady_clock::now();
auto Tadv = std::chrono::steady_clock::now();
auto Tadv2 = std::chrono::steady_clock::now();
/*auto t1 = std::chrono::steady_clock::now();
auto t2 = std::chrono::steady_clock::now();*/
auto momentumStart = std::chrono::steady_clock::now();
auto momentumEnd = std::chrono::steady_clock::now();
//auto energyStart1 = std::chrono::steady_clock::now();
//auto energyEnd1 = std::chrono::steady_clock::now();
auto scaleStart1 = std::chrono::steady_clock::now();
auto scaleEnd1 = std::chrono::steady_clock::now();
auto advStart = std::chrono::steady_clock::now();
auto advEnd = std::chrono::steady_clock::now();
auto scaleStart2 = std::chrono::steady_clock::now();
auto scaleEnd2 = std::chrono::steady_clock::now();
planet<type> *bodies;
if (nbodies == 5) {
bodies = golden_bodies; // Check accuracy with 1000 solar system iterations
}
else {
bodies = new planet<type>[nbodies];
init_random_bodies(nbodies, bodies);
}
planet<type> *Gbodies;
hipMalloc(&Gbodies, nbodies*sizeof(planet<type>));
hipMemcpy(Gbodies, bodies, nbodies*sizeof(planet<type>), hipMemcpyHostToDevice);
int maxThreads;
hipDeviceGetAttribute(&maxThreads, hipDeviceAttributeMaxThreadsPerBlock, 0);
if (nbodies < maxThreads)
blockSize = nbodies;
else
blockSize = maxThreads;
gridSize = (nbodies + blockSize) / blockSize;
t1 = std::chrono::steady_clock::now();
momentumStart = std::chrono::steady_clock::now();
callOffSet(nbodies, Gbodies);
momentumEnd = std::chrono::steady_clock::now();
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
std::cout << "Error in position kernal: " << hipGetErrorString(error) << std::endl;
}
hipDeviceSynchronize();
hipMemcpy(bodies, Gbodies, nbodies*sizeof(planet<type>), hipMemcpyDeviceToHost);
e1 = energy(nbodies, bodies);
hipMemcpy(Gbodies, bodies, nbodies*sizeof(planet<type>), hipMemcpyHostToDevice);
//Scaling initial
scaleStart1 = std::chrono::steady_clock::now();
scale_bodies_GPU << <gridSize, blockSize >> >(nbodies, Gbodies, DT);
scaleEnd1 = std::chrono::steady_clock::now();
//Tadv = std::chrono::steady_clock::now();
advStart = std::chrono::steady_clock::now();
for (auto i = 0; i < niters; ++i)
{
//Calling advanced
adv_Update_GPU << <gridSize, blockSize >> >(nbodies, Gbodies);
hipDeviceSynchronize();
// adv_Position_Update << <gridSize, blockSize >> >(nbodies, Gbodies);
}
advEnd = std::chrono::steady_clock::now();
//Scaling again
scaleStart2 = std::chrono::steady_clock::now();
scale_bodies_GPU << <gridSize, blockSize >> >(nbodies, Gbodies, RECIP_DT);
scaleEnd2 = std::chrono::steady_clock::now();
hipMemcpy(bodies, Gbodies, nbodies*sizeof(planet<type>), hipMemcpyDeviceToHost);
e2 = energy(nbodies, bodies);
t2 = std::chrono::steady_clock::now();
auto momDiff = momentumEnd - momentumStart;
auto TimeMom = std::chrono::duration<double>(momDiff).count();
//auto energyDiff = Tenergy2 - Tenergy;
//auto TimeEnergy = std::chrono::duration<double>(energyDiff).count();
auto scaleDiff = scaleEnd1 - scaleStart1;
auto TimeSc = std::chrono::duration<double>(scaleDiff).count();
auto advDiff = advEnd - advStart;
auto TimeAdv = std::chrono::duration<double>(advDiff).count();
auto scale2Diff = scaleEnd2 - scaleStart2;
auto TimeSc2 = std::chrono::duration<double>(scale2Diff).count();
//auto energy2Diff = Tenergy2 - Tenergy;
//auto TimeEnergy2 = std::chrono::duration<double>(energy2Diff).count();
auto diff = t2 - t1;
auto TimeTotal = std::chrono::duration<double>(diff).count();
writeToFile(niters, nbodies, TimeMom, /*energy1T*/ TimeSc, TimeAdv, TimeSc2, /*energy2T*/ TimeTotal);
std::cout << "part done \n";
}
int main(int argc, char ** argv)
{
int niters = 1000, nbodies = 900;
if (argc > 1) { niters = atoi(argv[1]); }
if (argc > 2) { nbodies = atoi(argv[2]); }
std::cout << "niters=" << niters << " nbodies=" << nbodies << '\n';
outData = new type[gridSize];
//if (!GPUTEST)
//{
// t1 = std::chrono::steady_clock::now();
// offset_momentum(nbodies, bodies);
// e1 = energy(nbodies, bodies);
// scale_bodies(nbodies, bodies, DT);
// for (int i = 1; i <= niters; ++i) {
// advance(nbodies, bodies);
// }
// scale_bodies(nbodies, bodies, RECIP_DT);
// e2 = energy(nbodies, bodies);
// t2 = std::chrono::steady_clock::now();
//}
file.open("Test.csv");
file << "Iterations" << ',' << "Body Count" << ',' << "Time for Scale" << ',' << "Time for Scale 2" << ',' << "Time for Advance" << ',' << "Time for Momentum" << ',' <</* "Energy Before" << ',' << "Energy After" << ',' <<*/ "Total" << '\n';
for (nbodies = 100; nbodies <= 1000; nbodies += 100)
{
for (niters = 100; niters <= 1000; niters += 100)
{
gpuLoops<type>(niters, nbodies);
}
}
file.close();
//Free up the memory
/* hipFree(Gbodies);
}*/
/*auto diff = t2 - t1;
auto diff2 = Tadv2 - Tadv;
std::cout << std::setprecision(9);
std::cout << e1 << '\n' << e2 << '\n';
std::cout << std::chrono::duration<double>(diff).count() << " seconds.\n";
std::cout <<"adv: " << std::chrono::duration<double>(diff2).count() << " seconds.\n";
std::cout << "GridSize: " << gridSize << std::endl;*/
std::cout << "DONE \n";
std::cin.get();
delete[]outData;
//if (argc != 1) { delete[] bodies; }
return 0;
}
| a41d335ffb706e903c73ecdd793f11a681e79a3e.cu | /*
Running without arguments is equivalent to 1000 iterations with the
5 celestial objects declared in the golden_bodies array.
$ nbody.exe 1000 5
The output of this shows the energy before and after the simulation,
and should be:
double:
-0.169075164
-0.169087605
float:
-0.169075206
-0.169086471
*/
#include <cuda_runtime.h>
#include <cuda_occupancy.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <curand.h>
#include <curand_kernel.h>
#include <algorithm>
#define GPUTEST 1
using type = float;
const type pi{ 3.141592653589793 };
const type solar_mass{ 4 * pi * pi };
const type days_per_year{ 365.24 };
int blockSize;
int mingridSize;
int gridSize;
type *outData;
std::ofstream file;
template <typename T>
struct planet {
T x, y, z;
T vx, vy, vz;
T mass;
};
struct planet<type> golden_bodies[5] = {
{ /* sun */
0, 0, 0, 0, 0, 0, solar_mass
},
{ /* jupiter */
4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01,
1.66007664274403694e-03 * days_per_year,
7.69901118419740425e-03 * days_per_year,
-6.90460016972063023e-05 * days_per_year,
9.54791938424326609e-04 * solar_mass
},
{ /* saturn */
8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01,
-2.76742510726862411e-03 * days_per_year,
4.99852801234917238e-03 * days_per_year,
2.30417297573763929e-05 * days_per_year,
2.85885980666130812e-04 * solar_mass
},
{ /* uranus */
1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01,
2.96460137564761618e-03 * days_per_year,
2.37847173959480950e-03 * days_per_year,
-2.96589568540237556e-05 * days_per_year,
4.36624404335156298e-05 * solar_mass
},
{ /* neptune */
1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01,
2.68067772490389322e-03 * days_per_year,
1.62824170038242295e-03 * days_per_year,
-9.51592254519715870e-05 * days_per_year,
5.15138902046611451e-05 * solar_mass
}
};
const type DT{ 1e-2 };
const type RECIP_DT{ static_cast<type>(1.0) / DT };
// velocity and position update kernel
template <typename T>
__global__ void adv_Update_GPU(int nbodies, planet<T> *bodies)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nbodies)
{
planet<T> &b1 = bodies[i];
for (int j = i + 1; j < nbodies; j++)
{
planet<T> &b2 = bodies[j];
T dx = b1.x - b2.x;
T dy = b1.y - b2.y;
T dz = b1.z - b2.z;
T inv_distance = 1.0 / sqrt(dx * dx + dy * dy + dz * dz);
T mag = inv_distance * inv_distance * inv_distance;
b1.vx -= dx * b2.mass * mag;
b1.vy -= dy * b2.mass * mag;
b1.vz -= dz * b2.mass * mag;
b2.vx += dx * b1.mass * mag;
b2.vy += dy * b1.mass * mag;
b2.vz += dz * b1.mass * mag;
}
b1.x += b1.vx;
b1.y += b1.vy;
b1.z += b1.vz;
}
}
template <typename T>
__global__ void scale_bodies_GPU(int nbodies, planet<T> *bodies, T scale)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nbodies)
{
bodies[i].mass *= scale*scale;
bodies[i].vx *= scale;
bodies[i].vy *= scale;
bodies[i].vz *= scale;
}
}
//template <typename T>
//__global__ void energy_GPU(int nbodies, T *addReduc, T *subReduc, planet<T> *bodies)
//{
// extern __shared__ T e[];
//
// //T e = 0.0;
// unsigned int threadID = threadIdx.x;
//
// unsigned int i = threadIdx.x + blockIdx.x*blockDim.x;
//
//
//
// if (i < nbodies)
// {
// planet<T> &b = bodies[i];
// e[threadID] = 0.5 * b.mass * (b.vx * b.vx + b.vy * b.vy + b.vz * b.vz);
// }
//
//
// for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
// {
// if (threadID < stride)
// {
// e[threadID] += e[threadID + stride];
// }
//
// }
// if (threadID == 0)
// {
// addReduc[blockIdx.x] = e[0];
// }
//
//
// e[threadID] = 0;
//
// if (i < nbodies)
// {
// for (int iter = i + 1; iter < nbodies; iter++){
// planet<T> &b = bodies[i];
// planet<T> &b2 = bodies[iter];
// T dx = b.x - b2.x;
// T dy = b.y - b2.y;
// T dz = b.z - b2.z;
// T distance = sqrt(dx * dx + dy * dy + dz * dz);
// T var = ((b.mass * b2.mass) / distance);
// e[threadID] += var;
// }
// }
//
// __syncthreads();
//
// for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
// {
// if (threadID < stride)
// {
// e[threadID] += e[threadID + stride];
// }
// __syncthreads();
// }
//
// if (threadID == 0)
// {
// subReduc[blockIdx.x] = e[0];
// }
//}
// forward declaration: reduction_offset_mom is defined below but called from offset_momentum_GPU
template <typename T>
__device__ void reduction_offset_mom(int nbodies, planet<T> *bodies, T *outData, int step);
template <typename T>
__global__ void offset_momentum_GPU(planet<T> *bodies, T *outData, int nbodies, int d_gridSize, T solarMass)
{
T px = 0.0, py = 0.0, pz = 0.0;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int doCount = 0;
reduction_offset_mom(nbodies, bodies, outData, 0);
if (doCount < 3)
{
if (i > 0 && i < d_gridSize)
{
outData[0] += outData[i];
}
if (doCount == 0)
px = outData[0];
else if (doCount == 1)
py = outData[0];
else if (doCount == 2)
pz = outData[0];
doCount++;
}
bodies[0].vx = -px / solarMass;
bodies[0].vy = -py / solarMass;
bodies[0].vz = -pz / solarMass;
}
template <typename T>
__device__ void reduction_offset_mom(int nbodies, planet<T> *bodies, T *outData, int step)
{
extern __shared__ T sharedData[];
// each thread loads one element from global to shared mem
unsigned int threadID = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
if (step == 0)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vx * bodies[i].mass;
}
else if (step == 1)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vy * bodies[i].mass;
}
else if (step == 2)
{
if (i < nbodies)
sharedData[threadID] = bodies[i].vz * bodies[i].mass;
}
// do reduction in shared mem (synchronize after each stride so all partial sums are visible)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadID < s) {
sharedData[threadID] += sharedData[threadID + s];
}
__syncthreads();
}
// write result for this block to global mem
if (threadID == 0)
outData[blockIdx.x] = sharedData[0];
}
template <typename T>
void advance(int nbodies, planet<T> *bodies)
{
int i, j;
for (i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
for (j = i + 1; j < nbodies; j++) {
planet<T> &b2 = bodies[j];
T dx = b.x - b2.x;
T dy = b.y - b2.y;
T dz = b.z - b2.z;
T inv_distance = 1.0 / sqrt(dx * dx + dy * dy + dz * dz);
T mag = inv_distance * inv_distance * inv_distance;
b.vx -= dx * b2.mass * mag;
b.vy -= dy * b2.mass * mag;
b.vz -= dz * b2.mass * mag;
b2.vx += dx * b.mass * mag;
b2.vy += dy * b.mass * mag;
b2.vz += dz * b.mass * mag;
}
}
for (i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
b.x += b.vx;
b.y += b.vy;
b.z += b.vz;
}
}
template <typename T>
T energy(int nbodies, planet<T> *bodies)
{
T e = 0.0;
for (int i = 0; i < nbodies; ++i) {
planet<T> &b = bodies[i];
e += 0.5 * b.mass * (b.vx * b.vx + b.vy * b.vy + b.vz * b.vz);
for (int j = i + 1; j < nbodies; j++) {
planet<T> &b2 = bodies[j];
T dx = b.x - b2.x;
T dy = b.y - b2.y;
T dz = b.z - b2.z;
T distance = sqrt(dx * dx + dy * dy + dz * dz);
e -= (b.mass * b2.mass) / distance;
}
}
return e;
}
template <typename T>
void offset_momentum(int nbodies, planet<T> *bodies)
{
T px = 0.0, py = 0.0, pz = 0.0;
for (int i = 0; i < nbodies; ++i) {
px += bodies[i].vx * bodies[i].mass;
py += bodies[i].vy * bodies[i].mass;
pz += bodies[i].vz * bodies[i].mass;
}
bodies[0].vx = -px / solar_mass;
bodies[0].vy = -py / solar_mass;
bodies[0].vz = -pz / solar_mass;
}
/*
* Rescale certain properties of bodies. That allows doing
* consequential advance()'s as if dt were equal to 1.0.
*
* When all advances done, rescale bodies back to obtain correct energy.
*/
template <typename T>
void scale_bodies(int nbodies, planet<T> *bodies, T scale)
{
for (int i = 0; i < nbodies; ++i) {
bodies[i].mass *= scale*scale;
bodies[i].vx *= scale;
bodies[i].vy *= scale;
bodies[i].vz *= scale;
}
}
template <typename T>
void init_random_bodies(int nbodies, planet<T> *bodies)
{
for (int i = 0; i < nbodies; ++i) {
bodies[i].x = (T)rand() / RAND_MAX;
bodies[i].y = (T)rand() / RAND_MAX;
bodies[i].z = (T)rand() / RAND_MAX;
bodies[i].vx = (T)rand() / RAND_MAX;
bodies[i].vy = (T)rand() / RAND_MAX;
bodies[i].vz = (T)rand() / RAND_MAX;
bodies[i].mass = (T)rand() / RAND_MAX;
}
}
int isPowerOfTwo(unsigned int x)
{
return ((x != 0) && !(x & (x - 1)));
}
template <typename T>
void callOffSet(int nbodies, planet<T> *Gbodies)
{
T *d_Array;
cudaMalloc(&d_Array, gridSize*sizeof(T));
offset_momentum_GPU << <gridSize, blockSize, nbodies * sizeof(T) >> >(Gbodies, d_Array, nbodies, gridSize, solar_mass); //need to fix outData
cudaFree(d_Array);
}
//template <typename T>
//T callEnergy(int nbodies, planet<T> *Gbodies)
//{
//
// T *h_addArray = new T[gridSize];
// T *h_subArray = new T[gridSize];
//
// T *d_addArray; cudaMalloc((void**)&d_addArray, gridSize * sizeof(T));
// T *d_subArray; cudaMalloc((void**)&d_subArray, gridSize * sizeof(T));
//
// energy_GPU << <gridSize, blockSize, nbodies * sizeof(T) >> >(nbodies, d_addArray, d_subArray, Gbodies);
// cudaMemcpy(h_addArray, d_addArray, gridSize * sizeof(T), cudaMemcpyDeviceToHost);
// cudaMemcpy(h_subArray, d_subArray, gridSize * sizeof(T), cudaMemcpyDeviceToHost);
//
// for (int i = 1; i < gridSize; i++){
// h_addArray[0] += h_addArray[i];
// h_subArray[0] += h_subArray[i];
// }
//
// T e = h_addArray[0] - h_subArray[0];
//
// return e;
// }
void writeToFile(int numIter, int numBodies, double timeToMomentum, /*float energy1,*/ double timeToScale, double timeToAdvance, double timeToScale2, /*float energy2,*/ double total)
{
file << numIter << "," << numBodies << "," << timeToScale << ',' << timeToScale2 << "," << timeToAdvance << "," << timeToMomentum << "," <</* energy1 << ',' << energy2 << ',' <<*/ total << "\n";
}
template <typename T>
void gpuLoops(int niters, int nbodies)
{
type e1, e2;
auto t1 = std::chrono::steady_clock::now();
auto t2 = std::chrono::steady_clock::now();
auto Tadv = std::chrono::steady_clock::now();
auto Tadv2 = std::chrono::steady_clock::now();
/*auto t1 = std::chrono::steady_clock::now();
auto t2 = std::chrono::steady_clock::now();*/
auto momentumStart = std::chrono::steady_clock::now();
auto momentumEnd = std::chrono::steady_clock::now();
//auto energyStart1 = std::chrono::steady_clock::now();
//auto energyEnd1 = std::chrono::steady_clock::now();
auto scaleStart1 = std::chrono::steady_clock::now();
auto scaleEnd1 = std::chrono::steady_clock::now();
auto advStart = std::chrono::steady_clock::now();
auto advEnd = std::chrono::steady_clock::now();
auto scaleStart2 = std::chrono::steady_clock::now();
auto scaleEnd2 = std::chrono::steady_clock::now();
planet<type> *bodies;
if (nbodies == 5) {
bodies = golden_bodies; // Check accuracy with 1000 solar system iterations
}
else {
bodies = new planet<type>[nbodies];
init_random_bodies(nbodies, bodies);
}
planet<type> *Gbodies;
cudaMalloc(&Gbodies, nbodies*sizeof(planet<type>));
cudaMemcpy(Gbodies, bodies, nbodies*sizeof(planet<type>), cudaMemcpyHostToDevice);
int maxThreads;
cudaDeviceGetAttribute(&maxThreads, cudaDevAttrMaxThreadsPerBlock, 0);
if (nbodies < maxThreads)
blockSize = nbodies;
else
blockSize = maxThreads;
gridSize = (nbodies + blockSize) / blockSize;
t1 = std::chrono::steady_clock::now();
momentumStart = std::chrono::steady_clock::now();
callOffSet(nbodies, Gbodies);
momentumEnd = std::chrono::steady_clock::now();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
std::cout << "Error in position kernal: " << cudaGetErrorString(error) << std::endl;
}
cudaThreadSynchronize();
cudaMemcpy(bodies, Gbodies, nbodies*sizeof(planet<type>), cudaMemcpyDeviceToHost);
e1 = energy(nbodies, bodies);
cudaMemcpy(Gbodies, bodies, nbodies*sizeof(planet<type>), cudaMemcpyHostToDevice);
//Scaling initial
scaleStart1 = std::chrono::steady_clock::now();
scale_bodies_GPU << <gridSize, blockSize >> >(nbodies, Gbodies, DT);
scaleEnd1 = std::chrono::steady_clock::now();
//Tadv = std::chrono::steady_clock::now();
advStart = std::chrono::steady_clock::now();
for (auto i = 0; i < niters; ++i)
{
//Calling advanced
adv_Update_GPU << <gridSize, blockSize >> >(nbodies, Gbodies);
cudaDeviceSynchronize();
// adv_Position_Update << <gridSize, blockSize >> >(nbodies, Gbodies);
}
advEnd = std::chrono::steady_clock::now();
//Scaling again
scaleStart2 = std::chrono::steady_clock::now();
scale_bodies_GPU << <gridSize, blockSize >> >(nbodies, Gbodies, RECIP_DT);
scaleEnd2 = std::chrono::steady_clock::now();
cudaMemcpy(bodies, Gbodies, nbodies*sizeof(planet<type>), cudaMemcpyDeviceToHost);
e2 = energy(nbodies, bodies);
t2 = std::chrono::steady_clock::now();
auto momDiff = momentumEnd - momentumStart;
auto TimeMom = std::chrono::duration<double>(momDiff).count();
//auto energyDiff = Tenergy2 - Tenergy;
//auto TimeEnergy = std::chrono::duration<double>(energyDiff).count();
auto scaleDiff = scaleEnd1 - scaleStart1;
auto TimeSc = std::chrono::duration<double>(scaleDiff).count();
auto advDiff = advEnd - advStart;
auto TimeAdv = std::chrono::duration<double>(advDiff).count();
auto scale2Diff = scaleEnd2 - scaleStart2;
auto TimeSc2 = std::chrono::duration<double>(scale2Diff).count();
//auto energy2Diff = Tenergy2 - Tenergy;
//auto TimeEnergy2 = std::chrono::duration<double>(energy2Diff).count();
auto diff = t2 - t1;
auto TimeTotal = std::chrono::duration<double>(diff).count();
writeToFile(niters, nbodies, TimeMom, /*energy1T*/ TimeSc, TimeAdv, TimeSc2, /*energy2T*/ TimeTotal);
std::cout << "part done \n";
}
int main(int argc, char ** argv)
{
int niters = 1000, nbodies = 900;
if (argc > 1) { niters = atoi(argv[1]); }
if (argc > 2) { nbodies = atoi(argv[2]); }
std::cout << "niters=" << niters << " nbodies=" << nbodies << '\n';
outData = new type[gridSize];
//if (!GPUTEST)
//{
// t1 = std::chrono::steady_clock::now();
// offset_momentum(nbodies, bodies);
// e1 = energy(nbodies, bodies);
// scale_bodies(nbodies, bodies, DT);
// for (int i = 1; i <= niters; ++i) {
// advance(nbodies, bodies);
// }
// scale_bodies(nbodies, bodies, RECIP_DT);
// e2 = energy(nbodies, bodies);
// t2 = std::chrono::steady_clock::now();
//}
file.open("Test.csv");
file << "Iterations" << ',' << "Body Count" << ',' << "Time for Scale" << ',' << "Time for Scale 2" << ',' << "Time for Advance" << ',' << "Time for Momentum" << ',' <</* "Energy Before" << ',' << "Energy After" << ',' <<*/ "Total" << '\n';
for (nbodies = 100; nbodies <= 1000; nbodies += 100)
{
for (niters = 100; niters <= 1000; niters += 100)
{
gpuLoops<type>(niters, nbodies);
}
}
file.close();
//Free up the memory
/* cudaFree(Gbodies);
}*/
/*auto diff = t2 - t1;
auto diff2 = Tadv2 - Tadv;
std::cout << std::setprecision(9);
std::cout << e1 << '\n' << e2 << '\n';
std::cout << std::chrono::duration<double>(diff).count() << " seconds.\n";
std::cout <<"adv: " << std::chrono::duration<double>(diff2).count() << " seconds.\n";
std::cout << "GridSize: " << gridSize << std::endl;*/
std::cout << "DONE \n";
std::cin.get();
delete[]outData;
//if (argc != 1) { delete[] bodies; }
return 0;
}
|
lulesh_simple.hip | // !!! This is a file automatically generated by hipify!!!
/*/*{{{*/
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
}}}*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <hip/hip_runtime.h>
#include <allocator.h>
#include "hip/hip_runtime_api.h"
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
//EJ
int global_block_size = 64 ;
//EJ end
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
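// VOLUDER: macro form of the VoluDer() device function further below; it evaluates
// one component of an element's nodal volume derivative from six neighboring node
// coordinates, scaled by 1/12. The warp-per-4cell kernel uses it with coordinates
// fetched from other lanes via shuffles.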
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
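// Butterfly reduction over an 8-lane group (one hexahedron's nodes): after the
// three XOR shuffles with strides 4, 2 and 1, every lane holds the sum of the
// eight lanes' values.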
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
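// Hexahedron volume computed as the sum of three scalar triple products of edge
// and diagonal difference vectors, scaled by 1/12 (see the TRIPLE_PRODUCT macro
// inside).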
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{/*{{{*/
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}/*}}}*/
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{/*{{{*/
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}/*}}}*/
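// cuda_init: selects the device as rank % deviceCount, reports host/GPU, and
// aborts unless the device is SM 3.0+ and the toolkit is CUDA 5.0+ (texture
// objects are used by this implementation).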
void cuda_init(int rank)
{/*{{{*/
Int_t deviceCount, dev;
hipDeviceProp_t cuda_deviceProp;
cudaSafeCall( hipGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
dev = rank % deviceCount;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( hipSetDevice(dev) );
struct hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( hipGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires device SM 3.0+.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which is requires Cuda 5.0+.\n");
exit(1);
#endif
}/*}}}*/
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{/*{{{*/
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}/*}}}*/
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{/*{{{*/
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}/*}}}*/
void AllocateSymmX(Domain* domain, size_t size)
{/*{{{*/
domain->symmX.resize(size) ;
}/*}}}*/
void AllocateSymmY(Domain* domain, size_t size)
{/*{{{*/
domain->symmY.resize(size) ;
}/*}}}*/
void AllocateSymmZ(Domain* domain, size_t size)
{/*{{{*/
domain->symmZ.resize(size) ;
}/*}}}*/
void InitializeFields(Domain* domain)
{/*{{{*/
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}/*}}}*/
////////////////////////////////////////////////////////////////////////////////
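// SetupConnectivityBC: for a structured edgeElems^3 block, fills the face-neighbor
// element indices (lxim/lxip, letam/letap, lzetam/lzetap) and then encodes per-face
// boundary conditions in elemBC: SYMM on faces touching the global low boundary,
// FREE on the global high boundary, and COMM toward neighboring ranks, with
// ghost-element offsets taken from ghostIdx.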
void SetupConnectivityBC(Domain *domain, int edgeElems)
{/*{{{*/
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up element connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}/*}}}*/
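// NewDomain: allocates the Domain and its device streams, then either builds a
// structured nx^3 mesh (coordinates, connectivity, symmetry node sets, boundary
// conditions) or reads an unstructured mesh from the file named in argv[2].
// Afterwards it builds the node->element-corner lookup
// (nodeElemStart/Count/CornerList), the material index set, pinned host scalars
// for dt/constraint/error reporting, material constants, element and nodal masses
// from element volumes, deposits the initial energy in the origin corner element,
// and derives the initial timestep from a CFL estimate.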
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{/*{{{*/
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
hipStreamCreate(&(domain->streams[i]));
hipEventCreateWithFlags(&domain->time_constraint_computed,hipEventDisableTiming);
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
int fsuccess; // fscanf return value (number of items read)
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
hipHostMalloc(&domain->dtcourant_h,sizeof(Real_t),0);
hipHostMalloc(&domain->dthydro_h,sizeof(Real_t),0);
hipHostMalloc(&domain->bad_vol_h,sizeof(Index_t),0);
hipHostMalloc(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
//set initial deltatime based on analytic CFL calculation
domain->deltatime_h = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}/*}}}*/
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{/*{{{*/
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}/*}}}*/
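// TimeIncrement: unless a fixed dt is in use, the candidate step is
// min(dtcourant/2, 2*dthydro/3), reduced with MPI_MIN across ranks when MPI is
// enabled, then rate-limited between deltatimemultlb and deltatimemultub relative
// to the previous step and capped at dtmax. Finally the step is trimmed so the run
// does not overshoot stoptime or leave a tiny last step.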
static inline
void TimeIncrement(Domain* domain)
{/*{{{*/
// To make sure dtcourant and dthydro have been updated on host
hipEventSynchronize(domain->time_constraint_computed);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h;
ratio = newdt / olddt ;
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h) {
domain->deltatime_h = targetdt ;
}
domain->time_h += domain->deltatime_h ;
++domain->cycle ;
}/*}}}*/
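// Shape-function derivatives at the element center: the mean Jacobian is built
// from signed 1/8 sums of the nodal coordinates, its cofactors give the
// derivatives b[][] for nodes 0-3 (nodes 4-7 follow by antisymmetry), and the
// returned volume is 8 times the Jacobian determinant.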
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{/*{{{*/
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}/*}}}*/
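// Quad-face area vector approximated as 0.25 * (bisector0 x bisector1); the same
// quarter-area vector is accumulated onto all four nodes of the face.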
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{/*{{{*/
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}/*}}}*/
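// Lane-per-node variant of SumElemFaceNormal: each lane contributes its own
// coordinate times +/-0.5 (or 0 if its node is not on the face), the face
// bisectors are assembled with SumOverNodesShfl, and only the lanes owning one of
// the four face nodes accumulate the resulting quarter-area vector.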
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{/*{{{*/
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}/*}}}*/
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{/*{{{*/
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}/*}}}*/
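// Gathers per-element corner forces into per-node forces: one thread per node
// walks its entries in the CSR-style nodeElemStart/nodeElemCount/nodeElemCornerList
// map built in NewDomain and sums fx_elem/fy_elem/fz_elem into the nodal arrays.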
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{/*{{{*/
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}/*}}}*/
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{/*{{{*/
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}/*}}}*/
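// Volume derivatives dV/dx, dV/dy, dV/dz at each of the eight nodes, obtained by
// calling VoluDer with the appropriate permutation of neighboring node
// coordinates for every node.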
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{/*{{{*/
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}/*}}}*/
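// Hourglass force: for each coordinate direction the nodal velocities are
// projected onto the four hourglass base vectors (h00..h03), and
// coefficient * hourgam * h is accumulated back onto every node's hourglass force
// component.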
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{/*{{{*/
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}/*}}}*/
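// Hourglass mode vectors: for each of the four modes, hourgam[i][m] is the +/-1
// base pattern for node i corrected by volinv times the projection of the nodal
// volume derivatives onto the signed coordinate sums (hourmodx/y/z) of that mode.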
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{/*{{{*/
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}/*}}}*/
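// Volume-force kernel, one thread per element: flags negative volumes via bad_vol,
// computes volume derivatives and hourglass modes, forms the stress force
// -sigxx * B (sigxx = -p - q) from the shape-function derivatives / node normals,
// optionally adds the hourglass force when hourg > 0, and either stores
// per-element corner forces (double precision, gathered later) or accumulates
// nodal forces directly with atomicAdd (single precision).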
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel
(/*{{{*/
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)/*}}}*/
{/*{{{*/
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn[8],yn[8],zn[8];
Real_t xdn[8],ydn[8],zdn[8];
Real_t dvdxn[8],dvdyn[8],dvdzn[8];
Real_t hgfx[8],hgfy[8],hgfz[8];
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}/*}}}*/
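// Same computation with eight consecutive threads per element (so a 32-lane warp
// covers four elements, hence "warp_per_4cell"): each lane owns one node, keeps
// its coordinates in registers, forms all cross-node sums with SumOverNodesShfl,
// and evaluates the volume derivatives with VOLUDER using the per-node index
// permutation selected in the switch below.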
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell
(/*{{{*/
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)/*}}}*/
{/*{{{*/
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn,yn,zn;
Real_t xdn,ydn,zdn;
Real_t dvdxn,dvdyn,dvdzn;
Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal number to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
//int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
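// Each fj* value is this node's signed (1/8-scaled) contribution to one entry of the
// element Jacobian; summing across the eight cooperating threads completes the Jacobian.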
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
// Accumulate this node's normal contributions from the six element faces
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
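// DOUBLE_PRECISION path: stage the per-(element,node) forces in element-major arrays and
// gather them into the nodal arrays later with AddNodeForcesFromElems_kernel; the
// single-precision path accumulates directly into the nodal force arrays with atomicAdd.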
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}/*}}}*/
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{/*{{{*/
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
int num_threads = numElem ;
//EJ
const int block_size = 64;
//int block_size = global_block_size ;
//EJ end
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<true>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
hipLaunchKernelGGL(( CalcVolumeForceForElems_kernel<false>) , dim3(dimGrid),dim3(block_size), 0, 0,
domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Gather the per-element force contributions into the nodal force arrays
dimGrid= PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL(( AddNodeForcesFromElems_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// hipDeviceSynchronize();
// cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}/*}}}*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{/*{{{*/
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}/*}}}*/
static inline void checkErrors(Domain* domain,int its,int myRank)
{/*{{{*/
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}/*}}}*/
static inline void CalcForceForNodes(Domain *domain)
{/*{{{*/
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
TimeIncrement(domain);
#if USE_MPI
// initialize pointers
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2]) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2]) ;
#endif
}/*}}}*/
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{/*{{{*/
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}/*}}}*/
static inline
void CalcAccelerationForNodes(Domain *domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
hipLaunchKernelGGL(( CalcAccelerationForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{/*{{{*/
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}/*}}}*/
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
hipLaunchKernelGGL(( ApplyAccelerationBoundaryConditionsForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}/*}}}*/
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{/*{{{*/
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime;
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = 0.0;
if( FABS(ydtmp) < u_cut ) ydtmp = 0.0;
if( FABS(zdtmp) < u_cut ) zdtmp = 0.0;
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}/*}}}*/
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
hipLaunchKernelGGL(( CalcPositionAndVelocityForNodes_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
domain->numNode,domain->deltatime_h,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//hipDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
static inline
void LagrangeNodal(Domain *domain)
{/*{{{*/
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false) ;
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2]) ;
CommSyncPosVelGpu(*domain, &domain->streams[2]) ;
#endif
#endif
return;
}/*}}}*/
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{/*{{{*/
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
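// Note: by Lagrange's identity this is |f x g|^2, a squared face-area measure
// ((4*area)^2 for a planar face), not the area itself; the caller takes SQRT of the
// maximum over all faces.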
return area ;
}/*}}}*/
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{/*{{{*/
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}/*}}}*/
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{/*{{{*/
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}/*}}}*/
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{/*{{{*/
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}/*}}}*/
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel
(/*{{{*/
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)/*}}}*/
{/*{{{*/
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
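// The nodal positions were stepped back by dt/2 above so the shape-function derivatives
// and velocity gradient are evaluated on the time-centered element geometry; the shift
// is undone further below before the monotonic Q gradient.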
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}/*}}}*/
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{/*{{{*/
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
hipLaunchKernelGGL(( CalcKinematicsAndMonotonicQGradient_kernel), dim3(dimGrid),dim3(block_size), 0, 0,
numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//hipDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel
(/*{{{*/
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)/*}}}*/
{/*{{{*/
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}/*}}}*/
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{/*{{{*/
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
hipLaunchKernelGGL(( CalcMonotonicQRegionForElems_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//hipDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{/*{{{*/
Real_t c1s = Real_t(2.0)/Real_t(3.0);
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}/*}}}*/
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{/*{{{*/
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}/*}}}*/
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{/*{{{*/
vnewc = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}/*}}}*/
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{/*{{{*/
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}/*}}}*/
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{/*{{{*/
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
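// Predictor-corrector energy update: advance the energy to the half step, evaluate
// pHalfStep from the EOS, recompute the artificial viscosity q from the half-step sound
// speed, then apply the corrector and evaluate the final pressure and q.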
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}/*}}}*/
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{/*{{{*/
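// Linear scan over the cumulative per-region element counts in regCSR: returns the first
// region whose upper bound exceeds i, or the last region otherwise.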
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}/*}}}*/
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel
(/*{{{*/
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)/*}}}*/
{/*{{{*/
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Determine which region this element belongs to and how many EOS repetitions (rep) it requires.
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
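// Regions with rep > 1 repeat the EOS evaluation to emulate more expensive materials
// (the LULESH 2.0 region/cost feature); each pass restarts from the saved *_temp values,
// so only the final iteration's results are stored.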
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}/*}}}*/
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{/*{{{*/
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
hipLaunchKernelGGL(( ApplyMaterialPropertiesAndUpdateVolume_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//hipDeviceSynchronize();
//cudaCheckError();
}
}/*}}}*/
static inline
void LagrangeElements(Domain *domain)
{/*{{{*/
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
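// The delv_* gradients are allocated with room for the plane/row/column ghost elements so
// they can be exchanged over MPI (CommMonoQGpu); the delx_* arrays are local-element only.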
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
/*********************************************/
/* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2]) ;
CommMonoQGpu(*domain, domain->streams[2]) ;
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
* Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}/*}}}*/
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{/*{{{*/
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
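// The remaining steps run within a single warp and rely on the volatile shared-memory
// qualifier plus implicit warp-synchronous execution (a pre-Volta assumption); newer
// architectures would want __syncwarp() between these steps.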
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}/*}}}*/
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{/*{{{*/
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
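// Launched with exactly two blocks: block 0 reduces the per-block courant minima,
// block 1 reduces the per-block hydro minima.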
if (blockIdx.x==0)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}/*}}}*/
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{/*{{{*/
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=::min(max_dimGrid,PAD_DIV(length,dimBlock));
hipFuncSetCacheConfig(CalcTimeConstraintsForElems_kernel<dimBlock>, hipFuncCachePreferShared);
Vector_d<Real_t>* dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
Vector_d<Real_t>* dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
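// Two-stage minimum reduction: the first kernel writes one courant/hydro minimum per
// block, then CalcMinDtOneBlock collapses those into the final dt constraints on
// streams[1]. The dynamic shared-memory size passed to that launch is unused, since the
// kernel declares a static __shared__ array.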
hipLaunchKernelGGL(( CalcTimeConstraintsForElems_kernel<dimBlock>) , dim3(dimGrid),dim3(dimBlock), 0, 0,
length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
dev_mindtcourant->raw(),dev_mindthydro->raw());
// TODO: if dimGrid < 1024, should launch fewer threads
hipLaunchKernelGGL(( CalcMinDtOneBlock<max_dimGrid>) , dim3(2),dim3(max_dimGrid), max_dimGrid*sizeof(Real_t), domain->streams[1], dev_mindthydro->raw(),dev_mindtcourant->raw(),domain->dtcourant_h,domain->dthydro_h, dimGrid);
hipEventRecord(domain->time_constraint_computed,domain->streams[1]);
Allocator<Vector_d<Real_t> >::free(dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(dev_mindthydro,dimGrid);
}/*}}}*/
static inline
void LagrangeLeapFrog(Domain* domain)
{/*{{{*/
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(domain);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(domain);
CalcTimeConstraintsForElems(domain);
}/*}}}*/
void printUsage(char* argv[])
{/*{{{*/
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}/*}}}*/
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{/*{{{*/
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}/*}}}*/
#endif // SAMI
#ifdef SAMI
void DumpDomain(Domain *domain)
{/*{{{*/
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}/*}}}*/
#endif
void write_solution(Domain* locDom)
{/*{{{*/
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}/*}}}*/
///////////////////////////////////////////////////////////////////////////
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{/*{{{*/
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}/*}}}*/
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{/*{{{*/
size_t free_mem, total_mem, used_mem;
hipMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
hipMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), hipMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
hipMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), hipMemcpyDeviceToHost);
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete [] e_all;
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
bool write_solution_flag=true;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}/*}}}*/
| lulesh_simple.cu | /*/*{{{*/
Copyright (c) 2010.
Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory.
LLNL-CODE-461231
All rights reserved.
This file is part of LULESH, Version 1.0.
Please also read this link -- http://www.opensource.org/licenses/index.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,
THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Additional BSD Notice
1. This notice is required to be provided under our contract with the U.S.
Department of Energy (DOE). This work was produced at Lawrence Livermore
National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.
2. Neither the United States Government nor Lawrence Livermore National
Security, LLC nor any of their employees, makes any warranty, express
or implied, or assumes any liability or responsibility for the accuracy,
completeness, or usefulness of any information, apparatus, product, or
process disclosed, or represents that its use would not infringe
privately-owned rights.
3. Also, reference herein to any specific commercial products, process, or
services by trade name, trademark, manufacturer or otherwise does not
necessarily constitute or imply its endorsement, recommendation, or
favoring by the United States Government or Lawrence Livermore National
Security, LLC. The views and opinions of authors expressed herein do not
necessarily state or reflect those of the United States Government or
Lawrence Livermore National Security, LLC, and shall not be used for
advertising or product endorsement purposes.
*//*}}}*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <util.h>
#include <sm_utils.inl>
#include <cuda.h>
#include <allocator.h>
#include "cuda_profiler_api.h"
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#include "lulesh.h"
/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/
__device__ inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
__device__ inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
__device__ inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
__device__ inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
__device__ __host__ inline real4 FABS(real4 arg) { return fabsf(arg) ; }
__device__ __host__ inline real8 FABS(real8 arg) { return fabs(arg) ; }
__device__ inline real4 FMAX(real4 arg1,real4 arg2) { return fmaxf(arg1,arg2) ; }
__device__ inline real8 FMAX(real8 arg1,real8 arg2) { return fmax(arg1,arg2) ; }
//EJ
int global_block_size = 64 ;
//EJ end
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
/* Stuff needed for boundary conditions */
/* 3 BC types (symmetry, free surface, communication) on each of 6 hexahedral faces (18 bits) */
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
#define VOLUDER(a0,a1,a2,a3,a4,a5,b0,b1,b2,b3,b4,b5,dvdc) \
{ \
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ; \
\
dvdc= \
((a1) + (a2)) * ((b0) + (b1)) - ((a0) + (a1)) * ((b1) + (b2)) + \
((a0) + (a4)) * ((b3) + (b4)) - ((a3) + (a4)) * ((b0) + (b4)) - \
((a2) + (a5)) * ((b3) + (b5)) + ((a3) + (a5)) * ((b2) + (b5)); \
dvdc *= twelfth; \
}
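/* Butterfly reduction over a width-8 warp sub-group: after the three XOR
   shuffles every lane in the 8-lane group (one lane per element node) holds
   the sum of all eight lanes' values. */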
__device__
static
__forceinline__
void SumOverNodesShfl(Real_t& val) {
val += utils::shfl_xor( val, 4, 8);
val += utils::shfl_xor( val, 2, 8);
val += utils::shfl_xor( val, 1, 8);
}
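/* Volume of a hexahedron from its eight corner coordinates, formed as the
   sum of three scalar triple products of edge/diagonal difference vectors
   and scaled by 1/12. */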
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t x4, const Real_t x5,
const Real_t x6, const Real_t x7,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t y4, const Real_t y5,
const Real_t y6, const Real_t y7,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3,
const Real_t z4, const Real_t z5,
const Real_t z6, const Real_t z7 )
{/*{{{*/
Real_t twelveth = Real_t(1.0)/Real_t(12.0);
Real_t dx61 = x6 - x1;
Real_t dy61 = y6 - y1;
Real_t dz61 = z6 - z1;
Real_t dx70 = x7 - x0;
Real_t dy70 = y7 - y0;
Real_t dz70 = z7 - z0;
Real_t dx63 = x6 - x3;
Real_t dy63 = y6 - y3;
Real_t dz63 = z6 - z3;
Real_t dx20 = x2 - x0;
Real_t dy20 = y2 - y0;
Real_t dz20 = z2 - z0;
Real_t dx50 = x5 - x0;
Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;
Real_t dx64 = x6 - x4;
Real_t dy64 = y6 - y4;
Real_t dz64 = z6 - z4;
Real_t dx31 = x3 - x1;
Real_t dy31 = y3 - y1;
Real_t dz31 = z3 - z1;
Real_t dx72 = x7 - x2;
Real_t dy72 = y7 - y2;
Real_t dz72 = z7 - z2;
Real_t dx43 = x4 - x3;
Real_t dy43 = y4 - y3;
Real_t dz43 = z4 - z3;
Real_t dx57 = x5 - x7;
Real_t dy57 = y5 - y7;
Real_t dz57 = z5 - z7;
Real_t dx14 = x1 - x4;
Real_t dy14 = y1 - y4;
Real_t dz14 = z1 - z4;
Real_t dx25 = x2 - x5;
Real_t dy25 = y2 - y5;
Real_t dz25 = z2 - z5;
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))
// 11 + 3*14
Real_t volume =
TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
dy31 + dy72, dy63, dy20,
dz31 + dz72, dz63, dz20) +
TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
dy43 + dy57, dy64, dy70,
dz43 + dz57, dz64, dz70) +
TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
dy14 + dy25, dy61, dy50,
dz14 + dz25, dz61, dz50);
#undef TRIPLE_PRODUCT
volume *= twelveth;
return volume ;
}/*}}}*/
__host__ __device__
static
__forceinline__
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{/*{{{*/
return CalcElemVolume( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}/*}}}*/
void cuda_init(int rank)
{/*{{{*/
Int_t deviceCount, dev;
cudaDeviceProp cuda_deviceProp;
cudaSafeCall( cudaGetDeviceCount(&deviceCount) );
if (deviceCount == 0) {
fprintf(stderr, "cuda_init(): no devices supporting CUDA.\n");
exit(1);
}
dev = rank % deviceCount;
if ((dev < 0) || (dev > deviceCount-1)) {
fprintf(stderr, "cuda_init(): requested device (%d) out of range [%d,%d]\n",
dev, 0, deviceCount-1);
exit(1);
}
cudaSafeCall( cudaSetDevice(dev) );
struct cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
char hostname[256];
gethostname(hostname, sizeof(hostname));
printf("Host %s using GPU %i: %s\n", hostname, dev, props.name);
cudaSafeCall( cudaGetDeviceProperties(&cuda_deviceProp, dev) );
if (cuda_deviceProp.major < 3) {
fprintf(stderr, "cuda_init(): This implementation of Lulesh requires device SM 3.0+.\n", dev);
exit(1);
}
#if CUDART_VERSION < 5000
fprintf(stderr,"cuda_init(): This implementation of Lulesh uses texture objects, which is requires Cuda 5.0+.\n");
exit(1);
#endif
}/*}}}*/
void AllocateNodalPersistent(Domain* domain, size_t domNodes)
{/*{{{*/
domain->x.resize(domNodes) ; /* coordinates */
domain->y.resize(domNodes) ;
domain->z.resize(domNodes) ;
domain->xd.resize(domNodes) ; /* velocities */
domain->yd.resize(domNodes) ;
domain->zd.resize(domNodes) ;
domain->xdd.resize(domNodes) ; /* accelerations */
domain->ydd.resize(domNodes) ;
domain->zdd.resize(domNodes) ;
domain->fx.resize(domNodes) ; /* forces */
domain->fy.resize(domNodes) ;
domain->fz.resize(domNodes) ;
domain->nodalMass.resize(domNodes) ; /* mass */
}/*}}}*/
void AllocateElemPersistent(Domain* domain, size_t domElems, size_t padded_domElems)
{/*{{{*/
domain->matElemlist.resize(domElems) ; /* material indexset */
domain->nodelist.resize(8*padded_domElems) ; /* elemToNode connectivity */
domain->lxim.resize(domElems) ; /* elem connectivity through face */
domain->lxip.resize(domElems) ;
domain->letam.resize(domElems) ;
domain->letap.resize(domElems) ;
domain->lzetam.resize(domElems) ;
domain->lzetap.resize(domElems) ;
domain->elemBC.resize(domElems) ; /* elem face symm/free-surf flag */
domain->e.resize(domElems) ; /* energy */
domain->p.resize(domElems) ; /* pressure */
domain->q.resize(domElems) ; /* q */
domain->ql.resize(domElems) ; /* linear term for q */
domain->qq.resize(domElems) ; /* quadratic term for q */
domain->v.resize(domElems) ; /* relative volume */
domain->volo.resize(domElems) ; /* reference volume */
domain->delv.resize(domElems) ; /* m_vnew - m_v */
domain->vdov.resize(domElems) ; /* volume derivative over volume */
domain->arealg.resize(domElems) ; /* elem characteristic length */
domain->ss.resize(domElems) ; /* "sound speed" */
domain->elemMass.resize(domElems) ; /* mass */
}/*}}}*/
void AllocateSymmX(Domain* domain, size_t size)
{/*{{{*/
domain->symmX.resize(size) ;
}/*}}}*/
void AllocateSymmY(Domain* domain, size_t size)
{/*{{{*/
domain->symmY.resize(size) ;
}/*}}}*/
void AllocateSymmZ(Domain* domain, size_t size)
{/*{{{*/
domain->symmZ.resize(size) ;
}/*}}}*/
void InitializeFields(Domain* domain)
{/*{{{*/
/* Basic Field Initialization */
thrust::fill(domain->ss.begin(),domain->ss.end(),0.);
thrust::fill(domain->e.begin(),domain->e.end(),0.);
thrust::fill(domain->p.begin(),domain->p.end(),0.);
thrust::fill(domain->q.begin(),domain->q.end(),0.);
thrust::fill(domain->v.begin(),domain->v.end(),1.);
thrust::fill(domain->xd.begin(),domain->xd.end(),0.);
thrust::fill(domain->yd.begin(),domain->yd.end(),0.);
thrust::fill(domain->zd.begin(),domain->zd.end(),0.);
thrust::fill(domain->xdd.begin(),domain->xdd.end(),0.);
thrust::fill(domain->ydd.begin(),domain->ydd.end(),0.);
thrust::fill(domain->zdd.begin(),domain->zdd.end(),0.);
thrust::fill(domain->nodalMass.begin(),domain->nodalMass.end(),0.);
}/*}}}*/
////////////////////////////////////////////////////////////////////////////////
void SetupConnectivityBC(Domain *domain, int edgeElems)
{/*{{{*/
int domElems = domain->numElem;
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
/* set up element connectivity information */
lxim_h[0] = 0 ;
for (Index_t i=1; i<domElems; ++i) {
lxim_h[i] = i-1 ;
lxip_h[i-1] = i ;
}
lxip_h[domElems-1] = domElems-1 ;
for (Index_t i=0; i<edgeElems; ++i) {
letam_h[i] = i ;
letap_h[domElems-edgeElems+i] = domElems-edgeElems+i ;
}
for (Index_t i=edgeElems; i<domElems; ++i) {
letam_h[i] = i-edgeElems ;
letap_h[i-edgeElems] = i ;
}
for (Index_t i=0; i<edgeElems*edgeElems; ++i) {
lzetam_h[i] = i ;
lzetap_h[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ;
}
for (Index_t i=edgeElems*edgeElems; i<domElems; ++i) {
lzetam_h[i] = i - edgeElems*edgeElems ;
lzetap_h[i-edgeElems*edgeElems] = i ;
}
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
elemBC_h[i] = 0 ; /* clear BCs by default */
}
Index_t ghostIdx[6] ; // offsets to ghost locations
for (Index_t i=0; i<6; ++i) {
ghostIdx[i] = INT_MIN ;
}
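/* Ghost element slots are laid out after the real elements; ghostIdx[0..5]
   record the starting offset of the planeMin, planeMax, rowMin, rowMax,
   colMin and colMax ghost layers, for whichever of those neighbors exist. */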
Int_t pidx = domElems ;
if (domain->m_planeMin != 0) {
ghostIdx[0] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_planeMax != 0) {
ghostIdx[1] = pidx ;
pidx += domain->sizeX*domain->sizeY ;
}
if (domain->m_rowMin != 0) {
ghostIdx[2] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_rowMax != 0) {
ghostIdx[3] = pidx ;
pidx += domain->sizeX*domain->sizeZ ;
}
if (domain->m_colMin != 0) {
ghostIdx[4] = pidx ;
pidx += domain->sizeY*domain->sizeZ ;
}
if (domain->m_colMax != 0) {
ghostIdx[5] = pidx ;
}
/* symmetry plane or free surface BCs */
for (Index_t i=0; i<edgeElems; ++i) {
Index_t planeInc = i*edgeElems*edgeElems ;
Index_t rowInc = i*edgeElems ;
for (Index_t j=0; j<edgeElems; ++j) {
if (domain->m_planeLoc == 0) {
elemBC_h[rowInc+j] |= ZETA_M_SYMM ;
}
else {
elemBC_h[rowInc+j] |= ZETA_M_COMM ;
lzetam_h[rowInc+j] = ghostIdx[0] + rowInc + j ;
}
if (domain->m_planeLoc == domain->m_tp-1) {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_FREE;
}
else {
elemBC_h[rowInc+j+domElems-edgeElems*edgeElems] |=
ZETA_P_COMM ;
lzetap_h[rowInc+j+domElems-edgeElems*edgeElems] =
ghostIdx[1] + rowInc + j ;
}
if (domain->m_rowLoc == 0) {
elemBC_h[planeInc+j] |= ETA_M_SYMM ;
}
else {
elemBC_h[planeInc+j] |= ETA_M_COMM ;
letam_h[planeInc+j] = ghostIdx[2] + rowInc + j ;
}
if (domain->m_rowLoc == domain->m_tp-1) {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_FREE ;
}
else {
elemBC_h[planeInc+j+edgeElems*edgeElems-edgeElems] |=
ETA_P_COMM ;
letap_h[planeInc+j+edgeElems*edgeElems-edgeElems] =
ghostIdx[3] + rowInc + j ;
}
if (domain->m_colLoc == 0) {
elemBC_h[planeInc+j*edgeElems] |= XI_M_SYMM ;
}
else {
elemBC_h[planeInc+j*edgeElems] |= XI_M_COMM ;
lxim_h[planeInc+j*edgeElems] = ghostIdx[4] + rowInc + j ;
}
if (domain->m_colLoc == domain->m_tp-1) {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ;
}
else {
elemBC_h[planeInc+j*edgeElems+edgeElems-1] |= XI_P_COMM ;
lxip_h[planeInc+j*edgeElems+edgeElems-1] =
ghostIdx[5] + rowInc + j ;
}
}
}
domain->elemBC = elemBC_h;
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
}/*}}}*/
Domain *NewDomain(char* argv[], Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, int tp, bool structured, Int_t nr, Int_t balance, Int_t cost)
{/*{{{*/
Domain *domain = new Domain ;
domain->max_streams = 32;
domain->streams.resize(domain->max_streams);
for (Int_t i=0;i<domain->max_streams;i++)
cudaStreamCreate(&(domain->streams[i]));
cudaEventCreateWithFlags(&domain->time_constraint_computed,cudaEventDisableTiming);
Index_t domElems;
Index_t domNodes;
Index_t padded_domElems;
Vector_h<Index_t> nodelist_h;
Vector_h<Real_t> x_h;
Vector_h<Real_t> y_h;
Vector_h<Real_t> z_h;
if (structured)
{
domain->m_tp = tp ;
domain->m_numRanks = numRanks ;
domain->m_colLoc = colLoc ;
domain->m_rowLoc = rowLoc ;
domain->m_planeLoc = planeLoc ;
Index_t edgeElems = nx ;
Index_t edgeNodes = edgeElems+1 ;
domain->sizeX = edgeElems ;
domain->sizeY = edgeElems ;
domain->sizeZ = edgeElems ;
domain->numElem = domain->sizeX*domain->sizeY*domain->sizeZ ;
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = (domain->sizeX+1)*(domain->sizeY+1)*(domain->sizeZ+1) ;
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
domain->SetupCommBuffers(edgeNodes);
InitializeFields(domain);
domain->BuildMesh(nx, edgeNodes, edgeElems, domNodes, padded_domElems, x_h, y_h, z_h, nodelist_h);
domain->numSymmX = domain->numSymmY = domain->numSymmZ = 0;
if (domain->m_colLoc == 0)
domain->numSymmX = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_rowLoc == 0)
domain->numSymmY = (edgeElems+1)*(edgeElems+1) ;
if (domain->m_planeLoc == 0)
domain->numSymmZ = (edgeElems+1)*(edgeElems+1) ;
AllocateSymmX(domain,edgeNodes*edgeNodes);
AllocateSymmY(domain,edgeNodes*edgeNodes);
AllocateSymmZ(domain,edgeNodes*edgeNodes);
/* set up symmetry nodesets */
Vector_h<Index_t> symmX_h(domain->symmX.size());
Vector_h<Index_t> symmY_h(domain->symmY.size());
Vector_h<Index_t> symmZ_h(domain->symmZ.size());
Int_t nidx = 0 ;
for (Index_t i=0; i<edgeNodes; ++i) {
Index_t planeInc = i*edgeNodes*edgeNodes ;
Index_t rowInc = i*edgeNodes ;
for (Index_t j=0; j<edgeNodes; ++j) {
if (domain->m_planeLoc == 0) {
symmZ_h[nidx] = rowInc + j ;
}
if (domain->m_rowLoc == 0) {
symmY_h[nidx] = planeInc + j ;
}
if (domain->m_colLoc == 0) {
symmX_h[nidx] = planeInc + j*edgeNodes ;
}
++nidx ;
}
}
if (domain->m_planeLoc == 0)
domain->symmZ = symmZ_h;
if (domain->m_rowLoc == 0)
domain->symmY = symmY_h;
if (domain->m_colLoc == 0)
domain->symmX = symmX_h;
SetupConnectivityBC(domain, edgeElems);
}
else
{
FILE *fp;
int ee, en;
if ((fp = fopen(argv[2], "r")) == 0) {
printf("could not open file %s\n", argv[2]) ;
exit( LFileError ) ;
}
bool fsuccess;
fsuccess = fscanf(fp, "%d %d", &ee, &en) ;
domain->numElem = Index_t(ee);
domain->padded_numElem = PAD(domain->numElem,32);
domain->numNode = Index_t(en);
domain->padded_numNode = PAD(domain->numNode,32);
domElems = domain->numElem ;
domNodes = domain->numNode ;
padded_domElems = domain->padded_numElem ;
AllocateElemPersistent(domain,domElems,padded_domElems);
AllocateNodalPersistent(domain,domNodes);
InitializeFields(domain);
/* initialize nodal coordinates */
x_h.resize(domNodes);
y_h.resize(domNodes);
z_h.resize(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
double px, py, pz ;
fsuccess = fscanf(fp, "%lf %lf %lf", &px, &py, &pz) ;
x_h[i] = Real_t(px) ;
y_h[i] = Real_t(py) ;
z_h[i] = Real_t(pz) ;
}
domain->x = x_h;
domain->y = y_h;
domain->z = z_h;
/* embed hexahedral elements in nodal point lattice */
nodelist_h.resize(padded_domElems*8);
for (Index_t zidx=0; zidx<domElems; ++zidx) {
for (Index_t ni=0; ni<Index_t(8); ++ni) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
nodelist_h[ni*padded_domElems+zidx] = Index_t(n);
}
}
domain->nodelist = nodelist_h;
/* set up face-based element neighbors */
Vector_h<Index_t> lxim_h(domElems);
Vector_h<Index_t> lxip_h(domElems);
Vector_h<Index_t> letam_h(domElems);
Vector_h<Index_t> letap_h(domElems);
Vector_h<Index_t> lzetam_h(domElems);
Vector_h<Index_t> lzetap_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
int xi_m, xi_p, eta_m, eta_p, zeta_m, zeta_p ;
fsuccess = fscanf(fp, "%d %d %d %d %d %d",
&xi_m, &xi_p, &eta_m, &eta_p, &zeta_m, &zeta_p) ;
lxim_h[i] = Index_t(xi_m) ;
lxip_h[i] = Index_t(xi_p) ;
letam_h[i] = Index_t(eta_m) ;
letap_h[i] = Index_t(eta_p) ;
lzetam_h[i] = Index_t(zeta_m) ;
lzetap_h[i] = Index_t(zeta_p) ;
}
domain->lxim = lxim_h;
domain->lxip = lxip_h;
domain->letam = letam_h;
domain->letap = letap_h;
domain->lzetam = lzetam_h;
domain->lzetap = lzetap_h;
/* set up X symmetry nodeset */
fsuccess = fscanf(fp, "%d", &domain->numSymmX) ;
Vector_h<Index_t> symmX_h(domain->numSymmX);
for (Index_t i=0; i<domain->numSymmX; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmX_h[i] = Index_t(n) ;
}
domain->symmX = symmX_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmY) ;
Vector_h<Index_t> symmY_h(domain->numSymmY);
for (Index_t i=0; i<domain->numSymmY; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmY_h[i] = Index_t(n) ;
}
domain->symmY = symmY_h;
fsuccess = fscanf(fp, "%d", &domain->numSymmZ) ;
Vector_h<Index_t> symmZ_h(domain->numSymmZ);
for (Index_t i=0; i<domain->numSymmZ; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
symmZ_h[i] = Index_t(n) ;
}
domain->symmZ = symmZ_h;
/* set up free surface nodeset */
Index_t numFreeSurf;
fsuccess = fscanf(fp, "%d", &numFreeSurf) ;
Vector_h<Index_t> freeSurf_h(numFreeSurf);
for (Index_t i=0; i<numFreeSurf; ++i) {
int n ;
fsuccess = fscanf(fp, "%d", &n) ;
freeSurf_h[i] = Index_t(n) ;
}
printf("%c\n",fsuccess);//nothing
fclose(fp);
/* set up boundary condition information */
Vector_h<Index_t> elemBC_h(domElems);
Vector_h<Index_t> surfaceNode_h(domNodes);
for (Index_t i=0; i<domain->numElem; ++i) {
elemBC_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<domain->numSymmX; ++i) {
surfaceNode_h[symmX_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmY; ++i) {
surfaceNode_h[symmY_h[i]] = 1 ;
}
for (Index_t i=0; i<domain->numSymmZ; ++i) {
surfaceNode_h[symmZ_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
if (elemBC_h[zidx] == (XI_M_SYMM | ETA_M_SYMM | ZETA_M_SYMM)) {
domain->octantCorner = zidx ;
break ;
}
}
for (Index_t i=0; i<domain->numNode; ++i) {
surfaceNode_h[i] = 0 ;
}
for (Index_t i=0; i<numFreeSurf; ++i) {
surfaceNode_h[freeSurf_h[i]] = 1 ;
}
for (Index_t zidx=0; zidx<domain->numElem; ++zidx) {
Int_t mask = 0 ;
for (Index_t ni=0; ni<8; ++ni) {
mask |= (surfaceNode_h[nodelist_h[ni*domain->padded_numElem+zidx]] << ni) ;
}
if ((mask & 0x0f) == 0x0f) elemBC_h[zidx] |= ZETA_M_SYMM ;
if ((mask & 0xf0) == 0xf0) elemBC_h[zidx] |= ZETA_P_SYMM ;
if ((mask & 0x33) == 0x33) elemBC_h[zidx] |= ETA_M_SYMM ;
if ((mask & 0xcc) == 0xcc) elemBC_h[zidx] |= ETA_P_SYMM ;
if ((mask & 0x99) == 0x99) elemBC_h[zidx] |= XI_M_SYMM ;
if ((mask & 0x66) == 0x66) elemBC_h[zidx] |= XI_P_SYMM ;
}
domain->elemBC = elemBC_h;
/* deposit energy */
domain->e[domain->octantCorner] = Real_t(3.948746e+7) ;
}
/* set up node-centered indexing of elements */
Vector_h<Index_t> nodeElemCount_h(domNodes);
for (Index_t i=0; i<domNodes; ++i) {
nodeElemCount_h[i] = 0 ;
}
for (Index_t i=0; i<domElems; ++i) {
for (Index_t j=0; j < 8; ++j) {
++(nodeElemCount_h[nodelist_h[j*padded_domElems+i]]);
}
}
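/* nodeElemStart is the exclusive prefix sum of nodeElemCount; together with
   nodeElemCornerList (filled next) it forms a CSR-style map from each node
   to the element corners that touch it. */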
Vector_h<Index_t> nodeElemStart_h(domNodes);
nodeElemStart_h[0] = 0;
for (Index_t i=1; i < domNodes; ++i) {
nodeElemStart_h[i] =
nodeElemStart_h[i-1] + nodeElemCount_h[i-1] ;
}
Vector_h<Index_t> nodeElemCornerList_h(nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] );
for (Index_t i=0; i < domNodes; ++i) {
nodeElemCount_h[i] = 0;
}
for (Index_t j=0; j < 8; ++j) {
for (Index_t i=0; i < domElems; ++i) {
Index_t m = nodelist_h[padded_domElems*j+i];
Index_t k = padded_domElems*j + i ;
Index_t offset = nodeElemStart_h[m] +
nodeElemCount_h[m] ;
nodeElemCornerList_h[offset] = k;
++(nodeElemCount_h[m]) ;
}
}
Index_t clSize = nodeElemStart_h[domNodes-1] +
nodeElemCount_h[domNodes-1] ;
for (Index_t i=0; i < clSize; ++i) {
Index_t clv = nodeElemCornerList_h[i] ;
if ((clv < 0) || (clv > padded_domElems*8)) {
fprintf(stderr,
"AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
exit(1);
}
}
domain->nodeElemStart = nodeElemStart_h;
domain->nodeElemCount = nodeElemCount_h;
domain->nodeElemCornerList = nodeElemCornerList_h;
/* Create a material IndexSet (entire domain same material for now) */
Vector_h<Index_t> matElemlist_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
matElemlist_h[i] = i ;
}
domain->matElemlist = matElemlist_h;
cudaMallocHost(&domain->dtcourant_h,sizeof(Real_t),0);
cudaMallocHost(&domain->dthydro_h,sizeof(Real_t),0);
cudaMallocHost(&domain->bad_vol_h,sizeof(Index_t),0);
cudaMallocHost(&domain->bad_q_h,sizeof(Index_t),0);
*(domain->bad_vol_h)=-1;
*(domain->bad_q_h)=-1;
*(domain->dthydro_h)=1e20;
*(domain->dtcourant_h)=1e20;
/* initialize material parameters */
domain->time_h = Real_t(0.) ;
domain->dtfixed = Real_t(-1.0e-6) ;
domain->deltatimemultlb = Real_t(1.1) ;
domain->deltatimemultub = Real_t(1.2) ;
domain->stoptime = Real_t(1.0e-2) ;
domain->dtmax = Real_t(1.0e-2) ;
domain->cycle = 0 ;
domain->e_cut = Real_t(1.0e-7) ;
domain->p_cut = Real_t(1.0e-7) ;
domain->q_cut = Real_t(1.0e-7) ;
domain->u_cut = Real_t(1.0e-7) ;
domain->v_cut = Real_t(1.0e-10) ;
domain->hgcoef = Real_t(3.0) ;
domain->ss4o3 = Real_t(4.0)/Real_t(3.0) ;
domain->qstop = Real_t(1.0e+12) ;
domain->monoq_max_slope = Real_t(1.0) ;
domain->monoq_limiter_mult = Real_t(2.0) ;
domain->qlc_monoq = Real_t(0.5) ;
domain->qqc_monoq = Real_t(2.0)/Real_t(3.0) ;
domain->qqc = Real_t(2.0) ;
domain->pmin = Real_t(0.) ;
domain->emin = Real_t(-1.0e+15) ;
domain->dvovmax = Real_t(0.1) ;
domain->eosvmax = Real_t(1.0e+9) ;
domain->eosvmin = Real_t(1.0e-9) ;
domain->refdens = Real_t(1.0) ;
/* initialize field data */
Vector_h<Real_t> nodalMass_h(domNodes);
Vector_h<Real_t> volo_h(domElems);
Vector_h<Real_t> elemMass_h(domElems);
for (Index_t i=0; i<domElems; ++i) {
Real_t x_local[8], y_local[8], z_local[8] ;
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist_h[lnode*padded_domElems+i];
x_local[lnode] = x_h[gnode];
y_local[lnode] = y_h[gnode];
z_local[lnode] = z_h[gnode];
}
// volume calculations
Real_t volume = CalcElemVolume(x_local, y_local, z_local );
volo_h[i] = volume ;
elemMass_h[i] = volume ;
for (Index_t j=0; j<8; ++j) {
Index_t gnode = nodelist_h[j*padded_domElems+i];
nodalMass_h[gnode] += volume / Real_t(8.0) ;
}
}
domain->nodalMass = nodalMass_h;
domain->volo = volo_h;
domain->elemMass= elemMass_h;
/* deposit energy */
domain->octantCorner = 0;
// deposit initial energy
// An energy of 3.948746e+7 is correct for a problem with
// 45 zones along a side - we need to scale it
const Real_t ebase = 3.948746e+7;
Real_t scale = (nx*domain->m_tp)/45.0;
Real_t einit = ebase*scale*scale*scale;
//Real_t einit = ebase;
if (domain->m_rowLoc + domain->m_colLoc + domain->m_planeLoc == 0) {
// Dump into the first zone (which we know is in the corner)
// of the domain that sits at the origin
domain->e[0] = einit;
}
// set initial deltatime based on an analytic CFL estimate
domain->deltatime_h = (.5*cbrt(domain->volo[0]))/sqrt(2*einit);
domain->cost = cost;
domain->regNumList.resize(domain->numElem) ; // material indexset
domain->regElemlist.resize(domain->numElem) ; // material indexset
domain->regCSR.resize(nr);
domain->regReps.resize(nr);
domain->regSorted.resize(nr);
// Setup region index sets. For now, these are constant sized
// throughout the run, but could be changed every cycle to
// simulate effects of ALE on the lagrange solver
domain->CreateRegionIndexSets(nr, balance);
return domain ;
}/*}}}*/
// simple function for int pow x^y, y >= 0
static Int_t POW(Int_t x, Int_t y)
{/*{{{*/
Int_t res = 1;
for (Int_t i = 0; i < y; i++)
res *= x;
return res;
}/*}}}*/
static inline
void TimeIncrement(Domain* domain)
{/*{{{*/
// To make sure dtcourant and dthydro have been updated on host
cudaEventSynchronize(domain->time_constraint_computed);
Real_t targetdt = domain->stoptime - domain->time_h;
if ((domain->dtfixed <= Real_t(0.0)) && (domain->cycle != Int_t(0))) {
Real_t ratio ;
/* This will require a reduction in parallel */
Real_t gnewdt = Real_t(1.0e+20) ;
Real_t newdt;
if ( *(domain->dtcourant_h) < gnewdt) {
gnewdt = *(domain->dtcourant_h) / Real_t(2.0) ;
}
if ( *(domain->dthydro_h) < gnewdt) {
gnewdt = *(domain->dthydro_h) * Real_t(2.0) / Real_t(3.0) ;
}
#if USE_MPI
MPI_Allreduce(&gnewdt, &newdt, 1,
((sizeof(Real_t) == 4) ? MPI_FLOAT : MPI_DOUBLE),
MPI_MIN, MPI_COMM_WORLD) ;
#else
newdt = gnewdt;
#endif
Real_t olddt = domain->deltatime_h;
ratio = newdt / olddt ;
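/* Limit how quickly the timestep may grow: ratio increases below
   deltatimemultlb are discarded, and growth per cycle is capped at a factor
   of deltatimemultub. */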
if (ratio >= Real_t(1.0)) {
if (ratio < domain->deltatimemultlb) {
newdt = olddt ;
}
else if (ratio > domain->deltatimemultub) {
newdt = olddt*domain->deltatimemultub ;
}
}
if (newdt > domain->dtmax) {
newdt = domain->dtmax ;
}
domain->deltatime_h = newdt ;
}
/* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
if ((targetdt > domain->deltatime_h) &&
(targetdt < (Real_t(4.0) * domain->deltatime_h / Real_t(3.0))) ) {
targetdt = Real_t(2.0) * domain->deltatime_h / Real_t(3.0) ;
}
if (targetdt < domain->deltatime_h) {
domain->deltatime_h = targetdt ;
}
domain->time_h += domain->deltatime_h ;
++domain->cycle ;
}/*}}}*/
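/* Shape function derivatives for the trilinear hexahedron, evaluated at the
   element center: the fj* terms are the mean Jacobian, the cj* terms its
   cofactors, b[][] receives the resulting B matrix for all eight nodes
   (nodes 4-7 follow from nodes 0-3 by symmetry), and *volume receives the
   Jacobian determinant, which serves as the element volume. */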
__device__
static
__forceinline__
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
const Real_t* const y,
const Real_t* const z,
Real_t b[][8],
Real_t* const volume )
{/*{{{*/
const Real_t x0 = x[0] ; const Real_t x1 = x[1] ;
const Real_t x2 = x[2] ; const Real_t x3 = x[3] ;
const Real_t x4 = x[4] ; const Real_t x5 = x[5] ;
const Real_t x6 = x[6] ; const Real_t x7 = x[7] ;
const Real_t y0 = y[0] ; const Real_t y1 = y[1] ;
const Real_t y2 = y[2] ; const Real_t y3 = y[3] ;
const Real_t y4 = y[4] ; const Real_t y5 = y[5] ;
const Real_t y6 = y[6] ; const Real_t y7 = y[7] ;
const Real_t z0 = z[0] ; const Real_t z1 = z[1] ;
const Real_t z2 = z[2] ; const Real_t z3 = z[3] ;
const Real_t z4 = z[4] ; const Real_t z5 = z[5] ;
const Real_t z6 = z[6] ; const Real_t z7 = z[7] ;
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
fjxxi = Real_t(.125) * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
fjxet = Real_t(.125) * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
fjxze = Real_t(.125) * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );
fjyxi = Real_t(.125) * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
fjyet = Real_t(.125) * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
fjyze = Real_t(.125) * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );
fjzxi = Real_t(.125) * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
fjzet = Real_t(.125) * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
fjzze = Real_t(.125) * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );
/* compute cofactors */
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
/* calculate partials :
this need only be done for l = 0,1,2,3 since , by symmetry ,
(6,7,4,5) = - (0,1,2,3) .
*/
b[0][0] = - cjxxi - cjxet - cjxze;
b[0][1] = cjxxi - cjxet - cjxze;
b[0][2] = cjxxi + cjxet - cjxze;
b[0][3] = - cjxxi + cjxet - cjxze;
b[0][4] = -b[0][2];
b[0][5] = -b[0][3];
b[0][6] = -b[0][0];
b[0][7] = -b[0][1];
/*
b[0][4] = - cjxxi - cjxet + cjxze;
b[0][5] = + cjxxi - cjxet + cjxze;
b[0][6] = + cjxxi + cjxet + cjxze;
b[0][7] = - cjxxi + cjxet + cjxze;
*/
b[1][0] = - cjyxi - cjyet - cjyze;
b[1][1] = cjyxi - cjyet - cjyze;
b[1][2] = cjyxi + cjyet - cjyze;
b[1][3] = - cjyxi + cjyet - cjyze;
b[1][4] = -b[1][2];
b[1][5] = -b[1][3];
b[1][6] = -b[1][0];
b[1][7] = -b[1][1];
b[2][0] = - cjzxi - cjzet - cjzze;
b[2][1] = cjzxi - cjzet - cjzze;
b[2][2] = cjzxi + cjzet - cjzze;
b[2][3] = - cjzxi + cjzet - cjzze;
b[2][4] = -b[2][2];
b[2][5] = -b[2][3];
b[2][6] = -b[2][0];
b[2][7] = -b[2][1];
/* calculate jacobian determinant (volume) */
*volume = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
}/*}}}*/
static
__device__
__forceinline__
void SumElemFaceNormal(Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
Real_t *normalX1, Real_t *normalY1, Real_t *normalZ1,
Real_t *normalX2, Real_t *normalY2, Real_t *normalZ2,
Real_t *normalX3, Real_t *normalY3, Real_t *normalZ3,
const Real_t x0, const Real_t y0, const Real_t z0,
const Real_t x1, const Real_t y1, const Real_t z1,
const Real_t x2, const Real_t y2, const Real_t z2,
const Real_t x3, const Real_t y3, const Real_t z3)
{/*{{{*/
Real_t bisectX0 = Real_t(0.5) * (x3 + x2 - x1 - x0);
Real_t bisectY0 = Real_t(0.5) * (y3 + y2 - y1 - y0);
Real_t bisectZ0 = Real_t(0.5) * (z3 + z2 - z1 - z0);
Real_t bisectX1 = Real_t(0.5) * (x2 + x1 - x3 - x0);
Real_t bisectY1 = Real_t(0.5) * (y2 + y1 - y3 - y0);
Real_t bisectZ1 = Real_t(0.5) * (z2 + z1 - z3 - z0);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
*normalX0 += areaX;
*normalX1 += areaX;
*normalX2 += areaX;
*normalX3 += areaX;
*normalY0 += areaY;
*normalY1 += areaY;
*normalY2 += areaY;
*normalY3 += areaY;
*normalZ0 += areaZ;
*normalZ1 += areaZ;
*normalZ2 += areaZ;
*normalZ3 += areaZ;
}/*}}}*/
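/* Warp-cooperative form of SumElemFaceNormal: each of the eight lanes of an
   element holds one node's coordinates. The signed 0.5 coefficients select
   this lane's contribution to the two face bisector vectors, the width-8
   shuffles sum them across the lanes, and the resulting quarter-area normal
   is accumulated only on the four lanes whose node lies on the face
   (n0..n3). */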
static
__device__
__forceinline__
void SumElemFaceNormal_warp_per_4cell(
Real_t *normalX0, Real_t *normalY0, Real_t *normalZ0,
const Real_t x, const Real_t y, const Real_t z,
int node,
int n0, int n1, int n2, int n3)
{/*{{{*/
Real_t coef0 = Real_t(0.5);
Real_t coef1 = Real_t(0.5);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
if (node == n0 || node == n1)
coef0 = -coef0;
if (node == n0 || node == n3)
coef1 = -coef1;
}
else
{
coef0 = Real_t(0.);
coef1 = Real_t(0.);
}
Real_t bisectX0 = coef0*x;
Real_t bisectY0 = coef0*y;
Real_t bisectZ0 = coef0*z;
Real_t bisectX1 = coef1*x;
Real_t bisectY1 = coef1*y;
Real_t bisectZ1 = coef1*z;
SumOverNodesShfl(bisectX0);
SumOverNodesShfl(bisectY0);
SumOverNodesShfl(bisectZ0);
SumOverNodesShfl(bisectX1);
SumOverNodesShfl(bisectY1);
SumOverNodesShfl(bisectZ1);
Real_t areaX = Real_t(0.25) * (bisectY0 * bisectZ1 - bisectZ0 * bisectY1);
Real_t areaY = Real_t(0.25) * (bisectZ0 * bisectX1 - bisectX0 * bisectZ1);
Real_t areaZ = Real_t(0.25) * (bisectX0 * bisectY1 - bisectY0 * bisectX1);
if (node == n0 || node == n1 || node==n2 || node==n3)
{
*normalX0 += areaX;
*normalY0 += areaY;
*normalZ0 += areaZ;
}
}/*}}}*/
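/* Accumulates, for each of the element's eight nodes, the quarter-area
   normals of the faces adjacent to that node. The result (pfx/pfy/pfz) is
   the nodal B matrix used by the stress force calculation. */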
__device__
static inline
void CalcElemNodeNormals(Real_t pfx[8],
Real_t pfy[8],
Real_t pfz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{/*{{{*/
for (Index_t i = 0 ; i < 8 ; ++i) {
pfx[i] = Real_t(0.0);
pfy[i] = Real_t(0.0);
pfz[i] = Real_t(0.0);
}
/* evaluate face one: nodes 0, 1, 2, 3 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[1], &pfy[1], &pfz[1],
&pfx[2], &pfy[2], &pfz[2],
&pfx[3], &pfy[3], &pfz[3],
x[0], y[0], z[0], x[1], y[1], z[1],
x[2], y[2], z[2], x[3], y[3], z[3]);
/* evaluate face two: nodes 0, 4, 5, 1 */
SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
&pfx[4], &pfy[4], &pfz[4],
&pfx[5], &pfy[5], &pfz[5],
&pfx[1], &pfy[1], &pfz[1],
x[0], y[0], z[0], x[4], y[4], z[4],
x[5], y[5], z[5], x[1], y[1], z[1]);
/* evaluate face three: nodes 1, 5, 6, 2 */
SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
&pfx[5], &pfy[5], &pfz[5],
&pfx[6], &pfy[6], &pfz[6],
&pfx[2], &pfy[2], &pfz[2],
x[1], y[1], z[1], x[5], y[5], z[5],
x[6], y[6], z[6], x[2], y[2], z[2]);
/* evaluate face four: nodes 2, 6, 7, 3 */
SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
&pfx[6], &pfy[6], &pfz[6],
&pfx[7], &pfy[7], &pfz[7],
&pfx[3], &pfy[3], &pfz[3],
x[2], y[2], z[2], x[6], y[6], z[6],
x[7], y[7], z[7], x[3], y[3], z[3]);
/* evaluate face five: nodes 3, 7, 4, 0 */
SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
&pfx[7], &pfy[7], &pfz[7],
&pfx[4], &pfy[4], &pfz[4],
&pfx[0], &pfy[0], &pfz[0],
x[3], y[3], z[3], x[7], y[7], z[7],
x[4], y[4], z[4], x[0], y[0], z[0]);
/* evaluate face six: nodes 4, 7, 6, 5 */
SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
&pfx[7], &pfy[7], &pfz[7],
&pfx[6], &pfy[6], &pfz[6],
&pfx[5], &pfy[5], &pfz[5],
x[4], y[4], z[4], x[7], y[7], z[7],
x[6], y[6], z[6], x[5], y[5], z[5]);
}/*}}}*/
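/* Gathers per-element-corner force contributions (fx_elem/fy_elem/fz_elem)
   into nodal forces, one thread per node, using the CSR-style
   nodeElemStart/nodeElemCount/nodeElemCornerList arrays built in NewDomain. */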
__global__
void AddNodeForcesFromElems_kernel( Index_t numNode,
Index_t padded_numNode,
const Int_t* nodeElemCount,
const Int_t* nodeElemStart,
const Index_t* nodeElemCornerList,
const Real_t* fx_elem,
const Real_t* fy_elem,
const Real_t* fz_elem,
Real_t* fx_node,
Real_t* fy_node,
Real_t* fz_node,
const Int_t num_threads)
{/*{{{*/
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < num_threads)
{
Index_t g_i = tid;
Int_t count=nodeElemCount[g_i];
Int_t start=nodeElemStart[g_i];
Real_t fx,fy,fz;
fx=fy=fz=Real_t(0.0);
for (int j=0;j<count;j++)
{
Index_t pos=nodeElemCornerList[start+j]; // Uncoalesced access here
fx += fx_elem[pos];
fy += fy_elem[pos];
fz += fz_elem[pos];
}
fx_node[g_i]=fx;
fy_node[g_i]=fy;
fz_node[g_i]=fz;
}
}/*}}}*/
static
__device__
__forceinline__
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
const Real_t x3, const Real_t x4, const Real_t x5,
const Real_t y0, const Real_t y1, const Real_t y2,
const Real_t y3, const Real_t y4, const Real_t y5,
const Real_t z0, const Real_t z1, const Real_t z2,
const Real_t z3, const Real_t z4, const Real_t z5,
Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{/*{{{*/
const Real_t twelfth = Real_t(1.0) / Real_t(12.0) ;
*dvdx =
(y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
(y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
(y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
*dvdy =
- (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
(x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
(x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
*dvdz =
- (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
(y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
(y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);
*dvdx *= twelfth;
*dvdy *= twelfth;
*dvdz *= twelfth;
}/*}}}*/
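/* Derivative of the element volume with respect to each node's coordinates:
   each VoluDer call below produces (dV/dx, dV/dy, dV/dz) for one node from
   the coordinates of the six nodes in its stencil. */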
static
__device__
__forceinline__
void CalcElemVolumeDerivative(Real_t dvdx[8],
Real_t dvdy[8],
Real_t dvdz[8],
const Real_t x[8],
const Real_t y[8],
const Real_t z[8])
{/*{{{*/
VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
y[1], y[2], y[3], y[4], y[5], y[7],
z[1], z[2], z[3], z[4], z[5], z[7],
&dvdx[0], &dvdy[0], &dvdz[0]);
VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
y[0], y[1], y[2], y[7], y[4], y[6],
z[0], z[1], z[2], z[7], z[4], z[6],
&dvdx[3], &dvdy[3], &dvdz[3]);
VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
y[3], y[0], y[1], y[6], y[7], y[5],
z[3], z[0], z[1], z[6], z[7], z[5],
&dvdx[2], &dvdy[2], &dvdz[2]);
VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
y[2], y[3], y[0], y[5], y[6], y[4],
z[2], z[3], z[0], z[5], z[6], z[4],
&dvdx[1], &dvdy[1], &dvdz[1]);
VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
y[7], y[6], y[5], y[0], y[3], y[1],
z[7], z[6], z[5], z[0], z[3], z[1],
&dvdx[4], &dvdy[4], &dvdz[4]);
VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
y[4], y[7], y[6], y[1], y[0], y[2],
z[4], z[7], z[6], z[1], z[0], z[2],
&dvdx[5], &dvdy[5], &dvdz[5]);
VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
y[5], y[4], y[7], y[2], y[1], y[3],
z[5], z[4], z[7], z[2], z[1], z[3],
&dvdx[6], &dvdy[6], &dvdz[6]);
VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
y[6], y[5], y[4], y[3], y[2], y[0],
z[6], z[5], z[4], z[3], z[2], z[0],
&dvdx[7], &dvdy[7], &dvdz[7]);
}/*}}}*/
static
__device__
__forceinline__
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd, Real_t *hourgam0,
Real_t *hourgam1, Real_t *hourgam2, Real_t *hourgam3,
Real_t *hourgam4, Real_t *hourgam5, Real_t *hourgam6,
Real_t *hourgam7, Real_t coefficient,
Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{/*{{{*/
Index_t i00=0;
Index_t i01=1;
Index_t i02=2;
Index_t i03=3;
Real_t h00 =
hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];
Real_t h01 =
hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];
Real_t h02 =
hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1]+
hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3]+
hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5]+
hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];
Real_t h03 =
hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];
hgfx[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfx[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfx[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfx[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfx[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfx[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfx[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfx[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];
h01 =
hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];
h02 =
hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1]+
hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3]+
hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5]+
hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];
h03 =
hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];
hgfy[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfy[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfy[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfy[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfy[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfy[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfy[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfy[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
h00 =
hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];
h01 =
hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];
h02 =
hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1]+
hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3]+
hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5]+
hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];
h03 =
hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];
hgfz[0] += coefficient *
(hourgam0[i00] * h00 + hourgam0[i01] * h01 +
hourgam0[i02] * h02 + hourgam0[i03] * h03);
hgfz[1] += coefficient *
(hourgam1[i00] * h00 + hourgam1[i01] * h01 +
hourgam1[i02] * h02 + hourgam1[i03] * h03);
hgfz[2] += coefficient *
(hourgam2[i00] * h00 + hourgam2[i01] * h01 +
hourgam2[i02] * h02 + hourgam2[i03] * h03);
hgfz[3] += coefficient *
(hourgam3[i00] * h00 + hourgam3[i01] * h01 +
hourgam3[i02] * h02 + hourgam3[i03] * h03);
hgfz[4] += coefficient *
(hourgam4[i00] * h00 + hourgam4[i01] * h01 +
hourgam4[i02] * h02 + hourgam4[i03] * h03);
hgfz[5] += coefficient *
(hourgam5[i00] * h00 + hourgam5[i01] * h01 +
hourgam5[i02] * h02 + hourgam5[i03] * h03);
hgfz[6] += coefficient *
(hourgam6[i00] * h00 + hourgam6[i01] * h01 +
hourgam6[i02] * h02 + hourgam6[i03] * h03);
hgfz[7] += coefficient *
(hourgam7[i00] * h00 + hourgam7[i01] * h01 +
hourgam7[i02] * h02 + hourgam7[i03] * h03);
}/*}}}*/
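/* Flanagan-Belytschko hourglass shape vectors: for each of the four
   hourglass base vectors Gamma_m (the +/-1 sign patterns below),
   hourgam[n][m] = Gamma_m[n] - volinv * dV/dx_n . (sum_k Gamma_m[k] * x_k),
   i.e. each base vector is orthogonalized against the linear part of the
   element's motion so the hourglass resistance does not act on linear
   velocity fields. */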
__device__
__forceinline__
void CalcHourglassModes(const Real_t xn[8], const Real_t yn[8], const Real_t zn[8],
const Real_t dvdxn[8], const Real_t dvdyn[8], const Real_t dvdzn[8],
Real_t hourgam[8][4], Real_t volinv)
{/*{{{*/
Real_t hourmodx, hourmody, hourmodz;
hourmodx = xn[0] + xn[1] - xn[2] - xn[3] - xn[4] - xn[5] + xn[6] + xn[7];
hourmody = yn[0] + yn[1] - yn[2] - yn[3] - yn[4] - yn[5] + yn[6] + yn[7];
hourmodz = zn[0] + zn[1] - zn[2] - zn[3] - zn[4] - zn[5] + zn[6] + zn[7]; // 21
hourgam[0][0] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][0] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][0] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][0] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][0] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][0] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][0] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][0] = 1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz); // 60
hourmodx = xn[0] - xn[1] - xn[2] + xn[3] - xn[4] + xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] - yn[2] + yn[3] - yn[4] + yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] - zn[2] + zn[3] - zn[4] + zn[5] + zn[6] - zn[7];
hourgam[0][1] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][1] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][1] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][1] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][1] = -1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][1] = 1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][1] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][1] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = xn[0] - xn[1] + xn[2] - xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = yn[0] - yn[1] + yn[2] - yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = zn[0] - zn[1] + zn[2] - zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][2] = 1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][2] = -1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][2] = 1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][2] = -1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][2] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][2] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][2] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][2] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
hourmodx = -xn[0] + xn[1] - xn[2] + xn[3] + xn[4] - xn[5] + xn[6] - xn[7];
hourmody = -yn[0] + yn[1] - yn[2] + yn[3] + yn[4] - yn[5] + yn[6] - yn[7];
hourmodz = -zn[0] + zn[1] - zn[2] + zn[3] + zn[4] - zn[5] + zn[6] - zn[7];
hourgam[0][3] = -1.0 - volinv*(dvdxn[0]*hourmodx + dvdyn[0]*hourmody + dvdzn[0]*hourmodz);
hourgam[1][3] = 1.0 - volinv*(dvdxn[1]*hourmodx + dvdyn[1]*hourmody + dvdzn[1]*hourmodz);
hourgam[2][3] = -1.0 - volinv*(dvdxn[2]*hourmodx + dvdyn[2]*hourmody + dvdzn[2]*hourmodz);
hourgam[3][3] = 1.0 - volinv*(dvdxn[3]*hourmodx + dvdyn[3]*hourmody + dvdzn[3]*hourmodz);
hourgam[4][3] = 1.0 - volinv*(dvdxn[4]*hourmodx + dvdyn[4]*hourmody + dvdzn[4]*hourmodz);
hourgam[5][3] = -1.0 - volinv*(dvdxn[5]*hourmodx + dvdyn[5]*hourmody + dvdzn[5]*hourmodz);
hourgam[6][3] = 1.0 - volinv*(dvdxn[6]*hourmodx + dvdyn[6]*hourmody + dvdzn[6]*hourmodz);
hourgam[7][3] = -1.0 - volinv*(dvdxn[7]*hourmodx + dvdyn[7]*hourmody + dvdzn[7]*hourmodz);
}/*}}}*/
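/* One thread per element: fuses the stress force (-sigma * B) and, when
   hourg > 0, the FB hourglass force into a single pass. With
   DOUBLE_PRECISION the eight corner forces are written to
   fx_elem/fy_elem/fz_elem and gathered per node afterwards
   (AddNodeForcesFromElems_kernel); otherwise they are accumulated directly
   into the nodal force arrays with atomicAdd. */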
template< bool hourg_gt_zero >
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,4)
#else
__launch_bounds__(64,8)
#endif
void CalcVolumeForceForElems_kernel
(/*{{{*/
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
const Real_t* __restrict__ x, const Real_t* __restrict__ y, const Real_t* __restrict__ z,
const Real_t* __restrict__ xd, const Real_t* __restrict__ yd, const Real_t* __restrict__ zd,
//TextureObj<Real_t> x, TextureObj<Real_t> y, TextureObj<Real_t> z,
//TextureObj<Real_t> xd, TextureObj<Real_t> yd, TextureObj<Real_t> zd,
//TextureObj<Real_t>* x, TextureObj<Real_t>* y, TextureObj<Real_t>* z,
//TextureObj<Real_t>* xd, TextureObj<Real_t>* yd, TextureObj<Real_t>* zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)/*}}}*/
{/*{{{*/
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn[8],yn[8],zn[8];
Real_t xdn[8],ydn[8],zdn[8];
Real_t dvdxn[8],dvdyn[8],dvdzn[8];
Real_t hgfx[8],hgfy[8],hgfz[8];
Real_t hourgam[8][4];
Real_t coefficient;
int elem=blockDim.x*blockIdx.x+threadIdx.x;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t n[8];
#pragma unroll
for (int i=0;i<8;i++) {
n[i] = nodelist[elem+i*padded_numElem];
}
Real_t volinv = Real_t(1.0) / det;
//#pragma unroll
//for (int i=0;i<8;i++) {
// xn[i] =x[n[i]];
// yn[i] =y[n[i]];
// zn[i] =z[n[i]];
//}
#pragma unroll
for (int i=0;i<8;i++)
xn[i] =x[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
yn[i] =y[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zn[i] =z[n[i]];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
CalcElemVolumeDerivative(dvdxn, dvdyn, dvdzn, xn, yn, zn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
CalcHourglassModes(xn,yn,zn,dvdxn,dvdyn,dvdzn,hourgam,volinv);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t B[3][8];
CalcElemShapeFunctionDerivatives(xn, yn, zn, B, &det);
CalcElemNodeNormals( B[0] , B[1], B[2], xn, yn, zn);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
#pragma unroll
for (int i=0;i<8;i++)
{
hgfx[i] = -( sigxx*B[0][i] );
hgfy[i] = -( sigxx*B[1][i] );
hgfz[i] = -( sigxx*B[2][i] );
}
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
// #pragma unroll
// for (int i=0;i<8;i++) {
// xdn[i] =xd[n[i]];
// ydn[i] =yd[n[i]];
// zdn[i] =zd[n[i]];
// }
#pragma unroll
for (int i=0;i<8;i++)
xdn[i] =xd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
ydn[i] =yd[n[i]];
#pragma unroll
for (int i=0;i<8;i++)
zdn[i] =zd[n[i]];
CalcElemFBHourglassForce
( &xdn[0],&ydn[0],&zdn[0],
hourgam[0],hourgam[1],hourgam[2],hourgam[3],
hourgam[4],hourgam[5],hourgam[6],hourgam[7],
coefficient,
&hgfx[0],&hgfy[0],&hgfz[0]
);
}
#ifdef DOUBLE_PRECISION
#pragma unroll
for (int node=0;node<8;node++)
{
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx[node];
fy_elem[store_loc]=hgfy[node];
fz_elem[store_loc]=hgfz[node];
}
#else
#pragma unroll
for (int i=0;i<8;i++)
{
Index_t ni= n[i];
atomicAdd(&fx_node[ni],hgfx[i]);
atomicAdd(&fy_node[ni],hgfy[i]);
atomicAdd(&fz_node[ni],hgfz[i]);
}
#endif
} // If elem < numElem
}/*}}}*/
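/* Warp-cooperative variant: eight consecutive threads handle one element
   (elem = tid >> 3, node = tid & 7), so four elements share a 32-lane warp.
   Each lane keeps its node's data in registers and the per-element sums are
   formed with width-8 warp shuffles instead of per-thread loops over all
   eight nodes. */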
template< bool hourg_gt_zero, int cta_size>
__global__
void CalcVolumeForceForElems_kernel_warp_per_4cell
(/*{{{*/
const Real_t* __restrict__ volo,
const Real_t* __restrict__ v,
const Real_t* __restrict__ p,
const Real_t* __restrict__ q,
Real_t hourg,
Index_t numElem,
Index_t padded_numElem,
const Index_t* __restrict__ nodelist,
const Real_t* __restrict__ ss,
const Real_t* __restrict__ elemMass,
//const Real_t __restrict__ *x, const Real_t __restrict__ *y, const Real_t __restrict__ *z,
//const Real_t __restrict__ *xd, const Real_t __restrict__ *yd, const Real_t __restrict__ *zd,
const Real_t *x, const Real_t *y, const Real_t *z,
const Real_t *xd, const Real_t *yd, const Real_t *zd,
#ifdef DOUBLE_PRECISION // For floats, use atomicAdd
Real_t* __restrict__ fx_elem,
Real_t* __restrict__ fy_elem,
Real_t* __restrict__ fz_elem,
#else
Real_t* __restrict__ fx_node,
Real_t* __restrict__ fy_node,
Real_t* __restrict__ fz_node,
#endif
Index_t* __restrict__ bad_vol,
const Index_t num_threads)/*}}}*/
{/*{{{*/
/*************************************************
* FUNCTION: Calculates the volume forces
*************************************************/
Real_t xn,yn,zn;
Real_t xdn,ydn,zdn;
Real_t dvdxn,dvdyn,dvdzn;
Real_t hgfx,hgfy,hgfz;
Real_t hourgam[4];
Real_t coefficient;
int tid=blockDim.x*blockIdx.x+threadIdx.x;
int elem = tid >> 3; // elem = tid/8
int node = tid & 7; // node = tid%8
// elem within cta
// int cta_elem = threadIdx.x/8;
if (elem < num_threads)
{
Real_t volume = v[elem];
Real_t det = volo[elem] * volume;
// Check for bad volume
if (volume < 0.) {
*bad_vol = elem;
}
Real_t ss1 = ss[elem];
Real_t mass1 = elemMass[elem];
Real_t sigxx = -p[elem] - q[elem];
Index_t node_id;
node_id = nodelist[elem+node*padded_numElem];
Real_t volinv = Real_t(1.0) / det;
xn =x[node_id];
yn =y[node_id];
zn =z[node_id];
Real_t volume13 = CBRT(det);
coefficient = - hourg * Real_t(0.01) * ss1 * mass1 / volume13;
/*************************************************/
/* compute the volume derivatives */
/*************************************************/
unsigned int ind0,ind1,ind2,ind3,ind4,ind5;
// Use octal number to represent the indices for each node
//ind0 = 012307456;
//ind1 = 023016745;
//ind2 = 030125674;
//ind3 = 045670123;
//ind4 = 056743012;
//ind5 = 074561230;
//int mask = 7u << (3*node);
switch(node) {
case 0:
{ind0=1; ind1=2; ind2=3; ind3=4; ind4=5; ind5=7;
break;}
case 1:
{ind0=2; ind1=3; ind2=0; ind3=5; ind4=6; ind5=4;
break;}
case 2:
{ind0=3; ind1=0; ind2=1; ind3=6; ind4=7; ind5=5;
break;}
case 3:
{ind0=0; ind1=1; ind2=2; ind3=7; ind4=4; ind5=6;
break;}
case 4:
{ind0=7; ind1=6; ind2=5; ind3=0; ind4=3; ind5=1;
break;}
case 5:
{ind0=4; ind1=7; ind2=6; ind3=1; ind4=0; ind5=2;
break;}
case 6:
{ind0=5; ind1=4; ind2=7; ind3=2; ind4=1; ind5=3;
break;}
case 7:
{ind0=6; ind1=5; ind2=4; ind3=3; ind4=2; ind5=0;
break;}
}
VOLUDER(utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
dvdxn);
VOLUDER(utils::shfl(zn,ind0,8),utils::shfl(zn,ind1,8),utils::shfl(zn,ind2,8),
utils::shfl(zn,ind3,8),utils::shfl(zn,ind4,8),utils::shfl(zn,ind5,8),
utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
dvdyn);
VOLUDER(utils::shfl(xn,ind0,8),utils::shfl(xn,ind1,8),utils::shfl(xn,ind2,8),
utils::shfl(xn,ind3,8),utils::shfl(xn,ind4,8),utils::shfl(xn,ind5,8),
utils::shfl(yn,ind0,8),utils::shfl(yn,ind1,8),utils::shfl(yn,ind2,8),
utils::shfl(yn,ind3,8),utils::shfl(yn,ind4,8),utils::shfl(yn,ind5,8),
dvdzn);
/*************************************************/
/* compute the hourglass modes */
/*************************************************/
Real_t hourmodx, hourmody, hourmodz;
const Real_t posf = Real_t( 1.);
const Real_t negf = Real_t(-1.);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==2 || node==3 || node==4 || node==5) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[0] = negf;
}
else hourgam[0] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[0] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==2 || node==4 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[1] = negf;
}
else hourgam[1] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[1] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==1 || node==3 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[2] = negf;
}
else hourgam[2] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[2] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
hourmodx=xn; hourmody=yn; hourmodz=zn;
if (node==0 || node==2 || node==5 || node==7) {
hourmodx *= negf; hourmody *= negf; hourmodz *= negf;
hourgam[3] = negf;
}
else hourgam[3] = posf;
SumOverNodesShfl(hourmodx);
SumOverNodesShfl(hourmody);
SumOverNodesShfl(hourmodz);
hourgam[3] -= volinv*(dvdxn*hourmodx + dvdyn*hourmody + dvdzn*hourmodz);
/*************************************************/
/* CalcStressForElems */
/*************************************************/
Real_t b[3];
/*************************************************/
//CalcElemShapeFunctionDerivatives_warp_per_4cell(xn, yn, zn, B, &det);
/*************************************************/
Real_t fjxxi, fjxet, fjxze;
Real_t fjyxi, fjyet, fjyze;
Real_t fjzxi, fjzet, fjzze;
fjxxi = fjxet = fjxze = Real_t(0.125)*xn;
fjyxi = fjyet = fjyze = Real_t(0.125)*yn;
fjzxi = fjzet = fjzze = Real_t(0.125)*zn;
if (node==0 || node==3 || node==7 || node==4)
{
fjxxi = -fjxxi;
fjyxi = -fjyxi;
fjzxi = -fjzxi;
}
if (node==0 || node==5 || node==1 || node==4)
{
fjxet = -fjxet;
fjyet = -fjyet;
fjzet = -fjzet;
}
if (node==0 || node==3 || node==1 || node==2)
{
fjxze = -fjxze;
fjyze = -fjyze;
fjzze = -fjzze;
}
SumOverNodesShfl(fjxxi);
SumOverNodesShfl(fjxet);
SumOverNodesShfl(fjxze);
SumOverNodesShfl(fjyxi);
SumOverNodesShfl(fjyet);
SumOverNodesShfl(fjyze);
SumOverNodesShfl(fjzxi);
SumOverNodesShfl(fjzet);
SumOverNodesShfl(fjzze);
/* compute cofactors */
Real_t cjxxi, cjxet, cjxze;
Real_t cjyxi, cjyet, cjyze;
Real_t cjzxi, cjzet, cjzze;
cjxxi = (fjyet * fjzze) - (fjzet * fjyze);
cjxet = - (fjyxi * fjzze) + (fjzxi * fjyze);
cjxze = (fjyxi * fjzet) - (fjzxi * fjyet);
cjyxi = - (fjxet * fjzze) + (fjzet * fjxze);
cjyet = (fjxxi * fjzze) - (fjzxi * fjxze);
cjyze = - (fjxxi * fjzet) + (fjzxi * fjxet);
cjzxi = (fjxet * fjyze) - (fjyet * fjxze);
cjzet = - (fjxxi * fjyze) + (fjyxi * fjxze);
cjzze = (fjxxi * fjyet) - (fjyxi * fjxet);
Real_t coef_xi, coef_et, coef_ze;
if (node==0 || node==3 || node==4 || node==7)
coef_xi = Real_t(-1.);
else
coef_xi = Real_t(1.);
if (node==0 || node==1 || node==4 || node==5)
coef_et = Real_t(-1.);
else
coef_et = Real_t(1.);
if (node==0 || node==1 || node==2 || node==3)
coef_ze = Real_t(-1.);
else
coef_ze = Real_t(1.);
/* calculate partials:
this need only be done for l = 0,1,2,3 since, by symmetry,
(6,7,4,5) = -(0,1,2,3).
*/
b[0] = coef_xi * cjxxi + coef_et * cjxet + coef_ze * cjxze;
b[1] = coef_xi * cjyxi + coef_et * cjyet + coef_ze * cjyze;
b[2] = coef_xi * cjzxi + coef_et * cjzet + coef_ze * cjzze;
/* calculate jacobian determinant (volume) */
det = Real_t(8.) * ( fjxet * cjxet + fjyet * cjyet + fjzet * cjzet);
/*************************************************/
//CalcElemNodeNormals_warp_per_4cell( B[0] , B[1], B[2], xn, yn, zn);
/*************************************************/
b[0] = Real_t(0.0);
b[1] = Real_t(0.0);
b[2] = Real_t(0.0);
// Six faces: each call below accumulates this node's share of one face normal
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,1,2,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 0,4,5,1);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 1,5,6,2);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 2,6,7,3);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 3,7,4,0);
SumElemFaceNormal_warp_per_4cell(&b[0], &b[1], &b[2],
xn, yn, zn, node, 4,7,6,5);
// Check for bad volume
if (det < 0.) {
*bad_vol = elem;
}
hgfx = -( sigxx*b[0] );
hgfy = -( sigxx*b[1] );
hgfz = -( sigxx*b[2] );
if (hourg_gt_zero)
{
/*************************************************/
/* CalcFBHourglassForceForElems */
/*************************************************/
xdn = xd[node_id];
ydn = yd[node_id];
zdn = zd[node_id];
Real_t hgfx_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*xdn;
SumOverNodesShfl(h);
hgfx_temp+=hourgam[i]*h;
}
hgfx_temp *= coefficient;
hgfx += hgfx_temp;
Real_t hgfy_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*ydn;
SumOverNodesShfl(h);
hgfy_temp+=hourgam[i]*h;
}
hgfy_temp *= coefficient;
hgfy += hgfy_temp;
Real_t hgfz_temp=0;
#pragma unroll
for (int i=0;i<4;i++) {
Real_t h;
h=hourgam[i]*zdn;
SumOverNodesShfl(h);
hgfz_temp+=hourgam[i]*h;
}
hgfz_temp *= coefficient;
hgfz += hgfz_temp;
}
#ifdef DOUBLE_PRECISION
Index_t store_loc = elem+padded_numElem*node;
fx_elem[store_loc]=hgfx;
fy_elem[store_loc]=hgfy;
fz_elem[store_loc]=hgfz;
#else
atomicAdd(&fx_node[node_id],hgfx);
atomicAdd(&fy_node[node_id],hgfy);
atomicAdd(&fz_node[node_id],hgfz);
#endif
} // If elem < numElem
}/*}}}*/
static inline
void CalcVolumeForceForElems(const Real_t hgcoef,Domain *domain)
{/*{{{*/
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
#ifdef DOUBLE_PRECISION
Vector_d<Real_t>* fx_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fy_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
Vector_d<Real_t>* fz_elem = Allocator< Vector_d<Real_t> >::allocate(padded_numElem*8);
#else
thrust::fill(domain->fx.begin(),domain->fx.end(),0.);
thrust::fill(domain->fy.begin(),domain->fy.end(),0.);
thrust::fill(domain->fz.begin(),domain->fz.end(),0.);
#endif
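// Design note: with DOUBLE_PRECISION, per-element nodal forces are staged in
// fx_elem/fy_elem/fz_elem (8 entries per element) and gathered per node by
// AddNodeForcesFromElems_kernel further below, which keeps the summation order
// deterministic and avoids relying on double-precision atomicAdd (unavailable on
// older GPUs); in single precision the kernel accumulates directly into the nodal
// arrays with atomicAdd, which is why fx/fy/fz are zeroed in the branch above.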
int num_threads = numElem ;
//EJ
const int block_size = 64;
//int block_size = global_block_size ;
//EJ end
int dimGrid = PAD_DIV(num_threads,block_size);
bool hourg_gt_zero = hgcoef > Real_t(0.0);
if (hourg_gt_zero)
{
CalcVolumeForceForElems_kernel<true> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
else
{
CalcVolumeForceForElems_kernel<false> <<<dimGrid,block_size>>>
( domain->volo.raw(),
domain->v.raw(),
domain->p.raw(),
domain->q.raw(),
hgcoef, numElem, padded_numElem,
domain->nodelist.raw(),
domain->ss.raw(),
domain->elemMass.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
#ifdef DOUBLE_PRECISION
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw() ,
#else
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
#endif
domain->bad_vol_h,
num_threads
);
}
#ifdef DOUBLE_PRECISION
num_threads = domain->numNode;
// Launch boundary nodes first
dimGrid= PAD_DIV(num_threads,block_size);
AddNodeForcesFromElems_kernel<<<dimGrid,block_size>>>
( domain->numNode,
domain->padded_numNode,
domain->nodeElemCount.raw(),
domain->nodeElemStart.raw(),
domain->nodeElemCornerList.raw(),
fx_elem->raw(),
fy_elem->raw(),
fz_elem->raw(),
domain->fx.raw(),
domain->fy.raw(),
domain->fz.raw(),
num_threads
);
// cudaDeviceSynchronize();
// cudaCheckError();
Allocator<Vector_d<Real_t> >::free(fx_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fy_elem,padded_numElem*8);
Allocator<Vector_d<Real_t> >::free(fz_elem,padded_numElem*8);
#endif // ifdef DOUBLE_PRECISION
return ;
}/*}}}*/
static inline
void CalcVolumeForceForElems(Domain* domain)
{/*{{{*/
const Real_t hgcoef = domain->hgcoef ;
CalcVolumeForceForElems(hgcoef,domain);
//CalcVolumeForceForElems_warp_per_4cell(hgcoef,domain);
}/*}}}*/
static inline void checkErrors(Domain* domain,int its,int myRank)
{/*{{{*/
if (*(domain->bad_vol_h) != -1)
{
printf("Rank %i: Volume Error in cell %d at iteration %d\n",myRank,*(domain->bad_vol_h),its);
exit(VolumeError);
}
if (*(domain->bad_q_h) != -1)
{
printf("Rank %i: Q Error in cell %d at iteration %d\n",myRank,*(domain->bad_q_h),its);
exit(QStopError);
}
}/*}}}*/
static inline void CalcForceForNodes(Domain *domain)
{/*{{{*/
#if USE_MPI
CommRecv(*domain, MSG_COMM_SBN, 3,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false) ;
#endif
CalcVolumeForceForElems(domain);
// moved here from the main loop to allow async execution with GPU work
TimeIncrement(domain);
#if USE_MPI
// initialize pointers
domain->d_fx = domain->fx.raw();
domain->d_fy = domain->fy.raw();
domain->d_fz = domain->fz.raw();
Domain_member fieldData[3] ;
fieldData[0] = &Domain::get_fx ;
fieldData[1] = &Domain::get_fy ;
fieldData[2] = &Domain::get_fz ;
CommSendGpu(*domain, MSG_COMM_SBN, 3, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
true, false, domain->streams[2]) ;
CommSBNGpu(*domain, 3, fieldData, &domain->streams[2]) ;
#endif
}/*}}}*/
__global__
void CalcAccelerationForNodes_kernel(int numNode,
Real_t *xdd, Real_t *ydd, Real_t *zdd,
Real_t *fx, Real_t *fy, Real_t *fz,
Real_t *nodalMass)
{/*{{{*/
int tid=blockDim.x*blockIdx.x+threadIdx.x;
if (tid < numNode)
{
Real_t one_over_nMass = Real_t(1.)/nodalMass[tid];
xdd[tid]=fx[tid]*one_over_nMass;
ydd[tid]=fy[tid]*one_over_nMass;
zdd[tid]=fz[tid]*one_over_nMass;
}
}/*}}}*/
static inline
void CalcAccelerationForNodes(Domain *domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
CalcAccelerationForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode,
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw(),
domain->fx.raw(),domain->fy.raw(),domain->fz.raw(),
domain->nodalMass.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
__global__
void ApplyAccelerationBoundaryConditionsForNodes_kernel(
int numNodeBC, Real_t *xyzdd,
Index_t *symm)
{/*{{{*/
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNodeBC)
{
xyzdd[symm[i]] = Real_t(0.0) ;
}
}/*}}}*/
static inline
void ApplyAccelerationBoundaryConditionsForNodes(Domain *domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numSymmX,dimBlock);
if (domain->numSymmX > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmX,
domain->xdd.raw(),
domain->symmX.raw());
dimGrid = PAD_DIV(domain->numSymmY,dimBlock);
if (domain->numSymmY > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmY,
domain->ydd.raw(),
domain->symmY.raw());
dimGrid = PAD_DIV(domain->numSymmZ,dimBlock);
if (domain->numSymmZ > 0)
ApplyAccelerationBoundaryConditionsForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numSymmZ,
domain->zdd.raw(),
domain->symmZ.raw());
}/*}}}*/
__global__
void CalcPositionAndVelocityForNodes_kernel(int numNode,
const Real_t deltatime,
const Real_t u_cut,
Real_t* __restrict__ x, Real_t* __restrict__ y, Real_t* __restrict__ z,
Real_t* __restrict__ xd, Real_t* __restrict__ yd, Real_t* __restrict__ zd,
const Real_t* __restrict__ xdd, const Real_t* __restrict__ ydd, const Real_t* __restrict__ zdd)
{/*{{{*/
int i=blockDim.x*blockIdx.x+threadIdx.x;
if (i < numNode)
{
Real_t xdtmp, ydtmp, zdtmp, dt;
dt = deltatime;
xdtmp = xd[i] + xdd[i] * dt ;
ydtmp = yd[i] + ydd[i] * dt ;
zdtmp = zd[i] + zdd[i] * dt ;
if( FABS(xdtmp) < u_cut ) xdtmp = 0.0;
if( FABS(ydtmp) < u_cut ) ydtmp = 0.0;
if( FABS(zdtmp) < u_cut ) zdtmp = 0.0;
x[i] += xdtmp * dt;
y[i] += ydtmp * dt;
z[i] += zdtmp * dt;
xd[i] = xdtmp;
yd[i] = ydtmp;
zd[i] = zdtmp;
}
}/*}}}*/
static inline
void CalcPositionAndVelocityForNodes(const Real_t u_cut, Domain* domain)
{/*{{{*/
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(domain->numNode,dimBlock);
CalcPositionAndVelocityForNodes_kernel<<<dimGrid, dimBlock>>>
(domain->numNode,domain->deltatime_h,u_cut,
domain->x.raw(),domain->y.raw(),domain->z.raw(),
domain->xd.raw(),domain->yd.raw(),domain->zd.raw(),
domain->xdd.raw(),domain->ydd.raw(),domain->zdd.raw());
//cudaDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
static inline
void LagrangeNodal(Domain *domain)
{/*{{{*/
#ifdef SEDOV_SYNC_POS_VEL_EARLY
Domain_member fieldData[6] ;
#endif
Real_t u_cut = domain->u_cut ;
/* time of boundary condition evaluation is beginning of step for force and
* acceleration boundary conditions. */
CalcForceForNodes(domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
CommRecv(*domain, MSG_SYNC_POS_VEL, 6,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false) ;
#endif
#endif
CalcAccelerationForNodes(domain);
ApplyAccelerationBoundaryConditionsForNodes(domain);
CalcPositionAndVelocityForNodes(u_cut, domain);
#if USE_MPI
#ifdef SEDOV_SYNC_POS_VEL_EARLY
// initialize pointers
domain->d_x = domain->x.raw();
domain->d_y = domain->y.raw();
domain->d_z = domain->z.raw();
domain->d_xd = domain->xd.raw();
domain->d_yd = domain->yd.raw();
domain->d_zd = domain->zd.raw();
fieldData[0] = &Domain::get_x ;
fieldData[1] = &Domain::get_y ;
fieldData[2] = &Domain::get_z ;
fieldData[3] = &Domain::get_xd ;
fieldData[4] = &Domain::get_yd ;
fieldData[5] = &Domain::get_zd ;
CommSendGpu(*domain, MSG_SYNC_POS_VEL, 6, fieldData,
domain->sizeX + 1, domain->sizeY + 1, domain->sizeZ + 1,
false, false, domain->streams[2]) ;
CommSyncPosVelGpu(*domain, &domain->streams[2]) ;
#endif
#endif
return;
}/*}}}*/
__device__
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
const Real_t x2, const Real_t x3,
const Real_t y0, const Real_t y1,
const Real_t y2, const Real_t y3,
const Real_t z0, const Real_t z1,
const Real_t z2, const Real_t z3)
{/*{{{*/
Real_t fx = (x2 - x0) - (x3 - x1);
Real_t fy = (y2 - y0) - (y3 - y1);
Real_t fz = (z2 - z0) - (z3 - z1);
Real_t gx = (x2 - x0) + (x3 - x1);
Real_t gy = (y2 - y0) + (y3 - y1);
Real_t gz = (z2 - z0) + (z3 - z1);
Real_t temp = (fx * gx + fy * gy + fz * gz);
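// By Lagrange's identity |f|^2*|g|^2 - (f.g)^2 = |f x g|^2, so the value returned
// below is the squared magnitude of the cross product of the diagonal-based
// vectors f and g, i.e. proportional to the squared face area rather than the
// area itself; CalcElemCharacteristicLength accounts for this with the SQRT.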
Real_t area =
(fx * fx + fy * fy + fz * fz) *
(gx * gx + gy * gy + gz * gz) -
temp * temp;
return area ;
}/*}}}*/
__device__
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
const Real_t y[8],
const Real_t z[8],
const Real_t volume)
{/*{{{*/
Real_t a, charLength = Real_t(0.0);
a = AreaFace(x[0],x[1],x[2],x[3],
y[0],y[1],y[2],y[3],
z[0],z[1],z[2],z[3]) ; // 38
charLength = FMAX(a,charLength) ;
a = AreaFace(x[4],x[5],x[6],x[7],
y[4],y[5],y[6],y[7],
z[4],z[5],z[6],z[7]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[0],x[1],x[5],x[4],
y[0],y[1],y[5],y[4],
z[0],z[1],z[5],z[4]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[1],x[2],x[6],x[5],
y[1],y[2],y[6],y[5],
z[1],z[2],z[6],z[5]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[2],x[3],x[7],x[6],
y[2],y[3],y[7],y[6],
z[2],z[3],z[7],z[6]) ;
charLength = FMAX(a,charLength) ;
a = AreaFace(x[3],x[0],x[4],x[7],
y[3],y[0],y[4],y[7],
z[3],z[0],z[4],z[7]) ;
charLength = FMAX(a,charLength) ;
charLength = Real_t(4.0) * volume / SQRT(charLength);
return charLength;
}/*}}}*/
__device__
static
__forceinline__
void CalcElemVelocityGradient( const Real_t* const xvel,
const Real_t* const yvel,
const Real_t* const zvel,
const Real_t b[][8],
const Real_t detJ,
Real_t* const d )
{/*{{{*/
const Real_t inv_detJ = Real_t(1.0) / detJ ;
Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
const Real_t* const pfx = b[0];
const Real_t* const pfy = b[1];
const Real_t* const pfz = b[2];
Real_t tmp1 = (xvel[0]-xvel[6]);
Real_t tmp2 = (xvel[1]-xvel[7]);
Real_t tmp3 = (xvel[2]-xvel[4]);
Real_t tmp4 = (xvel[3]-xvel[5]);
d[0] = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dxddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dxddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (yvel[0]-yvel[6]);
tmp2 = (yvel[1]-yvel[7]);
tmp3 = (yvel[2]-yvel[4]);
tmp4 = (yvel[3]-yvel[5]);
d[1] = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
dyddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dyddz = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
tmp1 = (zvel[0]-zvel[6]);
tmp2 = (zvel[1]-zvel[7]);
tmp3 = (zvel[2]-zvel[4]);
tmp4 = (zvel[3]-zvel[5]);
d[2] = inv_detJ * ( pfz[0] * tmp1
+ pfz[1] * tmp2
+ pfz[2] * tmp3
+ pfz[3] * tmp4);
dzddx = inv_detJ * ( pfx[0] * tmp1
+ pfx[1] * tmp2
+ pfx[2] * tmp3
+ pfx[3] * tmp4);
dzddy = inv_detJ * ( pfy[0] * tmp1
+ pfy[1] * tmp2
+ pfy[2] * tmp3
+ pfy[3] * tmp4);
d[5] = Real_t( .5) * ( dxddy + dyddx );
d[4] = Real_t( .5) * ( dxddz + dzddx );
d[3] = Real_t( .5) * ( dzddy + dyddz );
}/*}}}*/
static __device__ __forceinline__
void CalcMonoGradient(Real_t *x, Real_t *y, Real_t *z,
Real_t *xv, Real_t *yv, Real_t *zv,
Real_t vol,
Real_t *delx_zeta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delv_xi,
Real_t *delx_eta,
Real_t *delv_eta)
{/*{{{*/
#define SUM4(a,b,c,d) (a + b + c + d)
const Real_t ptiny = Real_t(1.e-36) ;
Real_t ax,ay,az ;
Real_t dxv,dyv,dzv ;
Real_t norm = Real_t(1.0) / ( vol + ptiny ) ;
Real_t dxj = Real_t(-0.25)*(SUM4(x[0],x[1],x[5],x[4]) - SUM4(x[3],x[2],x[6],x[7])) ;
Real_t dyj = Real_t(-0.25)*(SUM4(y[0],y[1],y[5],y[4]) - SUM4(y[3],y[2],y[6],y[7])) ;
Real_t dzj = Real_t(-0.25)*(SUM4(z[0],z[1],z[5],z[4]) - SUM4(z[3],z[2],z[6],z[7])) ;
Real_t dxi = Real_t( 0.25)*(SUM4(x[1],x[2],x[6],x[5]) - SUM4(x[0],x[3],x[7],x[4])) ;
Real_t dyi = Real_t( 0.25)*(SUM4(y[1],y[2],y[6],y[5]) - SUM4(y[0],y[3],y[7],y[4])) ;
Real_t dzi = Real_t( 0.25)*(SUM4(z[1],z[2],z[6],z[5]) - SUM4(z[0],z[3],z[7],z[4])) ;
Real_t dxk = Real_t( 0.25)*(SUM4(x[4],x[5],x[6],x[7]) - SUM4(x[0],x[1],x[2],x[3])) ;
Real_t dyk = Real_t( 0.25)*(SUM4(y[4],y[5],y[6],y[7]) - SUM4(y[0],y[1],y[2],y[3])) ;
Real_t dzk = Real_t( 0.25)*(SUM4(z[4],z[5],z[6],z[7]) - SUM4(z[0],z[1],z[2],z[3])) ;
/* find delvk and delxk ( i cross j ) */
ax = dyi*dzj - dzi*dyj ;
ay = dzi*dxj - dxi*dzj ;
az = dxi*dyj - dyi*dxj ;
*delx_zeta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[4],xv[5],xv[6],xv[7]) - SUM4(xv[0],xv[1],xv[2],xv[3])) ;
dyv = Real_t(0.25)*(SUM4(yv[4],yv[5],yv[6],yv[7]) - SUM4(yv[0],yv[1],yv[2],yv[3])) ;
dzv = Real_t(0.25)*(SUM4(zv[4],zv[5],zv[6],zv[7]) - SUM4(zv[0],zv[1],zv[2],zv[3])) ;
*delv_zeta = ax*dxv + ay*dyv + az*dzv ;
/* find delxi and delvi ( j cross k ) */
ax = dyj*dzk - dzj*dyk ;
ay = dzj*dxk - dxj*dzk ;
az = dxj*dyk - dyj*dxk ;
*delx_xi = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(0.25)*(SUM4(xv[1],xv[2],xv[6],xv[5]) - SUM4(xv[0],xv[3],xv[7],xv[4])) ;
dyv = Real_t(0.25)*(SUM4(yv[1],yv[2],yv[6],yv[5]) - SUM4(yv[0],yv[3],yv[7],yv[4])) ;
dzv = Real_t(0.25)*(SUM4(zv[1],zv[2],zv[6],zv[5]) - SUM4(zv[0],zv[3],zv[7],zv[4])) ;
*delv_xi = ax*dxv + ay*dyv + az*dzv ;
/* find delxj and delvj ( k cross i ) */
ax = dyk*dzi - dzk*dyi ;
ay = dzk*dxi - dxk*dzi ;
az = dxk*dyi - dyk*dxi ;
*delx_eta = vol / SQRT(ax*ax + ay*ay + az*az + ptiny) ;
ax *= norm ;
ay *= norm ;
az *= norm ;
dxv = Real_t(-0.25)*(SUM4(xv[0],xv[1],xv[5],xv[4]) - SUM4(xv[3],xv[2],xv[6],xv[7])) ;
dyv = Real_t(-0.25)*(SUM4(yv[0],yv[1],yv[5],yv[4]) - SUM4(yv[3],yv[2],yv[6],yv[7])) ;
dzv = Real_t(-0.25)*(SUM4(zv[0],zv[1],zv[5],zv[4]) - SUM4(zv[3],zv[2],zv[6],zv[7])) ;
*delv_eta = ax*dxv + ay*dyv + az*dzv ;
#undef SUM4
}/*}}}*/
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(64,8) // 64-bit
#else
__launch_bounds__(64,16) // 32-bit
#endif
void CalcKinematicsAndMonotonicQGradient_kernel
(/*{{{*/
Index_t numElem, Index_t padded_numElem, const Real_t dt,
const Index_t* __restrict__ nodelist, const Real_t* __restrict__ volo, const Real_t* __restrict__ v,
const Real_t* __restrict__ x,
const Real_t* __restrict__ y,
const Real_t* __restrict__ z,
const Real_t* __restrict__ xd,
const Real_t* __restrict__ yd,
const Real_t* __restrict__ zd,
Real_t* __restrict__ vnew,
Real_t* __restrict__ delv,
Real_t* __restrict__ arealg,
Real_t* __restrict__ dxx,
Real_t* __restrict__ dyy,
Real_t* __restrict__ dzz,
Real_t* __restrict__ vdov,
Real_t* __restrict__ delx_zeta,
Real_t* __restrict__ delv_zeta,
Real_t* __restrict__ delx_xi,
Real_t* __restrict__ delv_xi,
Real_t* __restrict__ delx_eta,
Real_t* __restrict__ delv_eta,
Index_t* __restrict__ bad_vol,
const Index_t num_threads
)/*}}}*/
{/*{{{*/
Real_t B[3][8] ; /** shape function derivatives */
Index_t nodes[8] ;
Real_t x_local[8] ;
Real_t y_local[8] ;
Real_t z_local[8] ;
Real_t xd_local[8] ;
Real_t yd_local[8] ;
Real_t zd_local[8] ;
Real_t D[6];
int k=blockDim.x*blockIdx.x+threadIdx.x;
if ( k < num_threads) {
Real_t volume ;
Real_t relativeVolume ;
// get nodal coordinates from global arrays and copy into local arrays.
//#pragma unroll
//for( Index_t lnode=0 ; lnode<8 ; ++lnode )
//{
// Index_t gnode = nodelist[k+lnode*padded_numElem];
// nodes[lnode] = gnode;
// x_local[lnode] = x[gnode];
// y_local[lnode] = y[gnode];
// z_local[lnode] = z[gnode];
//}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodelist[k+lnode*padded_numElem];
nodes[lnode] = gnode;
}
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
x_local[lnode] = x[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
y_local[lnode] = y[nodes[lnode]];
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
z_local[lnode] = z[nodes[lnode]];
// volume calculations
volume = CalcElemVolume(x_local, y_local, z_local );
relativeVolume = volume / volo[k] ;
vnew[k] = relativeVolume ;
delv[k] = relativeVolume - v[k] ;
// set characteristic length
arealg[k] = CalcElemCharacteristicLength(x_local,y_local,z_local,volume);
// get nodal velocities from global array and copy into local arrays.
#pragma unroll
for( Index_t lnode=0 ; lnode<8 ; ++lnode )
{
Index_t gnode = nodes[lnode];
xd_local[lnode] = xd[gnode];
yd_local[lnode] = yd[gnode];
zd_local[lnode] = zd[gnode];
}
Real_t dt2 = Real_t(0.5) * dt;
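// Step the nodal coordinates back by dt/2 so that the shape-function derivatives,
// and hence the velocity gradient below, are evaluated at the mid-step
// configuration; the full-step positions are restored before the monotonic Q
// gradient ("Undo x_local update" further down).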
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j )
{
x_local[j] -= dt2 * xd_local[j];
y_local[j] -= dt2 * yd_local[j];
z_local[j] -= dt2 * zd_local[j];
}
Real_t detJ;
CalcElemShapeFunctionDerivatives(x_local,y_local,z_local,B,&detJ );
CalcElemVelocityGradient(xd_local,yd_local,zd_local,B,detJ,D);
// ------------------------
// CALC LAGRANGE ELEM 2
// ------------------------
// calc strain rate and apply as constraint (only done in FB element)
Real_t vdovNew = D[0] + D[1] + D[2];
Real_t vdovthird = vdovNew/Real_t(3.0) ;
// make the rate of deformation tensor deviatoric
vdov[k] = vdovNew ;
dxx[k] = D[0] - vdovthird ;
dyy[k] = D[1] - vdovthird ;
dzz[k] = D[2] - vdovthird ;
// ------------------------
// CALC MONOTONIC Q GRADIENT
// ------------------------
Real_t vol = volo[k]*vnew[k];
// Undo x_local update
#pragma unroll
for ( Index_t j=0 ; j<8 ; ++j ) {
x_local[j] += dt2 * xd_local[j];
y_local[j] += dt2 * yd_local[j];
z_local[j] += dt2 * zd_local[j];
}
CalcMonoGradient(x_local,y_local,z_local,xd_local,yd_local,zd_local,
vol,
&delx_zeta[k],&delv_zeta[k],&delx_xi[k],
&delv_xi[k], &delx_eta[k], &delv_eta[k]);
//Check for bad volume
if (relativeVolume < 0)
*bad_vol = k;
}
}/*}}}*/
static inline
void CalcKinematicsAndMonotonicQGradient(Domain *domain)
{/*{{{*/
Index_t numElem = domain->numElem ;
Index_t padded_numElem = domain->padded_numElem;
int num_threads = numElem;
const int block_size = 64;
int dimGrid = PAD_DIV(num_threads,block_size);
CalcKinematicsAndMonotonicQGradient_kernel<<<dimGrid,block_size>>>
( numElem,padded_numElem, domain->deltatime_h,
domain->nodelist.raw(),
domain->volo.raw(),
domain->v.raw(),
domain->x.raw(), domain->y.raw(), domain->z.raw(), domain->xd.raw(), domain->yd.raw(), domain->zd.raw(),
domain->vnew->raw(),
domain->delv.raw(),
domain->arealg.raw(),
domain->dxx->raw(),
domain->dyy->raw(),
domain->dzz->raw(),
domain->vdov.raw(),
domain->delx_zeta->raw(),
domain->delv_zeta->raw(),
domain->delx_xi->raw(),
domain->delv_xi->raw(),
domain->delx_eta->raw(),
domain->delv_eta->raw(),
domain->bad_vol_h,
num_threads
);
//cudaDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcMonotonicQRegionForElems_kernel
(/*{{{*/
Real_t qlc_monoq,
Real_t qqc_monoq,
Real_t monoq_limiter_mult,
Real_t monoq_max_slope,
Real_t ptiny,
// the elementset length
Index_t elength,
Index_t* regElemlist,
// const Index_t* __restrict__ regElemlist,
Index_t *elemBC,
Index_t *lxim,
Index_t *lxip,
Index_t *letam,
Index_t *letap,
Index_t *lzetam,
Index_t *lzetap,
Real_t *delv_xi,
Real_t *delv_eta,
Real_t *delv_zeta,
Real_t *delx_xi,
Real_t *delx_eta,
Real_t *delx_zeta,
Real_t *vdov,Real_t *elemMass,Real_t *volo,Real_t *vnew,
Real_t *qq, Real_t *ql,
Real_t *q,
Real_t qstop,
Index_t* bad_q
)/*}}}*/
{/*{{{*/
int ielem=blockDim.x*blockIdx.x + threadIdx.x;
if (ielem<elength) {
Real_t qlin, qquad ;
Real_t phixi, phieta, phizeta ;
Index_t i = regElemlist[ielem];
Int_t bcMask = elemBC[i] ;
Real_t delvm, delvp ;
/* phixi */
Real_t norm = Real_t(1.) / ( delv_xi[i] + ptiny ) ;
switch (bcMask & XI_M) {
case XI_M_COMM: /* needs comm data */
case 0: delvm = delv_xi[lxim[i]] ; break ;
case XI_M_SYMM: delvm = delv_xi[i] ; break ;
case XI_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & XI_P) {
case XI_P_COMM: /* needs comm data */
case 0: delvp = delv_xi[lxip[i]] ; break ;
case XI_P_SYMM: delvp = delv_xi[i] ; break ;
case XI_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phixi = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phixi ) phixi = delvm ;
if ( delvp < phixi ) phixi = delvp ;
if ( phixi < Real_t(0.)) phixi = Real_t(0.) ;
if ( phixi > monoq_max_slope) phixi = monoq_max_slope;
/* phieta */
norm = Real_t(1.) / ( delv_eta[i] + ptiny ) ;
switch (bcMask & ETA_M) {
case ETA_M_COMM: /* needs comm data */
case 0: delvm = delv_eta[letam[i]] ; break ;
case ETA_M_SYMM: delvm = delv_eta[i] ; break ;
case ETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ETA_P) {
case ETA_P_COMM: /* needs comm data */
case 0: delvp = delv_eta[letap[i]] ; break ;
case ETA_P_SYMM: delvp = delv_eta[i] ; break ;
case ETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phieta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phieta ) phieta = delvm ;
if ( delvp < phieta ) phieta = delvp ;
if ( phieta < Real_t(0.)) phieta = Real_t(0.) ;
if ( phieta > monoq_max_slope) phieta = monoq_max_slope;
/* phizeta */
norm = Real_t(1.) / ( delv_zeta[i] + ptiny ) ;
switch (bcMask & ZETA_M) {
case ZETA_M_COMM: /* needs comm data */
case 0: delvm = delv_zeta[lzetam[i]] ; break ;
case ZETA_M_SYMM: delvm = delv_zeta[i] ; break ;
case ZETA_M_FREE: delvm = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
switch (bcMask & ZETA_P) {
case ZETA_P_COMM: /* needs comm data */
case 0: delvp = delv_zeta[lzetap[i]] ; break ;
case ZETA_P_SYMM: delvp = delv_zeta[i] ; break ;
case ZETA_P_FREE: delvp = Real_t(0.0) ; break ;
default: /* ERROR */ ; break ;
}
delvm = delvm * norm ;
delvp = delvp * norm ;
phizeta = Real_t(.5) * ( delvm + delvp ) ;
delvm *= monoq_limiter_mult ;
delvp *= monoq_limiter_mult ;
if ( delvm < phizeta ) phizeta = delvm ;
if ( delvp < phizeta ) phizeta = delvp ;
if ( phizeta < Real_t(0.)) phizeta = Real_t(0.);
if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;
/* Remove length scale */
if ( vdov[i] > Real_t(0.) ) {
qlin = Real_t(0.) ;
qquad = Real_t(0.) ;
}
else {
Real_t delvxxi = delv_xi[i] * delx_xi[i] ;
Real_t delvxeta = delv_eta[i] * delx_eta[i] ;
Real_t delvxzeta = delv_zeta[i] * delx_zeta[i] ;
if ( delvxxi > Real_t(0.) ) delvxxi = Real_t(0.) ;
if ( delvxeta > Real_t(0.) ) delvxeta = Real_t(0.) ;
if ( delvxzeta > Real_t(0.) ) delvxzeta = Real_t(0.) ;
Real_t rho = elemMass[i] / (volo[i] * vnew[i]) ;
qlin = -qlc_monoq * rho *
( delvxxi * (Real_t(1.) - phixi) +
delvxeta * (Real_t(1.) - phieta) +
delvxzeta * (Real_t(1.) - phizeta) ) ;
qquad = qqc_monoq * rho *
( delvxxi*delvxxi * (Real_t(1.) - phixi*phixi) +
delvxeta*delvxeta * (Real_t(1.) - phieta*phieta) +
delvxzeta*delvxzeta * (Real_t(1.) - phizeta*phizeta) ) ;
}
qq[i] = qquad ;
ql[i] = qlin ;
// Don't allow excessive artificial viscosity
if (q[i] > qstop)
*(bad_q) = i;
}
}/*}}}*/
static inline
void CalcMonotonicQRegionForElems(Domain *domain)
{/*{{{*/
const Real_t ptiny = Real_t(1.e-36) ;
Real_t monoq_max_slope = domain->monoq_max_slope ;
Real_t monoq_limiter_mult = domain->monoq_limiter_mult ;
Real_t qlc_monoq = domain->qlc_monoq;
Real_t qqc_monoq = domain->qqc_monoq;
Index_t elength = domain->numElem;
Index_t dimBlock= 128;
Index_t dimGrid = PAD_DIV(elength,dimBlock);
CalcMonotonicQRegionForElems_kernel<<<dimGrid,dimBlock>>>
( qlc_monoq,qqc_monoq,monoq_limiter_mult,monoq_max_slope,ptiny,elength,
domain->regElemlist.raw(),domain->elemBC.raw(),
domain->lxim.raw(),domain->lxip.raw(),
domain->letam.raw(),domain->letap.raw(),
domain->lzetam.raw(),domain->lzetap.raw(),
domain->delv_xi->raw(),domain->delv_eta->raw(),domain->delv_zeta->raw(),
domain->delx_xi->raw(),domain->delx_eta->raw(),domain->delx_zeta->raw(),
domain->vdov.raw(),domain->elemMass.raw(),domain->volo.raw(),domain->vnew->raw(),
domain->qq.raw(),domain->ql.raw(),
domain->q.raw(),
domain->qstop,
domain->bad_q_h
);
//cudaDeviceSynchronize();
//cudaCheckError();
}/*}}}*/
static
__device__ __forceinline__
void CalcPressureForElems_device(
Real_t& p_new, Real_t& bvc,
Real_t& pbvc, Real_t& e_old,
Real_t& compression, Real_t& vnewc,
Real_t pmin,
Real_t p_cut, Real_t eosvmax)
{/*{{{*/
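// Note: with compression = 1/vnewc - 1 this reduces to bvc = (2/3)/vnewc and
// p = bvc * e_old = (2/3) * e_old / vnewc, i.e. an ideal-gas pressure with
// (gamma - 1) = 2/3, which is then clipped by p_cut, the eosvmax test and pmin.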
Real_t c1s = Real_t(2.0)/Real_t(3.0);
Real_t p_temp = p_new;
bvc = c1s * (compression + Real_t(1.));
pbvc = c1s;
p_temp = bvc * e_old ;
if ( FABS(p_temp) < p_cut )
p_temp = Real_t(0.0) ;
if ( vnewc >= eosvmax ) /* impossible condition here? */
p_temp = Real_t(0.0) ;
if (p_temp < pmin)
p_temp = pmin ;
p_new = p_temp;
}/*}}}*/
static
__device__ __forceinline__
void CalcSoundSpeedForElems_device(Real_t& vnewc, Real_t rho0, Real_t &enewc,
Real_t &pnewc, Real_t &pbvc,
Real_t &bvc, Real_t ss4o3, Index_t nz,
Real_t *ss, Index_t iz)
{/*{{{*/
Real_t ssTmp = (pbvc * enewc + vnewc * vnewc *
bvc * pnewc) / rho0;
if (ssTmp <= Real_t(.1111111e-36)) {
ssTmp = Real_t(.3333333e-18);
}
else {
ssTmp = SQRT(ssTmp) ;
}
ss[iz] = ssTmp;
}/*}}}*/
static
__device__
__forceinline__
void ApplyMaterialPropertiesForElems_device(
Real_t& eosvmin, Real_t& eosvmax,
Real_t* vnew, Real_t *v,
Real_t& vnewc, Index_t* bad_vol, Index_t zn)
{/*{{{*/
vnewc = vnew[zn] ;
if (eosvmin != Real_t(0.)) {
if (vnewc < eosvmin)
vnewc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vnewc > eosvmax)
vnewc = eosvmax ;
}
// Now check for valid volume
Real_t vc = v[zn];
if (eosvmin != Real_t(0.)) {
if (vc < eosvmin)
vc = eosvmin ;
}
if (eosvmax != Real_t(0.)) {
if (vc > eosvmax)
vc = eosvmax ;
}
if (vc <= 0.) {
*bad_vol = zn;
}
}/*}}}*/
static
__device__
__forceinline__
void UpdateVolumesForElems_device(Index_t numElem, Real_t& v_cut,
Real_t *vnew,
Real_t *v,
int i)
{/*{{{*/
Real_t tmpV ;
tmpV = vnew[i] ;
if ( FABS(tmpV - Real_t(1.0)) < v_cut )
tmpV = Real_t(1.0) ;
v[i] = tmpV ;
}/*}}}*/
static
__device__
__forceinline__
void CalcEnergyForElems_device(Real_t& p_new, Real_t& e_new, Real_t& q_new,
Real_t& bvc, Real_t& pbvc,
Real_t& p_old, Real_t& e_old, Real_t& q_old,
Real_t& compression, Real_t& compHalfStep,
Real_t& vnewc, Real_t& work, Real_t& delvc, Real_t pmin,
Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
Real_t& qq, Real_t& ql,
Real_t& rho0,
Real_t& eosvmax,
Index_t length)
{/*{{{*/
const Real_t sixth = Real_t(1.0) / Real_t(6.0) ;
Real_t pHalfStep;
e_new = e_old - Real_t(0.5) * delvc * (p_old + q_old)
+ Real_t(0.5) * work;
if (e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
pmin, p_cut, eosvmax);
Real_t vhalf = Real_t(1.) / (Real_t(1.) + compHalfStep) ;
if ( delvc > Real_t(0.) ) {
q_new = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vhalf * vhalf * bvc * pHalfStep ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc =Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
}
e_new = e_new + Real_t(0.5) * delvc
* ( Real_t(3.0)*(p_old + q_old)
- Real_t(4.0)*(pHalfStep + q_new)) ;
e_new += Real_t(0.5) * work;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
Real_t q_tilde ;
if (delvc > Real_t(0.)) {
q_tilde = Real_t(0.) ;
}
else {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_tilde = (ssc*ql + qq) ;
}
e_new = e_new - ( Real_t(7.0)*(p_old + q_old)
- Real_t(8.0)*(pHalfStep + q_new)
+ (p_new + q_tilde)) * delvc*sixth ;
if (FABS(e_new) < e_cut) {
e_new = Real_t(0.) ;
}
if ( e_new < emin ) {
e_new = emin ;
}
CalcPressureForElems_device(p_new, bvc, pbvc, e_new, compression, vnewc,
pmin, p_cut, eosvmax);
if ( delvc <= Real_t(0.) ) {
Real_t ssc = ( pbvc * e_new
+ vnewc * vnewc * bvc * p_new ) / rho0 ;
if ( ssc <= Real_t(.1111111e-36) ) {
ssc = Real_t(.3333333e-18) ;
} else {
ssc = SQRT(ssc) ;
}
q_new = (ssc*ql + qq) ;
if (FABS(q_new) < q_cut) q_new = Real_t(0.) ;
}
return ;
}/*}}}*/
__device__ inline
Index_t giveMyRegion(const Index_t* regCSR,const Index_t i, const Index_t numReg)
{/*{{{*/
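// regCSR[reg] is the (exclusive) end offset of region reg in the sorted element
// list, so element i belongs to the first region whose offset exceeds i.
// Illustrative example with hypothetical values: regCSR = {10, 25, 40}, numReg = 3
// maps i = 4 -> region 0, i = 17 -> region 1, i = 30 (or anything larger) -> region 2.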
for(Index_t reg = 0; reg < numReg-1; reg++)
if(i < regCSR[reg])
return reg;
return (numReg-1);
}/*}}}*/
__global__
void ApplyMaterialPropertiesAndUpdateVolume_kernel
(/*{{{*/
Index_t length,
Real_t rho0,
Real_t e_cut,
Real_t emin,
Real_t* __restrict__ ql,
Real_t* __restrict__ qq,
Real_t* __restrict__ vnew,
Real_t* __restrict__ v,
Real_t pmin,
Real_t p_cut,
Real_t q_cut,
Real_t eosvmin,
Real_t eosvmax,
Index_t* __restrict__ regElemlist,
// const Index_t* __restrict__ regElemlist,
Real_t* __restrict__ e,
Real_t* __restrict__ delv,
Real_t* __restrict__ p,
Real_t* __restrict__ q,
Real_t ss4o3,
Real_t* __restrict__ ss,
Real_t v_cut,
Index_t* __restrict__ bad_vol,
const Int_t cost,
const Index_t* regCSR,
const Index_t* regReps,
const Index_t numReg
)/*}}}*/
{/*{{{*/
Real_t e_old, delvc, p_old, q_old, e_temp, delvc_temp, p_temp, q_temp;
Real_t compression, compHalfStep;
Real_t qq_old, ql_old, qq_temp, ql_temp, work;
Real_t p_new, e_new, q_new;
Real_t bvc, pbvc, vnewc;
Index_t i=blockDim.x*blockIdx.x + threadIdx.x;
if (i<length) {
Index_t zidx = regElemlist[i] ;
ApplyMaterialPropertiesForElems_device
(eosvmin,eosvmax,vnew,v,vnewc,bad_vol,zidx);
/********************** Start EvalEOSForElems **************************/
// Here we need to find out what region this element belongs to and what is the rep value!
Index_t region = giveMyRegion(regCSR,i,numReg);
Index_t rep = regReps[region];
e_temp = e[zidx];
p_temp = p[zidx];
q_temp = q[zidx];
qq_temp = qq[zidx] ;
ql_temp = ql[zidx] ;
delvc_temp = delv[zidx];
for(int r=0; r < rep; r++)
{
e_old = e_temp;
p_old = p_temp;
q_old = q_temp;
qq_old = qq_temp;
ql_old = ql_temp;
delvc = delvc_temp;
work = Real_t(0.);
Real_t vchalf ;
compression = Real_t(1.) / vnewc - Real_t(1.);
vchalf = vnewc - delvc * Real_t(.5);
compHalfStep = Real_t(1.) / vchalf - Real_t(1.);
if ( eosvmin != Real_t(0.) ) {
if (vnewc <= eosvmin) { /* impossible due to calling func? */
compHalfStep = compression ;
}
}
if ( eosvmax != Real_t(0.) ) {
if (vnewc >= eosvmax) { /* impossible due to calling func? */
p_old = Real_t(0.) ;
compression = Real_t(0.) ;
compHalfStep = Real_t(0.) ;
}
}
// qq_old = qq[zidx] ;
// ql_old = ql[zidx] ;
// work = Real_t(0.) ;
CalcEnergyForElems_device(p_new, e_new, q_new, bvc, pbvc,
p_old, e_old, q_old, compression, compHalfStep,
vnewc, work, delvc, pmin,
p_cut, e_cut, q_cut, emin,
qq_old, ql_old, rho0, eosvmax, length);
}//end for rep
p[zidx] = p_new ;
e[zidx] = e_new ;
q[zidx] = q_new ;
CalcSoundSpeedForElems_device
(vnewc,rho0,e_new,p_new,pbvc,bvc,ss4o3,length,ss,zidx);
/********************** End EvalEOSForElems **************************/
UpdateVolumesForElems_device(length,v_cut,vnew,v,zidx);
}
}/*}}}*/
static inline
void ApplyMaterialPropertiesAndUpdateVolume(Domain *domain)
{/*{{{*/
Index_t length = domain->numElem ;
if (length != 0) {
Index_t dimBlock = 128;
Index_t dimGrid = PAD_DIV(length,dimBlock);
ApplyMaterialPropertiesAndUpdateVolume_kernel<<<dimGrid,dimBlock>>>
(length,
domain->refdens,
domain->e_cut,
domain->emin,
domain->ql.raw(),
domain->qq.raw(),
domain->vnew->raw(),
domain->v.raw(),
domain->pmin,
domain->p_cut,
domain->q_cut,
domain->eosvmin,
domain->eosvmax,
domain->regElemlist.raw(),
domain->e.raw(),
domain->delv.raw(),
domain->p.raw(),
domain->q.raw(),
domain->ss4o3,
domain->ss.raw(),
domain->v_cut,
domain->bad_vol_h,
domain->cost,
domain->regCSR.raw(),
domain->regReps.raw(),
domain->numReg
);
//cudaDeviceSynchronize();
//cudaCheckError();
}
}/*}}}*/
static inline
void LagrangeElements(Domain *domain)
{/*{{{*/
int allElem = domain->numElem + /* local elem */
2*domain->sizeX*domain->sizeY + /* plane ghosts */
2*domain->sizeX*domain->sizeZ + /* row ghosts */
2*domain->sizeY*domain->sizeZ ; /* col ghosts */
domain->vnew = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dxx = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dyy = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->dzz = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_xi = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_eta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delx_zeta = Allocator< Vector_d<Real_t> >::allocate(domain->numElem);
domain->delv_xi = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_eta = Allocator< Vector_d<Real_t> >::allocate(allElem);
domain->delv_zeta = Allocator< Vector_d<Real_t> >::allocate(allElem);
#if USE_MPI
CommRecv(*domain, MSG_MONOQ, 3,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true) ;
#endif
/*********************************************/
/* Calc Kinematics and Monotonic Q Gradient */
/*********************************************/
CalcKinematicsAndMonotonicQGradient(domain);
#if USE_MPI
Domain_member fieldData[3] ;
// initialize pointers
domain->d_delv_xi = domain->delv_xi->raw();
domain->d_delv_eta = domain->delv_eta->raw();
domain->d_delv_zeta = domain->delv_zeta->raw();
fieldData[0] = &Domain::get_delv_xi ;
fieldData[1] = &Domain::get_delv_eta ;
fieldData[2] = &Domain::get_delv_zeta ;
CommSendGpu(*domain, MSG_MONOQ, 3, fieldData,
domain->sizeX, domain->sizeY, domain->sizeZ,
true, true, domain->streams[2]) ;
CommMonoQGpu(*domain, domain->streams[2]) ;
#endif
Allocator<Vector_d<Real_t> >::free(domain->dxx,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dyy,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->dzz,domain->numElem);
/**********************************
* Calc Monotonic Q Region
**********************************/
CalcMonotonicQRegionForElems(domain);
Allocator<Vector_d<Real_t> >::free(domain->delx_xi,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_eta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delx_zeta,domain->numElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_xi,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_eta,allElem);
Allocator<Vector_d<Real_t> >::free(domain->delv_zeta,allElem);
// printf("\n --Start of ApplyMaterials! \n");
ApplyMaterialPropertiesAndUpdateVolume(domain) ;
// printf("\n --End of ApplyMaterials! \n");
Allocator<Vector_d<Real_t> >::free(domain->vnew,domain->numElem);
}/*}}}*/
template<int block_size>
__global__
#ifdef DOUBLE_PRECISION
__launch_bounds__(128,16)
#else
__launch_bounds__(128,16)
#endif
void CalcTimeConstraintsForElems_kernel(
Index_t length,
Real_t qqc2,
Real_t dvovmax,
Index_t *matElemlist,
Real_t *ss,
Real_t *vdov,
Real_t *arealg,
Real_t *dev_mindtcourant,
Real_t *dev_mindthydro)
{/*{{{*/
int tid = threadIdx.x;
int i=blockDim.x*blockIdx.x + tid;
__shared__ volatile Real_t s_mindthydro[block_size];
__shared__ volatile Real_t s_mindtcourant[block_size];
Real_t mindthydro = Real_t(1.0e+20) ;
Real_t mindtcourant = Real_t(1.0e+20) ;
Real_t dthydro = mindthydro;
Real_t dtcourant = mindtcourant;
while (i<length) {
Index_t indx = matElemlist[i] ;
Real_t vdov_tmp = vdov[indx];
// Computing dt_hydro
if (vdov_tmp != Real_t(0.)) {
Real_t dtdvov = dvovmax / (FABS(vdov_tmp)+Real_t(1.e-20)) ;
if ( dthydro > dtdvov ) {
dthydro = dtdvov ;
}
}
if (dthydro < mindthydro)
mindthydro = dthydro;
// Computing dt_courant
Real_t ss_tmp = ss[indx];
Real_t area_tmp = arealg[indx];
Real_t dtf = ss_tmp * ss_tmp ;
dtf += ((vdov_tmp < 0.) ? qqc2*area_tmp*area_tmp*vdov_tmp*vdov_tmp : 0.);
dtf = area_tmp / SQRT(dtf) ;
/* determine minimum timestep with its corresponding elem */
if (vdov_tmp != Real_t(0.) && dtf < dtcourant) {
dtcourant = dtf ;
}
if (dtcourant< mindtcourant)
mindtcourant= dtcourant;
i += gridDim.x*blockDim.x;
}
s_mindthydro[tid] = mindthydro;
s_mindtcourant[tid] = mindtcourant;
__syncthreads();
// Do shared memory reduction
if (block_size >= 1024) {
if (tid < 512) {
s_mindthydro[tid] = min( s_mindthydro[tid] , s_mindthydro[tid + 512]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 512]) ; }
__syncthreads(); }
if (block_size >= 512) {
if (tid < 256) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 256]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 256]) ; }
__syncthreads(); }
if (block_size >= 256) {
if (tid < 128) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 128]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 128]) ; }
__syncthreads(); }
if (block_size >= 128) {
if (tid < 64) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 64]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 64]) ; }
__syncthreads(); }
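// The remaining reduction steps execute within a single warp and rely on the
// volatile qualifier on the shared arrays plus implicit warp-synchronous execution
// instead of __syncthreads(); note that on architectures with independent thread
// scheduling (Volta and later) this classic pattern strictly needs __syncwarp()
// between steps to remain safe.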
if (tid < 32) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 32]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 32]) ;
}
if (tid < 16) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 16]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 16]) ;
}
if (tid < 8) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 8]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 8]) ;
}
if (tid < 4) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 4]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 4]) ;
}
if (tid < 2) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 2]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 2]) ;
}
if (tid < 1) {
s_mindthydro[tid] = min( s_mindthydro[tid], s_mindthydro[tid + 1]) ;
s_mindtcourant[tid] = min( s_mindtcourant[tid], s_mindtcourant[tid + 1]) ;
}
// Store in global memory
if (tid==0) {
dev_mindtcourant[blockIdx.x] = s_mindtcourant[0];
dev_mindthydro[blockIdx.x] = s_mindthydro[0];
}
}/*}}}*/
template <int block_size>
__global__
void CalcMinDtOneBlock(Real_t* dev_mindthydro, Real_t* dev_mindtcourant, Real_t* dtcourant, Real_t* dthydro, Index_t shared_array_size)
{/*{{{*/
volatile __shared__ Real_t s_data[block_size];
int tid = threadIdx.x;
if (blockIdx.x==0)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindtcourant[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dtcourant)= s_data[0];
}
}
else if (blockIdx.x==1)
{
if (tid < shared_array_size)
s_data[tid] = dev_mindthydro[tid];
else
s_data[tid] = 1.0e20;
__syncthreads();
if (block_size >= 1024) { if (tid < 512) { s_data[tid] = min(s_data[tid],s_data[tid + 512]); } __syncthreads(); }
if (block_size >= 512) { if (tid < 256) { s_data[tid] = min(s_data[tid],s_data[tid + 256]); } __syncthreads(); }
if (block_size >= 256) { if (tid < 128) { s_data[tid] = min(s_data[tid],s_data[tid + 128]); } __syncthreads(); }
if (block_size >= 128) { if (tid < 64) { s_data[tid] = min(s_data[tid],s_data[tid + 64]); } __syncthreads(); }
if (tid < 32) { s_data[tid] = min(s_data[tid],s_data[tid + 32]); }
if (tid < 16) { s_data[tid] = min(s_data[tid],s_data[tid + 16]); }
if (tid < 8) { s_data[tid] = min(s_data[tid],s_data[tid + 8]); }
if (tid < 4) { s_data[tid] = min(s_data[tid],s_data[tid + 4]); }
if (tid < 2) { s_data[tid] = min(s_data[tid],s_data[tid + 2]); }
if (tid < 1) { s_data[tid] = min(s_data[tid],s_data[tid + 1]); }
if (tid<1)
{
*(dthydro) = s_data[0];
}
}
}/*}}}*/
static inline
void CalcTimeConstraintsForElems(Domain* domain)
{/*{{{*/
Real_t qqc = domain->qqc;
Real_t qqc2 = Real_t(64.0) * qqc * qqc ;
Real_t dvovmax = domain->dvovmax ;
const Index_t length = domain->numElem;
const int max_dimGrid = 1024;
const int dimBlock = 128;
int dimGrid=std::min(max_dimGrid,PAD_DIV(length,dimBlock));
cudaFuncSetCacheConfig(CalcTimeConstraintsForElems_kernel<dimBlock>, cudaFuncCachePreferShared);
Vector_d<Real_t>* dev_mindtcourant= Allocator< Vector_d<Real_t> >::allocate(dimGrid);
Vector_d<Real_t>* dev_mindthydro = Allocator< Vector_d<Real_t> >::allocate(dimGrid);
CalcTimeConstraintsForElems_kernel<dimBlock> <<<dimGrid,dimBlock>>>
(length,qqc2,dvovmax,
domain->matElemlist.raw(),domain->ss.raw(),domain->vdov.raw(),domain->arealg.raw(),
dev_mindtcourant->raw(),dev_mindthydro->raw());
// TODO: if dimGrid < 1024, should launch fewer threads
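// CalcMinDtOneBlock is deliberately launched with exactly two blocks: block 0
// reduces the per-block courant minima, block 1 the hydro minima. Only the first
// dimGrid entries of each scratch array are meaningful; the kernel pads the rest
// of its shared buffer with 1.0e20 before reducing.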
CalcMinDtOneBlock<max_dimGrid> <<<2,max_dimGrid, max_dimGrid*sizeof(Real_t), domain->streams[1]>>>(dev_mindthydro->raw(),dev_mindtcourant->raw(),domain->dtcourant_h,domain->dthydro_h, dimGrid);
cudaEventRecord(domain->time_constraint_computed,domain->streams[1]);
Allocator<Vector_d<Real_t> >::free(dev_mindtcourant,dimGrid);
Allocator<Vector_d<Real_t> >::free(dev_mindthydro,dimGrid);
}/*}}}*/
static inline
void LagrangeLeapFrog(Domain* domain)
{/*{{{*/
/* calculate nodal forces, accelerations, velocities, positions, with
* applied boundary conditions and slide surface considerations */
LagrangeNodal(domain);
/* calculate element quantities (i.e. velocity gradient & q), and update
* material states */
LagrangeElements(domain);
CalcTimeConstraintsForElems(domain);
}/*}}}*/
void printUsage(char* argv[])
{/*{{{*/
printf("Usage: \n");
printf("Unstructured grid: %s -u <file.lmesh> \n", argv[0]) ;
printf("Structured grid: %s -s numEdgeElems \n", argv[0]) ;
printf("\nExamples:\n") ;
printf("%s -s 45\n", argv[0]) ;
printf("%s -u sedov15oct.lmesh\n", argv[0]) ;
}/*}}}*/
#ifdef SAMI
#ifdef __cplusplus
extern "C" {
#endif
#include "silo.h"
#ifdef __cplusplus
}
#endif
#define MAX_LEN_SAMI_HEADER 10
#define SAMI_HDR_NUMBRICK 0
#define SAMI_HDR_NUMNODES 3
#define SAMI_HDR_NUMMATERIAL 4
#define SAMI_HDR_INDEX_START 6
#define SAMI_HDR_MESHDIM 7
#define MAX_ADJACENCY 14 /* must be 14 or greater */
void DumpSAMI(Domain *domain, char *name)
{/*{{{*/
DBfile *fp ;
int headerLen = MAX_LEN_SAMI_HEADER ;
int headerInfo[MAX_LEN_SAMI_HEADER];
char varName[] = "brick_nd0";
char coordName[] = "x";
int version = 121 ;
int numElem = int(domain->numElem) ;
int numNode = int(domain->numNode) ;
int count ;
int *materialID ;
int *nodeConnect ;
double *nodeCoord ;
if ((fp = DBCreate(name, DB_CLOBBER, DB_LOCAL,
NULL, DB_PDB)) == NULL)
{
printf("Couldn't create file %s\n", name) ;
exit(1);
}
for (int i=0; i<MAX_LEN_SAMI_HEADER; ++i) {
headerInfo[i] = 0 ;
}
headerInfo[SAMI_HDR_NUMBRICK] = numElem ;
headerInfo[SAMI_HDR_NUMNODES] = numNode ;
headerInfo[SAMI_HDR_NUMMATERIAL] = 1 ;
headerInfo[SAMI_HDR_INDEX_START] = 1 ;
headerInfo[SAMI_HDR_MESHDIM] = 3 ;
DBWrite(fp, "mesh_data", headerInfo, &headerLen, 1, DB_INT) ;
count = 1 ;
DBWrite(fp, "version", &version, &count, 1, DB_INT) ;
nodeConnect = new int[numElem] ;
Vector_h<Index_t> nodelist_h = domain->nodelist;
for (Index_t i=0; i<8; ++i)
{
for (Index_t j=0; j<numElem; ++j) {
nodeConnect[j] = int(nodelist_h[i*domain->padded_numElem + j]) + 1 ;
}
varName[8] = '0' + i;
DBWrite(fp, varName, nodeConnect, &numElem, 1, DB_INT) ;
}
delete [] nodeConnect ;
nodeCoord = new double[numNode] ;
Vector_h<Real_t> x_h = domain->x;
Vector_h<Real_t> y_h = domain->y;
Vector_h<Real_t> z_h = domain->z;
for (Index_t i=0; i<3; ++i)
{
for (Index_t j=0; j<numNode; ++j) {
Real_t coordVal ;
switch(i) {
case 0: coordVal = double(x_h[j]) ; break ;
case 1: coordVal = double(y_h[j]) ; break ;
case 2: coordVal = double(z_h[j]) ; break ;
}
nodeCoord[j] = coordVal ;
}
coordName[0] = 'x' + i ;
DBWrite(fp, coordName, nodeCoord, &numNode, 1, DB_DOUBLE) ;
}
delete [] nodeCoord ;
materialID = new int[numElem] ;
for (Index_t i=0; i<numElem; ++i)
materialID[i] = 1 ;
DBWrite(fp, "brick_material", materialID, &numElem, 1, DB_INT) ;
delete [] materialID ;
DBClose(fp);
}/*}}}*/
#endif
#ifdef SAMI
void DumpDomain(Domain *domain)
{/*{{{*/
char meshName[64] ;
printf("Dumping SAMI file\n");
sprintf(meshName, "sedov_%d.sami", int(domain->cycle)) ;
DumpSAMI(domain, meshName) ;
}/*}}}*/
#endif
void write_solution(Domain* locDom)
{/*{{{*/
Vector_h<Real_t> x_h = locDom->x;
Vector_h<Real_t> y_h = locDom->y;
Vector_h<Real_t> z_h = locDom->z;
// printf("Writing solution to file xyz.asc\n");
std::stringstream filename;
filename << "xyz.asc";
FILE *fout = fopen(filename.str().c_str(),"wb");
for (Index_t i=0; i<locDom->numNode; i++) {
fprintf(fout,"%10d\n",i);
fprintf(fout,"%.10f\n",x_h[i]);
fprintf(fout,"%.10f\n",y_h[i]);
fprintf(fout,"%.10f\n",z_h[i]);
}
fclose(fout);
}/*}}}*/
///////////////////////////////////////////////////////////////////////////
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side)
{/*{{{*/
Int_t testProcs;
Int_t dx, dy, dz;
Int_t myDom;
// Assume cube processor layout for now
testProcs = Int_t(cbrt(Real_t(numRanks))+0.5) ;
if (testProcs*testProcs*testProcs != numRanks) {
printf("Num processors must be a cube of an integer (1, 8, 27, ...)\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (sizeof(Real_t) != 4 && sizeof(Real_t) != 8) {
printf("MPI operations only support float and double right now...\n");
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
if (MAX_FIELDS_PER_MPI_COMM > CACHE_COHERENCE_PAD_REAL) {
printf("corner element comm buffers too small. Fix code.\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
dx = testProcs ;
dy = testProcs ;
dz = testProcs ;
// temporary test
if (dx*dy*dz != numRanks) {
printf("error -- must have as many domains as procs\n") ;
#if USE_MPI
MPI_Abort(MPI_COMM_WORLD, -1) ;
#else
exit(-1);
#endif
}
Int_t remainder = dx*dy*dz % numRanks ;
if (myRank < remainder) {
myDom = myRank*( 1+ (dx*dy*dz / numRanks)) ;
}
else {
myDom = remainder*( 1+ (dx*dy*dz / numRanks)) +
(myRank - remainder)*(dx*dy*dz/numRanks) ;
}
*col = myDom % dx ;
*row = (myDom / dx) % dy ;
*plane = myDom / (dx*dy) ;
*side = testProcs;
return;
}/*}}}*/
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t its,
Int_t nx,
Int_t numRanks)
{/*{{{*/
size_t free_mem, total_mem, used_mem;
cudaMemGetInfo(&free_mem, &total_mem);
used_mem= total_mem - free_mem;
#if LULESH_SHOW_PROGRESS == 0
printf(" Used Memory = %8.4f Mb\n", used_mem / (1024.*1024.) );
#endif
// GrindTime1 only takes a single domain into account, and is thus a good way to measure
// processor speed independent of MPI parallelism.
// GrindTime2 takes into account speedups from MPI parallelism
Real_t grindTime1 = ((elapsed_time*1e6)/its)/(nx*nx*nx);
Real_t grindTime2 = ((elapsed_time*1e6)/its)/(nx*nx*nx*numRanks);
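// Illustrative arithmetic with hypothetical numbers: elapsed_time = 10 s,
// its = 1000, nx = 45, numRanks = 8 gives grindTime1 = (10e6/1000)/45^3
// ~ 0.110 us per zone-cycle, grindTime2 ~ 0.0137, and FOM = 1000/grindTime2 ~ 7.3e4.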
// Copy Energy back to Host
Real_t e_zero;
Real_t* d_ezero_ptr = locDom.e.raw() + locDom.octantCorner; /* octant corner supposed to be 0 */
cudaMemcpy(&e_zero, d_ezero_ptr, sizeof(Real_t), cudaMemcpyDeviceToHost);
printf("Run completed: \n");
printf(" Problem size = %i \n", nx);
printf(" MPI tasks = %i \n", numRanks);
printf(" Iteration count = %i \n", its);
printf(" Final Origin Energy = %12.6e \n", e_zero);
Real_t MaxAbsDiff = Real_t(0.0);
Real_t TotalAbsDiff = Real_t(0.0);
Real_t MaxRelDiff = Real_t(0.0);
Real_t *e_all = new Real_t[nx * nx];
cudaMemcpy(e_all, locDom.e.raw(), nx * nx * sizeof(Real_t), cudaMemcpyDeviceToHost);
for (Index_t j=0; j<nx; ++j) {
for (Index_t k=j+1; k<nx; ++k) {
Real_t AbsDiff = FABS(e_all[j*nx+k]-e_all[k*nx+j]);
TotalAbsDiff += AbsDiff;
if (MaxAbsDiff <AbsDiff) MaxAbsDiff = AbsDiff;
Real_t RelDiff = AbsDiff / e_all[k*nx+j];
if (MaxRelDiff <RelDiff) MaxRelDiff = RelDiff;
}
}
delete [] e_all; // array delete to match the new Real_t[nx*nx] allocation
// Quick symmetry check
printf(" Testing Plane 0 of Energy Array on rank 0:\n");
printf(" MaxAbsDiff = %12.6e\n", MaxAbsDiff );
printf(" TotalAbsDiff = %12.6e\n", TotalAbsDiff );
printf(" MaxRelDiff = %12.6e\n\n", MaxRelDiff );
// Timing information
printf("\nElapsed time = %10.2f (s)\n", elapsed_time);
printf("Grind time (us/z/c) = %10.8g (per dom) (%10.8g overall)\n", grindTime1, grindTime2);
printf("FOM = %10.8g (z/s)\n\n", 1000.0/grindTime2); // zones per second
bool write_solution_flag=true;
if (write_solution_flag) {
write_solution(&locDom);
}
return ;
}/*}}}*/
|
76e6c0ce950258354918d7ff18bf57f792f4cb23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_mat_ref_image.h"
#include "cudacc.h"
#include <iostream>
namespace colmap {
namespace mvs {
namespace {
//cudaReadModeNormalizedFloat: texels are converted to normalized float on read
//cudaReadModeElementType: texels are returned with their element type unchanged
texture<uint8_t, hipTextureType2D, hipReadModeNormalizedFloat> image_texture;
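// Because the texture is bound with the normalized-float read mode, tex2D returns
// intensities scaled to [0,1]; FilterKernel below therefore rescales the center
// value by 255 before writing it back via image.Set().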
template <int kWindowRadius>
__global__ void FilterKernel(GpuMat<uint8_t> image, GpuMat<float> sum_image,
GpuMat<float> squared_sum_image,
const float sigma_spatial,
const float sigma_color)
{
const size_t row = blockDim.y * blockIdx.y + threadIdx.y;
const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= image.GetHeight() || col >= image.GetWidth())
{
return;
}
const float center_color = tex2D(image_texture, col, row);
float sum = 0.0f;
float squared_sum = 0.0f;
float bilateral_weight_sum = 0.0f;
for (int win_col = -kWindowRadius; win_col <= kWindowRadius; win_col++) {
float sum_row = 0.0f;
float squared_sum_row = 0.0f;
float bilateral_weight_sum_row = 0.0f;
for (int win_row = -kWindowRadius; win_row <= kWindowRadius; win_row++) {
const float color = tex2D(image_texture, col + win_col, row + win_row);
const float bilateral_weight =
ComputeBilateralWeight(0.0f, 0.0f, win_col, win_row, center_color,
color, sigma_spatial, sigma_color);
sum_row += bilateral_weight * color;
squared_sum_row += bilateral_weight * color * color;
bilateral_weight_sum_row += bilateral_weight;
}
sum += sum_row;
squared_sum += squared_sum_row;
bilateral_weight_sum += bilateral_weight_sum_row;
}
sum /= bilateral_weight_sum;
squared_sum /= bilateral_weight_sum;
image.Set(row, col, static_cast<uint8_t>(255.0f * center_color));
sum_image.Set(row, col, sum);
squared_sum_image.Set(row, col, squared_sum);
}
} // namespace
GpuMatRefImage::GpuMatRefImage(const size_t width, const size_t height)
: height_(height), width_(width)
{
image.reset(new GpuMat<uint8_t>(width, height));
sum_image.reset(new GpuMat<float>(width, height));
squared_sum_image.reset(new GpuMat<float>(width, height));
}
void GpuMatRefImage::Filter(const uint8_t* image_data,
const size_t window_radius,
const float sigma_spatial,
const float sigma_color)
{
#define CALL_FILTER_FUNC(window_radius) \
case window_radius: \
Filter<window_radius>(image_data, sigma_spatial, sigma_color); \
break;
switch (window_radius) {
CALL_FILTER_FUNC(1)
CALL_FILTER_FUNC(2)
CALL_FILTER_FUNC(3)
CALL_FILTER_FUNC(4)
CALL_FILTER_FUNC(5)
CALL_FILTER_FUNC(6)
CALL_FILTER_FUNC(7)
CALL_FILTER_FUNC(8)
CALL_FILTER_FUNC(9)
CALL_FILTER_FUNC(10)
CALL_FILTER_FUNC(11)
CALL_FILTER_FUNC(12)
CALL_FILTER_FUNC(13)
CALL_FILTER_FUNC(14)
CALL_FILTER_FUNC(15)
CALL_FILTER_FUNC(16)
CALL_FILTER_FUNC(17)
CALL_FILTER_FUNC(18)
CALL_FILTER_FUNC(19)
CALL_FILTER_FUNC(20)
CALL_FILTER_FUNC(21)
CALL_FILTER_FUNC(22)
CALL_FILTER_FUNC(23)
CALL_FILTER_FUNC(24)
CALL_FILTER_FUNC(25)
CALL_FILTER_FUNC(26)
CALL_FILTER_FUNC(27)
CALL_FILTER_FUNC(28)
CALL_FILTER_FUNC(29)
CALL_FILTER_FUNC(30)
default:
std::cerr << "Error: Window size not supported" << std::endl;
exit(EXIT_FAILURE);
break;
}
#undef CALL_FILTER_FUNC
}
template <int kWindowRadius>
void GpuMatRefImage::Filter(const uint8_t* image_data,
const float sigma_spatial,
const float sigma_color)
{
CudaArrayWrapper<uint8_t> image_array(width_, height_, 1);
image_array.CopyToDevice(image_data);
image_texture.addressMode[0] = hipAddressModeBorder;
image_texture.addressMode[1] = hipAddressModeBorder;
image_texture.addressMode[2] = hipAddressModeBorder;
image_texture.filterMode = hipFilterModePoint; // nearest-neighbor point sampling
image_texture.normalized = false;
const dim3 block_size(kBlockDimX, kBlockDimY);
const dim3 grid_size((width_ - 1) / block_size.x + 1,
(height_ - 1) / block_size.y + 1);
CUDA_SAFE_CALL(hipBindTextureToArray(image_texture, image_array.GetPtr()));
FilterKernel<kWindowRadius> << <grid_size, block_size >> > (
*image, *sum_image, *squared_sum_image, sigma_spatial, sigma_color);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(hipUnbindTexture(image_texture));
}
} // namespace mvs
} // namespace colmap
| 76e6c0ce950258354918d7ff18bf57f792f4cb23.cu | #include "gpu_mat_ref_image.h"
#include "cudacc.h"
#include <iostream>
namespace colmap {
namespace mvs {
namespace {
//cudaReadModeNormalizedFloat performs type conversion (texels are returned as normalized floats)
//cudaReadModeElementType does not change the type of the returned value
texture<uint8_t, cudaTextureType2D, cudaReadModeNormalizedFloat> image_texture;
template <int kWindowRadius>
__global__ void FilterKernel(GpuMat<uint8_t> image, GpuMat<float> sum_image,
GpuMat<float> squared_sum_image,
const float sigma_spatial,
const float sigma_color)
{
const size_t row = blockDim.y * blockIdx.y + threadIdx.y;
const size_t col = blockDim.x * blockIdx.x + threadIdx.x;
if (row >= image.GetHeight() || col >= image.GetWidth())
{
return;
}
const float center_color = tex2D(image_texture, col, row);
float sum = 0.0f;
float squared_sum = 0.0f;
float bilateral_weight_sum = 0.0f;
for (int win_col = -kWindowRadius; win_col <= kWindowRadius; win_col++) {
float sum_row = 0.0f;
float squared_sum_row = 0.0f;
float bilateral_weight_sum_row = 0.0f;
for (int win_row = -kWindowRadius; win_row <= kWindowRadius; win_row++) {
const float color = tex2D(image_texture, col + win_col, row + win_row);
const float bilateral_weight =
ComputeBilateralWeight(0.0f, 0.0f, win_col, win_row, center_color,
color, sigma_spatial, sigma_color);
sum_row += bilateral_weight * color;
squared_sum_row += bilateral_weight * color * color;
bilateral_weight_sum_row += bilateral_weight;
}
sum += sum_row;
squared_sum += squared_sum_row;
bilateral_weight_sum += bilateral_weight_sum_row;
}
sum /= bilateral_weight_sum;
squared_sum /= bilateral_weight_sum;
image.Set(row, col, static_cast<uint8_t>(255.0f * center_color));
sum_image.Set(row, col, sum);
squared_sum_image.Set(row, col, squared_sum);
}
} // namespace
GpuMatRefImage::GpuMatRefImage(const size_t width, const size_t height)
: height_(height), width_(width)
{
image.reset(new GpuMat<uint8_t>(width, height));
sum_image.reset(new GpuMat<float>(width, height));
squared_sum_image.reset(new GpuMat<float>(width, height));
}
void GpuMatRefImage::Filter(const uint8_t* image_data,
const size_t window_radius,
const float sigma_spatial,
const float sigma_color)
{
#define CALL_FILTER_FUNC(window_radius) \
case window_radius: \
Filter<window_radius>(image_data, sigma_spatial, sigma_color); \
break;
switch (window_radius) {
CALL_FILTER_FUNC(1)
CALL_FILTER_FUNC(2)
CALL_FILTER_FUNC(3)
CALL_FILTER_FUNC(4)
CALL_FILTER_FUNC(5)
CALL_FILTER_FUNC(6)
CALL_FILTER_FUNC(7)
CALL_FILTER_FUNC(8)
CALL_FILTER_FUNC(9)
CALL_FILTER_FUNC(10)
CALL_FILTER_FUNC(11)
CALL_FILTER_FUNC(12)
CALL_FILTER_FUNC(13)
CALL_FILTER_FUNC(14)
CALL_FILTER_FUNC(15)
CALL_FILTER_FUNC(16)
CALL_FILTER_FUNC(17)
CALL_FILTER_FUNC(18)
CALL_FILTER_FUNC(19)
CALL_FILTER_FUNC(20)
CALL_FILTER_FUNC(21)
CALL_FILTER_FUNC(22)
CALL_FILTER_FUNC(23)
CALL_FILTER_FUNC(24)
CALL_FILTER_FUNC(25)
CALL_FILTER_FUNC(26)
CALL_FILTER_FUNC(27)
CALL_FILTER_FUNC(28)
CALL_FILTER_FUNC(29)
CALL_FILTER_FUNC(30)
default:
std::cerr << "Error: Window size not supported" << std::endl;
exit(EXIT_FAILURE);
break;
}
#undef CALL_FILTER_FUNC
}
template <int kWindowRadius>
void GpuMatRefImage::Filter(const uint8_t* image_data,
const float sigma_spatial,
const float sigma_color)
{
CudaArrayWrapper<uint8_t> image_array(width_, height_, 1);
image_array.CopyToDevice(image_data);
image_texture.addressMode[0] = cudaAddressModeBorder;
image_texture.addressMode[1] = cudaAddressModeBorder;
image_texture.addressMode[2] = cudaAddressModeBorder;
image_texture.filterMode = cudaFilterModePoint; // nearest-neighbor point sampling
image_texture.normalized = false;
const dim3 block_size(kBlockDimX, kBlockDimY);
const dim3 grid_size((width_ - 1) / block_size.x + 1,
(height_ - 1) / block_size.y + 1);
CUDA_SAFE_CALL(cudaBindTextureToArray(image_texture, image_array.GetPtr()));
FilterKernel<kWindowRadius> << <grid_size, block_size >> > (
*image, *sum_image, *squared_sum_image, sigma_spatial, sigma_color);
CUDA_CHECK_ERROR();
CUDA_SAFE_CALL(cudaUnbindTexture(image_texture));
}
} // namespace mvs
} // namespace colmap
|
0297361f390ff95e74cf24fb50a737c4717860da.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=64 --gridDim=1 --no-inline
#include "hip/hip_runtime.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
}
| 0297361f390ff95e74cf24fb50a737c4717860da.cu | //pass
//--blockDim=64 --gridDim=1 --no-inline
#include "cuda.h"
__global__ void foo(float* A) {
A[threadIdx.x == 0 ? 1 : 2*threadIdx.x] = 2.4f;
}
|
3aa7529162622da100991f93b928cf468b47dc86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defs.h"
#include "cuda_defs.h"
#ifdef PPM
#define SLOPETHETA 2.
#else
#define SLOPETHETA 1.
#endif
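/*
* Note (added): slope_limiter returns the limiter factor
* phi(r) = max(0, min(SLOPETHETA * r, (1 + r) / 2, SLOPETHETA)) with r = slopeL / slopeR,
* i.e. a monotonized-central style limiter with theta = 1 for PLM and theta = 2 for PPM;
* the returned factor multiplies slopeR in the reconstructions below, and the early
* return of SLOPETHETA guards against division by a (near-)zero right slope. This
* reading is inferred from the code, not from upstream documentation.
*/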
__device__ __inline__ real slope_limiter(real slopeL, real slopeR) {
if (fabs(slopeR) < PRESSUREFLOOR) return SLOPETHETA;
real r = slopeL/slopeR;
return MAX2(0., MIN3(SLOPETHETA*r, .5*(1+r),SLOPETHETA));
}
#ifdef PCM
__global__ void reconstruct(real *cons, real *UL, real *UR, real *dx,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* Piecewise constant reconstruction
*/
int i,j,k,n,indx,indxm,indxp;
int il, iu, jl, ju, kl, ku;
real dL, uL, uL2,uL3,pL,eL,sL;
real dC,uC,uC2,uC3,pC,eC,sC;
real dR,uR,uR2,uR3,pR,eR,sR;
real dLi,dCi,dRi, dLf, uLf, dRf, uRf;
real dxm, dxc, dxp;
real slopeL,slopeR,ke,r, fl,fr;
real dtdx;
int dir2, dir3;
/* 1->2->3
* 2->3->1
* 3->1->2
*/
dir2 = (dir1)%3 + 1;
dir3 = (dir2)%3 + 1;
if (dir1 == 1) {
il = -2; iu = nx1+2;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -2; ju = nx2+2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -2; ku = nx3 + 2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
indxm = indx - 1 ; // (i-1,j,k)
}
else if (dir1 == 2) {
indxm = indx - size_x1;
}
else {
indxm = indx - size_x12;
}
for(n=0;n<nf;n++) {
UL[indx + n*ntot] = cons[indx + n*ntot];
UR[indxm + n*ntot] = cons[indx + n*ntot];
}
}
}
return;
}
#endif
#ifdef PLM
__global__ void reconstruct(real *cons, real *UL, real *UR, real *dx,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* Piecewise linear reconstruction
*/
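/*
* Note (added): limited slopes build left/right face states which are then advanced by
* dt/2 with their own fluxes (a MUSCL-Hancock style predictor, as read from the code)
* before being written to UR/UL for the downstream flux computation.
*/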
int i,j,k,n,indx,indxm,indxp;
int il, iu, jl, ju, kl, ku;
real dL, uL, uL2,uL3,pL,eL,sL;
real dC,uC,uC2,uC3,pC,eC,sC;
real dR,uR,uR2,uR3,pR,eR,sR;
real dLi,dCi,dRi, dLf, uLf, dRf, uRf;
real dxm, dxc, dxp;
real slopeL,slopeR,ke,r, fl,fr;
real dtdx;
int dir2, dir3;
/* 1->2->3
* 2->3->1
* 3->1->2
*/
dir2 = (dir1)%3 + 1;
dir3 = (dir2)%3 + 1;
if (dir1 == 1) {
il = -2; iu = nx1+2;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -2; ju = nx2+2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -2; ku = nx3 + 2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
indxm = indx - 1 ; // (i-1,j,k)
indxp = indx + 1 ; // (i+1,j,k)
dxm = dx[i-1];
dxc = dx[i];
dxp = dx[i+1];
}
else if (dir1 == 2) {
indxm = indx - size_x1;
indxp = indx + size_x1;
dxm = dx[j-1];
dxc = dx[j];
dxp = dx[j+1];
}
else {
indxm = indx - size_x12;
indxp = indx + size_x12;
dxm = dx[k-1];
dxc = dx[k];
dxp = dx[k+1];
}
dtdx = .5*dt/dxc;
dL = cons[indxm + 0*ntot];
dLi = dL;
uL = cons[indxm + dir1*ntot]/dL;
uL2 = cons[indxm + dir2*ntot]/dL;
uL3 = cons[indxm + dir3*ntot]/dL;
eL = cons[indxm + 4*ntot];
pL = (eL-.5*dL*(uL*uL+uL2*uL2+uL3*uL3))*g1;
if (dL < PRESSUREFLOOR) dL = PRESSUREFLOOR;
if (pL < PRESSUREFLOOR) pL = PRESSUREFLOOR;
dC = cons[indx + 0*ntot];
dCi = dC;
uC = cons[indx + dir1*ntot]/dC;
uC2 = cons[indx + dir2*ntot]/dC;
uC3 = cons[indx + dir3*ntot]/dC;
eC = cons[indx + 4*ntot];
pC = (eC-.5*dC*(uC*uC+uC2*uC2+uC3*uC3))*g1;
if (dC < PRESSUREFLOOR) dC = PRESSUREFLOOR;
if (pC < PRESSUREFLOOR) pC = PRESSUREFLOOR;
dR = cons[indxp + 0*ntot];
dRi = dR;
uR = cons[indxp + dir1*ntot]/dR;
uR2 = cons[indxp + dir2*ntot]/dR;
uR3 = cons[indxp + dir3*ntot]/dR;
eR = cons[indxp + 4*ntot];
pR = (eR-.5*dR*(uR*uR+uR2*uR2+uR3*uR3))*g1;
if (dR < PRESSUREFLOOR) dR = PRESSUREFLOOR;
if (pR < PRESSUREFLOOR) pR = PRESSUREFLOOR;
/* Density */
slopeR = (dR-dC) * 2./(dxc + dxp);
slopeL = (dC-dL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
dL = dC - .5*dxc * r * slopeR;
dR = dC + .5*dxc * r * slopeR;
if (dL < PRESSUREFLOOR) dL = PRESSUREFLOOR;
if (dR < PRESSUREFLOOR) dR = PRESSUREFLOOR;
dLf = dL;
dRf = dR;
/* ux1 */
slopeR = (uR-uC) * 2./(dxc + dxp);
slopeL = (uC-uL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL = uC - .5*dxc * r * slopeR;
uR = uC + .5*dxc * r * slopeR;
uLf = uL;
uRf = uR;
/* ux2 */
slopeR = (uR2-uC2) * 2./(dxc + dxp);
slopeL = (uC2-uL2) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL2 = uC2 - .5*dxc * r * slopeR;
uR2 = uC2 + .5*dxc * r * slopeR;
/* ux3 */
slopeR = (uR3-uC3) * 2./(dxc + dxp);
slopeL = (uC3-uL3) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL3 = uC3 - .5*dxc * r * slopeR;
uR3 = uC3 + .5*dxc * r * slopeR;
/* pres */
slopeR = (pR-pC) * 2./(dxc + dxp);
slopeL = (pC-pL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
pL = pC - .5*dxc * r * slopeR;
pR = pC + .5*dxc * r * slopeR;
if (pL < PRESSUREFLOOR) pL = PRESSUREFLOOR;
if (pR < PRESSUREFLOOR) pR = PRESSUREFLOOR;
/* Evolve cons for dt/2 */
ke = .5*dL*(uL*uL + uL2*uL2 + uL3*uL3);
uL *= dL;
uL2 *= dL;
uL3 *= dL;
eL = (pL/g1 + ke);
ke = .5*dR*(uR*uR + uR2*uR2 + uR3*uR3);
uR *= dR;
uR2 *= dR;
uR3 *= dR;
eR = (pR/g1 + ke);
/* Density */
fl = dLf*uLf; fr = dRf*uRf;
dL += dtdx*(fl-fr);
dR += dtdx*(fl-fr);
UL[indx + 0*ntot] = dR;
UR[indxm + 0*ntot] = dL;
/* MX1 */
fl = uLf*uL + pL; fr = uRf*uR + pR;
uL += dtdx*(fl-fr);
uR += dtdx*(fl-fr);
UL[indx + dir1*ntot] = uR;
UR[indxm + dir1*ntot] = uL;
/* MX2 */
fl = uLf*uL2; fr = uRf*uR2;
uL2 += dtdx*(fl-fr);
uR2 += dtdx*(fl-fr);
UL[indx + dir2*ntot] = uR2;
UR[indxm + dir2*ntot] = uL2;
/* MX3 */
fl = uLf*uL3; fr = uRf*uR3;
uL3 += dtdx*(fl-fr);
uR3 += dtdx*(fl-fr);
UL[indx + dir3*ntot] = uR3;
UR[indxm + dir3*ntot] = uL3;
/* Energy */
fl = uLf*(eL + pL); fr = uRf*(eR+pR);
eL += dtdx*(fl-fr);
eR += dtdx*(fl-fr);
UL[indx + 4*ntot] = eR;
UR[indxm + 4*ntot] = eL;
for(n=5;n<nf;n++) {
sR = cons[indxp + n*ntot]/dRi;
sL = cons[indxm + n*ntot]/dLi;
sC = cons[indx + n*ntot]/dCi;
slopeR = (sR-sC) * 2./(dxc + dxp);
slopeL = (sC-sL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
sL = sC - .5*dxc * r * slopeR;
sR = sC + .5*dxc * r * slopeR;
sL *= dLf;
sR *= dRf;
fl = sL*uLf;
fr = sR*uRf;
sL += dtdx*(fl-fr);
sR += dtdx*(fl-fr);
UL[indx + n*ntot] = sR;
UR[indxm + n*ntot] = sL;
}
}
}
return;
}
#endif
| 3aa7529162622da100991f93b928cf468b47dc86.cu | #include "defs.h"
#include "cuda_defs.h"
#ifdef PPM
#define SLOPETHETA 2.
#else
#define SLOPETHETA 1.
#endif
__device__ __inline__ real slope_limiter(real slopeL, real slopeR) {
if (fabs(slopeR) < PRESSUREFLOOR) return SLOPETHETA;
real r = slopeL/slopeR;
return MAX2(0., MIN3(SLOPETHETA*r, .5*(1+r),SLOPETHETA));
}
#ifdef PCM
__global__ void reconstruct(real *cons, real *UL, real *UR, real *dx,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* Piecewise constant reconstruction
*/
int i,j,k,n,indx,indxm,indxp;
int il, iu, jl, ju, kl, ku;
real dL, uL, uL2,uL3,pL,eL,sL;
real dC,uC,uC2,uC3,pC,eC,sC;
real dR,uR,uR2,uR3,pR,eR,sR;
real dLi,dCi,dRi, dLf, uLf, dRf, uRf;
real dxm, dxc, dxp;
real slopeL,slopeR,ke,r, fl,fr;
real dtdx;
int dir2, dir3;
/* 1->2->3
* 2->3->1
* 3->1->2
*/
dir2 = (dir1)%3 + 1;
dir3 = (dir2)%3 + 1;
if (dir1 == 1) {
il = -2; iu = nx1+2;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -2; ju = nx2+2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -2; ku = nx3 + 2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
indxm = indx - 1 ; // (i-1,j,k)
}
else if (dir1 == 2) {
indxm = indx - size_x1;
}
else {
indxm = indx - size_x12;
}
for(n=0;n<nf;n++) {
UL[indx + n*ntot] = cons[indx + n*ntot];
UR[indxm + n*ntot] = cons[indx + n*ntot];
}
}
}
return;
}
#endif
#ifdef PLM
__global__ void reconstruct(real *cons, real *UL, real *UR, real *dx,
int dir1,int nx1, int nx2, int nx3, int size_x1, int size_x12,
int nf,int ntot, int offset, real g1, real dt) {
/*
* Piecewise linear reconstruction
*/
int i,j,k,n,indx,indxm,indxp;
int il, iu, jl, ju, kl, ku;
real dL, uL, uL2,uL3,pL,eL,sL;
real dC,uC,uC2,uC3,pC,eC,sC;
real dR,uR,uR2,uR3,pR,eR,sR;
real dLi,dCi,dRi, dLf, uLf, dRf, uRf;
real dxm, dxc, dxp;
real slopeL,slopeR,ke,r, fl,fr;
real dtdx;
int dir2, dir3;
/* 1->2->3
* 2->3->1
* 3->1->2
*/
dir2 = (dir1)%3 + 1;
dir3 = (dir2)%3 + 1;
if (dir1 == 1) {
il = -2; iu = nx1+2;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else if (dir1 == 2) {
il = -NGHX1; iu = nx1+NGHX1;
jl = -2; ju = nx2+2;
kl = -NGHX3; ku = nx3 + NGHX3;
}
else {
il = -NGHX1; iu = nx1+NGHX1;
jl = -NGHX2; ju = nx2+NGHX2;
kl = -2; ku = nx3 + 2;
}
for(indx = blockIdx.x*blockDim.x + threadIdx.x; indx<ntot; indx+=blockDim.x*gridDim.x) {
unpack_indices(indx,&i,&j,&k,size_x1,size_x12);
if ((i>=il)&&(i<iu)&&(j>=jl)&&(j<ju)&&(k>=kl)&&(k<ku)) {
if (dir1 == 1) {
indxm = indx - 1 ; // (i-1,j,k)
indxp = indx + 1 ; // (i+1,j,k)
dxm = dx[i-1];
dxc = dx[i];
dxp = dx[i+1];
}
else if (dir1 == 2) {
indxm = indx - size_x1;
indxp = indx + size_x1;
dxm = dx[j-1];
dxc = dx[j];
dxp = dx[j+1];
}
else {
indxm = indx - size_x12;
indxp = indx + size_x12;
dxm = dx[k-1];
dxc = dx[k];
dxp = dx[k+1];
}
dtdx = .5*dt/dxc;
dL = cons[indxm + 0*ntot];
dLi = dL;
uL = cons[indxm + dir1*ntot]/dL;
uL2 = cons[indxm + dir2*ntot]/dL;
uL3 = cons[indxm + dir3*ntot]/dL;
eL = cons[indxm + 4*ntot];
pL = (eL-.5*dL*(uL*uL+uL2*uL2+uL3*uL3))*g1;
if (dL < PRESSUREFLOOR) dL = PRESSUREFLOOR;
if (pL < PRESSUREFLOOR) pL = PRESSUREFLOOR;
dC = cons[indx + 0*ntot];
dCi = dC;
uC = cons[indx + dir1*ntot]/dC;
uC2 = cons[indx + dir2*ntot]/dC;
uC3 = cons[indx + dir3*ntot]/dC;
eC = cons[indx + 4*ntot];
pC = (eC-.5*dC*(uC*uC+uC2*uC2+uC3*uC3))*g1;
if (dC < PRESSUREFLOOR) dC = PRESSUREFLOOR;
if (pC < PRESSUREFLOOR) pC = PRESSUREFLOOR;
dR = cons[indxp + 0*ntot];
dRi = dR;
uR = cons[indxp + dir1*ntot]/dR;
uR2 = cons[indxp + dir2*ntot]/dR;
uR3 = cons[indxp + dir3*ntot]/dR;
eR = cons[indxp + 4*ntot];
pR = (eR-.5*dR*(uR*uR+uR2*uR2+uR3*uR3))*g1;
if (dR < PRESSUREFLOOR) dR = PRESSUREFLOOR;
if (pR < PRESSUREFLOOR) pR = PRESSUREFLOOR;
/* Density */
slopeR = (dR-dC) * 2./(dxc + dxp);
slopeL = (dC-dL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
dL = dC - .5*dxc * r * slopeR;
dR = dC + .5*dxc * r * slopeR;
if (dL < PRESSUREFLOOR) dL = PRESSUREFLOOR;
if (dR < PRESSUREFLOOR) dR = PRESSUREFLOOR;
dLf = dL;
dRf = dR;
/* ux1 */
slopeR = (uR-uC) * 2./(dxc + dxp);
slopeL = (uC-uL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL = uC - .5*dxc * r * slopeR;
uR = uC + .5*dxc * r * slopeR;
uLf = uL;
uRf = uR;
/* ux2 */
slopeR = (uR2-uC2) * 2./(dxc + dxp);
slopeL = (uC2-uL2) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL2 = uC2 - .5*dxc * r * slopeR;
uR2 = uC2 + .5*dxc * r * slopeR;
/* ux3 */
slopeR = (uR3-uC3) * 2./(dxc + dxp);
slopeL = (uC3-uL3) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
uL3 = uC3 - .5*dxc * r * slopeR;
uR3 = uC3 + .5*dxc * r * slopeR;
/* pres */
slopeR = (pR-pC) * 2./(dxc + dxp);
slopeL = (pC-pL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
pL = pC - .5*dxc * r * slopeR;
pR = pC + .5*dxc * r * slopeR;
if (pL < PRESSUREFLOOR) pL = PRESSUREFLOOR;
if (pR < PRESSUREFLOOR) pR = PRESSUREFLOOR;
/* Evolve cons for dt/2 */
ke = .5*dL*(uL*uL + uL2*uL2 + uL3*uL3);
uL *= dL;
uL2 *= dL;
uL3 *= dL;
eL = (pL/g1 + ke);
ke = .5*dR*(uR*uR + uR2*uR2 + uR3*uR3);
uR *= dR;
uR2 *= dR;
uR3 *= dR;
eR = (pR/g1 + ke);
/* Density */
fl = dLf*uLf; fr = dRf*uRf;
dL += dtdx*(fl-fr);
dR += dtdx*(fl-fr);
UL[indx + 0*ntot] = dR;
UR[indxm + 0*ntot] = dL;
/* MX1 */
fl = uLf*uL + pL; fr = uRf*uR + pR;
uL += dtdx*(fl-fr);
uR += dtdx*(fl-fr);
UL[indx + dir1*ntot] = uR;
UR[indxm + dir1*ntot] = uL;
/* MX2 */
fl = uLf*uL2; fr = uRf*uR2;
uL2 += dtdx*(fl-fr);
uR2 += dtdx*(fl-fr);
UL[indx + dir2*ntot] = uR2;
UR[indxm + dir2*ntot] = uL2;
/* MX3 */
fl = uLf*uL3; fr = uRf*uR3;
uL3 += dtdx*(fl-fr);
uR3 += dtdx*(fl-fr);
UL[indx + dir3*ntot] = uR3;
UR[indxm + dir3*ntot] = uL3;
/* Energy */
fl = uLf*(eL + pL); fr = uRf*(eR+pR);
eL += dtdx*(fl-fr);
eR += dtdx*(fl-fr);
UL[indx + 4*ntot] = eR;
UR[indxm + 4*ntot] = eL;
for(n=5;n<nf;n++) {
sR = cons[indxp + n*ntot]/dRi;
sL = cons[indxm + n*ntot]/dLi;
sC = cons[indx + n*ntot]/dCi;
slopeR = (sR-sC) * 2./(dxc + dxp);
slopeL = (sC-sL) * 2./(dxc + dxm);
r = slope_limiter(slopeL,slopeR);
sL = sC - .5*dxc * r * slopeR;
sR = sC + .5*dxc * r * slopeR;
sL *= dLf;
sR *= dRf;
fl = sL*uLf;
fr = sR*uRf;
sL += dtdx*(fl-fr);
sR += dtdx*(fl-fr);
UL[indx + n*ntot] = sR;
UR[indxm + n*ntot] = sL;
}
}
}
return;
}
#endif
|
e94117e9f8f5e28e525658a491edf788d38d3481.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define L2HYS_EPSILON 0.01f
#define L2HYS_EPSILONHYS 1.0f
#define L2HYS_CLIP 0.2f
#define data_h2y 30
//long h_windowx=Imagewidth/Windowx;
//long h_windowy=ImageHeight/Windowy;
//dim3 blocks(h_windowx,h_windowy);//h_windowx=ImageWidth/Windowx,h_windowy=ImageHeight/Windowy
//dim3 threads(Windowx,Windowy);//each thread block computes the feature vector of one cell
//dim3 block(18,7);//each cell is split into 18 orientation bins, with 7 cells per direction
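// Note (added): smoothcell applies a circular (0.1, 0.8, 0.1) smoothing over each group
// of 10 histogram bins: t_nleft/t_nright wrap modulo 10 and each thread handles one bin
// of the cell addressed by (blockIdx.x, blockIdx.y). Description inferred from the code.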
__global__ void smoothcell(float *in,float *out){
int t_nleft,t_nright;
t_nleft=(threadIdx.x-1+10)%10;
t_nright=(threadIdx.x+1)%10;
float *t_ptemp,t_ftemp[10];
t_ptemp=in+blockIdx.x*70+blockIdx.y*10;//+threadIdx.y)*0.8f+0.1f*(in+blockIdx.x*70+threadIdx.x*10+t_left)
/*__syncthreads();*/
if(t_ptemp)
t_ftemp[threadIdx.x]=t_ptemp[threadIdx.x]*0.8f+0.1f*t_ptemp[t_nleft]+0.1f*t_ptemp[t_nright];
__syncthreads();
out[blockIdx.x*70+blockIdx.y*10+threadIdx.x]=t_ftemp[threadIdx.x];
__syncthreads();
} | e94117e9f8f5e28e525658a491edf788d38d3481.cu | #include "includes.h"
#define L2HYS_EPSILON 0.01f
#define L2HYS_EPSILONHYS 1.0f
#define L2HYS_CLIP 0.2f
#define data_h2y 30
//long h_windowx=Imagewidth/Windowx;
//long h_windowy=ImageHeight/Windowy;
//dim3 blocks(h_windowx,h_windowy);//h_windowx=ImageWidth/Windowx,h_windowy=ImageHeight/Windowy
//dim3 threads(Windowx,Windowy);//each thread block computes the feature vector of one cell
//dim3 block(18,7);//each cell is split into 18 orientation bins, with 7 cells per direction
__global__ void smoothcell(float *in,float *out){
int t_nleft,t_nright;
t_nleft=(threadIdx.x-1+10)%10;
t_nright=(threadIdx.x+1)%10;
float *t_ptemp,t_ftemp[10];
t_ptemp=in+blockIdx.x*70+blockIdx.y*10;//+threadIdx.y)*0.8f+0.1f*(in+blockIdx.x*70+threadIdx.x*10+t_left)
/*__syncthreads();*/
if(t_ptemp)
t_ftemp[threadIdx.x]=t_ptemp[threadIdx.x]*0.8f+0.1f*t_ptemp[t_nleft]+0.1f*t_ptemp[t_nright];
__syncthreads();
out[blockIdx.x*70+blockIdx.y*10+threadIdx.x]=t_ftemp[threadIdx.x];
__syncthreads();
} |
8ec383221e6761d8791146604b68ed11f4c72f1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tfcc_cudaactivationinterface.h"
#include <limits>
#include <type_traits>
#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_cudnnruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"
#include "kernel/tfcc_cudaatomickernel.hpp"
#include "kernel/tfcc_cudareducekernel.hpp"
#include "utils/tfcc_cudnnutils.h"
namespace tfcc {
// cuda functions
template <class T>
static void __global__ _cuda_sigmoid(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = 1 / (1 + exp(-v));
}
}
template <class T>
static void __global__ _cuda_tanh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = tanh(v);
}
}
template <class T>
static void __global__ _cuda_relu(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = max(v, T(0));
}
}
template <class T>
static void __global__ _cuda_leaky_relu(const T* a, unsigned total, T alpha, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T v2 = alpha * v;
b[i] = max(v, v2);
}
}
template <class T>
static void __global__ _cuda_softplus(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = log(exp(v) + 1);
}
}
template <class T>
static void __global__ _cuda_log(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = log(v);
}
}
template <class T>
static void __global__ _cuda_rsqrt(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = rsqrt(v);
}
}
constexpr float lowest_my = std::numeric_limits<float>::lowest();
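// Note (added): _cuda_softmax_v1_small assigns one thread block to each row of length
// `chunk` (requires chunk <= THREAD_COUNT): a block-wide max reduction implements the
// usual max-subtraction for numerical stability, a second reduction sums exp(x - max),
// and every thread then writes its normalized element; lowest_my pads the threads beyond
// `chunk` so they cannot win the max reduction.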
template <class T, unsigned THREAD_COUNT>
static void __global__ _cuda_softmax_v1_small(const T* a, unsigned chunk, T* b) {
a += blockIdx.x * chunk;
b += blockIdx.x * chunk;
__shared__ T maxShared;
__shared__ T exSumShared;
unsigned tid = threadIdx.x;
T input = tid < chunk ? a[tid] : lowest_my;
T maxVal = _cuda_reduce_max<T, THREAD_COUNT>(input);
if (tid == 0)
maxShared = maxVal;
__syncthreads();
maxVal = maxShared;
T ex = tid < chunk ? exp(input - maxVal) : static_cast<T>(0);
T exSum = _cuda_reduce_sum<T, THREAD_COUNT>(ex);
if (tid == 0)
exSumShared = exSum;
__syncthreads();
exSum = exSumShared;
if (tid < chunk)
b[tid] = ex / exSum;
}
#define SOFTMAX_CASE(NUM) \
case NUM: \
while (offset < batchSize) { \
hipLaunchKernelGGL(( _cuda_softmax_v1_small<float, NUM>), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(), \
a.data() + chunkSize * offset, \
chunkSize, \
result.data() + chunkSize * offset); \
offset += blockCount; \
} \
break
#define SOFTMAX_CASE_2(NUM1, NUM2) \
SOFTMAX_CASE(NUM1); \
SOFTMAX_CASE(NUM2)
#define SOFTMAX_CASE_4(NUM1, NUM2, NUM3, NUM4) \
SOFTMAX_CASE_2(NUM1, NUM2); \
SOFTMAX_CASE_2(NUM3, NUM4)
#define SOFTMAX_CASE_8(NUM1, NUM2, NUM3, NUM4, NUM5, NUM6, NUM7, NUM8) \
SOFTMAX_CASE_4(NUM1, NUM2, NUM3, NUM4); \
SOFTMAX_CASE_4(NUM5, NUM6, NUM7, NUM8)
#define SOFTMAX_SWITCH(THREADCNT) \
unsigned offset = 0; \
switch (THREADCNT) { \
SOFTMAX_CASE_8(32 * 1, 32 * 2, 32 * 3, 32 * 4, 32 * 5, 32 * 6, 32 * 7, 32 * 8); \
SOFTMAX_CASE_8(32 * 9, 32 * 10, 32 * 11, 32 * 12, 32 * 13, 32 * 14, 32 * 15, 32 * 16); \
SOFTMAX_CASE_8(32 * 17, 32 * 18, 32 * 19, 32 * 20, 32 * 21, 32 * 22, 32 * 23, 32 * 24); \
SOFTMAX_CASE_8(32 * 25, 32 * 26, 32 * 27, 32 * 28, 32 * 29, 32 * 30, 32 * 31, 32 * 32); \
}
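// Note (added): the SOFTMAX_* macros expand into a switch over the 32-aligned thread
// count so that _cuda_softmax_v1_small is instantiated with a compile-time THREAD_COUNT
// (needed by the block-wide reduction templates); each launch walks the batch in strides
// of blockCount rows.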
template <class T>
static void __global__ _cuda_sin(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
auto v = a[i];
b[i] = sin(v);
}
}
template <class T>
static void __global__ _cuda_cos(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = cos(v);
}
}
template <class T>
static void __global__ _cuda_pow(const T* a, unsigned total, T exponent, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = pow(v, exponent);
}
}
template <class T>
static void __global__ _cuda_pow_v2(const T* a, const T* exponent, T* b, unsigned total) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T e = exponent[i];
b[i] = pow(v, e);
}
}
template <class T>
static void __global__ _cuda_pow_v3(const T* exponent, unsigned total, T a, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T e = exponent[i];
b[i] = pow(a, e);
}
}
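// Note (added): _cuda_gelu below uses the common tanh approximation of GELU,
// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))), where
// 0.7978845608028654 ~= sqrt(2/pi); _cuda_gelu_accurate evaluates the exact erf form,
// dividing by sqrt(2) ~= 1.4142135623730951.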
template <class T>
static void __global__ _cuda_gelu(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T tmp = static_cast<T>(0.7978845608028654) * (v + 0.044715 * v * v * v);
b[i] = v * static_cast<T>(0.5) * (static_cast<T>(1.0) + tanh(tmp));
}
}
template <class T>
static void __global__ _cuda_gelu_accurate(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = 0.5 * v * (1 + erf(v / static_cast<T>(1.4142135623730951)));
}
}
template <class T>
static void __global__ _cuda_erf(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = erf(v);
}
}
template <class T>
static void __global__ _cuda_asin(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = asinf(v);
}
}
template <class T>
static void __global__ _cuda_asinh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = asinhf(v);
}
}
template <class T>
static void __global__ _cuda_acos(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = acosf(v);
}
}
template <class T>
static void __global__ _cuda_acosh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = acoshf(v);
}
}
template <class T>
static void __global__ _cuda_atan(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = atanf(v);
}
}
template <class T>
static void __global__ _cuda_atanh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = atanhf(v);
}
}
template <class T>
static void __global__ _cuda_sign(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
if (v < 0)
b[i] = static_cast<T>(-1);
else if (v > 0)
b[i] = static_cast<T>(1);
else
b[i] = 0;
}
}
// helper functions
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sigmoid_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_sigmoid), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sigmoid_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_tanh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_tanh), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _tanh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_relu_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_relu), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _relu_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_leaky_relu_helper(const Tensor<T>& a, T alpha, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_leaky_relu), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
alpha,
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _leaky_relu_helper(const Tensor<T>& a, T alpha, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_softplus_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_softplus), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _softplus_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_log_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_log), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _log_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_rsqrt_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_rsqrt), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _rsqrt_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
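// Note (added): _cudnn_softmax maps the (s1, s2, s3) factorization onto a 4-D cuDNN
// descriptor: when the softmax axis is innermost (s3 == 1) it uses n=s1, w=s2 with
// CUDNN_SOFTMAX_MODE_INSTANCE, otherwise n=s1, c=s2, h=s3 with
// CUDNN_SOFTMAX_MODE_CHANNEL so the reduction runs along the requested axis.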
template <class T>
static inline void _cudnn_softmax(cudnnDataType_t cudnnType, unsigned s1, unsigned s2, unsigned s3, const T* a, T* b) {
cudnnTensorDescriptor_t aTensor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&aTensor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard aTensorGuard(&aTensor);
cudnnSoftmaxMode_t softmaxModel;
if (s3 == 1) {
// honor the caller-supplied data type rather than hard-coding CUDNN_DATA_FLOAT,
// so the double specialization of softmax() builds a matching descriptor
ret = cudnnSetTensor4dDescriptor(aTensor, CUDNN_TENSOR_NCHW, cudnnType, s1, 1, 1, s2);
softmaxModel = CUDNN_SOFTMAX_MODE_INSTANCE;
} else {
ret = cudnnSetTensor4dDescriptor(aTensor, CUDNN_TENSOR_NCHW, cudnnType, s1, s2, s3, 1);
softmaxModel = CUDNN_SOFTMAX_MODE_CHANNEL;
}
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
T alpha = 1.0, beta = 0.0;
ret = cudnnSoftmaxForward(
session->getImpl()->cudnnHandle(),
CUDNN_SOFTMAX_ACCURATE,
softmaxModel,
&alpha,
aTensor, a,
&beta,
aTensor, b);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sin_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_sin), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sin_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_cos_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_cos), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _cos_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(const Tensor<T>& a, T exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_pow), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
exponent,
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(const Tensor<T>& a, T exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(const Tensor<T>& a, const Tensor<T>& exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_pow_v2), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), exponent.data(),
result.data(),
result.size());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(const Tensor<T>& a, const Tensor<T>& exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(T a, const Tensor<T>& exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(exponent.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_pow_v3), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
exponent.data(), exponent.size(),
a,
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(T a, const Tensor<T>& exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_gelu_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_gelu), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _gelu_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_gelu_accurate_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_gelu_accurate), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _gelu_accurate_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_erf_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_erf), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _erf_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_asin_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_asin), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _asin_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_asinh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_asinh), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _asinh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_acos_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_acos), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _acos_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_acosh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_acosh), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _acosh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_atan_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_atan), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _atan_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_atanh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_atanh), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _atanh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sign_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
hipLaunchKernelGGL(( _cuda_sign), dim3(blockCount), dim3(threadCount), 0, session->getImpl()->cudaStream(),
a.data(), a.size(),
result.data());
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sign_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
// class function
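// Note (added): each member below obtains a (blockCount, threadCount) launch geometry
// from CUDADeviceProperty::getSuitableKernelSize and forwards to the _*_helper overloads
// above, which only launch kernels for float/double and throw NotImplementedError for
// the other types instantiated by TFCC_FOR_ALL_TYPES. Hypothetical usage sketch (the
// surrounding object names are assumptions, not taken from this file):
// CUDAActivationInterface<float> act(property);
// tfcc::Variable<float> y = act.sigmoid(x); // x is a tfcc::Tensor<float>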
template <class T>
CUDAActivationInterface<T>::CUDAActivationInterface(const CUDADeviceProperty& property)
: _property(property) {
}
template <class T>
CUDAActivationInterface<T>::~CUDAActivationInterface() {
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sigmoid(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sigmoid_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::tanh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _tanh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::relu(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _relu_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::leakyRelu(const Tensor<T>& a, T alpha) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _leaky_relu_helper(a, alpha, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::softplus(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _softplus_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::log(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _log_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::rsqrt(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _rsqrt_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::softmax(const Tensor<T>& a, size_t axis) {
throw NotImplementedError();
}
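// Note (added): the float specialization below only uses the hand-written small-row
// kernel when the softmax axis is innermost (s3 == 1), the row length is at most 512,
// and the row fits in one thread block; every other shape, and all double inputs, fall
// back to cuDNN through _cudnn_softmax.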
template <>
Variable<float> CUDAActivationInterface<float>::softmax(const Tensor<float>& a, size_t axis) {
Variable<float> result(a.shape());
unsigned s1 = 1, s2 = 1, s3 = 1;
s2 = a.shape(axis);
for (size_t i = 0; i < axis; ++i)
s1 *= a.shape(i);
for (size_t i = axis + 1; i < a.shape().size(); ++i)
s3 *= a.shape(i);
cudnnDataType_t cudnnDType = CUDNN_DATA_FLOAT;
if (s3 != 1 || s2 > 512) {
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
unsigned batchSize = s1;
unsigned chunkSize = s2;
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batchSize, chunkSize);
threadCount = (threadCount + 31) / 32 * 32;
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
if (chunkSize > threadCount) {
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
SOFTMAX_SWITCH(threadCount);
hipError_t ret = hipGetLastError();
if (ret != hipSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <>
Variable<double> CUDAActivationInterface<double>::softmax(const Tensor<double>& a, size_t axis) {
Variable<double> result(a.shape());
unsigned s1 = 1, s2 = 1, s3 = 1;
s2 = a.shape(axis);
for (size_t i = 0; i < axis; ++i)
s1 *= a.shape(i);
for (size_t i = axis + 1; i < a.shape().size(); ++i)
s3 *= a.shape(i);
cudnnDataType_t cudnnDType = CUDNN_DATA_DOUBLE;
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sin(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sin_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::cos(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _cos_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(const Tensor<T>& a, T exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(const Tensor<T>& a, const Tensor<T>& exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(T a, const Tensor<T>& exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(exponent.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::gelu(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _gelu_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::geluAccurate(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _gelu_accurate_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::erf(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _erf_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::asin(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _asin_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::asinh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _asinh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::acos(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _acos_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::acosh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _acosh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::atan(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _atan_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::atanh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _atanh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sign(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sign_helper(a, blockCount, threadCount);
}
#define DEFINE_FUNC(type) template class CUDAActivationInterface<type>;
TFCC_FOR_ALL_TYPES(DEFINE_FUNC);
} // namespace tfcc
| 8ec383221e6761d8791146604b68ed11f4c72f1f.cu |
#include "tfcc_cudaactivationinterface.h"
#include <limits>
#include <type_traits>
#include "exceptions/tfcc_cudaruntimeerror.h"
#include "exceptions/tfcc_cudnnruntimeerror.h"
#include "exceptions/tfcc_invalidargumenterror.h"
#include "exceptions/tfcc_notimplementederror.h"
#include "framework/tfcc_cudasession.h"
#include "framework/tfcc_session.h"
#include "framework/tfcc_types.h"
#include "kernel/tfcc_cudaatomickernel.hpp"
#include "kernel/tfcc_cudareducekernel.hpp"
#include "utils/tfcc_cudnnutils.h"
namespace tfcc {
// cuda functions
template <class T>
static void __global__ _cuda_sigmoid(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = 1 / (1 + exp(-v));
}
}
template <class T>
static void __global__ _cuda_tanh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = tanh(v);
}
}
template <class T>
static void __global__ _cuda_relu(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = max(v, T(0));
}
}
template <class T>
static void __global__ _cuda_leaky_relu(const T* a, unsigned total, T alpha, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T v2 = alpha * v;
b[i] = max(v, v2);
}
}
template <class T>
static void __global__ _cuda_softplus(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = log(exp(v) + 1);
}
}
template <class T>
static void __global__ _cuda_log(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = log(v);
}
}
template <class T>
static void __global__ _cuda_rsqrt(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = rsqrt(v);
}
}
constexpr float lowest_my = std::numeric_limits<float>::lowest();
template <class T, unsigned THREAD_COUNT>
static void __global__ _cuda_softmax_v1_small(const T* a, unsigned chunk, T* b) {
a += blockIdx.x * chunk;
b += blockIdx.x * chunk;
__shared__ T maxShared;
__shared__ T exSumShared;
unsigned tid = threadIdx.x;
T input = tid < chunk ? a[tid] : lowest_my;
T maxVal = _cuda_reduce_max<T, THREAD_COUNT>(input);
if (tid == 0)
maxShared = maxVal;
__syncthreads();
maxVal = maxShared;
T ex = tid < chunk ? exp(input - maxVal) : static_cast<T>(0);
T exSum = _cuda_reduce_sum<T, THREAD_COUNT>(ex);
if (tid == 0)
exSumShared = exSum;
__syncthreads();
exSum = exSumShared;
if (tid < chunk)
b[tid] = ex / exSum;
}
#define SOFTMAX_CASE(NUM) \
case NUM: \
while (offset < batchSize) { \
_cuda_softmax_v1_small<float, NUM><<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>( \
a.data() + chunkSize * offset, \
chunkSize, \
result.data() + chunkSize * offset); \
offset += blockCount; \
} \
break
#define SOFTMAX_CASE_2(NUM1, NUM2) \
SOFTMAX_CASE(NUM1); \
SOFTMAX_CASE(NUM2)
#define SOFTMAX_CASE_4(NUM1, NUM2, NUM3, NUM4) \
SOFTMAX_CASE_2(NUM1, NUM2); \
SOFTMAX_CASE_2(NUM3, NUM4)
#define SOFTMAX_CASE_8(NUM1, NUM2, NUM3, NUM4, NUM5, NUM6, NUM7, NUM8) \
SOFTMAX_CASE_4(NUM1, NUM2, NUM3, NUM4); \
SOFTMAX_CASE_4(NUM5, NUM6, NUM7, NUM8)
#define SOFTMAX_SWITCH(THREADCNT) \
unsigned offset = 0; \
switch (THREADCNT) { \
SOFTMAX_CASE_8(32 * 1, 32 * 2, 32 * 3, 32 * 4, 32 * 5, 32 * 6, 32 * 7, 32 * 8); \
SOFTMAX_CASE_8(32 * 9, 32 * 10, 32 * 11, 32 * 12, 32 * 13, 32 * 14, 32 * 15, 32 * 16); \
SOFTMAX_CASE_8(32 * 17, 32 * 18, 32 * 19, 32 * 20, 32 * 21, 32 * 22, 32 * 23, 32 * 24); \
SOFTMAX_CASE_8(32 * 25, 32 * 26, 32 * 27, 32 * 28, 32 * 29, 32 * 30, 32 * 31, 32 * 32); \
}
template <class T>
static void __global__ _cuda_sin(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
auto v = a[i];
b[i] = sin(v);
}
}
template <class T>
static void __global__ _cuda_cos(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = cos(v);
}
}
template <class T>
static void __global__ _cuda_pow(const T* a, unsigned total, T exponent, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = pow(v, exponent);
}
}
template <class T>
static void __global__ _cuda_pow_v2(const T* a, const T* exponent, T* b, unsigned total) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T e = exponent[i];
b[i] = pow(v, e);
}
}
template <class T>
static void __global__ _cuda_pow_v3(const T* exponent, unsigned total, T a, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T e = exponent[i];
b[i] = pow(a, e);
}
}
template <class T>
static void __global__ _cuda_gelu(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
T tmp = static_cast<T>(0.7978845608028654) * (v + 0.044715 * v * v * v);
b[i] = v * static_cast<T>(0.5) * (static_cast<T>(1.0) + tanh(tmp));
}
}
template <class T>
static void __global__ _cuda_gelu_accurate(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = 0.5 * v * (1 + erf(v / static_cast<T>(1.4142135623730951)));
}
}
template <class T>
static void __global__ _cuda_erf(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = erf(v);
}
}
template <class T>
static void __global__ _cuda_asin(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = asinf(v);
}
}
template <class T>
static void __global__ _cuda_asinh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = asinhf(v);
}
}
template <class T>
static void __global__ _cuda_acos(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = acosf(v);
}
}
template <class T>
static void __global__ _cuda_acosh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = acoshf(v);
}
}
template <class T>
static void __global__ _cuda_atan(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = atanf(v);
}
}
template <class T>
static void __global__ _cuda_atanh(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
b[i] = atanhf(v);
}
}
template <class T>
static void __global__ _cuda_sign(const T* a, unsigned total, T* b) {
const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
const unsigned skip = blockDim.x * gridDim.x;
for (unsigned i = tid; i < total; i += skip) {
T v = a[i];
if (v < 0)
b[i] = static_cast<T>(-1);
else if (v > 0)
b[i] = static_cast<T>(1);
else
b[i] = 0;
}
}
// helper functions
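// Each _X_helper below follows the same pattern: the float/double overload allocates
// the result, launches the corresponding _cuda_X kernel on the current CUDASession's
// stream and converts any CUDA error into a CUDARuntimeError; the generic overload
// only satisfies the template interface and throws NotImplementedError for
// unsupported element types.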
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sigmoid_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_sigmoid<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sigmoid_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_tanh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_tanh<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _tanh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_relu_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_relu<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _relu_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_leaky_relu_helper(const Tensor<T>& a, T alpha, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_leaky_relu<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
alpha,
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _leaky_relu_helper(const Tensor<T>& a, T alpha, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_softplus_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_softplus<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _softplus_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_log_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_log<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _log_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_rsqrt_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_rsqrt<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _rsqrt_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
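// cuDNN fallback: the (batch, axis, inner) extents are packed into a 4D tensor
// descriptor. When the softmax axis is innermost (s3 == 1) the whole row is reduced
// in INSTANCE mode; otherwise CHANNEL mode reduces across s2 while keeping the
// trailing s3 elements independent.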
template <class T>
static inline void _cudnn_softmax(cudnnDataType_t cudnnType, unsigned s1, unsigned s2, unsigned s3, const T* a, T* b) {
cudnnTensorDescriptor_t aTensor;
cudnnStatus_t ret = cudnnCreateTensorDescriptor(&aTensor);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
CudnnTensorDescriptorGuard aTensorGuard(&aTensor);
cudnnSoftmaxMode_t softmaxModel;
if (s3 == 1) {
ret = cudnnSetTensor4dDescriptor(aTensor, CUDNN_TENSOR_NCHW, cudnnType, s1, 1, 1, s2);
softmaxModel = CUDNN_SOFTMAX_MODE_INSTANCE;
} else {
ret = cudnnSetTensor4dDescriptor(aTensor, CUDNN_TENSOR_NCHW, cudnnType, s1, s2, s3, 1);
softmaxModel = CUDNN_SOFTMAX_MODE_CHANNEL;
}
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
T alpha = 1.0, beta = 0.0;
ret = cudnnSoftmaxForward(
session->getImpl()->cudnnHandle(),
CUDNN_SOFTMAX_ACCURATE,
softmaxModel,
&alpha,
aTensor, a,
&beta,
aTensor, b);
if (ret != CUDNN_STATUS_SUCCESS)
throw CUDNNRuntimeError(ret);
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sin_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_sin<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sin_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_cos_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_cos<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _cos_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(const Tensor<T>& a, T exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_pow<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
exponent,
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(const Tensor<T>& a, T exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(const Tensor<T>& a, const Tensor<T>& exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_pow_v2<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), exponent.data(),
result.data(),
result.size());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(const Tensor<T>& a, const Tensor<T>& exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_pow_helper(T a, const Tensor<T>& exponent, size_t blockCount, size_t threadCount) {
Variable<T> result(exponent.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_pow_v3<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
exponent.data(), exponent.size(),
a,
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _pow_helper(T a, const Tensor<T>& exponent, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_gelu_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_gelu<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _gelu_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_gelu_accurate_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_gelu_accurate<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _gelu_accurate_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_erf_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_erf<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _erf_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_asin_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_asin<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _asin_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_asinh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_asinh<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _asinh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_acos_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_acos<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _acos_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_acosh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_acosh<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _acosh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_atan_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_atan<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _atan_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_atanh_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_atanh<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _atanh_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
template <class T>
static inline typename std::enable_if<std::is_same<float, T>::value || std::is_same<double, T>::value, Variable<T>>::type
_sign_helper(const Tensor<T>& a, size_t blockCount, size_t threadCount) {
Variable<T> result(a.shape());
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
_cuda_sign<<<blockCount, threadCount, 0, session->getImpl()->cudaStream()>>>(
a.data(), a.size(),
result.data());
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <class T, class ST>
static inline Variable<T> _sign_helper(const Tensor<T>& a, ST blockCount, ST threadCount) {
throw NotImplementedError();
}
// class function
template <class T>
CUDAActivationInterface<T>::CUDAActivationInterface(const CUDADeviceProperty& property)
: _property(property) {
}
template <class T>
CUDAActivationInterface<T>::~CUDAActivationInterface() {
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sigmoid(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sigmoid_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::tanh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _tanh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::relu(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _relu_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::leakyRelu(const Tensor<T>& a, T alpha) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _leaky_relu_helper(a, alpha, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::softplus(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _softplus_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::log(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _log_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::rsqrt(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _rsqrt_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::softmax(const Tensor<T>& a, size_t axis) {
throw NotImplementedError();
}
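// float specialization: the shape is collapsed into s1 x s2 x s3 around `axis`.
// The hand-written kernel is only used when the axis is innermost (s3 == 1), short
// (s2 <= 512) and fits into one block of threads; everything else falls back to cuDNN.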
template <>
Variable<float> CUDAActivationInterface<float>::softmax(const Tensor<float>& a, size_t axis) {
Variable<float> result(a.shape());
unsigned s1 = 1, s2 = 1, s3 = 1;
s2 = a.shape(axis);
for (size_t i = 0; i < axis; ++i)
s1 *= a.shape(i);
for (size_t i = axis + 1; i < a.shape().size(); ++i)
s3 *= a.shape(i);
cudnnDataType_t cudnnDType = CUDNN_DATA_FLOAT;
if (s3 != 1 || s2 > 512) {
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
unsigned batchSize = s1;
unsigned chunkSize = s2;
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(batchSize, chunkSize);
threadCount = (threadCount + 31) / 32 * 32;
tfcc::CUDASession* session = static_cast<tfcc::CUDASession*>(Session::getThreadDefault());
if (chunkSize > threadCount) {
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
SOFTMAX_SWITCH(threadCount);
cudaError_t ret = cudaGetLastError();
if (ret != cudaSuccess)
throw CUDARuntimeError(ret);
return result;
}
template <>
Variable<double> CUDAActivationInterface<double>::softmax(const Tensor<double>& a, size_t axis) {
Variable<double> result(a.shape());
unsigned s1 = 1, s2 = 1, s3 = 1;
s2 = a.shape(axis);
for (size_t i = 0; i < axis; ++i)
s1 *= a.shape(i);
for (size_t i = axis + 1; i < a.shape().size(); ++i)
s3 *= a.shape(i);
cudnnDataType_t cudnnDType = CUDNN_DATA_DOUBLE;
_cudnn_softmax(cudnnDType, s1, s2, s3, a.data(), result.data());
return result;
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sin(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sin_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::cos(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _cos_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(const Tensor<T>& a, T exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(const Tensor<T>& a, const Tensor<T>& exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::pow(T a, const Tensor<T>& exponent) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(exponent.size());
return _pow_helper(a, exponent, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::gelu(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _gelu_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::geluAccurate(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _gelu_accurate_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::erf(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _erf_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::asin(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _asin_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::asinh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _asinh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::acos(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _acos_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::acosh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _acosh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::atan(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _atan_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::atanh(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _atanh_helper(a, blockCount, threadCount);
}
template <class T>
Variable<T> CUDAActivationInterface<T>::sign(const Tensor<T>& a) {
size_t blockCount, threadCount;
std::tie(blockCount, threadCount) = _property.getSuitableKernelSize(a.size());
return _sign_helper(a, blockCount, threadCount);
}
#define DEFINE_FUNC(type) template class CUDAActivationInterface<type>;
TFCC_FOR_ALL_TYPES(DEFINE_FUNC);
} // namespace tfcc
|
fd8b6138e7a17efe5fb5ad1c788e42f522f5deaf.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <vector>
#include <limits.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "photomosaic.h"
struct GlobalConstants {
int numImages;
int numSlices;
int cutSize;
int* imageAverages;
int* allAverages;
int* imageIndex;
};
__constant__ GlobalConstants cuConstMosaicParams;
__device__ __inline__ int square(int x) {
return x * x;
}
__device__ __inline__ int RGBdistance(int3 rgb1, int3 rgb2) {
int red = square(rgb1.x - rgb2.x);
int green = square(rgb1.y - rgb2.y);
int blue = square(rgb1.z - rgb2.z);
return (int)sqrt((float)(red + green + blue));
}
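// Brute-force matcher: for its flattened grid position, each thread sums the RGB
// distance between a cutSize*cutSize block of precomputed averages (imageAverages)
// and the averages of every candidate image (allAverages), and stores the index of
// the closest candidate in imageIndex.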
__global__ void kernelMatchImages() {
int numImages = cuConstMosaicParams.numImages;
int numSlices = cuConstMosaicParams.numSlices;
int cutSize = cuConstMosaicParams.cutSize;
int cSizeSquared = square(cutSize);
int width = numSlices * cutSize;
int index = blockIdx.x * blockDim.x + (blockDim.y * blockIdx.y * width);
index += (threadIdx.x + (width * threadIdx.y));
int imageAverageStart = index * cSizeSquared * 3;
int dist;
int minIndex = 0;
int minVal = INT_MAX;
for (int i = 0; i < (numImages * square(cutSize) * 3); i += (cSizeSquared * 3)) {
dist = 0;
for (int j = 0; j < (cSizeSquared * 3); j += 3) {
int3 rgb1 = *(int3*)(&cuConstMosaicParams.imageAverages[imageAverageStart + j]);
int3 rgb2 = *(int3*)(&cuConstMosaicParams.allAverages[i + j]);
dist += RGBdistance(rgb1, rgb2);
}
if (dist < minVal) {
minVal = dist;
minIndex = (i / (cSizeSquared * 3));
}
}
__syncthreads();
cuConstMosaicParams.imageIndex[index] = minIndex;
__syncthreads();
}
CudaMosaic::CudaMosaic() {
printf("Constructor\n");
cudaDeviceImageAverages = NULL;
cudaDeviceAllAverages = NULL;
cudaDeviceImageIndex = NULL;
imageAverages = NULL;
allAverages = NULL;
imageIndex = NULL;
}
CudaMosaic::~CudaMosaic() {
printf("Destructing!\n");
if (imageAverages) {
delete [] imageAverages;
delete [] allAverages;
delete [] imageIndex;
}
if (cudaDeviceImageAverages) {
hipFree(cudaDeviceImageAverages);
hipFree(cudaDeviceAllAverages);
hipFree(cudaDeviceImageIndex);
}
}
const int* CudaMosaic::getIndices() {
printf("Copying index data from device\n");
hipMemcpy(imageIndex,
cudaDeviceImageIndex,
sizeof(int) * numSlices * numSlices,
hipMemcpyDeviceToHost);
hipDeviceReset();
return imageIndex;
}
void CudaMosaic::setup(int ni, int ns, int cs, int* imgavg, int* allavg) {
printf("Calling setup with numImages %d, numSlices %d, cutSize %d\n", ni, ns, cs);
numImages = ni;
numSlices = ns;
cutSize = cs;
imageAverages = new int[numSlices * numSlices * cutSize * cutSize * 3];
allAverages = new int[numImages * cutSize * cutSize * 3];
imageIndex = new int[numSlices * numSlices];
int i;
for (i = 0; i < (numSlices * numSlices * cutSize * cutSize * 3); i++) {
imageAverages[i] = imgavg[i];
}
for (i = 0; i < (numImages * cutSize * cutSize * 3); i++) {
allAverages[i] = allavg[i];
}
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("Initializing CUDA for CudaMosaic\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
hipMalloc(&cudaDeviceImageAverages, sizeof(int) * cutSize * cutSize * 3 * (numSlices * numSlices));
hipMalloc(&cudaDeviceAllAverages, sizeof(int) * cutSize * cutSize * 3 * numImages);
hipMalloc(&cudaDeviceImageIndex, sizeof(int) * numSlices * numSlices);
hipMemcpy(cudaDeviceImageAverages, imageAverages, sizeof(int) * cutSize * cutSize * 3 * (numSlices * numSlices), hipMemcpyHostToDevice);
hipMemcpy(cudaDeviceAllAverages, allAverages, sizeof(int) * cutSize * cutSize * 3 * numImages, hipMemcpyHostToDevice);
// hipMemcpy(cudaDeviceImageIndex, imageIndex, sizeof(int) * numImages, hipMemcpyHostToDevice);
printf("Successfully transferred to device\n");
GlobalConstants params;
params.numImages = numImages;
params.numSlices = numSlices;
params.cutSize = cutSize;
params.imageAverages = cudaDeviceImageAverages;
params.allAverages = cudaDeviceAllAverages;
params.imageIndex = cudaDeviceImageIndex;
hipMemcpyToSymbol(cuConstMosaicParams, ¶ms, sizeof(GlobalConstants));
}
void CudaMosaic::imageMatch() {
dim3 threadsPerBlock(cutSize, cutSize, 1);
dim3 numBlocks(numSlices, numSlices, 1);
printf("About to launch kernel block size %d num blocks %d\n", threadsPerBlock.x, numBlocks.x);
hipLaunchKernelGGL(( kernelMatchImages), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, );
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
printf( "cudaCheckError() failed at %s", hipGetErrorString( err ) );
exit( -1 );
}
hipDeviceSynchronize();
}
| fd8b6138e7a17efe5fb5ad1c788e42f522f5deaf.cu | #include <math.h>
#include <stdio.h>
#include <vector>
#include <limits.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "photomosaic.h"
struct GlobalConstants {
int numImages;
int numSlices;
int cutSize;
int* imageAverages;
int* allAverages;
int* imageIndex;
};
__constant__ GlobalConstants cuConstMosaicParams;
__device__ __inline__ int square(int x) {
return x * x;
}
__device__ __inline__ int RGBdistance(int3 rgb1, int3 rgb2) {
int red = square(rgb1.x - rgb2.x);
int green = square(rgb1.y - rgb2.y);
int blue = square(rgb1.z - rgb2.z);
return (int)sqrt((float)(red + green + blue));
}
__global__ void kernelMatchImages() {
int numImages = cuConstMosaicParams.numImages;
int numSlices = cuConstMosaicParams.numSlices;
int cutSize = cuConstMosaicParams.cutSize;
int cSizeSquared = square(cutSize);
int width = numSlices * cutSize;
int index = blockIdx.x * blockDim.x + (blockDim.y * blockIdx.y * width);
index += (threadIdx.x + (width * threadIdx.y));
int imageAverageStart = index * cSizeSquared * 3;
int dist;
int minIndex = 0;
int minVal = INT_MAX;
for (int i = 0; i < (numImages * square(cutSize) * 3); i += (cSizeSquared * 3)) {
dist = 0;
for (int j = 0; j < (cSizeSquared * 3); j += 3) {
int3 rgb1 = *(int3*)(&cuConstMosaicParams.imageAverages[imageAverageStart + j]);
int3 rgb2 = *(int3*)(&cuConstMosaicParams.allAverages[i + j]);
dist += RGBdistance(rgb1, rgb2);
}
if (dist < minVal) {
minVal = dist;
minIndex = (i / (cSizeSquared * 3));
}
}
__syncthreads();
cuConstMosaicParams.imageIndex[index] = minIndex;
__syncthreads();
}
CudaMosaic::CudaMosaic() {
printf("Constructor\n");
cudaDeviceImageAverages = NULL;
cudaDeviceAllAverages = NULL;
cudaDeviceImageIndex = NULL;
imageAverages = NULL;
allAverages = NULL;
imageIndex = NULL;
}
CudaMosaic::~CudaMosaic() {
printf("Destructing!\n");
if (imageAverages) {
delete [] imageAverages;
delete [] allAverages;
delete [] imageIndex;
}
if (cudaDeviceImageAverages) {
cudaFree(cudaDeviceImageAverages);
cudaFree(cudaDeviceAllAverages);
cudaFree(cudaDeviceImageIndex);
}
}
const int* CudaMosaic::getIndices() {
printf("Copying index data from device\n");
cudaMemcpy(imageIndex,
cudaDeviceImageIndex,
sizeof(int) * numSlices * numSlices,
cudaMemcpyDeviceToHost);
cudaDeviceReset();
return imageIndex;
}
void CudaMosaic::setup(int ni, int ns, int cs, int* imgavg, int* allavg) {
printf("Calling setup with numImages %d, numSlices %d, cutSize %d\n", ni, ns, cs);
numImages = ni;
numSlices = ns;
cutSize = cs;
imageAverages = new int[numSlices * numSlices * cutSize * cutSize * 3];
allAverages = new int[numImages * cutSize * cutSize * 3];
imageIndex = new int[numSlices * numSlices];
int i;
for (i = 0; i < (numSlices * numSlices * cutSize * cutSize * 3); i++) {
imageAverages[i] = imgavg[i];
}
for (i = 0; i < (numImages * cutSize * cutSize * 3); i++) {
allAverages[i] = allavg[i];
}
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("Initializing CUDA for CudaMosaic\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++) {
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
cudaMalloc(&cudaDeviceImageAverages, sizeof(int) * cutSize * cutSize * 3 * (numSlices * numSlices));
cudaMalloc(&cudaDeviceAllAverages, sizeof(int) * cutSize * cutSize * 3 * numImages);
cudaMalloc(&cudaDeviceImageIndex, sizeof(int) * numSlices * numSlices);
cudaMemcpy(cudaDeviceImageAverages, imageAverages, sizeof(int) * cutSize * cutSize * 3 * (numSlices * numSlices), cudaMemcpyHostToDevice);
cudaMemcpy(cudaDeviceAllAverages, allAverages, sizeof(int) * cutSize * cutSize * 3 * numImages, cudaMemcpyHostToDevice);
// cudaMemcpy(cudaDeviceImageIndex, imageIndex, sizeof(int) * numImages, cudaMemcpyHostToDevice);
printf("Successfully transferred to device\n");
GlobalConstants params;
params.numImages = numImages;
params.numSlices = numSlices;
params.cutSize = cutSize;
params.imageAverages = cudaDeviceImageAverages;
params.allAverages = cudaDeviceAllAverages;
params.imageIndex = cudaDeviceImageIndex;
cudaMemcpyToSymbol(cuConstMosaicParams, ¶ms, sizeof(GlobalConstants));
}
void CudaMosaic::imageMatch() {
dim3 threadsPerBlock(cutSize, cutSize, 1);
dim3 numBlocks(numSlices, numSlices, 1);
printf("About to launch kernel block size %d num blocks %d\n", threadsPerBlock.x, numBlocks.x);
kernelMatchImages<<<numBlocks, threadsPerBlock>>>();
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
printf( "cudaCheckError() failed at %s", cudaGetErrorString( err ) );
exit( -1 );
}
cudaThreadSynchronize();
}
|
8ac16d683df80e24139ecd84534fe64e04357bdc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nn_kernels.h"
__device__ float sigmoid(float x)
{
return 1 / (1 + expf(-x));
}
__global__ void fill_ones_kernel(float *arr, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
arr[idx] = 1.0f;
}
__global__ void active_kernel(float *in, float *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
out[idx] = sigmoid(in[idx]);
}
__global__ void active_prime_kernel(float *in, float *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float t = sigmoid(in[idx]);
out[idx] = t * (1 - t);
}
}
//c = a - b
__global__ void subtract_kernel(float *a, float *b, float *c, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
c[idx] = a[idx] - b[idx];
}
//c = a .* b
__global__ void mul_kernel(float *a, float *b, float *c, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
c[idx] = a[idx] * b[idx];
}
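// Sparsity penalty: kl_kernel evaluates KL(rho || rho_hat) elementwise, and
// kl_prime_kernel its derivative d/d(rho_hat) = -rho/rho_hat + (1-rho)/(1-rho_hat).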
__global__ void kl_prime_kernel(float *in, float *out, float rho, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float rho_hat = in[idx];
out[idx] = -rho / rho_hat + (1 - rho) / (1 - rho_hat);
}
}
__global__ void kl_kernel(float *in, float *out, float rho, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float rho_hat = in[idx];
out[idx] = rho * logf(rho / rho_hat) + (1 - rho) * logf( (1 - rho) / (1-rho_hat) );
}
}
void fill_ones(float *d_arr, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( fill_ones_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_arr, n);
checkCudaErrors(hipGetLastError());
}
void active(float *d_in, float *d_out, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( active_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_in, d_out, n);
checkCudaErrors(hipGetLastError());
}
void active_prime(float *d_in, float *d_out, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( active_prime_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_in, d_out, n);
checkCudaErrors(hipGetLastError());
}
void square_loss_prime(float *d_Ypred, float *d_Ytrue, float *d_err, int n_rows,
int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
//err = Y_pred - Y_true
hipLaunchKernelGGL(( subtract_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_Ypred, d_Ytrue, d_err, n);
checkCudaErrors(hipGetLastError());
}
void element_mul(float *d_a, float *d_b, float *d_c, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
//c = a .* b
hipLaunchKernelGGL(( mul_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_a, d_b, d_c, n);
checkCudaErrors(hipGetLastError());
}
void kl_prime(float *d_in, float *d_out, float rho, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( kl_prime_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_in, d_out, rho, n);
checkCudaErrors(hipGetLastError());
}
void kl(float *d_in, float *d_out, float rho, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( kl_kernel), dim3(grid_size), dim3(block_size), 0, 0, d_in, d_out, rho, n);
checkCudaErrors(hipGetLastError());
}
| 8ac16d683df80e24139ecd84534fe64e04357bdc.cu | #include "nn_kernels.h"
__device__ float sigmoid(float x)
{
return 1 / (1 + expf(-x));
}
__global__ void fill_ones_kernel(float *arr, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
arr[idx] = 1.0f;
}
__global__ void active_kernel(float *in, float *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
out[idx] = sigmoid(in[idx]);
}
__global__ void active_prime_kernel(float *in, float *out, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float t = sigmoid(in[idx]);
out[idx] = t * (1 - t);
}
}
//c = a - b
__global__ void subtract_kernel(float *a, float *b, float *c, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
c[idx] = a[idx] - b[idx];
}
//c = a .* b
__global__ void mul_kernel(float *a, float *b, float *c, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
c[idx] = a[idx] * b[idx];
}
__global__ void kl_prime_kernel(float *in, float *out, float rho, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float rho_hat = in[idx];
out[idx] = -rho / rho_hat + (1 - rho) / (1 - rho_hat);
}
}
__global__ void kl_kernel(float *in, float *out, float rho, int n)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n)
{
float rho_hat = in[idx];
out[idx] = rho * logf(rho / rho_hat) + (1 - rho) * logf( (1 - rho) / (1-rho_hat) );
}
}
void fill_ones(float *d_arr, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
fill_ones_kernel<<<grid_size, block_size>>>(d_arr, n);
checkCudaErrors(cudaGetLastError());
}
void active(float *d_in, float *d_out, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
active_kernel<<<grid_size, block_size>>>(d_in, d_out, n);
checkCudaErrors(cudaGetLastError());
}
void active_prime(float *d_in, float *d_out, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
active_prime_kernel<<<grid_size, block_size>>>(d_in, d_out, n);
checkCudaErrors(cudaGetLastError());
}
void square_loss_prime(float *d_Ypred, float *d_Ytrue, float *d_err, int n_rows,
int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
//err = Y_pred - Y_true
subtract_kernel<<<grid_size, block_size>>>(d_Ypred, d_Ytrue, d_err, n);
checkCudaErrors(cudaGetLastError());
}
void element_mul(float *d_a, float *d_b, float *d_c, int n_rows, int n_cols)
{
int n = n_rows * n_cols;
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
//c = a .* b
mul_kernel<<<grid_size, block_size>>>(d_a, d_b, d_c, n);
checkCudaErrors(cudaGetLastError());
}
void kl_prime(float *d_in, float *d_out, float rho, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
kl_prime_kernel<<<grid_size, block_size>>>(d_in, d_out, rho, n);
checkCudaErrors(cudaGetLastError());
}
void kl(float *d_in, float *d_out, float rho, int n)
{
int block_size = 1024;
int grid_size = (n + block_size - 1) / block_size;
kl_kernel<<<grid_size, block_size>>>(d_in, d_out, rho, n);
checkCudaErrors(cudaGetLastError());
}
|
bb329e22b05717aac81e90a98ccc137b62c4e614.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "kernel.h"
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[(objectId*numCoords) + i] - clusters[(clusterId*numCoords) +i]) *
(objects[(objectId*numCoords) + i] - clusters[(clusterId*numCoords) +i]);
}
return(ans);
}
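// One thread per object: compute the squared Euclidean distance to every cluster
// centre and write the index of the nearest one into membership.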
__global__
void cuda_find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership
)
{
float *clusters = deviceClusters;
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
//if (objectId == 1) printf("A cuda call with numObjs: %d, numCoords: %d, numClusters:%d \n", numObjs, numCoords, numClusters);
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
/*if (objectId == 1) {
printf("Distance to cluster 0: %f\n", min_dist);
}*/
__syncthreads();
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/*if (objectId == 1) {
printf("Distance to cluster 0: %f\n", min_dist);
}*/
if (dist < min_dist) { // find the min and its array index
min_dist = dist;
index = i;
}
}
__syncthreads();
/*if (objectId == 1) {
printf("Object assigned to cluster %d\n", index);
}*/
membership[objectId] = index;
}
}
| bb329e22b05717aac81e90a98ccc137b62c4e614.cu | #include <stdio.h>
#include <stdlib.h>
#include "kernel.h"
static inline int nextPowerOfTwo(int n) {
n--;
n = n >> 1 | n;
n = n >> 2 | n;
n = n >> 4 | n;
n = n >> 8 | n;
n = n >> 16 | n;
// n = n >> 32 | n; // For 64-bit ints
return ++n;
}
__host__ __device__ inline static
float euclid_dist_2(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *clusters, // [numCoords][numClusters]
int objectId,
int clusterId)
{
int i;
float ans=0.0;
for (i = 0; i < numCoords; i++) {
ans += (objects[(objectId*numCoords) + i] - clusters[(clusterId*numCoords) +i]) *
(objects[(objectId*numCoords) + i] - clusters[(clusterId*numCoords) +i]);
}
return(ans);
}
__global__
void cuda_find_nearest_cluster(int numCoords,
int numObjs,
int numClusters,
float *objects, // [numCoords][numObjs]
float *deviceClusters, // [numCoords][numClusters]
int *membership
)
{
float *clusters = deviceClusters;
int objectId = blockDim.x * blockIdx.x + threadIdx.x;
//if (objectId == 1) printf("A cuda call with numObjs: %d, numCoords: %d, numClusters:%d \n", numObjs, numCoords, numClusters);
if (objectId < numObjs) {
int index, i;
float dist, min_dist;
index = 0;
min_dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, 0);
/*if (objectId == 1) {
printf("Distance to cluster 0: %f\n", min_dist);
}*/
__syncthreads();
for (i=1; i<numClusters; i++) {
dist = euclid_dist_2(numCoords, numObjs, numClusters,
objects, clusters, objectId, i);
/*if (objectId == 1) {
printf("Distance to cluster 0: %f\n", min_dist);
}*/
if (dist < min_dist) { // find the min and its array index
min_dist = dist;
index = i;
}
}
__syncthreads();
/*if (objectId == 1) {
printf("Object assigned to cluster %d\n", index);
}*/
membership[objectId] = index;
}
}
|
59f3c5ab67934a891689f0034463c0e054aa4ba7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "mex.h"
#include <math.h>
#include <assert.h>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
#include <cudpp.h>
#include <jacket.h>
// Input Arguments
#define data_IN_xi prhs[0]
#define data_IN_yi prhs[1]
#define data_IN_GI prhs[2]
#define data_IN_KBlut prhs[3]
#define data_IN_nj prhs[4]
#define data_IN_siz prhs[5]
#define data_IN_binidx prhs[6]
#define data_IN_binstartidx prhs[7]
#define data_IN_binlength prhs[8]
typedef unsigned int uint;
extern "C"
void gridding_irreg2reg_bin( float* xi, float* yi,
float* GI, size_t NGI,
float* KBlut, uint KBwidth,
float* G, size_t Gwidth, size_t Gheight,
uint* binidx, uint* binstartidx,
uint* binlength, uint tiledim );
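// Jacket (jkt_*) MEX entry point: it pulls device pointers and scalar parameters out
// of the MATLAB arguments, derives the square tile dimension from the number of bins,
// allocates the output grid G, validates the inputs and then dispatches to the CUDA
// gridding routine declared above.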
err_t jktFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray *prhs[])
{
float *xi, *yi, *G, *Gsiz, *KBlut, *GI, *fKBwidth;
uint hostKBwidth, Gwidth, Gheight;
uint *binidx, *binstartidx, *binlength;
TRY( jkt_mem((void **)&xi, data_IN_xi ) );
TRY( jkt_mem((void **)&yi, data_IN_yi ) );
TRY( jkt_mem((void **)&GI, data_IN_GI ) );
TRY( jkt_mem((void **)&KBlut, data_IN_KBlut ) );
TRY( jkt_mem((void **)&binidx, data_IN_binidx ) );
TRY( jkt_mem((void **)&binstartidx, data_IN_binstartidx ) );
TRY( jkt_mem((void **)&binlength, data_IN_binlength ) );
TRY( jkt_mem_host((void **)&fKBwidth, data_IN_nj ) );
hostKBwidth = (uint)(fKBwidth[0]);
TRY( jkt_mem_host((void **)&Gsiz, data_IN_siz ) );
Gheight = (uint)(Gsiz[0]);
Gwidth = (uint)(Gsiz[1]);
mwSize NGI, len;
NGI = jkt_numel(data_IN_GI);
// Compute tile length
len = jkt_numel(data_IN_binstartidx);
len = Gwidth * Gheight / len;
len = sqrt(len);
// Output
mxArray *data_OUT_G = plhs[0] =
jkt_new( Gheight, Gwidth, mxSINGLE_CLASS, 0 );
TRY( jkt_mem((void **)&G, data_OUT_G) );
// Error checking
if( jkt_complex( data_IN_xi ) ||
jkt_complex( data_IN_yi ) ||
jkt_complex( data_IN_GI ) ||
jkt_complex( data_IN_KBlut) ) {
return err( "Input data must be real, non-complex\n" );
}
if( (Gheight & (Gheight-1) != 0) ||
(Gwidth & (Gwidth-1) != 0) ) {
return err( "G must have dimensions of power of two\n" );
}
if( jkt_numel(data_IN_KBlut) !=
((hostKBwidth+1)*20+1)*((hostKBwidth+1)*20+1) ) {
return err( "KBlut size does not match KBwidth\n" );
}
if( jkt_numel(data_IN_xi) != jkt_numel(data_IN_yi) ||
jkt_numel(data_IN_xi) != jkt_numel(data_IN_GI) ) {
return err( "Size of xi, yi and/or GI is not the same\n" );
}
if( jkt_numel(data_IN_binstartidx) != jkt_numel(data_IN_binlength) ) {
return err( "Size of binstartidx and binlength is not the same\n" );
}
// Call CUDA
gridding_irreg2reg_bin( xi, yi,
GI, NGI,
KBlut, hostKBwidth,
G, Gwidth, Gheight,
binidx, binstartidx, binlength, len );
return errNone;
}
| 59f3c5ab67934a891689f0034463c0e054aa4ba7.cu | #include <cuda.h>
#include "mex.h"
#include <math.h>
#include <assert.h>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
#include <cudpp.h>
#include <jacket.h>
// Input Arguments
#define data_IN_xi prhs[0]
#define data_IN_yi prhs[1]
#define data_IN_GI prhs[2]
#define data_IN_KBlut prhs[3]
#define data_IN_nj prhs[4]
#define data_IN_siz prhs[5]
#define data_IN_binidx prhs[6]
#define data_IN_binstartidx prhs[7]
#define data_IN_binlength prhs[8]
typedef unsigned int uint;
extern "C"
void gridding_irreg2reg_bin( float* xi, float* yi,
float* GI, size_t NGI,
float* KBlut, uint KBwidth,
float* G, size_t Gwidth, size_t Gheight,
uint* binidx, uint* binstartidx,
uint* binlength, uint tiledim );
err_t jktFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray *prhs[])
{
float *xi, *yi, *G, *Gsiz, *KBlut, *GI, *fKBwidth;
uint hostKBwidth, Gwidth, Gheight;
uint *binidx, *binstartidx, *binlength;
TRY( jkt_mem((void **)&xi, data_IN_xi ) );
TRY( jkt_mem((void **)&yi, data_IN_yi ) );
TRY( jkt_mem((void **)&GI, data_IN_GI ) );
TRY( jkt_mem((void **)&KBlut, data_IN_KBlut ) );
TRY( jkt_mem((void **)&binidx, data_IN_binidx ) );
TRY( jkt_mem((void **)&binstartidx, data_IN_binstartidx ) );
TRY( jkt_mem((void **)&binlength, data_IN_binlength ) );
TRY( jkt_mem_host((void **)&fKBwidth, data_IN_nj ) );
hostKBwidth = (uint)(fKBwidth[0]);
TRY( jkt_mem_host((void **)&Gsiz, data_IN_siz ) );
Gheight = (uint)(Gsiz[0]);
Gwidth = (uint)(Gsiz[1]);
mwSize NGI, len;
NGI = jkt_numel(data_IN_GI);
// Compute tile length
len = jkt_numel(data_IN_binstartidx);
len = Gwidth * Gheight / len;
len = sqrt(len);
// Output
mxArray *data_OUT_G = plhs[0] =
jkt_new( Gheight, Gwidth, mxSINGLE_CLASS, 0 );
TRY( jkt_mem((void **)&G, data_OUT_G) );
// Error checking
if( jkt_complex( data_IN_xi ) ||
jkt_complex( data_IN_yi ) ||
jkt_complex( data_IN_GI ) ||
jkt_complex( data_IN_KBlut) ) {
return err( "Input data must be real, non-complex\n" );
}
if( (Gheight & (Gheight-1) != 0) ||
(Gwidth & (Gwidth-1) != 0) ) {
return err( "G must have dimensions of power of two\n" );
}
if( jkt_numel(data_IN_KBlut) !=
((hostKBwidth+1)*20+1)*((hostKBwidth+1)*20+1) ) {
return err( "KBlut size does not match KBwidth\n" );
}
if( jkt_numel(data_IN_xi) != jkt_numel(data_IN_yi) ||
jkt_numel(data_IN_xi) != jkt_numel(data_IN_GI) ) {
return err( "Size of xi, yi and/or GI is not the same\n" );
}
if( jkt_numel(data_IN_binstartidx) != jkt_numel(data_IN_binlength) ) {
return err( "Size of binstartidx and binlength is not the same\n" );
}
// Call CUDA
gridding_irreg2reg_bin( xi, yi,
GI, NGI,
KBlut, hostKBwidth,
G, Gwidth, Gheight,
binidx, binstartidx, binlength, len );
return errNone;
}
|
38244969c08cd174e388611d91d56c238af63aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <iostream>
#include <stdexcept>
#include <hip/hip_runtime.h>
#include "async_cuda.h"
#include "async_timer.h"
#define CUDA_CALL(X) \
{ hipError_t err = X; \
  if (err != hipSuccess) { throw std::runtime_error(hipGetErrorString(err)); } \
  hipError_t err_last = hipGetLastError(); \
  if (err_last != hipSuccess) { throw std::runtime_error(hipGetErrorString(err_last)); } \
}
#define CUDADRIVER_CALL(func) \
{ hipError_t err; \
err = func; \
if (hipSuccess != err) { \
char buf[100] = {0,}; \
snprintf(buf, 100, "CUDA runtime API error: %d", err); \
throw std::runtime_error(buf); \
} \
}
namespace sys {
namespace cuda {
static hipStream_t stream_main = 0, stream_workload = 0;
static hipEvent_t event = 0;
static bool initialized = false;
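// Two non-blocking streams are kept per process: stream_main carries the benchmark's
// own transfers, stream_workload carries the synthetic background load, and `event`
// is recorded on the workload stream so is_device_idle() can poll whether that load
// has drained.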
void init_contexts()
{
CUDA_CALL(hipStreamCreateWithFlags(&stream_main, hipStreamNonBlocking))
CUDA_CALL(hipStreamCreateWithFlags(&stream_workload, hipStreamNonBlocking))
CUDA_CALL(hipEventCreateWithFlags(&event, hipEventDisableTiming));
}
void sync_contexts()
{
CUDA_CALL(hipStreamSynchronize(stream_main));
CUDA_CALL(hipStreamSynchronize(stream_workload));
}
size_t get_num_of_devices() {
int n;
CUDA_CALL(hipGetDeviceCount(&n));
return n;
}
void set_current_device(unsigned long n) {
assert(!initialized);
std::cout << "GPU device set: cuda_id=" << n << std::endl;
CUDA_CALL(hipSetDevice(n));
init_contexts();
initialized = true;
}
void set_current(const std::string &pci_id)
{
assert(!initialized);
hipDevice_t dev;
char devname[256];
CUDADRIVER_CALL(hipInit(0));
CUDADRIVER_CALL(hipDeviceGetByPCIBusId(&dev, pci_id.c_str()));
CUDADRIVER_CALL(hipDeviceGetName(devname, 256, dev));
std::cout << "GPU device set: pci_id=" << pci_id << ", name=" << devname << " (with hwloc)" << std::endl;
initialized = true;
}
int get_current_device_hash() {
int device_id;
char ptr[1024];
size_t len = 1024;
memset(ptr, 0, len);
CUDA_CALL(hipGetDevice(&device_id));
CUDA_CALL(hipDeviceGetPCIBusId(ptr, len, device_id));
int hash = 0;
for (size_t i = 0; i < len / 4; i += 4) {
if (!ptr[i*4])
break;
int32_t *iptr = (int32_t *)ptr + i;
int32_t x = *iptr;
hash ^= x;
}
return hash;
}
bool is_device_idle()
{
if (event) {
CUDA_CALL(hipEventRecord(event, stream_workload));
hipError_t ret = hipEventQuery(event);
if (ret != hipErrorNotReady && ret != hipSuccess) {
// error case: throw exception
CUDA_CALL(ret);
}
if (ret == hipErrorNotReady) {
// stream has some load currently, not idle
return false;
}
}
return true;
}
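// Hypothetical helper, not part of the original source: spin until the
// workload stream has drained, reusing the event-query check above. A real
// caller would normally do useful host-side work inside the loop instead of
// busy-waiting.
static inline void wait_until_idle()
{
    while (!is_device_idle()) {
        /* make host-side progress here */
    }
}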
template <int SIZE>
__global__ void workload(int ncycles, int CALIBRATION_CONST) {
__shared__ double a[SIZE][SIZE], b[SIZE][SIZE], c[SIZE][SIZE];
while (ncycles--) {
for (int N = 0; N < CALIBRATION_CONST; N++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
for (int k = 0; k < SIZE; k++) {
c[i][j] += a[i][k] * b[k][j] + N * N * ncycles;
}
}
}
}
}
}
void submit_workload(int ncycles, int calibration_const)
{
constexpr int array_dim = 8;
hipLaunchKernelGGL(( workload<array_dim>), dim3(1), dim3(1), 0, stream_workload, ncycles, calibration_const);
}
int workload_calibration() {
// Workload execution time calibration procedure. Tries to tune the number of
// cycles so that workload execution+sync time is about target_exec_time_in_usecs (200 usec)
static int cuda_workload_calibration = -1;
if (cuda_workload_calibration != -1)
return cuda_workload_calibration;
cuda_workload_calibration = 1;
const int workload_tune_maxiter = 23;
const long target_exec_time_in_usecs = 200L;
const long good_enough_calibration = (long)(0.95 * target_exec_time_in_usecs);
for (int i = 0; i < workload_tune_maxiter; i++) {
timer t;
sys::cuda::submit_workload(1, cuda_workload_calibration);
sys::cuda::sync_contexts();
long execution_time_in_usecs = (long)t.stop();
// Skip the first 13 time estimates: they often include one-off GPU API
// initialization costs
std::cout << ">> CUDA: execution_time_in_usecs=" << execution_time_in_usecs << " cuda_workload_calibration=" << cuda_workload_calibration << std::endl;
if (i < 13)
continue;
if (execution_time_in_usecs == 0)
break;
if (execution_time_in_usecs < good_enough_calibration) {
auto c = int(target_exec_time_in_usecs / execution_time_in_usecs);
if (c == 2) {
cuda_workload_calibration *= 1.5;
} else if (c > 1000) {
continue;
} else if (c > 2) {
cuda_workload_calibration *= (c - 1);
} else {
auto _5percent = (int)(cuda_workload_calibration * 0.05);
cuda_workload_calibration += (_5percent ? _5percent : 1);
}
} else {
break;
}
}
if (cuda_workload_calibration < 2 || cuda_workload_calibration > 1000) {
std::cout << ">> cuda_workload_calibration=" << cuda_workload_calibration << std::endl;
throw std::runtime_error("cuda workload calibration failed");
}
return cuda_workload_calibration;
//std::cout << ">> CUDA: cuda_workload_calibration = " << cuda_workload_calibration << std::endl;
}
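// Hypothetical usage sketch, not part of the original source: once the
// calibration above has converged, one calibrated cycle keeps the workload
// stream busy for roughly target_exec_time_in_usecs (200 usec), so a caller
// could occupy the device for an approximate duration like this:
static inline void keep_device_busy_for(long usecs)
{
    const int calibration = workload_calibration();
    const int ncycles = (int)((usecs + 199) / 200); // round up to whole calibrated cycles
    submit_workload(ncycles, calibration);          // asynchronous on stream_workload
    // the caller later waits with sync_contexts() or polls is_device_idle()
}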
void host_alloc(char*& ptr, size_t size) {
CUDA_CALL(hipHostMalloc(&ptr, size, hipHostMallocPortable));
}
void register_mem(char* ptr, size_t size) {
CUDA_CALL(hipHostRegister(ptr, size, hipHostRegisterPortable));
}
void unregister_mem(char* ptr) {
CUDA_CALL(hipHostUnregister(ptr));
}
void device_alloc(char*& ptr, size_t size) {
CUDA_CALL(hipMalloc(&ptr, size));
CUDA_CALL(hipMemset(ptr, 0, size));
}
void host_free(char *ptr) {
CUDA_CALL(hipHostFree(ptr));
}
void device_free(char *ptr) {
if (ptr) {
CUDA_CALL(hipFree(ptr));
}
}
void d2h_transfer(char *to, char *from, size_t size, transfer_t type)
{
CUDA_CALL(hipMemcpyAsync(to, from, size, hipMemcpyDeviceToHost,
type == transfer_t::MAIN ? stream_main : stream_workload));
if (type == transfer_t::MAIN) {
CUDA_CALL(hipStreamSynchronize(stream_main))
}
}
void h2d_transfer(char *to, char *from, size_t size, transfer_t type)
{
CUDA_CALL(hipMemcpyAsync(to, from, size, hipMemcpyHostToDevice,
type == transfer_t::MAIN ? stream_main : stream_workload));
if (type == transfer_t::MAIN) {
CUDA_CALL(hipStreamSynchronize(stream_main))
}
}
}
}
| 38244969c08cd174e388611d91d56c238af63aa4.cu | #include <assert.h>
#include <iostream>
#include <stdexcept>
#include <cuda.h>
#include "async_cuda.h"
#include "async_timer.h"
#define CUDA_CALL(X) { cudaError_t err = X; if (err != cudaSuccess) { throw std::runtime_error(cudaGetErrorString(err)); } cudaError_t err_last = cudaGetLastError(); if (err_last != cudaSuccess) { throw std::runtime_error(cudaGetErrorString(err_last)); } }
#define CUDADRIVER_CALL(func) \
{ CUresult err; \
err = func; \
if (CUDA_SUCCESS != err) { \
char buf[100] = {0,}; \
snprintf(buf, 100, "CUDA runtime API error: %d", err); \
throw std::runtime_error(buf); \
} \
}
namespace sys {
namespace cuda {
static cudaStream_t stream_main = 0, stream_workload = 0;
static cudaEvent_t event = 0;
static bool initialized = false;
void init_contexts()
{
CUDA_CALL(cudaStreamCreateWithFlags(&stream_main, cudaStreamNonBlocking))
CUDA_CALL(cudaStreamCreateWithFlags(&stream_workload, cudaStreamNonBlocking))
CUDA_CALL(cudaEventCreateWithFlags(&event, cudaEventDisableTiming));
}
void sync_contexts()
{
CUDA_CALL(cudaStreamSynchronize(stream_main));
CUDA_CALL(cudaStreamSynchronize(stream_workload));
}
size_t get_num_of_devices() {
int n;
CUDA_CALL(cudaGetDeviceCount(&n));
return n;
}
void set_current_device(unsigned long n) {
assert(!initialized);
std::cout << "GPU device set: cuda_id=" << n << std::endl;
CUDA_CALL(cudaSetDevice(n));
init_contexts();
initialized = true;
}
void set_current(const std::string &pci_id)
{
assert(!initialized);
CUdevice dev;
char devname[256];
CUDADRIVER_CALL(cuInit(0));
CUDADRIVER_CALL(cuDeviceGetByPCIBusId(&dev, pci_id.c_str()));
CUDADRIVER_CALL(cuDeviceGetName(devname, 256, dev));
std::cout << "GPU device set: pci_id=" << pci_id << ", name=" << devname << " (with hwloc)" << std::endl;
initialized = true;
}
int get_current_device_hash() {
int device_id;
char ptr[1024];
size_t len = 1024;
memset(ptr, 0, len);
CUDA_CALL(cudaGetDevice(&device_id));
CUDA_CALL(cudaDeviceGetPCIBusId(ptr, len, device_id));
int hash = 0;
// hash the PCI bus id string one 32-bit word at a time
for (size_t i = 0; i < len / 4; i++) {
if (!ptr[i*4])
break;
int32_t *iptr = (int32_t *)ptr + i;
int32_t x = *iptr;
hash ^= x;
}
return hash;
}
bool is_device_idle()
{
if (event) {
CUDA_CALL(cudaEventRecord(event, stream_workload));
cudaError_t ret = cudaEventQuery(event);
if (ret != cudaErrorNotReady && ret != cudaSuccess) {
// error case: throw exception
CUDA_CALL(ret);
}
if (ret == cudaErrorNotReady) {
// stream has some load currently, not idle
return false;
}
}
return true;
}
template <int SIZE>
__global__ void workload(int ncycles, int CALIBRATION_CONST) {
__shared__ double a[SIZE][SIZE], b[SIZE][SIZE], c[SIZE][SIZE];
while (ncycles--) {
for (int N = 0; N < CALIBRATION_CONST; N++) {
for (int i = 0; i < SIZE; i++) {
for (int j = 0; j < SIZE; j++) {
for (int k = 0; k < SIZE; k++) {
c[i][j] += a[i][k] * b[k][j] + N * N * ncycles;
}
}
}
}
}
}
void submit_workload(int ncycles, int calibration_const)
{
constexpr int array_dim = 8;
workload<array_dim><<<1, 1, 0, stream_workload>>>(ncycles, calibration_const);
}
int workload_calibration() {
// Workload execution time calibration procedure. Tries to tune the number of
// cycles so that workload execution+sync time is about target_exec_time_in_usecs (200 usec)
static int cuda_workload_calibration = -1;
if (cuda_workload_calibration != -1)
return cuda_workload_calibration;
cuda_workload_calibration = 1;
const int workload_tune_maxiter = 23;
const long target_exec_time_in_usecs = 200L;
const long good_enough_calibration = (long)(0.95 * target_exec_time_in_usecs);
for (int i = 0; i < workload_tune_maxiter; i++) {
timer t;
sys::cuda::submit_workload(1, cuda_workload_calibration);
sys::cuda::sync_contexts();
long execution_time_in_usecs = (long)t.stop();
// Skip the first 13 time estimates: they often include one-off GPU API
// initialization costs
std::cout << ">> CUDA: execution_time_in_usecs=" << execution_time_in_usecs << " cuda_workload_calibration=" << cuda_workload_calibration << std::endl;
if (i < 13)
continue;
if (execution_time_in_usecs == 0)
break;
if (execution_time_in_usecs < good_enough_calibration) {
auto c = int(target_exec_time_in_usecs / execution_time_in_usecs);
if (c == 2) {
cuda_workload_calibration *= 1.5;
} else if (c > 1000) {
continue;
} else if (c > 2) {
cuda_workload_calibration *= (c - 1);
} else {
auto _5percent = (int)(cuda_workload_calibration * 0.05);
cuda_workload_calibration += (_5percent ? _5percent : 1);
}
} else {
break;
}
}
if (cuda_workload_calibration < 2 || cuda_workload_calibration > 1000) {
std::cout << ">> cuda_workload_calibration=" << cuda_workload_calibration << std::endl;
throw std::runtime_error("cuda workload calibration failed");
}
return cuda_workload_calibration;
//std::cout << ">> CUDA: cuda_workload_calibration = " << cuda_workload_calibration << std::endl;
}
void host_alloc(char*& ptr, size_t size) {
CUDA_CALL(cudaHostAlloc(&ptr, size, cudaHostAllocPortable));
}
void register_mem(char* ptr, size_t size) {
CUDA_CALL(cudaHostRegister(ptr, size, cudaHostRegisterPortable));
}
void unregister_mem(char* ptr) {
CUDA_CALL(cudaHostUnregister(ptr));
}
void device_alloc(char*& ptr, size_t size) {
CUDA_CALL(cudaMalloc(&ptr, size));
CUDA_CALL(cudaMemset(ptr, 0, size));
}
void host_free(char *ptr) {
CUDA_CALL(cudaFreeHost(ptr));
}
void device_free(char *ptr) {
if (ptr) {
CUDA_CALL(cudaFree(ptr));
}
}
void d2h_transfer(char *to, char *from, size_t size, transfer_t type)
{
CUDA_CALL(cudaMemcpyAsync(to, from, size, cudaMemcpyDeviceToHost,
type == transfer_t::MAIN ? stream_main : stream_workload));
if (type == transfer_t::MAIN) {
CUDA_CALL(cudaStreamSynchronize(stream_main))
}
}
void h2d_transfer(char *to, char *from, size_t size, transfer_t type)
{
CUDA_CALL(cudaMemcpyAsync(to, from, size, cudaMemcpyHostToDevice,
type == transfer_t::MAIN ? stream_main : stream_workload));
if (type == transfer_t::MAIN) {
CUDA_CALL(cudaStreamSynchronize(stream_main))
}
}
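// Hypothetical convenience wrapper, not part of the original source: a
// host<->device round trip on the MAIN stream. transfer_t::MAIN copies above
// synchronize stream_main before returning, so no extra wait is needed here;
// a transfer_t::WORKLOAD copy would instead require sync_contexts() later.
static inline void d2h2d_roundtrip(char *host_buf, char *dev_buf, size_t size)
{
    d2h_transfer(host_buf, dev_buf, size, transfer_t::MAIN); // blocking
    h2d_transfer(dev_buf, host_buf, size, transfer_t::MAIN); // blocking
}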
}
}
|
7a7ab7dc9086c75ecd18a9f0d30bf6e3bd2753a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "melodycontour.h"
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <cutil_inline.h>
int isreg(char *filename)
{
char *temp;
temp=strrchr(filename,(int)'.');
if(temp!=NULL) return 1;
else return 0;
}
int iswav(char *filename)
{
char *temp;
temp=strstr(filename,".wav");
if(temp!=NULL) return 1;
else return 0;
}
int main(int argc, char **argv)
{
if(argc!=3)
{
fprintf(stderr,"parameter should be:./melodycontour wavfile kind!\n");
return -1;
}
//initialize GPU device
hipSetDevice( 1 );
hipDeviceSynchronize();
if(isreg(argv[1])&&iswav(argv[1]))
{
melodycontour(argv[1],atoi(argv[2]));
}
else
{
DIR *dp;
struct dirent *entry;
struct stat statbuf;
if((dp=opendir(argv[1]))==NULL)
{
fprintf(stderr,"cannot open directory: %s\n",argv[1]);
return -1; // do not fall through to chdir/readdir on a NULL handle
}
chdir(argv[1]);
while((entry=readdir(dp))!=NULL)
{
lstat(entry->d_name,&statbuf);
if(S_ISREG(statbuf.st_mode)&&iswav(entry->d_name))
{
printf("calculate %s file\n",entry->d_name);
melodycontour(entry->d_name,atoi(argv[2]));
printf("\n\n");
}
}
}
}
| 7a7ab7dc9086c75ecd18a9f0d30bf6e3bd2753a4.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "melodycontour.h"
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>
#include <cutil_inline.h>
int isreg(char *filename)
{
char *temp;
temp=strrchr(filename,(int)'.');
if(temp!=NULL) return 1;
else return 0;
}
int iswav(char *filename)
{
char *temp;
temp=strstr(filename,".wav");
if(temp!=NULL) return 1;
else return 0;
}
int main(int argc, char **argv)
{
if(argc!=3)
{
fprintf(stderr,"parameter should be:./melodycontour wavfile kind!\n");
return -1;
}
//initialize GPU device
cudaSetDevice( 1 );
cudaThreadSynchronize();
if(isreg(argv[1])&&iswav(argv[1]))
{
melodycontour(argv[1],atoi(argv[2]));
}
else
{
DIR *dp;
struct dirent *entry;
struct stat statbuf;
if((dp=opendir(argv[1]))==NULL)
{
fprintf(stderr,"cannot open directory: %s\n",argv[1]);
return -1; // do not fall through to chdir/readdir on a NULL handle
}
chdir(argv[1]);
while((entry=readdir(dp))!=NULL)
{
lstat(entry->d_name,&statbuf);
if(S_ISREG(statbuf.st_mode)&&iswav(entry->d_name))
{
printf("calculate %s file\n",entry->d_name);
melodycontour(entry->d_name,atoi(argv[2]));
printf("\n\n");
}
}
}
}
|
d08a889e376f8f401a01789d3f998f477eba18f3.hip | // !!! This is a file automatically generated by hipify!!!
%%cu
#include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
using namespace std;
__global__ void max(int* input,int count)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
int fst = tid * step_size * 2;
int snd = fst + step_size;
if(fst==count-1)
{
snd = fst;
fst = 0;
}
if(input[fst]<input[snd])
input[fst] = input[snd];
}
// all threads must see this round's results before the next round begins
__syncthreads();
step_size *= 2;
number_of_threads /= 2;
}
}
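// Worked example of the reduction above (illustrative, count = 8, 4 threads):
// round 1, step 1: t0:(0,1) t1:(2,3) t2:(4,5) t3:(6,7) -> maxima land in 0,2,4,6
// round 2, step 2: t0:(0,2) t1:(4,6)                   -> maxima land in 0,4
// round 3, step 4: t0:(0,4)                            -> overall maximum ends up in input[0]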
int main()
{
int count = 15;
srand(time(NULL));
const int size = count * sizeof(int);
int h[count];
for(int i=0;i<count;i++)
{
h[i] = rand()%50;
cout<<h[i]<<" ";
}
int* d;
hipMalloc(&d, size);
hipMemcpy(d, h, size, hipMemcpyHostToDevice);
if(count%2==0)
{
hipLaunchKernelGGL(( max), dim3(1),dim3(count/2), 0, 0, d,count);
}else
{
hipLaunchKernelGGL(( max), dim3(1),dim3((count/2)+1), 0, 0, d,count);
}
int result;
hipMemcpy(&result,d, sizeof(int), hipMemcpyDeviceToHost);
cout << "Max is " << result << endl;
hipFree(d);
return 0;
} | d08a889e376f8f401a01789d3f998f477eba18f3.cu | %%cu
#include "cuda.h"
#include <iostream>
#include <numeric>
using namespace std;
__global__ void max(int* input,int count)
{
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while (number_of_threads > 0)
{
if (tid < number_of_threads) // still alive?
{
int fst = tid * step_size * 2;
int snd = fst + step_size;
if(fst==count-1)
{
snd = fst;
fst = 0;
}
if(input[fst]<input[snd])
input[fst] = input[snd];
}
// all threads must see this round's results before the next round begins
__syncthreads();
step_size *= 2;
number_of_threads /= 2;
}
}
int main()
{
int count = 15;
srand(time(NULL));
const int size = count * sizeof(int);
int h[count];
for(int i=0;i<count;i++)
{
h[i] = rand()%50;
cout<<h[i]<<" ";
}
int* d;
cudaMalloc(&d, size);
cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
if(count%2==0)
{
max<<<1,count/2>>>(d,count);
}else
{
max<<<1,(count/2)+1>>>(d,count);
}
int result;
cudaMemcpy(&result,d, sizeof(int), cudaMemcpyDeviceToHost);
cout << "Max is " << result << endl;
cudaFree(d);
return 0;
} |
ca8eb2077f037920da8f5cd2730f7666e6ae4aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <THH/THH.h>
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
HIPGuardMasqueradingAsCUDA device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// hipMemcpyAsync on the default stream.
HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
// Perform the copy
AT_CUDA_CHECK(hipMemcpyAsync(
iter.data_ptr(0),
iter.data_ptr(1),
numel * iter.element_size(0),
hipMemcpyDeviceToDevice,
copy_stream));
} else {
// this is done intentionally done after build because copy has a "promotion"
// rule that always "promote" to target dtype.
iter.promote_common_dtype();
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; });
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
}
AT_CUDA_CHECK(hipGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use hipMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
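// Decision summary for copy_requires_temporaries above (paraphrasing the logic):
//   same device                               -> false (copy kernel handles it)
//   same dtype and contiguous                 -> false (plain hipMemcpyAsync)
//   GPU <-> GPU with P2P access               -> false (cross-device copy kernel)
//   GPU <-> GPU without P2P, or a CPU <-> GPU
//   copy that is non-contiguous or casts      -> true  (staged through temporaries)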
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
} // namespace native
} // namespace at
| ca8eb2077f037920da8f5cd2730f7666e6ae4aa4.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <THC/THC.h>
namespace at {
namespace native {
using namespace at::cuda;
// device-to-device copy, does type conversion
void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
CUDAGuard device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// cudaMemcpyAsync on the default stream.
CUDAStream copy_stream = getCurrentCUDAStream(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentCUDAStream(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
// Perform the copy
AT_CUDA_CHECK(cudaMemcpyAsync(
iter.data_ptr(0),
iter.data_ptr(1),
numel * iter.element_size(0),
cudaMemcpyDeviceToDevice,
copy_stream));
} else {
// this is done intentionally done after build because copy has a "promotion"
// rule that always "promote" to target dtype.
iter.promote_common_dtype();
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(0), "copy_", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t x) { return x; });
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentCUDAStream(dst_device.index()));
}
AT_CUDA_CHECK(cudaGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use cudaMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
CUDAStream stream = getCurrentCUDAStream();
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
} // namespace native
} // namespace at
|
88924a0843215d2f48816fad594ff2304be578e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "join_common_utils.cuh"
#include "join_common_utils.hpp"
#include "mixed_join_kernels.cuh"
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/join.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/scan.h>
#include <optional>
#include <utility>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
join_kind join_type,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const& output_size_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(left_conditional.num_rows() == left_equality.num_rows(),
"The left conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS(right_conditional.num_rows() == right_equality.num_rows(),
"The right conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS((join_type != join_kind::LEFT_SEMI_JOIN) && (join_type != join_kind::LEFT_ANTI_JOIN),
"Left semi and anti joins should use mixed_join_semi.");
auto const right_num_rows{right_conditional.num_rows()};
auto const left_num_rows{left_conditional.num_rows()};
auto const swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows);
// The "outer" table is the larger of the two tables. The kernels are
// launched with one thread per row of the outer table, which also means that
// it is the probe table for the hash
auto const outer_num_rows{swap_tables ? right_num_rows : left_num_rows};
// We can immediately filter out cases where the right table is empty. In
// some cases, we return all the rows of the left table with a corresponding
// null index for the right table; in others, we return an empty output.
if (right_num_rows == 0) {
switch (join_type) {
// Left and full joins all return all the row indices from
// left with a corresponding NULL from the right.
case join_kind::LEFT_JOIN:
case join_kind::FULL_JOIN: return get_trivial_left_join_indices(left_conditional, stream);
// Inner joins return empty output because no matches can exist.
case join_kind::INNER_JOIN:
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
default: CUDF_FAIL("Invalid join kind."); break;
}
} else if (left_num_rows == 0) {
switch (join_type) {
// Left and inner joins all return empty sets.
case join_kind::LEFT_JOIN:
case join_kind::INNER_JOIN:
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
// Full joins need to return the trivial complement.
case join_kind::FULL_JOIN: {
auto ret_flipped = get_trivial_left_join_indices(right_conditional, stream);
return std::pair(std::move(ret_flipped.second), std::move(ret_flipped.first));
}
default: CUDF_FAIL("Invalid join kind."); break;
}
}
// If evaluating the expression may produce null outputs we create a nullable
// output column and follow the null-supporting expression evaluation code
// path.
auto const has_nulls =
cudf::has_nulls(left_equality) || cudf::has_nulls(right_equality) ||
binary_predicate.may_evaluate_null(left_conditional, right_conditional, stream);
auto const parser = ast::detail::expression_parser{
binary_predicate, left_conditional, right_conditional, has_nulls, stream, mr};
CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8,
"The expression must produce a boolean output.");
// TODO: The non-conditional join impls start with a dictionary matching,
// figure out what that is and what it's needed for (and if conditional joins
// need to do the same).
auto& probe = swap_tables ? right_equality : left_equality;
auto& build = swap_tables ? left_equality : right_equality;
auto probe_view = table_device_view::create(probe, stream);
auto build_view = table_device_view::create(build, stream);
row_equality equality_probe{
cudf::nullate::DYNAMIC{has_nulls}, *probe_view, *build_view, compare_nulls};
// Don't use multimap_type because we want a CG size of 1.
mixed_multimap_type hash_table{
compute_hash_table_size(build.num_rows()),
cuco::sentinel::empty_key{std::numeric_limits<hash_value_type>::max()},
cuco::sentinel::empty_value{cudf::detail::JoinNoneValue},
stream.value(),
detail::hash_table_allocator_type{default_allocator<char>{}, stream}};
// TODO: To add support for nested columns we will need to flatten in many
// places. However, this probably isn't worth adding any time soon since we
// won't be able to support AST conditions for those types anyway.
auto const row_bitmask = cudf::detail::bitmask_and(build, stream).first;
build_join_hash_table(
build, hash_table, compare_nulls, static_cast<bitmask_type const*>(row_bitmask.data()), stream);
auto hash_table_view = hash_table.get_device_view();
auto left_conditional_view = table_device_view::create(left_conditional, stream);
auto right_conditional_view = table_device_view::create(right_conditional, stream);
// For inner joins we support optimizing the join by launching one thread for
// whichever table is larger rather than always using the left table.
detail::grid_1d const config(outer_num_rows, DEFAULT_JOIN_BLOCK_SIZE);
auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block;
join_kind const kernel_join_type =
join_type == join_kind::FULL_JOIN ? join_kind::LEFT_JOIN : join_type;
// If the join size data was not provided as an input, compute it here.
std::size_t join_size;
// Using an optional because we only need to allocate a new vector if one was
// not passed as input, and rmm::device_uvector is not default constructible
std::optional<rmm::device_uvector<size_type>> matches_per_row{};
device_span<size_type const> matches_per_row_span{};
if (output_size_data.has_value()) {
join_size = output_size_data->first;
matches_per_row_span = output_size_data->second;
} else {
// Allocate storage for the counter used to get the size of the join output
rmm::device_scalar<std::size_t> size(0, stream, mr);
matches_per_row =
rmm::device_uvector<size_type>{static_cast<std::size_t>(outer_num_rows), stream, mr};
// Note that the view goes out of scope after this else statement, but the
// data owned by matches_per_row stays alive so the data pointer is valid.
auto mutable_matches_per_row_span = cudf::device_span<size_type>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
matches_per_row_span = cudf::device_span<size_type const>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
if (has_nulls) {
hipLaunchKernelGGL(( compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, true>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
mutable_matches_per_row_span);
} else {
hipLaunchKernelGGL(( compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, false>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
mutable_matches_per_row_span);
}
join_size = size.value(stream);
}
// The initial early exit clauses guarantee that we will not reach this point
// unless both the left and right tables are non-empty. Under that
// constraint, neither left nor full joins can return an empty result since
// at minimum we are guaranteed null matches for all non-matching rows. In
// all other cases (inner, left semi, and left anti joins) if we reach this
// point we can safely return an empty result.
if (join_size == 0) {
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
}
// Given the number of matches per row, we need to compute the offsets for insertion.
auto join_result_offsets =
rmm::device_uvector<size_type>{static_cast<std::size_t>(outer_num_rows), stream, mr};
thrust::exclusive_scan(rmm::exec_policy{stream},
matches_per_row_span.begin(),
matches_per_row_span.end(),
join_result_offsets.begin());
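// Illustrative example: matches_per_row = {2, 0, 1, 3} produces
// join_result_offsets = {0, 2, 2, 3}, so row i writes its matches starting at
// offset join_result_offsets[i] and the total join_size is 2+0+1+3 = 6.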
auto left_indices = std::make_unique<rmm::device_uvector<size_type>>(join_size, stream, mr);
auto right_indices = std::make_unique<rmm::device_uvector<size_type>>(join_size, stream, mr);
auto const& join_output_l = left_indices->data();
auto const& join_output_r = right_indices->data();
if (has_nulls) {
hipLaunchKernelGGL(( mixed_join<DEFAULT_JOIN_BLOCK_SIZE, true>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
join_output_l,
join_output_r,
parser.device_expression_data,
join_result_offsets.data(),
swap_tables);
} else {
hipLaunchKernelGGL(( mixed_join<DEFAULT_JOIN_BLOCK_SIZE, false>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
join_output_l,
join_output_r,
parser.device_expression_data,
join_result_offsets.data(),
swap_tables);
}
auto join_indices = std::pair(std::move(left_indices), std::move(right_indices));
// For full joins, get the indices in the right table that were not joined to
// by any row in the left table.
if (join_type == join_kind::FULL_JOIN) {
auto complement_indices = detail::get_left_join_indices_complement(
join_indices.second, left_num_rows, right_num_rows, stream, mr);
join_indices = detail::concatenate_vector_pairs(join_indices, complement_indices, stream);
}
return join_indices;
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>>
compute_mixed_join_output_size(table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
join_kind join_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Until we add logic to handle the number of non-matches in the right table,
// full joins are not supported in this function. Note that this does not
// prevent actually performing full joins since we do that by calculating the
// left join and then concatenating the complementary right indices.
CUDF_EXPECTS(join_type != join_kind::FULL_JOIN,
"Size estimation is not available for full joins.");
CUDF_EXPECTS(
(join_type != join_kind::LEFT_SEMI_JOIN) && (join_type != join_kind::LEFT_ANTI_JOIN),
"Left semi and anti join size estimation should use compute_mixed_join_output_size_semi.");
CUDF_EXPECTS(left_conditional.num_rows() == left_equality.num_rows(),
"The left conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS(right_conditional.num_rows() == right_equality.num_rows(),
"The right conditional and equality tables must have the same number of rows.");
auto const right_num_rows{right_conditional.num_rows()};
auto const left_num_rows{left_conditional.num_rows()};
auto const swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows);
// The "outer" table is the larger of the two tables. The kernels are
// launched with one thread per row of the outer table, which also means that
// it is the probe table for the hash
auto const outer_num_rows{swap_tables ? right_num_rows : left_num_rows};
auto matches_per_row = std::make_unique<rmm::device_uvector<size_type>>(
static_cast<std::size_t>(outer_num_rows), stream, mr);
auto matches_per_row_span = cudf::device_span<size_type>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
// We can immediately filter out cases where one table is empty. In
// some cases, we return all the rows of the other table with a corresponding
// null index for the empty table; in others, we return an empty output.
if (right_num_rows == 0) {
switch (join_type) {
// Left, left anti, and full all return all the row indices from left
// with a corresponding NULL from the right.
case join_kind::LEFT_JOIN:
case join_kind::FULL_JOIN: {
thrust::fill(matches_per_row->begin(), matches_per_row->end(), 1);
return {left_num_rows, std::move(matches_per_row)};
}
// Inner and left semi joins return empty output because no matches can exist.
case join_kind::INNER_JOIN: {
thrust::fill(matches_per_row->begin(), matches_per_row->end(), 0);
return {0, std::move(matches_per_row)};
}
default: CUDF_FAIL("Invalid join kind."); break;
}
} else if (left_num_rows == 0) {
switch (join_type) {
// Left, left anti, left semi, and inner joins all return empty sets.
case join_kind::LEFT_JOIN:
case join_kind::INNER_JOIN: {
thrust::fill(matches_per_row->begin(), matches_per_row->end(), 0);
return {0, std::move(matches_per_row)};
}
// Full joins need to return the trivial complement.
case join_kind::FULL_JOIN: {
thrust::fill(matches_per_row->begin(), matches_per_row->end(), 1);
return {right_num_rows, std::move(matches_per_row)};
}
default: CUDF_FAIL("Invalid join kind."); break;
}
}
// If evaluating the expression may produce null outputs we create a nullable
// output column and follow the null-supporting expression evaluation code
// path.
auto const has_nulls =
cudf::has_nulls(left_equality) || cudf::has_nulls(right_equality) ||
binary_predicate.may_evaluate_null(left_conditional, right_conditional, stream);
auto const parser = ast::detail::expression_parser{
binary_predicate, left_conditional, right_conditional, has_nulls, stream, mr};
CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8,
"The expression must produce a boolean output.");
// TODO: The non-conditional join impls start with a dictionary matching,
// figure out what that is and what it's needed for (and if conditional joins
// need to do the same).
auto& probe = swap_tables ? right_equality : left_equality;
auto& build = swap_tables ? left_equality : right_equality;
auto probe_view = table_device_view::create(probe, stream);
auto build_view = table_device_view::create(build, stream);
row_equality equality_probe{
cudf::nullate::DYNAMIC{has_nulls}, *probe_view, *build_view, compare_nulls};
// Don't use multimap_type because we want a CG size of 1.
mixed_multimap_type hash_table{
compute_hash_table_size(build.num_rows()),
cuco::sentinel::empty_key{std::numeric_limits<hash_value_type>::max()},
cuco::sentinel::empty_value{cudf::detail::JoinNoneValue},
stream.value(),
detail::hash_table_allocator_type{default_allocator<char>{}, stream}};
// TODO: To add support for nested columns we will need to flatten in many
// places. However, this probably isn't worth adding any time soon since we
// won't be able to support AST conditions for those types anyway.
auto const row_bitmask = cudf::detail::bitmask_and(build, stream).first;
build_join_hash_table(
build, hash_table, compare_nulls, static_cast<bitmask_type const*>(row_bitmask.data()), stream);
auto hash_table_view = hash_table.get_device_view();
auto left_conditional_view = table_device_view::create(left_conditional, stream);
auto right_conditional_view = table_device_view::create(right_conditional, stream);
// For inner joins we support optimizing the join by launching one thread for
// whichever table is larger rather than always using the left table.
detail::grid_1d const config(outer_num_rows, DEFAULT_JOIN_BLOCK_SIZE);
auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block;
// Allocate storage for the counter used to get the size of the join output
rmm::device_scalar<std::size_t> size(0, stream, mr);
// Determine number of output rows without actually building the output to simply
// find what the size of the output will be.
if (has_nulls) {
hipLaunchKernelGGL(( compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, true>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
matches_per_row_span);
} else {
hipLaunchKernelGGL(( compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, false>)
, dim3(config.num_blocks), dim3(config.num_threads_per_block), shmem_size_per_block, stream.value(),
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
matches_per_row_span);
}
return {size.value(stream), std::move(matches_per_row)};
}
} // namespace detail
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_inner_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::INNER_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_inner_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::compute_mixed_join_output_size(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::INNER_JOIN,
cudf::default_stream_value,
mr);
}
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_left_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::LEFT_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_left_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::compute_mixed_join_output_size(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::LEFT_JOIN,
cudf::default_stream_value,
mr);
}
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_full_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::FULL_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
} // namespace cudf
| 88924a0843215d2f48816fad594ff2304be578e8.cu | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "join_common_utils.cuh"
#include "join_common_utils.hpp"
#include "mixed_join_kernels.cuh"
#include <cudf/ast/detail/expression_parser.hpp>
#include <cudf/ast/expressions.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/join.hpp>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/fill.h>
#include <thrust/scan.h>
#include <optional>
#include <utility>
namespace cudf {
namespace detail {
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
join_kind join_type,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const& output_size_data,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(left_conditional.num_rows() == left_equality.num_rows(),
"The left conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS(right_conditional.num_rows() == right_equality.num_rows(),
"The right conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS((join_type != join_kind::LEFT_SEMI_JOIN) && (join_type != join_kind::LEFT_ANTI_JOIN),
"Left semi and anti joins should use mixed_join_semi.");
auto const right_num_rows{right_conditional.num_rows()};
auto const left_num_rows{left_conditional.num_rows()};
auto const swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows);
// The "outer" table is the larger of the two tables. The kernels are
// launched with one thread per row of the outer table, which also means that
// it is the probe table for the hash
auto const outer_num_rows{swap_tables ? right_num_rows : left_num_rows};
// We can immediately filter out cases where the right table is empty. In
// some cases, we return all the rows of the left table with a corresponding
// null index for the right table; in others, we return an empty output.
if (right_num_rows == 0) {
switch (join_type) {
// Left and full joins all return all the row indices from
// left with a corresponding NULL from the right.
case join_kind::LEFT_JOIN:
case join_kind::FULL_JOIN: return get_trivial_left_join_indices(left_conditional, stream);
// Inner joins return empty output because no matches can exist.
case join_kind::INNER_JOIN:
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
default: CUDF_FAIL("Invalid join kind."); break;
}
} else if (left_num_rows == 0) {
switch (join_type) {
// Left and inner joins all return empty sets.
case join_kind::LEFT_JOIN:
case join_kind::INNER_JOIN:
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
// Full joins need to return the trivial complement.
case join_kind::FULL_JOIN: {
auto ret_flipped = get_trivial_left_join_indices(right_conditional, stream);
return std::pair(std::move(ret_flipped.second), std::move(ret_flipped.first));
}
default: CUDF_FAIL("Invalid join kind."); break;
}
}
// If evaluating the expression may produce null outputs we create a nullable
// output column and follow the null-supporting expression evaluation code
// path.
auto const has_nulls =
cudf::has_nulls(left_equality) || cudf::has_nulls(right_equality) ||
binary_predicate.may_evaluate_null(left_conditional, right_conditional, stream);
auto const parser = ast::detail::expression_parser{
binary_predicate, left_conditional, right_conditional, has_nulls, stream, mr};
CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8,
"The expression must produce a boolean output.");
// TODO: The non-conditional join impls start with a dictionary matching,
// figure out what that is and what it's needed for (and if conditional joins
// need to do the same).
auto& probe = swap_tables ? right_equality : left_equality;
auto& build = swap_tables ? left_equality : right_equality;
auto probe_view = table_device_view::create(probe, stream);
auto build_view = table_device_view::create(build, stream);
row_equality equality_probe{
cudf::nullate::DYNAMIC{has_nulls}, *probe_view, *build_view, compare_nulls};
// Don't use multimap_type because we want a CG size of 1.
mixed_multimap_type hash_table{
compute_hash_table_size(build.num_rows()),
cuco::sentinel::empty_key{std::numeric_limits<hash_value_type>::max()},
cuco::sentinel::empty_value{cudf::detail::JoinNoneValue},
stream.value(),
detail::hash_table_allocator_type{default_allocator<char>{}, stream}};
// TODO: To add support for nested columns we will need to flatten in many
// places. However, this probably isn't worth adding any time soon since we
// won't be able to support AST conditions for those types anyway.
auto const row_bitmask = cudf::detail::bitmask_and(build, stream).first;
build_join_hash_table(
build, hash_table, compare_nulls, static_cast<bitmask_type const*>(row_bitmask.data()), stream);
auto hash_table_view = hash_table.get_device_view();
auto left_conditional_view = table_device_view::create(left_conditional, stream);
auto right_conditional_view = table_device_view::create(right_conditional, stream);
// For inner joins we support optimizing the join by launching one thread for
// whichever table is larger rather than always using the left table.
detail::grid_1d const config(outer_num_rows, DEFAULT_JOIN_BLOCK_SIZE);
auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block;
join_kind const kernel_join_type =
join_type == join_kind::FULL_JOIN ? join_kind::LEFT_JOIN : join_type;
// If the join size data was not provided as an input, compute it here.
std::size_t join_size;
// Using an optional because we only need to allocate a new vector if one was
// not passed as input, and rmm::device_uvector is not default constructible
std::optional<rmm::device_uvector<size_type>> matches_per_row{};
device_span<size_type const> matches_per_row_span{};
if (output_size_data.has_value()) {
join_size = output_size_data->first;
matches_per_row_span = output_size_data->second;
} else {
// Allocate storage for the counter used to get the size of the join output
rmm::device_scalar<std::size_t> size(0, stream, mr);
matches_per_row =
rmm::device_uvector<size_type>{static_cast<std::size_t>(outer_num_rows), stream, mr};
// Note that the view goes out of scope after this else statement, but the
// data owned by matches_per_row stays alive so the data pointer is valid.
auto mutable_matches_per_row_span = cudf::device_span<size_type>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
matches_per_row_span = cudf::device_span<size_type const>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
if (has_nulls) {
compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, true>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
mutable_matches_per_row_span);
} else {
compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, false>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
mutable_matches_per_row_span);
}
join_size = size.value(stream);
}
// The initial early exit clauses guarantee that we will not reach this point
// unless both the left and right tables are non-empty. Under that
// constraint, neither left nor full joins can return an empty result since
// at minimum we are guaranteed null matches for all non-matching rows. In
// all other cases (inner, left semi, and left anti joins) if we reach this
// point we can safely return an empty result.
if (join_size == 0) {
return std::pair(std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr),
std::make_unique<rmm::device_uvector<size_type>>(0, stream, mr));
}
// Given the number of matches per row, we need to compute the offsets for insertion.
auto join_result_offsets =
rmm::device_uvector<size_type>{static_cast<std::size_t>(outer_num_rows), stream, mr};
thrust::exclusive_scan(rmm::exec_policy{stream},
matches_per_row_span.begin(),
matches_per_row_span.end(),
join_result_offsets.begin());
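// The exclusive scan converts per-row match counts into the offset at which
// each outer row writes its matches into the output index vectors.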
auto left_indices = std::make_unique<rmm::device_uvector<size_type>>(join_size, stream, mr);
auto right_indices = std::make_unique<rmm::device_uvector<size_type>>(join_size, stream, mr);
auto const& join_output_l = left_indices->data();
auto const& join_output_r = right_indices->data();
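// Raw device pointers are handed to the kernel, which scatters matched index
// pairs at the offsets computed above.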
if (has_nulls) {
mixed_join<DEFAULT_JOIN_BLOCK_SIZE, true>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
join_output_l,
join_output_r,
parser.device_expression_data,
join_result_offsets.data(),
swap_tables);
} else {
mixed_join<DEFAULT_JOIN_BLOCK_SIZE, false>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
kernel_join_type,
hash_table_view,
join_output_l,
join_output_r,
parser.device_expression_data,
join_result_offsets.data(),
swap_tables);
}
auto join_indices = std::pair(std::move(left_indices), std::move(right_indices));
// For full joins, get the indices in the right table that were not joined to
// by any row in the left table.
if (join_type == join_kind::FULL_JOIN) {
auto complement_indices = detail::get_left_join_indices_complement(
join_indices.second, left_num_rows, right_num_rows, stream, mr);
join_indices = detail::concatenate_vector_pairs(join_indices, complement_indices, stream);
}
return join_indices;
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>>
compute_mixed_join_output_size(table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
join_kind join_type,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// Until we add logic to handle the number of non-matches in the right table,
// full joins are not supported in this function. Note that this does not
// prevent actually performing full joins since we do that by calculating the
// left join and then concatenating the complementary right indices.
CUDF_EXPECTS(join_type != join_kind::FULL_JOIN,
"Size estimation is not available for full joins.");
CUDF_EXPECTS(
(join_type != join_kind::LEFT_SEMI_JOIN) && (join_type != join_kind::LEFT_ANTI_JOIN),
"Left semi and anti join size estimation should use compute_mixed_join_output_size_semi.");
CUDF_EXPECTS(left_conditional.num_rows() == left_equality.num_rows(),
"The left conditional and equality tables must have the same number of rows.");
CUDF_EXPECTS(right_conditional.num_rows() == right_equality.num_rows(),
"The right conditional and equality tables must have the same number of rows.");
auto const right_num_rows{right_conditional.num_rows()};
auto const left_num_rows{left_conditional.num_rows()};
auto const swap_tables = (join_type == join_kind::INNER_JOIN) && (right_num_rows > left_num_rows);
// The "outer" table is the larger of the two tables. The kernels are
// launched with one thread per row of the outer table, which also means that
// it is the probe table for the hash join.
auto const outer_num_rows{swap_tables ? right_num_rows : left_num_rows};
auto matches_per_row = std::make_unique<rmm::device_uvector<size_type>>(
static_cast<std::size_t>(outer_num_rows), stream, mr);
auto matches_per_row_span = cudf::device_span<size_type>{
matches_per_row->begin(), static_cast<std::size_t>(outer_num_rows)};
// We can immediately filter out cases where one table is empty. In
// some cases, we return all the rows of the other table with a corresponding
// null index for the empty table; in others, we return an empty output.
if (right_num_rows == 0) {
switch (join_type) {
// Left, left anti, and full all return all the row indices from left
// with a corresponding NULL from the right.
case join_kind::LEFT_JOIN:
case join_kind::FULL_JOIN: {
thrust::fill(rmm::exec_policy{stream}, matches_per_row->begin(), matches_per_row->end(), 1);
return {left_num_rows, std::move(matches_per_row)};
}
// Inner and left semi joins return empty output because no matches can exist.
case join_kind::INNER_JOIN: {
thrust::fill(rmm::exec_policy{stream}, matches_per_row->begin(), matches_per_row->end(), 0);
return {0, std::move(matches_per_row)};
}
default: CUDF_FAIL("Invalid join kind."); break;
}
} else if (left_num_rows == 0) {
switch (join_type) {
// Left, left anti, left semi, and inner joins all return empty sets.
case join_kind::LEFT_JOIN:
case join_kind::INNER_JOIN: {
thrust::fill(rmm::exec_policy{stream}, matches_per_row->begin(), matches_per_row->end(), 0);
return {0, std::move(matches_per_row)};
}
// Full joins need to return the trivial complement.
case join_kind::FULL_JOIN: {
thrust::fill(rmm::exec_policy{stream}, matches_per_row->begin(), matches_per_row->end(), 1);
return {right_num_rows, std::move(matches_per_row)};
}
default: CUDF_FAIL("Invalid join kind."); break;
}
}
// If evaluating the expression may produce null outputs we create a nullable
// output column and follow the null-supporting expression evaluation code
// path.
auto const has_nulls =
cudf::has_nulls(left_equality) || cudf::has_nulls(right_equality) ||
binary_predicate.may_evaluate_null(left_conditional, right_conditional, stream);
auto const parser = ast::detail::expression_parser{
binary_predicate, left_conditional, right_conditional, has_nulls, stream, mr};
CUDF_EXPECTS(parser.output_type().id() == type_id::BOOL8,
"The expression must produce a boolean output.");
// TODO: The non-conditional join impls start with a dictionary matching,
// figure out what that is and what it's needed for (and if conditional joins
// need to do the same).
auto& probe = swap_tables ? right_equality : left_equality;
auto& build = swap_tables ? left_equality : right_equality;
auto probe_view = table_device_view::create(probe, stream);
auto build_view = table_device_view::create(build, stream);
row_equality equality_probe{
cudf::nullate::DYNAMIC{has_nulls}, *probe_view, *build_view, compare_nulls};
// Don't use multimap_type because we want a CG size of 1.
mixed_multimap_type hash_table{
compute_hash_table_size(build.num_rows()),
cuco::sentinel::empty_key{std::numeric_limits<hash_value_type>::max()},
cuco::sentinel::empty_value{cudf::detail::JoinNoneValue},
stream.value(),
detail::hash_table_allocator_type{default_allocator<char>{}, stream}};
// TODO: To add support for nested columns we will need to flatten in many
// places. However, this probably isn't worth adding any time soon since we
// won't be able to support AST conditions for those types anyway.
auto const row_bitmask = cudf::detail::bitmask_and(build, stream).first;
build_join_hash_table(
build, hash_table, compare_nulls, static_cast<bitmask_type const*>(row_bitmask.data()), stream);
auto hash_table_view = hash_table.get_device_view();
auto left_conditional_view = table_device_view::create(left_conditional, stream);
auto right_conditional_view = table_device_view::create(right_conditional, stream);
// For inner joins we support optimizing the join by launching one thread for
// whichever table is larger rather than always using the left table.
detail::grid_1d const config(outer_num_rows, DEFAULT_JOIN_BLOCK_SIZE);
auto const shmem_size_per_block = parser.shmem_per_thread * config.num_threads_per_block;
// Allocate storage for the counter used to get the size of the join output
rmm::device_scalar<std::size_t> size(0, stream, mr);
// Determine the number of output rows without materializing the join output;
// only its size is needed here.
if (has_nulls) {
compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, true>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
matches_per_row_span);
} else {
compute_mixed_join_output_size<DEFAULT_JOIN_BLOCK_SIZE, false>
<<<config.num_blocks, config.num_threads_per_block, shmem_size_per_block, stream.value()>>>(
*left_conditional_view,
*right_conditional_view,
*probe_view,
*build_view,
equality_probe,
join_type,
hash_table_view,
parser.device_expression_data,
swap_tables,
size.data(),
matches_per_row_span);
}
return {size.value(stream), std::move(matches_per_row)};
}
} // namespace detail
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_inner_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::INNER_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_inner_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::compute_mixed_join_output_size(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::INNER_JOIN,
cudf::default_stream_value,
mr);
}
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_left_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::LEFT_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
std::pair<std::size_t, std::unique_ptr<rmm::device_uvector<size_type>>> mixed_left_join_size(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::compute_mixed_join_output_size(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::LEFT_JOIN,
cudf::default_stream_value,
mr);
}
std::pair<std::unique_ptr<rmm::device_uvector<size_type>>,
std::unique_ptr<rmm::device_uvector<size_type>>>
mixed_full_join(
table_view const& left_equality,
table_view const& right_equality,
table_view const& left_conditional,
table_view const& right_conditional,
ast::expression const& binary_predicate,
null_equality compare_nulls,
std::optional<std::pair<std::size_t, device_span<size_type const>>> const output_size_data,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::mixed_join(left_equality,
right_equality,
left_conditional,
right_conditional,
binary_predicate,
compare_nulls,
detail::join_kind::FULL_JOIN,
output_size_data,
cudf::default_stream_value,
mr);
}
} // namespace cudf
|
a88a705e2c1349f45ff64a1ce838ece7336c33f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Michael Lukiman at the Courant Institute
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
typedef unsigned int uint;
// GENERATE - PARALLEL VERSION
__global__ void generate( uint N, uint* array ) // Populates an array from 2 to N, assuming an already allocated array space.
{
uint ind = blockDim.x * blockIdx.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
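// Grid-stride loop: each thread fills every stride-th slot, so any launch
// configuration covers the whole 2..N range.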
for ( uint nth = ind ; nth < N-1 ; nth += stride )
{
array[nth] = 2 + nth;
}
}
//----------------------------------
// SHOOT - PARALLEL VERSION
__global__ void shoot( uint N, uint multipleOf, uint* array ) // Turns every multipleOf value (except for the number itself) into value 0. Again, index[0] is actually the int 2 and increments from there.
{
uint ind = blockDim.x * blockIdx.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
for ( uint nth = (multipleOf - 2) + (multipleOf*(ind+1)) ; nth < N-1 ; nth += multipleOf*(stride+1) )
{
array[nth] = 0;
}
}
//----------------------------------
// SHOOT-LOOP
void shootLoop( uint N, uint* array, uint numBlocks, uint threadsPerBlock ) // Increment through the numbers. If not shot, proceed to shoot using that number. Non-prime numbers will be shot long before they are reached, as can be mathematically induced.
{
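// Sieve bases run over indices 0..floor((N-1)/2), i.e. values up to roughly N/2;
// every composite <= N has a factor in that range (sqrt(N) would already suffice).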
for ( uint loop_index = 0 ; loop_index < floor( ( N - 1 ) / 2 ) ; loop_index++ )
{
uint base;
// array points to device memory, so copy the candidate value back before testing it on the host.
hipMemcpy(&base, &array[loop_index], sizeof(uint), hipMemcpyDeviceToHost);
if ( base != 0 )
{
hipLaunchKernelGGL(( shoot), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, N, base, array);
hipDeviceSynchronize();
}
}
}
//----------------------------------
// MAIN
int main( int argc, char** argv )
{
uint N = atol(argv[1]);
if ( argc != 2 || N <= 2 )
{
printf("%s", "Please supply one argument, N, for prime numbers up to N. Naturally, N must be greater than 2 for the output to be significant. Thanks! Here's an example: ./genprimes 20\n");
}
else
{
uint numBlocks = (N + 256 - 2) / 256;
uint threadsPerBlock = 256;
uint *device_array;
hipMalloc(&device_array, (N-1)*sizeof(uint));
hipMemset(device_array, 0, (N-1)*sizeof(uint));
hipLaunchKernelGGL(( generate), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, N, device_array);
hipDeviceSynchronize();
shootLoop(N, device_array, numBlocks, threadsPerBlock);
uint *host_array;
host_array = (uint*) malloc( sizeof(uint) * (N-1) );
// Copy the sieve results back from the device so they can be printed below.
hipMemcpy(host_array, device_array, (N-1) * sizeof(uint), hipMemcpyDeviceToHost);
char buffer[1024];
FILE *outfile;
snprintf(buffer, sizeof(buffer), "%u.txt", N);
outfile = fopen(buffer, "a");
for ( uint i = 0 ; i < N-1 ; i++ )
{
if ( host_array[i] != 0 )
{
fprintf(outfile, "%u ", host_array[i] );
}
// else { printf( "%u\n", host_array[i] ); }
}
hipFree(device_array);
free(host_array);
}
return 1;
}
| a88a705e2c1349f45ff64a1ce838ece7336c33f9.cu | // Michael Lukiman at the Courant Institute
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
typedef unsigned int uint;
// GENERATE - PARALLEL VERSION
__global__ void generate( uint N, uint* array ) // Populates an array from 2 to N, assuming an already allocated array space.
{
uint ind = blockDim.x * blockIdx.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
for ( uint nth = ind ; nth < N-1 ; nth += stride )
{
array[nth] = 2 + nth;
}
}
//----------------------------------
// SHOOT - PARALLEL VERSION
__global__ void shoot( uint N, uint multipleOf, uint* array ) // Turns every multipleOf value (except for the number itself) into value 0. Again, index[0] is actually the int 2 and increments from there.
{
uint ind = blockDim.x * blockIdx.x + threadIdx.x;
uint stride = blockDim.x * gridDim.x;
for ( uint nth = (multipleOf - 2) + (multipleOf*(ind+1)) ; nth < N-1 ; nth += multipleOf*(stride+1) )
{
array[nth] = 0;
}
}
//----------------------------------
// SHOOT-LOOP
void shootLoop( uint N, uint* array, uint numBlocks, uint threadsPerBlock ) // Increment through the numbers. If not shot, proceed to shoot using that number. Non-prime numbers will be shot long before they are reached, as can be mathematically induced.
{
for ( uint loop_index = 0 ; loop_index < floor( ( N - 1 ) / 2 ) ; loop_index++ )
{
uint base;
// array points to device memory, so copy the candidate value back before testing it on the host.
cudaMemcpy(&base, &array[loop_index], sizeof(uint), cudaMemcpyDeviceToHost);
if ( base != 0 )
{
shoot<<<numBlocks,threadsPerBlock>>>(N, base, array);
cudaDeviceSynchronize();
}
}
}
//----------------------------------
// MAIN
int main( int argc, char** argv )
{
uint N = atol(argv[1]);
if ( argc != 2 || N <= 2 )
{
printf("%s", "Please supply one argument, N, for prime numbers up to N. Naturally, N must be greater than 2 for the output to be significant. Thanks! Here's an example: ./genprimes 20\n");
}
else
{
uint numBlocks = (N + 256 - 2) / 256;
uint threadsPerBlock = 256;
uint *device_array;
cudaMalloc(&device_array, (N-1)*sizeof(uint));
cudaMemset(device_array, 0, (N-1)*sizeof(uint));
generate<<<numBlocks, threadsPerBlock>>>(N, device_array);
cudaDeviceSynchronize();
shootLoop(N, device_array, numBlocks, threadsPerBlock);
uint *host_array;
host_array = (uint*) malloc( sizeof(uint) * (N-1) );
// Copy the sieve results back from the device so they can be printed below.
cudaMemcpy(host_array, device_array, (N-1) * sizeof(uint), cudaMemcpyDeviceToHost);
char buffer[1024];
FILE *outfile;
snprintf(buffer, sizeof(buffer), "%u.txt", N);
outfile = fopen(buffer, "a");
for ( uint i = 0 ; i < N-1 ; i++ )
{
if ( host_array[i] != 0 )
{
fprintf(outfile, "%u ", host_array[i] );
}
// else { printf( "%u\n", host_array[i] ); }
}
cudaFree(device_array);
free(host_array);
}
return 1;
}
|
b82d24a19d4f0189bda5e2c781272491a33957d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include <cassert>
#if __cplusplus >= 201103L
#include <future>
#endif
// This example demonstrates two ways to achieve algorithm invocations that are asynchronous with
// the calling thread.
//
// The first method wraps a call to thrust::reduce inside a __global__ function. Since __global__ function
// launches are asynchronous with the launching thread, this achieves asynchrony. The result of the reduction
// is stored to a pointer to CUDA global memory. The calling thread waits for the result of the reduction to
// be ready by synchronizing with the CUDA stream on which the __global__ function is launched.
//
// The second method uses the C++11 library function, std::async, to create concurrency. The lambda function
// given to std::async returns the result of thrust::reduce to a std::future. The calling thread can use the
// std::future to wait for the result of the reduction. This method requires a compiler which supports
// C++11-capable language and library constructs.
template<typename Iterator, typename T, typename BinaryOperation, typename Pointer>
__global__ void reduce_kernel(Iterator first, Iterator last, T init, BinaryOperation binary_op, Pointer result)
{
*result = thrust::reduce(thrust::hip::par, first, last, init, binary_op);
}
int main()
{
size_t n = 1 << 20;
thrust::device_vector<unsigned int> data(n, 1);
thrust::device_vector<unsigned int> result(1, 0);
// method 1: call thrust::reduce from an asynchronous CUDA kernel launch
// create a CUDA stream
hipStream_t s;
hipStreamCreate(&s);
// launch a CUDA kernel with only 1 thread on our stream
hipLaunchKernelGGL(( reduce_kernel), dim3(1),dim3(1),0,s, data.begin(), data.end(), 0, thrust::plus<int>(), result.data());
// wait for the stream to finish
hipStreamSynchronize(s);
// our result should be ready
assert(result[0] == n);
hipStreamDestroy(s);
// reset the result
result[0] = 0;
#if __cplusplus >= 201103L
// method 2: use std::async to create asynchrony
// copy all the algorithm parameters
auto begin = data.begin();
auto end = data.end();
auto init = 0;
auto binary_op = thrust::plus<int>();
// std::async captures the algorithm parameters by value
// use std::launch::async to ensure the creation of a new thread
std::future<int> future_result = std::async(std::launch::async, [=]
{
return thrust::reduce(begin, end, init, binary_op);
});
// wait on the result and check that it is correct
assert(future_result.get() == n);
#endif
return 0;
}
| b82d24a19d4f0189bda5e2c781272491a33957d0.cu | #include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include <cassert>
#if __cplusplus >= 201103L
#include <future>
#endif
// This example demonstrates two ways to achieve algorithm invocations that are asynchronous with
// the calling thread.
//
// The first method wraps a call to thrust::reduce inside a __global__ function. Since __global__ function
// launches are asynchronous with the launching thread, this achieves asynchrony. The result of the reduction
// is stored to a pointer to CUDA global memory. The calling thread waits for the result of the reduction to
// be ready by synchronizing with the CUDA stream on which the __global__ function is launched.
//
// The second method uses the C++11 library function, std::async, to create concurrency. The lambda function
// given to std::async returns the result of thrust::reduce to a std::future. The calling thread can use the
// std::future to wait for the result of the reduction. This method requires a compiler which supports
// C++11-capable language and library constructs.
template<typename Iterator, typename T, typename BinaryOperation, typename Pointer>
__global__ void reduce_kernel(Iterator first, Iterator last, T init, BinaryOperation binary_op, Pointer result)
{
*result = thrust::reduce(thrust::cuda::par, first, last, init, binary_op);
}
int main()
{
size_t n = 1 << 20;
thrust::device_vector<unsigned int> data(n, 1);
thrust::device_vector<unsigned int> result(1, 0);
// method 1: call thrust::reduce from an asynchronous CUDA kernel launch
// create a CUDA stream
cudaStream_t s;
cudaStreamCreate(&s);
// launch a CUDA kernel with only 1 thread on our stream
reduce_kernel<<<1,1,0,s>>>(data.begin(), data.end(), 0, thrust::plus<int>(), result.data());
// wait for the stream to finish
cudaStreamSynchronize(s);
// our result should be ready
assert(result[0] == n);
cudaStreamDestroy(s);
// reset the result
result[0] = 0;
#if __cplusplus >= 201103L
// method 2: use std::async to create asynchrony
// copy all the algorithm parameters
auto begin = data.begin();
auto end = data.end();
auto init = 0;
auto binary_op = thrust::plus<int>();
// std::async captures the algorithm parameters by value
// use std::launch::async to ensure the creation of a new thread
std::future<int> future_result = std::async(std::launch::async, [=]
{
return thrust::reduce(begin, end, init, binary_op);
});
// wait on the result and check that it is correct
assert(future_result.get() == n);
#endif
return 0;
}
|
3f1b583d193cc3f81338c114f8367753d34ec1fc.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
uint* sssp_CPU(Graph* graph, int source){
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while (!finished) {
// uint minDist = MAX_DIST;
finished = true;
numIteration++;
for (int i = 0; i < numEdges; i++){
Edge edge = graph->edges.at(i);
// Update its neighbor
uint source = edge.source;
uint end = edge.end;
uint weight = edge.weight;
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
finished = false;
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU: %f ms\n", timer.elapsedTime());
return dist;
}
float sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges;i ++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
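// Each OpenMP thread relaxes a contiguous chunk of edges; the enclosing while
// loop repeats until a full pass over all edges makes no update.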
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
// return dist;
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
return timer.elapsedTime();
}
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
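// Relax the edge: atomicMin keeps the smallest tentative distance when several
// threads update the same destination vertex in the same pass.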
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
float sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
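// Each thread relaxes numEdgesPerThread consecutive edges, so this grid of
// 512-thread blocks covers all numEdges edges.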
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// TO-DO PARALLEL
hipLaunchKernelGGL(( sssp_GPU_Kernel), dim3(numBlock), dim3(numThreadsPerBlock) , 0, 0, numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
} while(!finished);
timer.stop();
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipMemcpy(dist, d_dist, numNodes * sizeof(uint), hipMemcpyDeviceToHost));
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
// return dist;
return timer.elapsedTime();
}
__global__ void sssp_GPU_Hybrid_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
}
}
}
float sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(hipMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(hipMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(hipMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(hipMemcpy(d_dist, dist, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_preNode, preNode, numNodes * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), hipMemcpyHostToDevice));
// Copy from gpu memory
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.5;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex + 1
*/
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_host_to_device;
Timer timer_device_to_host;
// Default: enable cpu and gpu
// Once splitRatio equals to 0 only enable gpu
// Once splitRatio equals to 1 only enable cpu
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
#pragma omp parallel //num_threads(8)
{
timer_gpu.start();
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == h_numThreads - 1 && splitIndex < numEdges && gpu_enable) {
// The last thread is used to launch the GPU kernel: if thread 0 were used
// instead, the first chunk of CPU data (indices beginning at 0) would not
// be processed.
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
// timer_host_to_device.start();
gpuErrorcheck(hipMemcpy(d_dist, dist, sizeof(uint) * numNodes, hipMemcpyHostToDevice));
// timer_host_to_device.stop();
hipLaunchKernelGGL(( sssp_GPU_Hybrid_Kernel), dim3(d_numBlock), dim3(d_numThreadsPerBlock), 0, 0, splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(hipPeekAtLastError());
gpuErrorcheck(hipDeviceSynchronize());
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
// timer_device_to_host.start();
gpuErrorcheck(hipMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, hipMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
timer_cpu.start();
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
int start = threadId * h_numEdgesPerThread;
int end = (threadId + 1) * h_numEdgesPerThread;
if (start > splitIndex) {
start = splitIndex;
}
if (end > splitIndex) {
end = splitIndex;
}
// cout << "Processs node: from " << start << " to: " << end << endl;
// printf("Process node from: %d to : %d\n", start, end);
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
h_finished = false;
}
}
timer_cpu.stop();
}
}
finished = finished && h_finished;
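// The iteration terminates only when neither the CPU partition nor the GPU
// partition relaxed any edge.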
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
int h_numNodesPerThread = (numNodes) / (h_numThreads) + 1;
if (!finished) {
// Merge
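// Take the element-wise minimum of the CPU-updated distances (dist) and the
// GPU results copied back into dist_copy.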
int startIdx = threadId * h_numNodesPerThread;
int endIdx = (threadId + 1) * h_numNodesPerThread;
if (startIdx > numNodes) {
startIdx = numNodes;
}
if (endIdx > numNodes) {
endIdx = numNodes;
}
for (int i = startIdx; i < endIdx; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
}
// Load Balancing
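// Move the split point by 5% of the edges toward whichever side finished its
// partition faster; if the ratio saturates at 0 or 1, the slower side is disabled.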
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("No. itr: %d , updated splitRatio: %f, factor: %f\n", numIteration, splitRatio, factor);
// printf("CPU PART TIME: %f\n", timer_cpu.elapsedTime());
// printf("GPU PART TIME: %f\n", timer_gpu.elapsedTime());
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime();
loopInfo.time_gpu = timer_gpu.elapsedTime();
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
// printLoopInfo(infos);
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(hipFree(d_dist));
gpuErrorcheck(hipFree(d_preNode));
gpuErrorcheck(hipFree(d_finished));
gpuErrorcheck(hipFree(d_edgesSource));
gpuErrorcheck(hipFree(d_edgesEnd));
gpuErrorcheck(hipFree(d_edgesWeight));
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
// return dist;
return timer.elapsedTime();
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
// uint *dist_cpu_parallel = sssp_CPU_parallel(&graph, sourceNode);
// Hybrid running time
float time_hybrid = 0;
for (int i = 0; i < 10; i++) {
time_hybrid += sssp_Hybrid(&graph, sourceNode);
}
time_hybrid = time_hybrid / 10;
// GPU-only running time
float time_gpu = 0;
for (int i = 0; i < 10; i++) {
time_gpu += sssp_GPU(&graph, sourceNode);
}
time_gpu = time_gpu / 10;
// CPU-OpenMP running time
float time_openmp = 0;
if (args.runOnCPU) {
for (int i = 0; i < 10; i++) {
time_openmp += sssp_CPU_parallel(&graph, sourceNode);
}
time_openmp = time_openmp / 10;
printf("CPU (OpenMP) running time: %f ms\n", time_openmp);
}
printf("GPU-Only running time: %f ms\n", time_gpu);
printf("Hybrid running time: %f ms\n", time_hybrid);
/* uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
} */
timer_total.stop();
// printf("Total execution time: %f ms\n", timer_total.elapsedTime());
// printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
} | 3f1b583d193cc3f81338c114f8367753d34ec1fc.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "./utilities/timer.hpp"
#include "./utilities/graph.hpp"
#include "./utilities/gpu_error_check.cuh"
#include "./utilities/global.hpp"
#include "./utilities/argument_parser.hpp"
#include <omp.h>
uint* sssp_CPU(Graph* graph, int source){
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while (!finished) {
// uint minDist = MAX_DIST;
finished = true;
numIteration++;
for (int i = 0; i < numEdges; i++){
Edge edge = graph->edges.at(i);
// Update its neighbor
uint source = edge.source;
uint end = edge.end;
uint weight = edge.weight;
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
finished = false;
}
}
}
timer.stop();
printf("Process Done!\n");
printf("Number of Iteration: %d\n", numIteration);
printf("The execution time of SSSP on CPU: %f ms\n", timer.elapsedTime());
return dist;
}
float sssp_CPU_parallel(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
bool *processed = new bool[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
processed[i] = false;
}
for (int i = 0; i < numEdges;i ++) {
Edge edge = graph->edges.at(i);
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source) {
if (edge.weight < dist[edge.end]) {
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
Timer timer;
bool finished = false;
uint numIteration = 0;
dist[source] = 0;
preNode[source] = 0;
processed[source] = true;
timer.start();
while(!finished) {
finished = true;
numIteration++;
#pragma omp parallel
{
// #pragma omp master
int threadId = omp_get_thread_num();
int numThreads = omp_get_num_threads();
int numEdgesPerThread = numEdges / numThreads + 1;
int start = threadId * numEdgesPerThread;
int end = (threadId + 1) * numEdgesPerThread;
if (start > numEdges) {
start = numEdges;
}
if (end > numEdges) {
end = numEdges;
}
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
// #pragma omp atomic
dist[end] = dist[source] + weight;
// #pragma omp atomic
preNode[end] = source;
finished = false;
}
}
}
}
timer.stop();
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on CPU(OpenMP): %f ms\n", timer.elapsedTime());
// return dist;
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
return timer.elapsedTime();
}
__global__ void sssp_GPU_Kernel(int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
// dist[end] = dist[source] + weight;
preNode[end] = source;
*finished = false;
}
}
}
float sssp_GPU(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
Timer timer;
int numIteration = 0;
int numEdgesPerThread = 8;
int numThreadsPerBlock = 512;
int numBlock = (numEdges) / (numThreadsPerBlock * numEdgesPerThread) + 1;
bool finished = true;
timer.start();
do {
numIteration++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// TO-DO PARALLEL
sssp_GPU_Kernel<<< numBlock, numThreadsPerBlock >>> (numEdges,
numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
} while(!finished);
timer.stop();
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on GPU: %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaMemcpy(dist, d_dist, numNodes * sizeof(uint), cudaMemcpyDeviceToHost));
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
// return dist;
return timer.elapsedTime();
}
__global__ void sssp_GPU_Hybrid_Kernel(int splitIndex,
int numEdges,
int numEdgesPerThread,
uint *dist,
uint *preNode,
uint *edgesSource,
uint *edgesEnd,
uint *edgesWeight,
bool *finished) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
int startId = splitIndex + threadId * numEdgesPerThread;
if (startId >= numEdges) {
return;
}
int endId = splitIndex + (threadId + 1) * numEdgesPerThread;
if (endId >= numEdges) {
endId = numEdges;
}
// printf("GPU: process edged from: %d to %d \n", startId, endId);
for (int nodeId = startId; nodeId < endId; nodeId++) {
uint source = edgesSource[nodeId];
uint end = edgesEnd[nodeId];
uint weight = edgesWeight[nodeId];
if (dist[source] + weight < dist[end]) {
atomicMin(&dist[end], dist[source] + weight);
preNode[end] = source;
*finished = false;
}
}
}
float sssp_Hybrid(Graph *graph, int source) {
int numNodes = graph->numNodes;
int numEdges = graph->numEdges;
uint *dist = new uint[numNodes];
uint *preNode = new uint[numNodes];
uint *edgesSource = new uint[numEdges];
uint *edgesEnd = new uint[numEdges];
uint *edgesWeight = new uint[numEdges];
uint *dist_copy = new uint[numNodes];
for (int i = 0; i < numNodes; i++) {
dist[i] = MAX_DIST;
preNode[i] = uint(-1);
}
for (int i = 0; i < numEdges; i++) {
Edge edge = graph->edges.at(i);
// Transfer the vector to the following three arrays
edgesSource[i] = edge.source;
edgesEnd[i] = edge.end;
edgesWeight[i] = edge.weight;
if (edge.source == source){
if (edge.weight < dist[edge.end]){
dist[edge.end] = edge.weight;
preNode[edge.end] = source;
}
} else {
// Case: edge.source != source
continue;
}
}
dist[source] = 0;
preNode[source] = 0;
uint *d_dist;
uint *d_preNode;
bool *d_finished;
uint *d_edgesSource;
uint *d_edgesEnd;
uint *d_edgesWeight;
gpuErrorcheck(cudaMalloc(&d_dist, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_preNode, numNodes * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
gpuErrorcheck(cudaMalloc(&d_edgesSource, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesEnd, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMalloc(&d_edgesWeight, numEdges * sizeof(uint)));
gpuErrorcheck(cudaMemcpy(d_dist, dist, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_preNode, preNode, numNodes * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesSource, edgesSource, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesEnd, edgesEnd, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_edgesWeight, edgesWeight, numEdges * sizeof(uint), cudaMemcpyHostToDevice));
// Copy from gpu memory
memcpy(dist_copy, dist, numNodes * sizeof(uint));
Timer timer;
int numIteration = 0;
bool finished = false;
bool h_finished = false;
float splitRatio; // cpu_data_size / whole_data_size
// Automatically select an initial value of splitRatio based on experience
if (numEdges < 300000) {
splitRatio = 0.95;
} else if (numEdges < 800000) {
splitRatio = 0.7;
} else {
splitRatio = 0.5;
}
/*
CPU process edges from 0 to splitIndex
number of edges: splitIndex
GPU process edges from splitIndex to numEdges
number of edges: numEdges - splitIndex + 1
*/
int splitIndex = numEdges * splitRatio;
int d_numEdgesPerThread = 8;
int d_numThreadsPerBlock = 512;
int d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
Timer timer_cpu, timer_gpu;
Timer timer_host_to_device;
Timer timer_device_to_host;
// Default: enable cpu and gpu
// Once splitRatio equals to 0 only enable gpu
// Once splitRatio equals to 1 only enable cpu
bool cpu_enable = true;
bool gpu_enable = true;
vector<LoopInfo> infos;
LoopInfo loopInfo;
timer.start();
do {
numIteration++;
finished = true;
h_finished = true;
splitIndex = numEdges * splitRatio;
d_numBlock = (numEdges - splitIndex + 1) / (d_numThreadsPerBlock * d_numEdgesPerThread) + 1;
#pragma omp parallel //num_threads(8)
{
timer_gpu.start();
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
if (threadId == h_numThreads - 1 && splitIndex < numEdges && gpu_enable) {
// The last thread is used to launch the GPU kernel: if thread 0 were used
// instead, the first chunk of CPU data (indices beginning at 0) would not
// be processed.
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
// timer_host_to_device.start();
gpuErrorcheck(cudaMemcpy(d_dist, dist, sizeof(uint) * numNodes, cudaMemcpyHostToDevice));
// timer_host_to_device.stop();
sssp_GPU_Hybrid_Kernel<<< d_numBlock, d_numThreadsPerBlock>>> (splitIndex,
numEdges,
d_numEdgesPerThread,
d_dist,
d_preNode,
d_edgesSource,
d_edgesEnd,
d_edgesWeight,
d_finished);
gpuErrorcheck(cudaPeekAtLastError());
gpuErrorcheck(cudaDeviceSynchronize());
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
// timer_device_to_host.start();
gpuErrorcheck(cudaMemcpy(dist_copy, d_dist, sizeof(uint) * numNodes, cudaMemcpyDeviceToHost));
// timer_device_to_host.stop();
timer_gpu.stop();
} else if (cpu_enable) {
// printf("Sub threads\n");
timer_cpu.start();
int h_numEdgesPerThread = (splitIndex) / (h_numThreads - 1) + 1;
int start = threadId * h_numEdgesPerThread;
int end = (threadId + 1) * h_numEdgesPerThread;
if (start > splitIndex) {
start = splitIndex;
}
if (end > splitIndex) {
end = splitIndex;
}
// cout << "Processs node: from " << start << " to: " << end << endl;
// printf("Process node from: %d to : %d\n", start, end);
for (int i = start; i < end; i++) {
uint source = edgesSource[i];
uint end = edgesEnd[i];
uint weight = edgesWeight[i];
if (dist[source] + weight < dist[end]) {
dist[end] = dist[source] + weight;
preNode[end] = source;
h_finished = false;
}
}
timer_cpu.stop();
}
}
finished = finished && h_finished;
#pragma omp parallel //num_threads(8)
{
int threadId = omp_get_thread_num();
int h_numThreads = omp_get_num_threads();
int h_numNodesPerThread = (numNodes) / (h_numThreads) + 1;
if (!finished) {
// Merge
int startIdx = threadId * h_numNodesPerThread;
int endIdx = (threadId + 1) * h_numNodesPerThread;
if (startIdx > numNodes) {
startIdx = numNodes;
}
if (endIdx > numNodes) {
endIdx = numNodes;
}
for (int i = startIdx; i < endIdx; i++) {
if (dist[i] > dist_copy[i]) {
dist[i] = dist_copy[i];
}
}
}
}
// Load Balancing
if (cpu_enable && gpu_enable) {
float factor = (timer_cpu.elapsedTime() / timer_gpu.elapsedTime());
if (factor > 1) {
splitRatio = splitRatio - 0.05;
if (splitRatio < 0) {
splitRatio = 0;
cpu_enable = false;
}
} else {
splitRatio = splitRatio + 0.05;
if (splitRatio > 1) {
splitRatio = 1;
gpu_enable = false;
}
}
// printf("No. itr: %d , updated splitRatio: %f, factor: %f\n", numIteration, splitRatio, factor);
// printf("CPU PART TIME: %f\n", timer_cpu.elapsedTime());
// printf("GPU PART TIME: %f\n", timer_gpu.elapsedTime());
// printf("Copy dist from host to device : %f ms \n", timer_host_to_device.elapsedTime());
// printf("Copy dist from device to host : %f ms \n", timer_device_to_host.elapsedTime());
loopInfo.numIteration = numIteration;
loopInfo.time_cpu = timer_cpu.elapsedTime();
loopInfo.time_gpu = timer_gpu.elapsedTime();
loopInfo.splitRatio = splitRatio;
infos.push_back(loopInfo);
}
} while(!finished);
timer.stop();
// printLoopInfo(infos);
// printf("Process Done!\n");
// printf("Number of Iteration: %d\n", numIteration);
// printf("The execution time of SSSP on Hybrid(CPU-GPU): %f ms\n", timer.elapsedTime());
gpuErrorcheck(cudaFree(d_dist));
gpuErrorcheck(cudaFree(d_preNode));
gpuErrorcheck(cudaFree(d_finished));
gpuErrorcheck(cudaFree(d_edgesSource));
gpuErrorcheck(cudaFree(d_edgesEnd));
gpuErrorcheck(cudaFree(d_edgesWeight));
delete []dist;
delete []preNode;
delete []edgesSource;
delete []edgesEnd;
delete []edgesWeight;
// return dist;
return timer.elapsedTime();
}
int main(int argc, char **argv) {
Timer timer_total, timer_load;
timer_total.start();
ArgumentParser args(argc, argv);
timer_load.start();
Graph graph(args.inputFilePath);
//Graph graph("datasets/simpleGraph.txt");
graph.readGraph();
timer_load.stop();
int sourceNode;
if (args.hasSourceNode) {
sourceNode = args.sourceNode;
} else {
// Use graph default source
sourceNode = graph.defaultSource;
}
// uint *dist_cpu_parallel = sssp_CPU_parallel(&graph, sourceNode);
// Hybrid running time
float time_hybrid = 0;
for (int i = 0; i < 10; i++) {
time_hybrid += sssp_Hybrid(&graph, sourceNode);
}
time_hybrid = time_hybrid / 10;
// GPU-only running time
float time_gpu = 0;
for (int i = 0; i < 10; i++) {
time_gpu += sssp_GPU(&graph, sourceNode);
}
time_gpu = time_gpu / 10;
// CPU-OpenMP running time
float time_openmp = 0;
if (args.runOnCPU) {
for (int i = 0; i < 10; i++) {
time_openmp += sssp_CPU_parallel(&graph, sourceNode);
}
time_openmp = time_openmp / 10;
printf("CPU (OpenMP) running time: %f ms\n", time_openmp);
}
printf("GPU-Only running time: %f ms\n", time_gpu);
printf("Hybrid running time: %f ms\n", time_hybrid);
/* uint *dist_hybrid = sssp_Hybrid(&graph, sourceNode);
uint *dist_gpu = sssp_GPU(&graph, sourceNode);
compareResult(dist_hybrid, dist_gpu, graph.numNodes);
if (args.runOnCPU) {
uint *dist_cpu = sssp_CPU_parallel(&graph, sourceNode);
compareResult(dist_cpu, dist_hybrid, graph.numNodes);
} */
timer_total.stop();
// printf("Total execution time: %f ms\n", timer_total.elapsedTime());
// printf("Graph loading execution time: %f ms\n", timer_load.elapsedTime());
return 0;
} |
5adb8a8419f7d93030a5172a505fa18ad0bd3281.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm_batched.h"
#pragma warning( disable : 4503)
/*
This example demonstrates how to use cutlass to compute a batched strided gemm.
In this example, both A and B matrix are non-transpose and column major matrix
batched_C = batched_A x batched_B
As an example, matrix C can be seen as
-----------------------------------------------------------
(0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) |
-----------------------------------------------------------
(0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------------------------------------
(0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) |
-----------------------------------------------------------
(0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) |
-----------------------------------------------------------
(0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) |
-----------------------------------------------------------
(0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) |
-----------------------------------------------------------
batch 0 | batch 1
where we denote each element with (batch_idx, row_idx, column_idx)
In this example, batch size is 2, M is 6 and N is 3
The stride (batch_stride_C) between the first element of two batches is ldc * n
matrix A can be seen as
---------------------------------------
(0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) |
---------------------------------------
(0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) |
---------------------------------------
(0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) |
---------------------------------------
(0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) |
---------------------------------------
(0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) |
---------------------------------------
(0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) |
---------------------------------------
batch 0 | batch 1
, where batch size is 2, M is 6 and K is 2
The stride (batch_stride_A) between the first element of two batches is lda * k
matrix B can be seen as
-----------------------------
(0,0,0) | (0,0,1) | (0,0,2) |
----------------------------- batch 0
(0,1,0) | (0,1,1) | (0,1,2) |
-------------------------------------
(1,0,0) | (1,0,1) | (1,0,2) |
----------------------------- batch 1
(1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------
, where the batch size is 2, N is 3 and K is 2
The stride (batch_stride_B) between the first element of two batches is k
*/
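// With this column-major layout, element (b, row, col) of C is stored at
//   C[b * batch_stride_C + col * ldc + row]        (batch_stride_C = ldc * n)
// and A and B follow the same pattern with lda/batch_stride_A and ldb/batch_stride_B;
// the reference implementation further below indexes exactly this way.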
hipError_t cutlass_strided_batched_sgemm(
int m,
int n,
int k,
float alpha,
float const *A,
int lda,
long long int batch_stride_A,
float const *B,
int ldb,
long long int batch_stride_B,
float *C,
int ldc,
long long int batch_stride_C,
float beta,
int batch_count) {
using Gemm = cutlass::gemm::device::GemmBatched<
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor
>;
Gemm gemm_op;
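  // Argument order for GemmBatched: problem size {m, n, k}, then {pointer, leading
  // dimension} plus batch stride for A, B, the C source and the D destination (D is
  // aliased to C here, so the result overwrites C), the epilogue scalars {alpha, beta},
  // and finally the batch count.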
cutlass::Status status = gemm_op({
{m, n, k},
{A, lda},
batch_stride_A,
{B, ldb},
batch_stride_B,
{C, ldc},
batch_stride_C,
{C, ldc},
batch_stride_C,
{alpha, beta},
batch_count
});
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
return hipSuccess;
}
template<typename T>
hipError_t strided_batched_gemm_nn_reference(
int m,
int n,
int k,
T alpha,
std::vector<T> const &A,
int lda,
long long int batch_stride_A,
std::vector<T> const &B,
int ldb,
long long int batch_stride_B,
std::vector<T> &C,
int ldc,
long long int batch_stride_C,
T beta,
int batch_count) {
/*
strided batched gemm NN
*/
hipError_t result = hipSuccess;
if (A.size() < lda * k * batch_count) {
std::cout << "the size of A is too small" << std::endl;
return hipErrorInvalidValue;
}
if (B.size() < ldb * n) {
std::cout << "the size of B is too small" << std::endl;
return hipErrorInvalidValue;
}
if (C.size() < ldc * n * batch_count) {
std::cout << "the size of C is too small" << std::endl;
return hipErrorInvalidValue;
}
for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) {
for (int n_idx = 0; n_idx < n; n_idx++) {
for (int m_idx = 0; m_idx < m; m_idx++) {
T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx];
for (int k_idx = 0; k_idx < k; k_idx++) {
accum += alpha
* A[batch_idx * batch_stride_A + k_idx * lda + m_idx]
* B[batch_idx * batch_stride_B + n_idx * ldb + k_idx];
}
C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum;
}
}
}
return result;
}
int main() {
// Arbitrary problem size
int const m = 520;
int const n = 219;
int const k = 129;
int const batch_count = 17;
// A, B are non-transpose, column major
int const lda = m;
int const ldb = k * batch_count;
int const ldc = m;
int const count_A = batch_count * lda * k;
int const count_B = ldb * n;
int const count_C = batch_count * ldc * n;
// the memory is batched along K dimension
long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k);
long long int batch_stride_B = static_cast<long long int>(k);
long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n);
// alpha and beta
float alpha = 1.0f;
float beta = 2.0f;
hipError_t result = hipSuccess;
// allocate the host memory
std::vector<float> host_A(count_A);
std::vector<float> host_B(count_B);
std::vector<float> host_C(count_C);
std::vector<float> result_C(count_C);
// allocate the device memory
float *A;
float *B;
float *C;
result = hipMalloc(&A, count_A * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
result = hipMalloc(&B, count_B * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
result = hipMalloc(&C, count_C * sizeof(float));
if (result != hipSuccess) {
std::cerr << "hipMalloc result = " << result << std::endl;
return result;
}
// Limit range to avoid floating-point errors
int const kRange = 8;
// fill A
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < k; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>((row_idx + col_idx * lda + b_idx * lda * k) % kRange);
}
}
}
// fill B
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < k; row_idx++) {
host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(((n + k * ldb + batch_count * k) - (row_idx + col_idx * ldb + b_idx * k)) % kRange);
}
}
}
// fill C
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f;
}
}
}
// ref memory
std::vector<float> ref_A(host_A);
std::vector<float> ref_B(host_B);
std::vector<float> ref_C(host_C);
// copy host memory to device
result = hipMemcpy(A, host_A.data(), count_A * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
result = hipMemcpy(B, host_B.data(), count_B * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
result = hipMemcpy(C, host_C.data(), count_C * sizeof(float), hipMemcpyHostToDevice);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
// run cutlass
result = cutlass_strided_batched_sgemm(
m, n, k, alpha, A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C,
beta, batch_count);
if (result != hipSuccess)
return result;
// copy device memory to host
result = hipMemcpy(result_C.data(), C, count_C * sizeof(float), hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "hipMemcpy result = " << result << std::endl;
return result;
}
//compare with reference code
result = strided_batched_gemm_nn_reference(m, n, k, alpha, ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C,
beta, batch_count);
if (result != 0)
return result;
// Expect bit-level accuracy for this simple example
if (ref_C != result_C) {
std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl;
return hipErrorUnknown;
}
// free memory
result = hipFree(A);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
result = hipFree(B);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
result = hipFree(C);
if (result != hipSuccess) {
std::cerr << "hipFree result = " << result << std::endl;
return result;
}
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
| 5adb8a8419f7d93030a5172a505fa18ad0bd3281.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <iostream>
#include <vector>
#include "cutlass/cutlass.h"
#include "cutlass/layout/matrix.h"
#include "cutlass/gemm/device/gemm_batched.h"
#pragma warning( disable : 4503)
/*
This example demonstrates how to use cutlass to compute a batched strided gemm.
In this example, both A and B matrix are non-transpose and column major matrix
batched_C = batched_A x batched_B
As an example, matrix C can be seen as
-----------------------------------------------------------
(0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) |
-----------------------------------------------------------
(0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------------------------------------
(0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) |
-----------------------------------------------------------
(0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) |
-----------------------------------------------------------
(0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) |
-----------------------------------------------------------
(0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) |
-----------------------------------------------------------
batch 0 | batch 1
where we denote each element with (batch_idx, row_idx, column_idx)
In this example, batch size is 2, M is 6 and N is 3
The stride (batch_stride_C) between the first element of two batches is ldc * n
matrix A can be seen as
---------------------------------------
(0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) |
---------------------------------------
(0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) |
---------------------------------------
(0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) |
---------------------------------------
(0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) |
---------------------------------------
(0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) |
---------------------------------------
(0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) |
---------------------------------------
batch 0 | batch 1
, where batch size is 2, M is 6 and K is 2
The stride (batch_stride_A) between the first element of two batches is lda * k
matrix B can be seen as
-----------------------------
(0,0,0) | (0,0,1) | (0,0,2) |
----------------------------- batch 0
(0,1,0) | (0,1,1) | (0,1,2) |
-------------------------------------
(1,0,0) | (1,0,1) | (1,0,2) |
----------------------------- batch 1
(1,1,0) | (1,1,1) | (1,1,2) |
-----------------------------
, where the batch size is 2, N is 3 and K is 2
The stride (batch_stride_B) between the first element of two batches is k
*/
cudaError_t cutlass_strided_batched_sgemm(
int m,
int n,
int k,
float alpha,
float const *A,
int lda,
long long int batch_stride_A,
float const *B,
int ldb,
long long int batch_stride_B,
float *C,
int ldc,
long long int batch_stride_C,
float beta,
int batch_count) {
using Gemm = cutlass::gemm::device::GemmBatched<
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor,
float, cutlass::layout::ColumnMajor
>;
Gemm gemm_op;
cutlass::Status status = gemm_op({
{m, n, k},
{A, lda},
batch_stride_A,
{B, ldb},
batch_stride_B,
{C, ldc},
batch_stride_C,
{C, ldc},
batch_stride_C,
{alpha, beta},
batch_count
});
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
return cudaSuccess;
}
template<typename T>
cudaError_t strided_batched_gemm_nn_reference(
int m,
int n,
int k,
T alpha,
std::vector<T> const &A,
int lda,
long long int batch_stride_A,
std::vector<T> const &B,
int ldb,
long long int batch_stride_B,
std::vector<T> &C,
int ldc,
long long int batch_stride_C,
T beta,
int batch_count) {
/*
strided batched gemm NN
*/
cudaError_t result = cudaSuccess;
if (A.size() < lda * k * batch_count) {
std::cout << "the size of A is too small" << std::endl;
return cudaErrorInvalidValue;
}
if (B.size() < ldb * n) {
std::cout << "the size of B is too small" << std::endl;
return cudaErrorInvalidValue;
}
if (C.size() < ldc * n * batch_count) {
std::cout << "the size of C is too small" << std::endl;
return cudaErrorInvalidValue;
}
for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) {
for (int n_idx = 0; n_idx < n; n_idx++) {
for (int m_idx = 0; m_idx < m; m_idx++) {
T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx];
for (int k_idx = 0; k_idx < k; k_idx++) {
accum += alpha
* A[batch_idx * batch_stride_A + k_idx * lda + m_idx]
* B[batch_idx * batch_stride_B + n_idx * ldb + k_idx];
}
C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum;
}
}
}
return result;
}
int main() {
// Arbitrary problem size
int const m = 520;
int const n = 219;
int const k = 129;
int const batch_count = 17;
// A, B are non-transpose, column major
int const lda = m;
int const ldb = k * batch_count;
int const ldc = m;
int const count_A = batch_count * lda * k;
int const count_B = ldb * n;
int const count_C = batch_count * ldc * n;
// the memory is batched along K dimension
long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k);
long long int batch_stride_B = static_cast<long long int>(k);
long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n);
// alpha and beta
float alpha = 1.0f;
float beta = 2.0f;
cudaError_t result = cudaSuccess;
// allocate the host memory
std::vector<float> host_A(count_A);
std::vector<float> host_B(count_B);
std::vector<float> host_C(count_C);
std::vector<float> result_C(count_C);
// allocate the device memory
float *A;
float *B;
float *C;
result = cudaMalloc(&A, count_A * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&B, count_B * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
result = cudaMalloc(&C, count_C * sizeof(float));
if (result != cudaSuccess) {
std::cerr << "cudaMalloc result = " << result << std::endl;
return result;
}
// Limit range to avoid floating-point errors
int const kRange = 8;
// fill A
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < k; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>((row_idx + col_idx * lda + b_idx * lda * k) % kRange);
}
}
}
// fill B
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < k; row_idx++) {
host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(((n + k * ldb + batch_count * k) - (row_idx + col_idx * ldb + b_idx * k)) % kRange);
}
}
}
// fill C
for (int b_idx = 0; b_idx < batch_count; b_idx++) {
for (int col_idx = 0; col_idx < n; col_idx++) {
for (int row_idx = 0; row_idx < m; row_idx++) {
host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f;
}
}
}
// ref memory
std::vector<float> ref_A(host_A);
std::vector<float> ref_B(host_B);
std::vector<float> ref_C(host_C);
// copy host memory to device
result = cudaMemcpy(A, host_A.data(), count_A * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(B, host_B.data(), count_B * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
result = cudaMemcpy(C, host_C.data(), count_C * sizeof(float), cudaMemcpyHostToDevice);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
// run cutlass
result = cutlass_strided_batched_sgemm(
m, n, k, alpha, A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C,
beta, batch_count);
if (result != cudaSuccess)
return result;
// copy device memory to host
result = cudaMemcpy(result_C.data(), C, count_C * sizeof(float), cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "cudaMemcpy result = " << result << std::endl;
return result;
}
//compare with reference code
result = strided_batched_gemm_nn_reference(m, n, k, alpha, ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C,
beta, batch_count);
if (result != 0)
return result;
// Expect bit-level accuracy for this simple example
if (ref_C != result_C) {
std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl;
return cudaErrorUnknown;
}
// free memory
result = cudaFree(A);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(B);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
result = cudaFree(C);
if (result != cudaSuccess) {
std::cerr << "cudaFree result = " << result << std::endl;
return result;
}
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
|
d63e81242760f384ed33a6f01798a20ac47d93c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <omp.h>
#include <mpi.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <iostream>
#include <fstream>
#include <iomanip>
#include "cmdparser.hpp"
#include "utility.hpp"
#include "timer.hpp"
#include "add.hpp"
#include "sub.hpp"
#include "mul.hpp"
#include "ddt.hpp"
#include "dft.hpp"
#include "dwt.hpp"
#include "shrink.hpp"
using namespace std;
using namespace csmri;
#define MAX_DEVICES_PER_NODE 8
#define MAX_DEVICES_PER_PROC 8
int main(int argc, const char* argv[])
{
const char* key =
"{ h |help | | print help message }"
"{ |spec | | Binary file of spectrum (kspace) - input }"
"{ |full | | Binary file of full reconstruction}"
"{ |mask | | Binary file of mask - input}"
"{ |zero | | Binary file of zero filling reconstruction}"
"{ |dest | | Binary file of reconstruction}"
"{ |dimx | | Number of the columns }"
"{ |dimy | | Number of the rows }"
"{ |dimz | | Temporal resolution }"
"{ |dimn | | Number of slices }"
"{ |devs | 1 | Number of GPUs }"
"{ |Mu | 0.100 | Weight of Interpolation }"
"{ |Lambda_w | 0.005 | Weight of Lambda }"
"{ |Lambda_t | 1.000 | Threshold of Lambda }"
"{ |Gamma_w | 0.200 | Weight of Gamma }"
"{ |Gamma_t | 1.000 | Threshold of Gamma }"
"{ |Omega_w | 0.600 | Weight of Omega }"
"{ |Omega_t | 1.000 | Threshold of Omega}"
"{ |Epsilon | 0.700 | Epsilon of Richardson loop}"
"{ |nOuter | 4 | Number of Outer loops}"
"{ |nInner | 8 | Number of Inner loops}"
"{ |nLoops | 4 | Number of Richardson loops}";
CommandLineParser cmd(argc, argv, key);
if (argc == 1)
{
cout << "Usage: " << argv[0] << " [options]" << endl;
cout << "Avaible options:" << endl;
cmd.printParams();
return 0;
}
// cmd.printParams();
string spec = cmd.get<string>("spec", true);
string full = cmd.get<string>("full", true);
string mask = cmd.get<string>("mask", true);
string zero = cmd.get<string>("zero", true);
string dest = cmd.get<string>("dest", true);
////////////////////////////////////////////////////////////////////////////
const int dimx = cmd.get<int>("dimx", true);
const int dimy = cmd.get<int>("dimy", true);
const int dimz = cmd.get<int>("dimz", true);
const int dimn = cmd.get<int>("dimn", true);
const int devs = cmd.get<int>("devs", true);
////////////////////////////////////////////////////////////////////////////
float Mu = cmd.get<float>("Mu", true);
float Lambda_w = cmd.get<float>("Lambda_w", true);
float Lambda_t = cmd.get<float>("Lambda_t", true);
float Gamma_w = cmd.get<float>("Gamma_w", true);
float Gamma_t = cmd.get<float>("Gamma_t", true);
float Omega_w = cmd.get<float>("Omega_w", true);
float Omega_t = cmd.get<float>("Omega_t", true);
float Ep = cmd.get<float>("Epsilon", true);
////////////////////////////////////////////////////////////////////////////
int nOuter = cmd.get<int>("nOuter", true);
int nInner = cmd.get<int>("nInner", true);
int nLoops = cmd.get<int>("nLoops", true);
////////////////////////////////////////////////////////////////////////////
/// Print out the parameters
cout << spec << endl;
cout << mask << endl;
cout << dest << endl;
printf("Size: %dx%dx%d\n", dimx, dimy, dimz);
printf("Number of GPUs: %d\n", devs);
printf("Mu : %4.4f\n", Mu);
printf("Lambda : %4.4f\t\t%4.4f\n", Lambda_w, Lambda_t);
printf("Gamma : %4.4f\t\t%4.4f\n", Gamma_w, Gamma_t);
printf("Omega : %4.4f\t\t%4.4f\n", Omega_w, Omega_t);
printf("Epsilon: %4.4f\n", Ep);
printf("Number of loops: %dx%dx%d\n", nOuter, nInner, nLoops);
////////////////////////////////////////////////////////////////////////////
/// Total problem size
const int dTotal = dimx*dimy*dimz;
const int nTotal = dimx*dimy*dimz*dimn;
/// Declare and allocate the host memories
float2 *h_spec;
float2 *h_full;
float2 *h_mask;
float2 *h_zero;
float2 *h_dest;
h_spec = new float2[nTotal];
h_full = new float2[nTotal];
h_mask = new float2[nTotal];
h_zero = new float2[nTotal];
h_dest = new float2[nTotal];
////////////////////////////////////////////////////////////////////////////
int nProcs, iProcs;
char processor_name[MPI_MAX_PROCESSOR_NAME];
int processor_name_len;
MPI::Init();
nProcs = MPI::COMM_WORLD.Get_size();
iProcs = MPI::COMM_WORLD.Get_rank();
MPI::Get_processor_name(processor_name, processor_name_len);
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
if(iProcs == 0)
{
/// Read data from file and store to memory
checkReadFile(spec.c_str(), h_spec, nTotal*sizeof(float2));
checkReadFile(mask.c_str(), h_mask, nTotal*sizeof(float2));
}
////////////////////////////////////////////////////////////////////////////
// hipSetDevice(iProcs); //8 GPUs for each node
hipSetDevice(2*(iProcs&3) + 0); //8 GPUs for each node
hipDeviceReset();
const int pTotal = nTotal/nProcs;
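	// Per-rank element count; integer division assumes nTotal divides evenly across
	// the MPI ranks, otherwise the trailing remainder would simply be dropped.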
int rank[3] = {dimx, dimy, dimz};
/// Create Fourier plan 2.5d.
hipfftHandle plan;
hipfftPlanMany(&plan,
2, //Dimensionality of the transform (1, 2, or 3)
rank, //Array of size rank, describing the size of each dimension
NULL,
1, //Distance between two successive input elements in the innermost dimension
dimy*dimx, //Distance between the first element of two consecutive signals
NULL,
1,
dimy*dimx,
HIPFFT_C2C,
dimz);
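	// The plan is a batched 2-D C2C FFT: only the first two entries of rank[] are read
	// (dimensionality is 2), and the dimz frames of each dimx x dimy slice are
	// transformed independently, consecutive frames being dimx*dimy elements apart.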
float2 *p_spec, *p_full;
p_spec = new float2[pTotal];
p_full = new float2[pTotal];
float2 *d_spec, *d_full;
hipMalloc((void**)&d_spec, pTotal*sizeof(float2));
hipMalloc((void**)&d_full, pTotal*sizeof(float2));
////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the full data </summary>
/// Copy the spectrum from host to device
MPI::COMM_WORLD.Scatter(h_spec, pTotal, MPI::DOUBLE,
p_spec, pTotal, MPI::DOUBLE, 0);
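	// The payload is float2 (8 bytes) but is scattered as MPI::DOUBLE (also 8 bytes),
	// so counting pTotal complex samples as pTotal doubles transfers the right bytes.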
hipMemcpyAsync(d_spec, p_spec, pTotal*sizeof(float2), hipMemcpyDefault);
hipDeviceSynchronize();
checkLastError();
dft(d_spec, d_full, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_full, d_full, dimx, dimy, dimz, 1.0f/(dimx*dimy) );
hipDeviceSynchronize();
checkLastError();
hipMemcpyAsync(p_full, d_full, pTotal*sizeof(float2), hipMemcpyDefault);
hipDeviceSynchronize();
checkLastError();
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_full, pTotal, MPI::DOUBLE,
h_full, pTotal, MPI::DOUBLE, 0);
if(iProcs == 0)
{
checkWriteFile(full.c_str(), h_full, nTotal*sizeof(float2));
}
//////////////////////////////////////////////////////////////////////////////
float2 *p_mask, *p_fill, *p_zero;
p_mask = new float2[pTotal];
p_fill = new float2[pTotal];
p_zero = new float2[pTotal];
float2 *d_mask, *d_fill, *d_zero;
hipMalloc((void**)&d_mask, pTotal*sizeof(float2));
hipMalloc((void**)&d_fill, pTotal*sizeof(float2));
hipMalloc((void**)&d_zero, pTotal*sizeof(float2));
//////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the zero filling data </summary>
/// Copy the mask from host to device
MPI::COMM_WORLD.Scatter(h_mask, pTotal, MPI::DOUBLE,
p_mask, pTotal, MPI::DOUBLE, 0);
hipMemcpyAsync(d_mask, p_mask, pTotal*sizeof(float2), hipMemcpyDefault);
hipDeviceSynchronize();
checkLastError();
/// Subsampling kspace
mul(d_spec, d_mask, d_fill, dimx, dimy, dimz);
/// Perform Inverse Fourier and Scale
dft(d_fill, d_zero, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_zero, d_zero, dimx, dimy, dimz, 1.0f/(dimx*dimy) );
hipDeviceSynchronize();
checkLastError();
hipMemcpyAsync(p_zero, d_zero, pTotal*sizeof(float2), hipMemcpyDefault);
hipDeviceSynchronize();
checkLastError();
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_zero, pTotal, MPI::DOUBLE,
h_zero, pTotal, MPI::DOUBLE, 0);
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
if(iProcs == 0)
{
checkWriteFile(zero.c_str(), h_zero, nTotal*sizeof(float2));
}
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
////////////////////////////////////////////////////////////////////////////
// hipSetDevice(iProcs);
MPI::COMM_WORLD.Barrier();
float2 *p_dest;
p_dest = new float2[pTotal];
// size_t avail, total;
// hipMemGetInfo( &avail, &total );
// size_t used = total-avail;
// //DEBUG
// printf(" Used %lu / Total %lu ( %.2f % ) %dth \n",used,total, ( (double)used/(double)total)*100, iProcs);
////////////////////////////////////////////////////////////////////////////
// int id, np, i;
// MPI_Init(&argc, &argv);
// MPI_Comm_size(MPI_COMM_WORLD, &np);
// MPI_Comm_rank(MPI_COMM_WORLD, &id);
int id;
hipGetDevice(&id);
checkLastError();
printf("We are using device %dth at %s\n", id, processor_name);
// MPI::COMM_WORLD.Barrier();
////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the compressive sensing data </summary>
	/// <summary> Reserve Memory for the auxiliary variables. </summary>
// size_t avail, total;
// hipMemGetInfo( &avail, &total );
// size_t used = total-avail;
// //DEBUG
// printf(" Used %lu / Total %lu ( %.2f % ) %dth \n",used,total, ( (double)used/(double)total)*100, iProcs);
float2 *d_f, *d_f0, *d_ft;
hipMalloc((void**)&d_f, pTotal*sizeof(float2));
checkLastError();
hipMalloc((void**)&d_f0, pTotal*sizeof(float2));
checkLastError();
hipMalloc((void**)&d_ft, pTotal*sizeof(float2));
checkLastError();
hipDeviceSynchronize();
float2 *d_Ax, *d_rhs, *d_murf, *d_Rft, *d_Rf;
hipMalloc((void**)&d_Ax , pTotal*sizeof(float2));
checkLastError();
hipMalloc((void**)&d_rhs , pTotal*sizeof(float2));
hipMalloc((void**)&d_murf, pTotal*sizeof(float2));
hipMalloc((void**)&d_Rft , pTotal*sizeof(float2));
hipMalloc((void**)&d_Rf , pTotal*sizeof(float2));
checkLastError();
hipMemset(d_Ax, 0, pTotal*sizeof(float2));
checkLastError();
hipDeviceSynchronize();
checkLastError();
float2 *d_R;
hipMalloc((void**)&d_R, pTotal*sizeof(float2));
hipMemset(d_R, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_cu;
hipMalloc((void**)&d_cu, pTotal*sizeof(float2));
hipMemset(d_cu, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_x, *d_y, *d_z, *d_w;
hipMalloc((void**)&d_x, pTotal*sizeof(float2));
hipMalloc((void**)&d_y, pTotal*sizeof(float2));
hipMalloc((void**)&d_z, pTotal*sizeof(float2));
hipMalloc((void**)&d_w, pTotal*sizeof(float2));
hipMemset(d_x, 0, pTotal*sizeof(float2));
hipMemset(d_y, 0, pTotal*sizeof(float2));
hipMemset(d_z, 0, pTotal*sizeof(float2));
hipMemset(d_w, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_dx, *d_dy, *d_dz, *d_dw;
hipMalloc((void**)&d_dx, pTotal*sizeof(float2));
hipMalloc((void**)&d_dy, pTotal*sizeof(float2));
hipMalloc((void**)&d_dz, pTotal*sizeof(float2));
hipMalloc((void**)&d_dw, pTotal*sizeof(float2));
hipMemset(d_dx, 0, pTotal*sizeof(float2));
hipMemset(d_dy, 0, pTotal*sizeof(float2));
hipMemset(d_dz, 0, pTotal*sizeof(float2));
hipMemset(d_dw, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_lx, *d_ly, *d_lz, *d_lw;
hipMalloc((void**)&d_lx, pTotal*sizeof(float2));
hipMalloc((void**)&d_ly, pTotal*sizeof(float2));
hipMalloc((void**)&d_lz, pTotal*sizeof(float2));
hipMalloc((void**)&d_lw, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_tx, *d_ty, *d_tz, *d_tw;
hipMalloc((void**)&d_tx, pTotal*sizeof(float2));
hipMalloc((void**)&d_ty, pTotal*sizeof(float2));
hipMalloc((void**)&d_tz, pTotal*sizeof(float2));
hipMalloc((void**)&d_tw, pTotal*sizeof(float2));
hipMemset(d_tx, 0, pTotal*sizeof(float2));
hipMemset(d_ty, 0, pTotal*sizeof(float2));
hipMemset(d_tz, 0, pTotal*sizeof(float2));
hipMemset(d_tw, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_bx, *d_by, *d_bz, *d_bw;
hipMalloc((void**)&d_bx, pTotal*sizeof(float2));
hipMalloc((void**)&d_by, pTotal*sizeof(float2));
hipMalloc((void**)&d_bz, pTotal*sizeof(float2));
hipMalloc((void**)&d_bw, pTotal*sizeof(float2));
hipMemset(d_bx, 0, pTotal*sizeof(float2));
hipMemset(d_by, 0, pTotal*sizeof(float2));
hipMemset(d_bz, 0, pTotal*sizeof(float2));
hipMemset(d_bw, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_xbx, *d_yby, *d_zbz, *d_wbw;
hipMalloc((void**)&d_xbx, pTotal*sizeof(float2));
hipMalloc((void**)&d_yby, pTotal*sizeof(float2));
hipMalloc((void**)&d_zbz, pTotal*sizeof(float2));
hipMalloc((void**)&d_wbw, pTotal*sizeof(float2));
hipMemset(d_xbx, 0, pTotal*sizeof(float2));
hipMemset(d_yby, 0, pTotal*sizeof(float2));
hipMemset(d_zbz, 0, pTotal*sizeof(float2));
hipMemset(d_wbw, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
float2 *d_dxbx, *d_dyby, *d_dzbz, *d_dwbw;
hipMalloc((void**)&d_dxbx, pTotal*sizeof(float2));
hipMalloc((void**)&d_dyby, pTotal*sizeof(float2));
hipMalloc((void**)&d_dzbz, pTotal*sizeof(float2));
hipMalloc((void**)&d_dwbw, pTotal*sizeof(float2));
hipMemset(d_dxbx, 0, pTotal*sizeof(float2));
hipMemset(d_dyby, 0, pTotal*sizeof(float2));
hipMemset(d_dzbz, 0, pTotal*sizeof(float2));
hipMemset(d_dwbw, 0, pTotal*sizeof(float2));
hipDeviceSynchronize();
checkLastError();
/// Copy kspace
MPI::COMM_WORLD.Scatter(h_spec, pTotal, MPI::DOUBLE,
p_spec, pTotal, MPI::DOUBLE, 0);
hipMemcpyAsync(d_spec, p_spec, pTotal*sizeof(float2), hipMemcpyDefault);
/// Copy mask
MPI::COMM_WORLD.Scatter(h_mask, pTotal, MPI::DOUBLE,
p_mask, pTotal, MPI::DOUBLE, 0);
hipMemcpyAsync(d_R, p_mask, pTotal*sizeof(float2), hipMemcpyDefault);
/// Multiply the mask with the full kspace
mul(d_spec, d_R, d_f, dimx, dimy, dimz);
/// Prepare the interpolation
hipMemcpy(d_f0 , d_f, pTotal*sizeof(float2), hipMemcpyDeviceToDevice);
hipMemcpy(d_ft , d_f, pTotal*sizeof(float2), hipMemcpyDeviceToDevice);
dft(d_f, d_cu, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_cu, d_cu, dimx, dimy, dimz, 1.0f/(dimx*dimy));
scale(d_cu, d_murf, dimx, dimy, dimz, Mu);
bool isContinue = true;
float diff = 0.0f;
int iOuter = 0;
int iInner = 0;
int iLoops = 0;
/// Start the iterative method
// GpuTimer timer;
// timer.Start();
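	/// Each inner iteration: (1) rebuild the right-hand side from the shrunk variables
	/// minus their Bregman terms, (2) run nLoops Richardson updates of the estimate
	/// d_cu, (3) re-shrink the derivative/wavelet coefficients, and (4) update the
	/// Bregman variables; each outer iteration then refreshes the k-space
	/// interpolation term d_murf.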
for(iOuter=0; iOuter<nOuter && isContinue ; iOuter++)
{
for(iInner=0; iInner<nInner && isContinue; iInner++)
{
			/// Update Right Hand Side term.
sub(d_x, d_bx, d_xbx, dimx, dimy, dimz);
sub(d_y, d_by, d_yby, dimx, dimy, dimz);
sub(d_z, d_bz, d_zbz, dimx, dimy, dimz);
sub(d_w, d_bw, d_wbw, dimx, dimy, dimz);
dxt(d_xbx, d_tx, dimx, dimy, dimz, DDT_INVERSE);
dyt(d_yby, d_ty, dimx, dimy, dimz, DDT_INVERSE);
dzt(d_zbz, d_tz, dimx, dimy, dimz, DDT_INVERSE);
dwt(d_wbw, d_tw, dimx, dimy, dimz, DWT_INVERSE);
scale(d_tx, d_tx, dimx, dimy, dimz, Lambda_w);
scale(d_ty, d_ty, dimx, dimy, dimz, Lambda_w);
scale(d_tz, d_tz, dimx, dimy, dimz, Gamma_w);
scale(d_tw, d_tw, dimx, dimy, dimz, Omega_w);
add(d_tx, d_ty, d_tz, d_tw, d_murf, d_rhs, dimx, dimy, dimz);
/// Update u term.
for(iLoops=0; iLoops<nLoops; iLoops++)
{
dxt(d_cu, d_lx, dimx, dimy, dimz, DDT_LAPLACIAN);
dyt(d_cu, d_ly, dimx, dimy, dimz, DDT_LAPLACIAN);
dzt(d_cu, d_lz, dimx, dimy, dimz, DDT_LAPLACIAN);
scale(d_lx, d_lx, dimx, dimy, dimz, Lambda_w);
scale(d_ly, d_ly, dimx, dimy, dimz, Lambda_w);
scale(d_lz, d_lz, dimx, dimy, dimz, Gamma_w);
scale(d_cu, d_lw, dimx, dimy, dimz, Omega_w);
dft(d_cu, d_Ax, dimx, dimy, dimz, DFT_FORWARD, plan);
mul(d_Ax, d_R, d_Ax, dimx, dimy, dimz);
dft(d_Ax, d_Ax, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_Ax, d_Ax, dimx, dimy, dimz, 1.0f/(dimx*dimy)*Mu);
add(d_lx, d_ly, d_lz, d_lw, d_Ax, d_Ax, dimx, dimy, dimz);
sub(d_rhs, d_Ax, d_Ax, dimx, dimy, dimz);
scale(d_Ax, d_Ax, dimx, dimy, dimz, Ep);
add(d_cu, d_Ax, d_cu, dimx, dimy, dimz);
}
/// Update x, y, z.
dxt(d_cu, d_dx, dimx, dimy, dimz, DDT_FORWARD);
dyt(d_cu, d_dy, dimx, dimy, dimz, DDT_FORWARD);
dzt(d_cu, d_dz, dimx, dimy, dimz, DDT_FORWARD);
dwt(d_cu, d_dw, dimx, dimy, dimz, DWT_FORWARD);
add(d_dx, d_bx, d_dxbx, dimx, dimy, dimz);
add(d_dy, d_by, d_dyby, dimx, dimy, dimz);
add(d_dz, d_bz, d_dzbz, dimx, dimy, dimz);
add(d_dw, d_bw, d_dwbw, dimx, dimy, dimz);
shrink2(d_dxbx, d_dyby, d_x, d_y, dimx, dimy, dimz, Lambda_t);
// shrink1(d_dxbx, d_x, dimx, dimy, dimz, Lambda_t);
// shrink1(d_dyby, d_y, dimx, dimy, dimz, Lambda_t);
shrink1(d_dzbz, d_z, dimx, dimy, dimz, Gamma_t);
shrink1(d_dwbw, d_w, dimx, dimy, dimz, Omega_t);
/// Update Bregman parameters.
sub(d_dxbx, d_x, d_bx, dimx, dimy, dimz);
sub(d_dyby, d_y, d_by, dimx, dimy, dimz);
sub(d_dzbz, d_z, d_bz, dimx, dimy, dimz);
sub(d_dwbw, d_w, d_bw, dimx, dimy, dimz);
}
/// Update Interpolation
dft(d_cu, d_ft , dimx , dimy, dimz, DFT_FORWARD, plan);
mul(d_ft, d_R , d_Rft, dimx, dimy, dimz);
add(d_f0, d_f , d_f , dimx, dimy, dimz);
sub(d_f , d_Rft , d_f , dimx, dimy, dimz);
mul(d_f , d_R , d_Rf , dimx, dimy, dimz);
dft(d_Rf, d_murf, dimx , dimy, dimz, DFT_INVERSE, plan);
scale(d_murf, d_murf, dimx, dimy, dimz, 1.0f/(dimx*dimy)*Mu);
}
/// Copy the reconstruction from device to host
hipMemcpyAsync(p_dest, d_cu, pTotal*sizeof(float2), hipMemcpyDefault);
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_dest, pTotal, MPI::DOUBLE,
h_dest, pTotal, MPI::DOUBLE, 0);
/// Write the full reconstruction to binary file
if(iProcs == 0)
checkWriteFile(dest.c_str(), h_dest, nTotal*sizeof(float2));
// cout << "CS Reconstruction: " << timer.Elapsed() << endl;
////////////////////////////////////////////////////////////////////////////
MPI::Finalize();
return 0;
} | d63e81242760f384ed33a6f01798a20ac47d93c8.cu | #include <omp.h>
#include <mpi.h>
#include <cuda.h>
#include <cufft.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
#include <algorithm>
#include <iostream>
#include <fstream>
#include <iomanip>
#include "cmdparser.hpp"
#include "utility.hpp"
#include "timer.hpp"
#include "add.hpp"
#include "sub.hpp"
#include "mul.hpp"
#include "ddt.hpp"
#include "dft.hpp"
#include "dwt.hpp"
#include "shrink.hpp"
using namespace std;
using namespace csmri;
#define MAX_DEVICES_PER_NODE 8
#define MAX_DEVICES_PER_PROC 8
int main(int argc, const char* argv[])
{
const char* key =
"{ h |help | | print help message }"
"{ |spec | | Binary file of spectrum (kspace) - input }"
"{ |full | | Binary file of full reconstruction}"
"{ |mask | | Binary file of mask - input}"
"{ |zero | | Binary file of zero filling reconstruction}"
"{ |dest | | Binary file of reconstruction}"
"{ |dimx | | Number of the columns }"
"{ |dimy | | Number of the rows }"
"{ |dimz | | Temporal resolution }"
"{ |dimn | | Number of slices }"
"{ |devs | 1 | Number of GPUs }"
"{ |Mu | 0.100 | Weight of Interpolation }"
"{ |Lambda_w | 0.005 | Weight of Lambda }"
"{ |Lambda_t | 1.000 | Threshold of Lambda }"
"{ |Gamma_w | 0.200 | Weight of Gamma }"
"{ |Gamma_t | 1.000 | Threshold of Gamma }"
"{ |Omega_w | 0.600 | Weight of Omega }"
"{ |Omega_t | 1.000 | Threshold of Omega}"
"{ |Epsilon | 0.700 | Epsilon of Richardson loop}"
"{ |nOuter | 4 | Number of Outer loops}"
"{ |nInner | 8 | Number of Inner loops}"
"{ |nLoops | 4 | Number of Richardson loops}";
CommandLineParser cmd(argc, argv, key);
if (argc == 1)
{
cout << "Usage: " << argv[0] << " [options]" << endl;
cout << "Avaible options:" << endl;
cmd.printParams();
return 0;
}
// cmd.printParams();
string spec = cmd.get<string>("spec", true);
string full = cmd.get<string>("full", true);
string mask = cmd.get<string>("mask", true);
string zero = cmd.get<string>("zero", true);
string dest = cmd.get<string>("dest", true);
////////////////////////////////////////////////////////////////////////////
const int dimx = cmd.get<int>("dimx", true);
const int dimy = cmd.get<int>("dimy", true);
const int dimz = cmd.get<int>("dimz", true);
const int dimn = cmd.get<int>("dimn", true);
const int devs = cmd.get<int>("devs", true);
////////////////////////////////////////////////////////////////////////////
float Mu = cmd.get<float>("Mu", true);
float Lambda_w = cmd.get<float>("Lambda_w", true);
float Lambda_t = cmd.get<float>("Lambda_t", true);
float Gamma_w = cmd.get<float>("Gamma_w", true);
float Gamma_t = cmd.get<float>("Gamma_t", true);
float Omega_w = cmd.get<float>("Omega_w", true);
float Omega_t = cmd.get<float>("Omega_t", true);
float Ep = cmd.get<float>("Epsilon", true);
////////////////////////////////////////////////////////////////////////////
int nOuter = cmd.get<int>("nOuter", true);
int nInner = cmd.get<int>("nInner", true);
int nLoops = cmd.get<int>("nLoops", true);
////////////////////////////////////////////////////////////////////////////
/// Print out the parameters
cout << spec << endl;
cout << mask << endl;
cout << dest << endl;
printf("Size: %dx%dx%d\n", dimx, dimy, dimz);
printf("Number of GPUs: %d\n", devs);
printf("Mu : %4.4f\n", Mu);
printf("Lambda : %4.4f\t\t%4.4f\n", Lambda_w, Lambda_t);
printf("Gamma : %4.4f\t\t%4.4f\n", Gamma_w, Gamma_t);
printf("Omega : %4.4f\t\t%4.4f\n", Omega_w, Omega_t);
printf("Epsilon: %4.4f\n", Ep);
printf("Number of loops: %dx%dx%d\n", nOuter, nInner, nLoops);
////////////////////////////////////////////////////////////////////////////
/// Total problem size
const int dTotal = dimx*dimy*dimz;
const int nTotal = dimx*dimy*dimz*dimn;
/// Declare and allocate the host memories
float2 *h_spec;
float2 *h_full;
float2 *h_mask;
float2 *h_zero;
float2 *h_dest;
h_spec = new float2[nTotal];
h_full = new float2[nTotal];
h_mask = new float2[nTotal];
h_zero = new float2[nTotal];
h_dest = new float2[nTotal];
////////////////////////////////////////////////////////////////////////////
int nProcs, iProcs;
char processor_name[MPI_MAX_PROCESSOR_NAME];
int processor_name_len;
MPI::Init();
nProcs = MPI::COMM_WORLD.Get_size();
iProcs = MPI::COMM_WORLD.Get_rank();
MPI::Get_processor_name(processor_name, processor_name_len);
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
if(iProcs == 0)
{
/// Read data from file and store to memory
checkReadFile(spec.c_str(), h_spec, nTotal*sizeof(float2));
checkReadFile(mask.c_str(), h_mask, nTotal*sizeof(float2));
}
////////////////////////////////////////////////////////////////////////////
// cudaSetDevice(iProcs); //8 GPUs for each node
cudaSetDevice(2*(iProcs&3) + 0); //8 GPUs for each node
cudaDeviceReset();
const int pTotal = nTotal/nProcs;
int rank[3] = {dimx, dimy, dimz};
/// Create Fourier plan 2.5d.
cufftHandle plan;
cufftPlanMany(&plan,
2, //Dimensionality of the transform (1, 2, or 3)
rank, //Array of size rank, describing the size of each dimension
NULL,
1, //Distance between two successive input elements in the innermost dimension
dimy*dimx, //Distance between the first element of two consecutive signals
NULL,
1,
dimy*dimx,
CUFFT_C2C,
dimz);
float2 *p_spec, *p_full;
p_spec = new float2[pTotal];
p_full = new float2[pTotal];
float2 *d_spec, *d_full;
cudaMalloc((void**)&d_spec, pTotal*sizeof(float2));
cudaMalloc((void**)&d_full, pTotal*sizeof(float2));
////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the full data </summary>
/// Copy the spectrum from host to device
MPI::COMM_WORLD.Scatter(h_spec, pTotal, MPI::DOUBLE,
p_spec, pTotal, MPI::DOUBLE, 0);
cudaMemcpyAsync(d_spec, p_spec, pTotal*sizeof(float2), cudaMemcpyDefault);
cudaDeviceSynchronize();
checkLastError();
dft(d_spec, d_full, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_full, d_full, dimx, dimy, dimz, 1.0f/(dimx*dimy) );
cudaDeviceSynchronize();
checkLastError();
cudaMemcpyAsync(p_full, d_full, pTotal*sizeof(float2), cudaMemcpyDefault);
cudaDeviceSynchronize();
checkLastError();
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_full, pTotal, MPI::DOUBLE,
h_full, pTotal, MPI::DOUBLE, 0);
if(iProcs == 0)
{
checkWriteFile(full.c_str(), h_full, nTotal*sizeof(float2));
}
//////////////////////////////////////////////////////////////////////////////
float2 *p_mask, *p_fill, *p_zero;
p_mask = new float2[pTotal];
p_fill = new float2[pTotal];
p_zero = new float2[pTotal];
float2 *d_mask, *d_fill, *d_zero;
cudaMalloc((void**)&d_mask, pTotal*sizeof(float2));
cudaMalloc((void**)&d_fill, pTotal*sizeof(float2));
cudaMalloc((void**)&d_zero, pTotal*sizeof(float2));
//////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the zero filling data </summary>
/// Copy the mask from host to device
MPI::COMM_WORLD.Scatter(h_mask, pTotal, MPI::DOUBLE,
p_mask, pTotal, MPI::DOUBLE, 0);
cudaMemcpyAsync(d_mask, p_mask, pTotal*sizeof(float2), cudaMemcpyDefault);
cudaDeviceSynchronize();
checkLastError();
/// Subsampling kspace
mul(d_spec, d_mask, d_fill, dimx, dimy, dimz);
/// Perform Inverse Fourier and Scale
dft(d_fill, d_zero, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_zero, d_zero, dimx, dimy, dimz, 1.0f/(dimx*dimy) );
cudaDeviceSynchronize();
checkLastError();
cudaMemcpyAsync(p_zero, d_zero, pTotal*sizeof(float2), cudaMemcpyDefault);
cudaDeviceSynchronize();
checkLastError();
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_zero, pTotal, MPI::DOUBLE,
h_zero, pTotal, MPI::DOUBLE, 0);
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
if(iProcs == 0)
{
checkWriteFile(zero.c_str(), h_zero, nTotal*sizeof(float2));
}
// printf("Hello world from process %d out of %d at %s\n",
// iProcs, nProcs, processor_name);
////////////////////////////////////////////////////////////////////////////
// cudaSetDevice(iProcs);
MPI::COMM_WORLD.Barrier();
float2 *p_dest;
p_dest = new float2[pTotal];
// size_t avail, total;
// cudaMemGetInfo( &avail, &total );
// size_t used = total-avail;
// //DEBUG
// printf(" Used %lu / Total %lu ( %.2f % ) %dth \n",used,total, ( (double)used/(double)total)*100, iProcs);
////////////////////////////////////////////////////////////////////////////
// int id, np, i;
// MPI_Init(&argc, &argv);
// MPI_Comm_size(MPI_COMM_WORLD, &np);
// MPI_Comm_rank(MPI_COMM_WORLD, &id);
int id;
cudaGetDevice(&id);
checkLastError();
printf("We are using device %dth at %s\n", id, processor_name);
// MPI::COMM_WORLD.Barrier();
////////////////////////////////////////////////////////////////////////////
/// <summary> Reconstruct the compressive sensing data </summary>
	/// <summary> Reserve Memory for the auxiliary variables. </summary>
// size_t avail, total;
// cudaMemGetInfo( &avail, &total );
// size_t used = total-avail;
// //DEBUG
// printf(" Used %lu / Total %lu ( %.2f % ) %dth \n",used,total, ( (double)used/(double)total)*100, iProcs);
float2 *d_f, *d_f0, *d_ft;
cudaMalloc((void**)&d_f, pTotal*sizeof(float2));
checkLastError();
cudaMalloc((void**)&d_f0, pTotal*sizeof(float2));
checkLastError();
cudaMalloc((void**)&d_ft, pTotal*sizeof(float2));
checkLastError();
cudaDeviceSynchronize();
float2 *d_Ax, *d_rhs, *d_murf, *d_Rft, *d_Rf;
cudaMalloc((void**)&d_Ax , pTotal*sizeof(float2));
checkLastError();
cudaMalloc((void**)&d_rhs , pTotal*sizeof(float2));
cudaMalloc((void**)&d_murf, pTotal*sizeof(float2));
cudaMalloc((void**)&d_Rft , pTotal*sizeof(float2));
cudaMalloc((void**)&d_Rf , pTotal*sizeof(float2));
checkLastError();
cudaMemset(d_Ax, 0, pTotal*sizeof(float2));
checkLastError();
cudaDeviceSynchronize();
checkLastError();
float2 *d_R;
cudaMalloc((void**)&d_R, pTotal*sizeof(float2));
cudaMemset(d_R, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_cu;
cudaMalloc((void**)&d_cu, pTotal*sizeof(float2));
cudaMemset(d_cu, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_x, *d_y, *d_z, *d_w;
cudaMalloc((void**)&d_x, pTotal*sizeof(float2));
cudaMalloc((void**)&d_y, pTotal*sizeof(float2));
cudaMalloc((void**)&d_z, pTotal*sizeof(float2));
cudaMalloc((void**)&d_w, pTotal*sizeof(float2));
cudaMemset(d_x, 0, pTotal*sizeof(float2));
cudaMemset(d_y, 0, pTotal*sizeof(float2));
cudaMemset(d_z, 0, pTotal*sizeof(float2));
cudaMemset(d_w, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_dx, *d_dy, *d_dz, *d_dw;
cudaMalloc((void**)&d_dx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dy, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dw, pTotal*sizeof(float2));
cudaMemset(d_dx, 0, pTotal*sizeof(float2));
cudaMemset(d_dy, 0, pTotal*sizeof(float2));
cudaMemset(d_dz, 0, pTotal*sizeof(float2));
cudaMemset(d_dw, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_lx, *d_ly, *d_lz, *d_lw;
cudaMalloc((void**)&d_lx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_ly, pTotal*sizeof(float2));
cudaMalloc((void**)&d_lz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_lw, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_tx, *d_ty, *d_tz, *d_tw;
cudaMalloc((void**)&d_tx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_ty, pTotal*sizeof(float2));
cudaMalloc((void**)&d_tz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_tw, pTotal*sizeof(float2));
cudaMemset(d_tx, 0, pTotal*sizeof(float2));
cudaMemset(d_ty, 0, pTotal*sizeof(float2));
cudaMemset(d_tz, 0, pTotal*sizeof(float2));
cudaMemset(d_tw, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_bx, *d_by, *d_bz, *d_bw;
cudaMalloc((void**)&d_bx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_by, pTotal*sizeof(float2));
cudaMalloc((void**)&d_bz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_bw, pTotal*sizeof(float2));
cudaMemset(d_bx, 0, pTotal*sizeof(float2));
cudaMemset(d_by, 0, pTotal*sizeof(float2));
cudaMemset(d_bz, 0, pTotal*sizeof(float2));
cudaMemset(d_bw, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_xbx, *d_yby, *d_zbz, *d_wbw;
cudaMalloc((void**)&d_xbx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_yby, pTotal*sizeof(float2));
cudaMalloc((void**)&d_zbz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_wbw, pTotal*sizeof(float2));
cudaMemset(d_xbx, 0, pTotal*sizeof(float2));
cudaMemset(d_yby, 0, pTotal*sizeof(float2));
cudaMemset(d_zbz, 0, pTotal*sizeof(float2));
cudaMemset(d_wbw, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
float2 *d_dxbx, *d_dyby, *d_dzbz, *d_dwbw;
cudaMalloc((void**)&d_dxbx, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dyby, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dzbz, pTotal*sizeof(float2));
cudaMalloc((void**)&d_dwbw, pTotal*sizeof(float2));
cudaMemset(d_dxbx, 0, pTotal*sizeof(float2));
cudaMemset(d_dyby, 0, pTotal*sizeof(float2));
cudaMemset(d_dzbz, 0, pTotal*sizeof(float2));
cudaMemset(d_dwbw, 0, pTotal*sizeof(float2));
cudaDeviceSynchronize();
checkLastError();
/// Copy kspace
MPI::COMM_WORLD.Scatter(h_spec, pTotal, MPI::DOUBLE,
p_spec, pTotal, MPI::DOUBLE, 0);
cudaMemcpyAsync(d_spec, p_spec, pTotal*sizeof(float2), cudaMemcpyDefault);
/// Copy mask
MPI::COMM_WORLD.Scatter(h_mask, pTotal, MPI::DOUBLE,
p_mask, pTotal, MPI::DOUBLE, 0);
cudaMemcpyAsync(d_R, p_mask, pTotal*sizeof(float2), cudaMemcpyDefault);
/// Multiply the mask with the full kspace
mul(d_spec, d_R, d_f, dimx, dimy, dimz);
/// Prepare the interpolation
cudaMemcpy(d_f0 , d_f, pTotal*sizeof(float2), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_ft , d_f, pTotal*sizeof(float2), cudaMemcpyDeviceToDevice);
dft(d_f, d_cu, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_cu, d_cu, dimx, dimy, dimz, 1.0f/(dimx*dimy));
scale(d_cu, d_murf, dimx, dimy, dimz, Mu);
bool isContinue = true;
float diff = 0.0f;
int iOuter = 0;
int iInner = 0;
int iLoops = 0;
/// Start the iterative method
// GpuTimer timer;
// timer.Start();
for(iOuter=0; iOuter<nOuter && isContinue ; iOuter++)
{
for(iInner=0; iInner<nInner && isContinue; iInner++)
{
			/// Update Right Hand Side term.
sub(d_x, d_bx, d_xbx, dimx, dimy, dimz);
sub(d_y, d_by, d_yby, dimx, dimy, dimz);
sub(d_z, d_bz, d_zbz, dimx, dimy, dimz);
sub(d_w, d_bw, d_wbw, dimx, dimy, dimz);
dxt(d_xbx, d_tx, dimx, dimy, dimz, DDT_INVERSE);
dyt(d_yby, d_ty, dimx, dimy, dimz, DDT_INVERSE);
dzt(d_zbz, d_tz, dimx, dimy, dimz, DDT_INVERSE);
dwt(d_wbw, d_tw, dimx, dimy, dimz, DWT_INVERSE);
scale(d_tx, d_tx, dimx, dimy, dimz, Lambda_w);
scale(d_ty, d_ty, dimx, dimy, dimz, Lambda_w);
scale(d_tz, d_tz, dimx, dimy, dimz, Gamma_w);
scale(d_tw, d_tw, dimx, dimy, dimz, Omega_w);
add(d_tx, d_ty, d_tz, d_tw, d_murf, d_rhs, dimx, dimy, dimz);
/// Update u term.
for(iLoops=0; iLoops<nLoops; iLoops++)
{
dxt(d_cu, d_lx, dimx, dimy, dimz, DDT_LAPLACIAN);
dyt(d_cu, d_ly, dimx, dimy, dimz, DDT_LAPLACIAN);
dzt(d_cu, d_lz, dimx, dimy, dimz, DDT_LAPLACIAN);
scale(d_lx, d_lx, dimx, dimy, dimz, Lambda_w);
scale(d_ly, d_ly, dimx, dimy, dimz, Lambda_w);
scale(d_lz, d_lz, dimx, dimy, dimz, Gamma_w);
scale(d_cu, d_lw, dimx, dimy, dimz, Omega_w);
dft(d_cu, d_Ax, dimx, dimy, dimz, DFT_FORWARD, plan);
mul(d_Ax, d_R, d_Ax, dimx, dimy, dimz);
dft(d_Ax, d_Ax, dimx, dimy, dimz, DFT_INVERSE, plan);
scale(d_Ax, d_Ax, dimx, dimy, dimz, 1.0f/(dimx*dimy)*Mu);
add(d_lx, d_ly, d_lz, d_lw, d_Ax, d_Ax, dimx, dimy, dimz);
sub(d_rhs, d_Ax, d_Ax, dimx, dimy, dimz);
scale(d_Ax, d_Ax, dimx, dimy, dimz, Ep);
add(d_cu, d_Ax, d_cu, dimx, dimy, dimz);
}
/// Update x, y, z.
dxt(d_cu, d_dx, dimx, dimy, dimz, DDT_FORWARD);
dyt(d_cu, d_dy, dimx, dimy, dimz, DDT_FORWARD);
dzt(d_cu, d_dz, dimx, dimy, dimz, DDT_FORWARD);
dwt(d_cu, d_dw, dimx, dimy, dimz, DWT_FORWARD);
add(d_dx, d_bx, d_dxbx, dimx, dimy, dimz);
add(d_dy, d_by, d_dyby, dimx, dimy, dimz);
add(d_dz, d_bz, d_dzbz, dimx, dimy, dimz);
add(d_dw, d_bw, d_dwbw, dimx, dimy, dimz);
shrink2(d_dxbx, d_dyby, d_x, d_y, dimx, dimy, dimz, Lambda_t);
// shrink1(d_dxbx, d_x, dimx, dimy, dimz, Lambda_t);
// shrink1(d_dyby, d_y, dimx, dimy, dimz, Lambda_t);
shrink1(d_dzbz, d_z, dimx, dimy, dimz, Gamma_t);
shrink1(d_dwbw, d_w, dimx, dimy, dimz, Omega_t);
/// Update Bregman parameters.
sub(d_dxbx, d_x, d_bx, dimx, dimy, dimz);
sub(d_dyby, d_y, d_by, dimx, dimy, dimz);
sub(d_dzbz, d_z, d_bz, dimx, dimy, dimz);
sub(d_dwbw, d_w, d_bw, dimx, dimy, dimz);
}
/// Update Interpolation
dft(d_cu, d_ft , dimx , dimy, dimz, DFT_FORWARD, plan);
mul(d_ft, d_R , d_Rft, dimx, dimy, dimz);
add(d_f0, d_f , d_f , dimx, dimy, dimz);
sub(d_f , d_Rft , d_f , dimx, dimy, dimz);
mul(d_f , d_R , d_Rf , dimx, dimy, dimz);
dft(d_Rf, d_murf, dimx , dimy, dimz, DFT_INVERSE, plan);
scale(d_murf, d_murf, dimx, dimy, dimz, 1.0f/(dimx*dimy)*Mu);
}
/// Copy the reconstruction from device to host
cudaMemcpyAsync(p_dest, d_cu, pTotal*sizeof(float2), cudaMemcpyDefault);
MPI::COMM_WORLD.Barrier();
MPI::COMM_WORLD.Gather(p_dest, pTotal, MPI::DOUBLE,
h_dest, pTotal, MPI::DOUBLE, 0);
/// Write the full reconstruction to binary file
if(iProcs == 0)
checkWriteFile(dest.c_str(), h_dest, nTotal*sizeof(float2));
// cout << "CS Reconstruction: " << timer.Elapsed() << endl;
////////////////////////////////////////////////////////////////////////////
MPI::Finalize();
return 0;
} |
5e102064f17a089b57c8b6c9ab15c703bedc37f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
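/* Worked example of one pass, using the notation above:
   current bit of each key: [0 0 1 1 0 0 1]
   1) histogram:            4 zeros, 3 ones
   2) exclusive scan:       zeros start at offset 0, ones start at offset 4
   3) relative offsets:     [0 1 0 1 2 3 2]
   4) final positions:      [0 1 4 5 2 3 6]
   i.e. each element lands at (start offset of its digit) + (its relative offset). */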
int get_max_size(int n, int d) {
return (int)ceil((float)n / (float)d) + 1;
}
__global__
void histogram_kernel(unsigned int pass,
unsigned int* d_bins,
unsigned int* const d_input,
const size_t size)
{
size_t myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= size) {
return;
}
unsigned int one = 1;
bool bin = ((d_input[myId] & (one << pass)) == (one << pass)) ? 1 : 0;
if (bin) {
atomicAdd(&d_bins[1], 1);
} else {
atomicAdd(&d_bins[0], 1);
}
}
__global__
void exclusive_scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize)
{
size_t myId = threadIdx.x + threadSize * base;
unsigned int one = 1;
if (myId >= size)
return;
unsigned int val = 0;
if (myId > 0)
val = ((d_inputVals[myId-1] & (one<<pass)) == (one<<pass)) ? 1 : 0;
else
val = 0;
d_output[myId] = val;
__syncthreads();
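// The loop below performs a Hillis-Steele inclusive scan over this chunk of threadSize
// elements directly in global memory; all threads of the chunk belong to the single
// launched block, so __syncthreads() is enough to separate the read and write phases.
// Together with the one-element shift above this yields an exclusive scan of the bit
// flags, and the final step adds the running total of the previous chunk, which is why
// the host processes the chunks sequentially.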
for (int s = 1; s <= threadSize; s *= 2) {
int spot = myId - s;
if (spot >= 0 && spot >= threadSize*base)
val = d_output[spot];
__syncthreads();
if (spot >= 0 && spot >= threadSize*base)
d_output[myId] += val;
__syncthreads();
}
if (base > 0)
d_output[myId] += d_output[base*threadSize - 1];
}
__global__
void move_kernel(unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems)
{
size_t myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numElems)
return;
unsigned int scan=0;
unsigned int base=0;
unsigned int one= 1;
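// Scatter rule: elements whose current bit is 0 keep their relative order in
// [0, one_pos), elements whose bit is 1 follow starting at one_pos (the number of
// zeros in this pass); d_scanned holds the exclusive scan of the bit flags.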
if (( d_inputVals[myId] & (one<<pass)) == (one<<pass)) {
scan = d_scanned[myId];
base = one_pos;
} else {
scan = (myId) - d_scanned[myId];
base = 0;
}
d_outputPos[base+scan] = d_inputPos[myId];//d_inputPos[0];
d_outputVals[base+scan] = d_inputVals[myId];//base+scan;//d_inputVals[0];
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
unsigned int* d_bins;
unsigned int h_bins[2];
unsigned int* d_scanned;
const size_t histo_size = 2*sizeof(unsigned int);
const size_t arr_size = numElems*sizeof(unsigned int);
checkCudaErrors(hipMalloc(&d_bins, histo_size));
checkCudaErrors(hipMalloc(&d_scanned, arr_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(numElems, thread_dim.x));
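// Per-bit pipeline: histogram of the current bit -> copy the two bin counts to the
// host -> chunked exclusive scan of the bit flags -> scatter via move_kernel ->
// copy values/positions back into the input buffers for the next pass, so the last
// pass also leaves the sorted result in d_outputVals/d_outputPos.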
for (unsigned int pass = 0; pass < 32; pass++) {
// unsigned int one = 1;
checkCudaErrors(hipMemset(d_bins, 0, histo_size));
checkCudaErrors(hipMemset(d_scanned, 0, arr_size));
checkCudaErrors(hipMemset(d_outputVals, 0, arr_size));
checkCudaErrors(hipMemset(d_outputPos, 0, arr_size));
hipLaunchKernelGGL(( histogram_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0, pass, d_bins, d_inputVals, numElems);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(&h_bins, d_bins, histo_size, hipMemcpyDeviceToHost));
for (int i = 0; i < get_max_size(numElems, thread_dim.x); i++) {
hipLaunchKernelGGL(( exclusive_scan_kernel), dim3(dim3(1)), dim3(thread_dim), 0, 0,
pass,
d_inputVals,
d_scanned,
numElems,
i,
thread_dim.x
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
hipLaunchKernelGGL(( move_kernel), dim3(hist_block_dim), dim3(thread_dim), 0, 0,
pass,
d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_scanned,
h_bins[0],
numElems
);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMemcpy(d_inputVals, d_outputVals, arr_size, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_inputPos, d_outputPos, arr_size, hipMemcpyDeviceToDevice));
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipFree(d_scanned));
checkCudaErrors(hipFree(d_bins));
}
| 5e102064f17a089b57c8b6c9ab15c703bedc37f0.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
int get_max_size(int n, int d) {
return (int)ceil((float)n / (float)d) + 1;
}
__global__
void histogram_kernel(unsigned int pass,
unsigned int* d_bins,
unsigned int* const d_input,
const size_t size)
{
size_t myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= size) {
return;
}
unsigned int one = 1;
bool bin = ((d_input[myId] & (one << pass)) == (one << pass)) ? 1 : 0;
if (bin) {
atomicAdd(&d_bins[1], 1);
} else {
atomicAdd(&d_bins[0], 1);
}
}
__global__
void exclusive_scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize)
{
size_t myId = threadIdx.x + threadSize * base;
unsigned int one = 1;
if (myId >= size)
return;
unsigned int val = 0;
if (myId > 0)
val = ((d_inputVals[myId-1] & (one<<pass)) == (one<<pass)) ? 1 : 0;
else
val = 0;
d_output[myId] = val;
__syncthreads();
for (int s = 1; s <= threadSize; s *= 2) {
int spot = myId - s;
if (spot >= 0 && spot >= threadSize*base)
val = d_output[spot];
__syncthreads();
if (spot >= 0 && spot >= threadSize*base)
d_output[myId] += val;
__syncthreads();
}
if (base > 0)
d_output[myId] += d_output[base*threadSize - 1];
}
__global__
void move_kernel(unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems)
{
size_t myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= numElems)
return;
unsigned int scan=0;
unsigned int base=0;
unsigned int one= 1;
if (( d_inputVals[myId] & (one<<pass)) == (one<<pass)) {
scan = d_scanned[myId];
base = one_pos;
} else {
scan = (myId) - d_scanned[myId];
base = 0;
}
d_outputPos[base+scan] = d_inputPos[myId];//d_inputPos[0];
d_outputVals[base+scan] = d_inputVals[myId];//base+scan;//d_inputVals[0];
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
unsigned int* d_bins;
unsigned int h_bins[2];
unsigned int* d_scanned;
const size_t histo_size = 2*sizeof(unsigned int);
const size_t arr_size = numElems*sizeof(unsigned int);
checkCudaErrors(cudaMalloc(&d_bins, histo_size));
checkCudaErrors(cudaMalloc(&d_scanned, arr_size));
dim3 thread_dim(1024);
dim3 hist_block_dim(get_max_size(numElems, thread_dim.x));
for (unsigned int pass = 0; pass < 32; pass++) {
// unsigned int one = 1;
checkCudaErrors(cudaMemset(d_bins, 0, histo_size));
checkCudaErrors(cudaMemset(d_scanned, 0, arr_size));
checkCudaErrors(cudaMemset(d_outputVals, 0, arr_size));
checkCudaErrors(cudaMemset(d_outputPos, 0, arr_size));
histogram_kernel<<<hist_block_dim, thread_dim>>>(pass, d_bins, d_inputVals, numElems);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(&h_bins, d_bins, histo_size, cudaMemcpyDeviceToHost));
for (int i = 0; i < get_max_size(numElems, thread_dim.x); i++) {
exclusive_scan_kernel<<<dim3(1), thread_dim>>>(
pass,
d_inputVals,
d_scanned,
numElems,
i,
thread_dim.x
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
move_kernel<<<hist_block_dim, thread_dim>>>(
pass,
d_inputVals,
d_inputPos,
d_outputVals,
d_outputPos,
d_scanned,
h_bins[0],
numElems
);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMemcpy(d_inputVals, d_outputVals, arr_size, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_inputPos, d_outputPos, arr_size, cudaMemcpyDeviceToDevice));
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaFree(d_scanned));
checkCudaErrors(cudaFree(d_bins));
}
|
3afae4eedca033c4a7709a154e95284f06c16aa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void SimpleMarkerKernel(T* in, T* out, int ndim) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
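// Grid-stride loop copying ndim floats; the kernel appears to exist only to give the
// RecordEvent profiler range below a small, measurable piece of device work.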
for (; idx < ndim; idx += blockDim.x * gridDim.x) {
out[idx] = in[idx];
}
}
template <typename T>
class MarkerOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto marker_role = ctx.Attr<std::string>("marker_role");
auto marker_pos = ctx.Attr<std::string>("marker_pos");
VLOG(3) << "marker role: " << marker_role
<< " marker position: " << marker_pos;
framework::Tensor A;
framework::Tensor B;
auto* in_temp = A.mutable_data<T>({32, 1}, ctx.GetPlace());
auto* out_temp = B.mutable_data<T>({32, 1}, ctx.GetPlace());
platform::RecordEvent record_event(
"MarkerCUDA", platform::EventRole::kInnerOp,
"marker_" + marker_role + "_" + marker_pos);
hipLaunchKernelGGL(( SimpleMarkerKernel<T>), dim3(1), dim3(32), 0, dev_ctx.stream(), in_temp, out_temp,
32);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(marker, ops::MarkerOpCUDAKernel<float>);
| 3afae4eedca033c4a7709a154e95284f06c16aa0.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/profiler.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void SimpleMarkerKernel(T* in, T* out, int ndim) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
for (; idx < ndim; idx += blockDim.x * gridDim.x) {
out[idx] = in[idx];
}
}
template <typename T>
class MarkerOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto marker_role = ctx.Attr<std::string>("marker_role");
auto marker_pos = ctx.Attr<std::string>("marker_pos");
VLOG(3) << "marker role: " << marker_role
<< " marker position: " << marker_pos;
framework::Tensor A;
framework::Tensor B;
auto* in_temp = A.mutable_data<T>({32, 1}, ctx.GetPlace());
auto* out_temp = B.mutable_data<T>({32, 1}, ctx.GetPlace());
platform::RecordEvent record_event(
"MarkerCUDA", platform::EventRole::kInnerOp,
"marker_" + marker_role + "_" + marker_pos);
SimpleMarkerKernel<T><<<1, 32, 0, dev_ctx.stream()>>>(in_temp, out_temp,
32);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(marker, ops::MarkerOpCUDAKernel<float>);
|
c9fdf369baebff36be287841dca234e63c74f12d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gGRUFastBackward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Allocate XSIZE*YSIZE float elements per operand (hipMalloc takes a byte count).
float *outState = NULL;
hipMalloc(&outState, XSIZE*YSIZE*sizeof(float));
float *outXW = NULL;
hipMalloc(&outXW, XSIZE*YSIZE*sizeof(float));
float *outSU = NULL;
hipMalloc(&outSU, XSIZE*YSIZE*sizeof(float));
float *outB = NULL;
hipMalloc(&outB, XSIZE*YSIZE*sizeof(float));
const float *state = NULL;
hipMalloc(&state, XSIZE*YSIZE*sizeof(float));
const float *xW = NULL;
hipMalloc(&xW, XSIZE*YSIZE*sizeof(float));
const float *sU = NULL;
hipMalloc(&sU, XSIZE*YSIZE*sizeof(float));
const float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
const float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE*sizeof(float));
const float *adj = NULL;
hipMalloc(&adj, XSIZE*YSIZE*sizeof(float));
size_t rows = XSIZE;
size_t cols = YSIZE;
bool final = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
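// Benchmark harness: one untimed launch, 10 warm-up launches, then 1000 timed launches;
// the grid is rounded up so that BLOCKX x BLOCKY blocks fully cover the matrix.
// Output format: [microseconds, (BLOCKX,BLOCKY), (XSIZE,YSIZE)].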
hipFree(0);hipLaunchKernelGGL((
gGRUFastBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gGRUFastBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gGRUFastBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c9fdf369baebff36be287841dca234e63c74f12d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gGRUFastBackward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// Allocate XSIZE*YSIZE float elements per operand (cudaMalloc takes a byte count).
float *outState = NULL;
cudaMalloc(&outState, XSIZE*YSIZE*sizeof(float));
float *outXW = NULL;
cudaMalloc(&outXW, XSIZE*YSIZE*sizeof(float));
float *outSU = NULL;
cudaMalloc(&outSU, XSIZE*YSIZE*sizeof(float));
float *outB = NULL;
cudaMalloc(&outB, XSIZE*YSIZE*sizeof(float));
const float *state = NULL;
cudaMalloc(&state, XSIZE*YSIZE*sizeof(float));
const float *xW = NULL;
cudaMalloc(&xW, XSIZE*YSIZE*sizeof(float));
const float *sU = NULL;
cudaMalloc(&sU, XSIZE*YSIZE*sizeof(float));
const float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
const float *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE*sizeof(float));
const float *adj = NULL;
cudaMalloc(&adj, XSIZE*YSIZE*sizeof(float));
size_t rows = XSIZE;
size_t cols = YSIZE;
bool final = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gGRUFastBackward<<<gridBlock,threadBlock>>>(outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gGRUFastBackward<<<gridBlock,threadBlock>>>(outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gGRUFastBackward<<<gridBlock,threadBlock>>>(outState,outXW,outSU,outB,state,xW,sU,b,mask,adj,rows,cols,final);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5a5a41771f2d03c039073a5d8c4cea4db445ace1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2016 ISP RAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "CudaDeviceProperties.h"
#include "HandleError.h"
#include <iostream>
using namespace std;
void properties () {
int count;
CHECK_CUDA (hipGetDeviceCount (&count));
cout << "count = " << count << endl;
hipDeviceProp_t prop;
for (int i = 0; i < count; i++) {
CHECK_CUDA(hipGetDeviceProperties (&prop, i));
cout << prop.name << endl;
cout << " totalGlobalMem " << prop.totalGlobalMem << endl;//print bytes
cout << " sharedMemPerBlock " << prop.sharedMemPerBlock << endl;
cout << " regsPerBlock " << prop.regsPerBlock << endl;
cout << " warpSize " << prop.warpSize << endl;
cout << " memPitch " << prop.memPitch << endl;
cout << " maxThreadsPerBlock " << prop.maxThreadsPerBlock << endl;
cout << " maxThreadsDim[3] " << prop.maxThreadsDim[1] << ' ' << prop.maxThreadsDim[2] << endl;
cout << " maxGridSize[3] " << prop.maxGridSize[1] << ' ' << prop.maxGridSize[2] << endl;
cout << " totalConstMem " << prop.totalConstMem << endl;
cout << " major " << prop.major << endl;
cout << " minor " << prop.minor << endl;
cout << " clockRate " << prop.clockRate << endl;
cout << " textureAlignment " << prop.textureAlignment << endl;
cout << " deviceOverlap " << prop.deviceOverlap << endl;
cout << " multiProcessorCount " << prop.multiProcessorCount << endl;
cout << " kernelExecTimeoutEnabled " << prop.kernelExecTimeoutEnabled << endl;
cout << " integrated " << prop.integrated << endl;
cout << " canMapHostMemory " << prop.canMapHostMemory << endl;
cout << " computeMode " << prop.computeMode << endl;
cout << " concurrentKernels " << prop.concurrentKernels << endl;
cout << " ECCEnabled " << prop.ECCEnabled << endl;
cout << " pciBusID " << prop.pciBusID << endl;
cout << " pciDeviceID " << prop.pciDeviceID << endl;
cout << " tccDriver " << prop.tccDriver << endl;
cout << endl;
cout << " asyncEngineCount " << prop.asyncEngineCount << endl;
//cout << " concurrentManagedAccess " << prop.concurrentManagedAccess << endl;//Device can coherently access managed memory concurrently with the CPU
cout << " globalL1CacheSupported " << prop.globalL1CacheSupported << endl;
//cout << " hostNativeAtomicSupported " << prop.hostNativeAtomicSupported << endl;//Link between the device and the host supports native atomic operations
cout << " isMultiGpuBoard " << prop.isMultiGpuBoard << endl;
cout << " l2CacheSize " << prop.l2CacheSize << endl;
cout << " managedMemory " << prop.managedMemory << endl;
//cout << " maxSurface1D[2] " << prop.maxSurface1D[1] << prop.maxSurface1D[2] << endl;//Maximum 1D surface size
cout << " maxSurface1DLayered[2] " << prop.maxSurface1DLayered[1] << endl;
cout << " maxSurface2DLayered[3] " << prop.maxSurface2DLayered[1] << ' ' << prop.maxSurface2DLayered[2] << endl;
cout << " maxSurface3D[3] " << prop.maxSurface3D[1] << ' ' << prop.maxSurface3D[2] << endl;
cout << " maxSurfaceCubemap " << prop.maxSurfaceCubemap << endl;
cout << " maxSurfaceCubemapLayered[2] " << prop.maxSurfaceCubemapLayered[1] << endl;
cout << " maxTexture1D " << prop.maxTexture1D << endl;
cout << " maxTexture1DLayered[2] " << prop.maxTexture1DLayered[1] << endl;
cout << " maxTexture1DLinear " << prop.maxTexture1DLinear << endl;
cout << " maxTexture1DMipmap " << prop.maxTexture1DMipmap << endl;
cout << " maxTexture2D[2] " << prop.maxTexture2D[1] << endl;
cout << " maxTexture2DGather[2] " << prop.maxTexture2DGather[1] << endl;
cout << " maxTexture2DLayered[3] " << prop.maxTexture2DLayered[1] << ' ' << prop.maxTexture2DLayered[2] << endl;
cout << " maxTexture2DLinear[3] " << prop.maxTexture2DLinear[1] << ' ' << prop.maxTexture2DLinear[2] << endl;
cout << " maxTexture2DMipmap[2] " << prop.maxTexture2DMipmap[1] << endl;
//cout << " maxTexture3D[3] " << prop.maxTexture3D[1] << maxTexture3D[2] << prop.maxTexture3D[3] << endl; //Maximum 3D texture dimensions
//cout << " maxTexture3DAlt[3] " << propmaxTexture3DAlt[1] << prop.maxTexture3DAlt[2] << prop.maxTexture3DAlt[3] << endl; //Maximum alternate 3D texture dimensions
cout << " maxTextureCubemap " << prop.maxTextureCubemap << endl;
cout << " maxTextureCubemapLayered[2] " << prop.maxTextureCubemapLayered[1] << endl;
cout << " maxThreadsPerMultiProcessor " << prop.maxThreadsPerMultiProcessor << endl;
cout << " memoryBusWidth " << prop.memoryBusWidth << endl;
cout << " memoryClockRate " << prop.memoryClockRate << endl;
cout << " multiGpuBoardGroupID " << prop.multiGpuBoardGroupID << endl;
//cout << " pageableMemoryAccess " << prop.pageableMemoryAccess << endl; //Device supports coherently accessing pageable memory without calling hipHostRegister on it
cout << " pciDomainID " << prop.pciDomainID << endl;
cout << " regsPerMultiprocessor " << prop.regsPerMultiprocessor << endl;
cout << " sharedMemPerMultiprocessor " << prop.sharedMemPerMultiprocessor << endl;
//cout << " singleToDoublePrecisionPerfRatio " << prop.singleToDoublePrecisionPerfRatio << endl; //Ratio of single precision performance (in floating-point operations per second) to double precision performance
cout << " streamPrioritiesSupported " << prop.streamPrioritiesSupported << endl;
cout << " surfaceAlignment " << prop.surfaceAlignment << endl;
cout << " texturePitchAlignment " << prop.texturePitchAlignment << endl;
cout << " unifiedAddressing " << prop.unifiedAddressing << endl;
}
}
| 5a5a41771f2d03c039073a5d8c4cea4db445ace1.cu | /**
* Copyright (c) 2016 ISP RAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "CudaDeviceProperties.h"
#include "HandleError.h"
#include <iostream>
using namespace std;
void properties () {
int count;
CHECK_CUDA (cudaGetDeviceCount (&count));
cout << "count = " << count << endl;
cudaDeviceProp prop;
for (int i = 0; i < count; i++) {
CHECK_CUDA(cudaGetDeviceProperties (&prop, i));
cout << prop.name << endl;
cout << " totalGlobalMem " << prop.totalGlobalMem << endl;//print bytes
cout << " sharedMemPerBlock " << prop.sharedMemPerBlock << endl;
cout << " regsPerBlock " << prop.regsPerBlock << endl;
cout << " warpSize " << prop.warpSize << endl;
cout << " memPitch " << prop.memPitch << endl;
cout << " maxThreadsPerBlock " << prop.maxThreadsPerBlock << endl;
cout << " maxThreadsDim[3] " << prop.maxThreadsDim[1] << ' ' << prop.maxThreadsDim[2] << endl;
cout << " maxGridSize[3] " << prop.maxGridSize[1] << ' ' << prop.maxGridSize[2] << endl;
cout << " totalConstMem " << prop.totalConstMem << endl;
cout << " major " << prop.major << endl;
cout << " minor " << prop.minor << endl;
cout << " clockRate " << prop.clockRate << endl;
cout << " textureAlignment " << prop.textureAlignment << endl;
cout << " deviceOverlap " << prop.deviceOverlap << endl;
cout << " multiProcessorCount " << prop.multiProcessorCount << endl;
cout << " kernelExecTimeoutEnabled " << prop.kernelExecTimeoutEnabled << endl;
cout << " integrated " << prop.integrated << endl;
cout << " canMapHostMemory " << prop.canMapHostMemory << endl;
cout << " computeMode " << prop.computeMode << endl;
cout << " concurrentKernels " << prop.concurrentKernels << endl;
cout << " ECCEnabled " << prop.ECCEnabled << endl;
cout << " pciBusID " << prop.pciBusID << endl;
cout << " pciDeviceID " << prop.pciDeviceID << endl;
cout << " tccDriver " << prop.tccDriver << endl;
cout << endl;
cout << " asyncEngineCount " << prop.asyncEngineCount << endl;
//cout << " concurrentManagedAccess " << prop.concurrentManagedAccess << endl;//Device can coherently access managed memory concurrently with the CPU
cout << " globalL1CacheSupported " << prop.globalL1CacheSupported << endl;
//cout << " hostNativeAtomicSupported " << prop.hostNativeAtomicSupported << endl;//Link between the device and the host supports native atomic operations
cout << " isMultiGpuBoard " << prop.isMultiGpuBoard << endl;
cout << " l2CacheSize " << prop.l2CacheSize << endl;
cout << " managedMemory " << prop.managedMemory << endl;
//cout << " maxSurface1D[2] " << prop.maxSurface1D[1] << prop.maxSurface1D[2] << endl;//Maximum 1D surface size
cout << " maxSurface1DLayered[2] " << prop.maxSurface1DLayered[1] << endl;
cout << " maxSurface2DLayered[3] " << prop.maxSurface2DLayered[1] << ' ' << prop.maxSurface2DLayered[2] << endl;
cout << " maxSurface3D[3] " << prop.maxSurface3D[1] << ' ' << prop.maxSurface3D[2] << endl;
cout << " maxSurfaceCubemap " << prop.maxSurfaceCubemap << endl;
cout << " maxSurfaceCubemapLayered[2] " << prop.maxSurfaceCubemapLayered[1] << endl;
cout << " maxTexture1D " << prop.maxTexture1D << endl;
cout << " maxTexture1DLayered[2] " << prop.maxTexture1DLayered[1] << endl;
cout << " maxTexture1DLinear " << prop.maxTexture1DLinear << endl;
cout << " maxTexture1DMipmap " << prop.maxTexture1DMipmap << endl;
cout << " maxTexture2D[2] " << prop.maxTexture2D[1] << endl;
cout << " maxTexture2DGather[2] " << prop.maxTexture2DGather[1] << endl;
cout << " maxTexture2DLayered[3] " << prop.maxTexture2DLayered[1] << ' ' << prop.maxTexture2DLayered[2] << endl;
cout << " maxTexture2DLinear[3] " << prop.maxTexture2DLinear[1] << ' ' << prop.maxTexture2DLinear[2] << endl;
cout << " maxTexture2DMipmap[2] " << prop.maxTexture2DMipmap[1] << endl;
//cout << " maxTexture3D[3] " << prop.maxTexture3D[1] << maxTexture3D[2] << prop.maxTexture3D[3] << endl; //Maximum 3D texture dimensions
//cout << " maxTexture3DAlt[3] " << propmaxTexture3DAlt[1] << prop.maxTexture3DAlt[2] << prop.maxTexture3DAlt[3] << endl; //Maximum alternate 3D texture dimensions
cout << " maxTextureCubemap " << prop.maxTextureCubemap << endl;
cout << " maxTextureCubemapLayered[2] " << prop.maxTextureCubemapLayered[1] << endl;
cout << " maxThreadsPerMultiProcessor " << prop.maxThreadsPerMultiProcessor << endl;
cout << " memoryBusWidth " << prop.memoryBusWidth << endl;
cout << " memoryClockRate " << prop.memoryClockRate << endl;
cout << " multiGpuBoardGroupID " << prop.multiGpuBoardGroupID << endl;
//cout << " pageableMemoryAccess " << prop.pageableMemoryAccess << endl; //Device supports coherently accessing pageable memory without calling cudaHostRegister on it
cout << " pciDomainID " << prop.pciDomainID << endl;
cout << " regsPerMultiprocessor " << prop.regsPerMultiprocessor << endl;
cout << " sharedMemPerMultiprocessor " << prop.sharedMemPerMultiprocessor << endl;
//cout << " singleToDoublePrecisionPerfRatio " << prop.singleToDoublePrecisionPerfRatio << endl; //Ratio of single precision performance (in floating-point operations per second) to double precision performance
cout << " streamPrioritiesSupported " << prop.streamPrioritiesSupported << endl;
cout << " surfaceAlignment " << prop.surfaceAlignment << endl;
cout << " texturePitchAlignment " << prop.texturePitchAlignment << endl;
cout << " unifiedAddressing " << prop.unifiedAddressing << endl;
}
}
|
9180e49c48e9397fb5e63c9b6172f737a258b075.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void rgb2gray(float *grayImage, float *rgbImage, int channels, int width, int height) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) {
// get 1D coordinate for the grayscale image
int grayOffset = y * width + x;
// one can think of the RGB image having
// CHANNEL times columns than the gray scale image
int rgbOffset = grayOffset * channels;
float r = rgbImage[rgbOffset]; // red value for pixel
float g = rgbImage[rgbOffset + 1]; // green value for pixel
float b = rgbImage[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// We multiply by floating point constants
grayImage[grayOffset] = 0.21f * r + 0.71f * g + 0.07f * b;
}
} | 9180e49c48e9397fb5e63c9b6172f737a258b075.cu | #include "includes.h"
__global__ void rgb2gray(float *grayImage, float *rgbImage, int channels, int width, int height) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) {
// get 1D coordinate for the grayscale image
int grayOffset = y * width + x;
// one can think of the RGB image having
// CHANNEL times columns than the gray scale image
int rgbOffset = grayOffset * channels;
float r = rgbImage[rgbOffset]; // red value for pixel
float g = rgbImage[rgbOffset + 1]; // green value for pixel
float b = rgbImage[rgbOffset + 2]; // blue value for pixel
// perform the rescaling and store it
// We multiply by floating point constants
grayImage[grayOffset] = 0.21f * r + 0.71f * g + 0.07f * b;
}
} |
dd1e7c31c01215699747ab9646ac529c0dc8a40b.hip | // !!! This is a file automatically generated by hipify!!!
#include "pathtracer.h"
#include "renderer/pathtracer_kernel.h"
#include "renderer/rayqueue.h"
#include <chrono>
PathTracer::PathTracer(const std::string& filename)
: m_scene(filename), m_sample_num(0), m_sum_bounce(0), m_reset(false)
{
}
PathTracer::~PathTracer()
{
}
void PathTracer::init()
{
initScene();
initQueue();
}
void PathTracer::initScene()
{
m_scene.createDeviceData();
}
void PathTracer::initQueue()
{
const size_t pixels_num = m_scene.m_frame_buffer.size();
MemoryArena arena;
RayQueue input_queue;
RayQueue scatter_queue;
RayQueue shadow_queue;
alloc_queues(pixels_num, input_queue, scatter_queue, shadow_queue, arena);
m_memory_pool.alloc(arena.size());
}
void PathTracer::render(uint32* output)
{
// Reset Renderer
if (m_reset)
{
reset();
}
m_reset = false;
// Calculate queue size
const size_t pixels_num = m_scene.m_frame_buffer.size();
// Allocate queues
MemoryArena arena(m_memory_pool.data());
RayQueue input_queue;
RayQueue scatter_queue;
RayQueue shadow_queue;
alloc_queues(pixels_num, input_queue, scatter_queue, shadow_queue, arena);
// Initialize data view
SceneView scene_view = m_scene.view();
FrameBufferView frame_buffer_view = m_scene.m_frame_buffer.view();
// Initialize context
PTContext context;
context.m_bounce = 0;
context.m_sample_num = m_sample_num++;
context.m_in_queue = input_queue;
context.m_scatter_queue = scatter_queue;
context.m_shadow_queue = shadow_queue;
// Generate primary rays
generate_primary_rays(context, scene_view, frame_buffer_view);
CUDA_CHECK(hipDeviceSynchronize());
// Trace ray
{
uint32 in_queue_size;
CUDA_CHECK(hipMemcpy(&in_queue_size, context.m_in_queue.m_size, sizeof(uint32), hipMemcpyDeviceToHost));
{
trace(scene_view, in_queue_size, context.m_in_queue.m_rays, context.m_in_queue.m_hits);
CUDA_CHECK(hipDeviceSynchronize());
}
}
// Bounce loop
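// Wavefront scheme: each iteration shades the hits in m_in_queue (next-event estimation
// with MIS plus BSDF sampling), filling m_shadow_queue with light-sample shadow rays and
// m_scatter_queue with continuation rays; both queues are traced, their contributions
// accumulated, and the scatter queue becomes the input queue of the next bounce.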
for (context.m_bounce = 0;
context.m_bounce < m_options.m_max_path_length;
context.m_bounce++)
{
//printf("%u\n", context.m_bounce);
// Check exist ray number
uint32 in_queue_size;
CUDA_CHECK(hipMemcpy(&in_queue_size, context.m_in_queue.m_size, sizeof(uint32), hipMemcpyDeviceToHost));
m_sum_bounce += in_queue_size;
if (in_queue_size == 0)
{
break;
}
// Reset scatter and shadow queue size
hipMemset(context.m_scatter_queue.m_size, 0, sizeof(uint32));
hipMemset(context.m_shadow_queue.m_size, 0, sizeof(uint32));
// NEE(with MIS) and Sample BSDF
{
shade_hit(in_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(hipDeviceSynchronize());
}
// Solve occlude and Accumulate light sampling contribution
uint32 shadow_queue_size;
CUDA_CHECK(hipMemcpy(&shadow_queue_size, context.m_shadow_queue.m_size, sizeof(uint32), hipMemcpyDeviceToHost));
{
// Trace shadow ray
trace_shadow(scene_view, shadow_queue_size, context.m_shadow_queue.m_rays, context.m_shadow_queue.m_hits);
CUDA_CHECK(hipDeviceSynchronize());
// Accumulate light sampling contribution
accumulate_light_sampling_contribution(shadow_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(hipDeviceSynchronize());
}
// Solve intersect and Accumulate BSDF sampling contribution
uint32 scatter_queue_size;
CUDA_CHECK(hipMemcpy(&scatter_queue_size, context.m_scatter_queue.m_size, sizeof(uint32), hipMemcpyDeviceToHost));
{
// Trace ray
trace(scene_view, scatter_queue_size, context.m_scatter_queue.m_rays, context.m_scatter_queue.m_hits);
CUDA_CHECK(hipDeviceSynchronize());
// Accumulate BSDF sampling contribution
accumulate_BSDF_sampling_contribution(scatter_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(hipDeviceSynchronize());
}
// Swap scatter and in queue
std::swap(context.m_scatter_queue, context.m_in_queue);
}
// Add sample number
finish_sample(frame_buffer_view);
CUDA_CHECK(hipDeviceSynchronize());
// Filter for output
if (output)
{
filter(output, frame_buffer_view);
}
CUDA_CHECK(hipDeviceSynchronize());
}
void PathTracer::render(uint32 num)
{
auto _curTimePoint = std::chrono::steady_clock::now();
init();
for (uint32 i = 0; i < num; i++)
{
fprintf(stderr, "\r%f%%", 100.f * (i + 1) / num);
render();
}
auto curTime = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(curTime - _curTimePoint);
std::cout << "SpendTime = " << duration.count() / 1000 << "s" << std::endl;
output(std::to_string(m_sample_num) + "spp.png");
exit(0);
}
void PathTracer::output(const std::string& filename)
{
m_scene.m_frame_buffer.output(filename);
}
void PathTracer::zoom(float d)
{
m_scene.m_camera.zoom(d);
m_reset = true;
}
void PathTracer::translate(float x, float y)
{
m_scene.m_camera.translate(x, y);
m_reset = true;
}
void PathTracer::rotate(float yaw, float pitch)
{
m_scene.m_camera.rotate(yaw, pitch);
m_reset = true;
}
void PathTracer::reset()
{
m_sample_num = 0;
m_sum_bounce = 0;
m_scene.m_frame_buffer.clear();
}
void PathTracer::resize(uint32 width, uint32 height)
{
m_scene.m_frame_buffer.resize(width, height);
m_scene.m_camera.resize(width, height);
m_scene.m_camera.updateAspectRation((float)(width) / height);
initQueue();
m_reset = true;
}
int2 PathTracer::getResolution() const
{
return m_scene.m_frame_buffer.getResolution();
}
| dd1e7c31c01215699747ab9646ac529c0dc8a40b.cu | #include "pathtracer.h"
#include "renderer/pathtracer_kernel.h"
#include "renderer/rayqueue.h"
#include <chrono>
PathTracer::PathTracer(const std::string& filename)
: m_scene(filename), m_sample_num(0), m_sum_bounce(0), m_reset(false)
{
}
PathTracer::~PathTracer()
{
}
void PathTracer::init()
{
initScene();
initQueue();
}
void PathTracer::initScene()
{
m_scene.createDeviceData();
}
void PathTracer::initQueue()
{
const size_t pixels_num = m_scene.m_frame_buffer.size();
MemoryArena arena;
RayQueue input_queue;
RayQueue scatter_queue;
RayQueue shadow_queue;
alloc_queues(pixels_num, input_queue, scatter_queue, shadow_queue, arena);
m_memory_pool.alloc(arena.size());
}
void PathTracer::render(uint32* output)
{
// Reset Renderer
if (m_reset)
{
reset();
}
m_reset = false;
// Calculate queue size
const size_t pixels_num = m_scene.m_frame_buffer.size();
// Allocate queues
MemoryArena arena(m_memory_pool.data());
RayQueue input_queue;
RayQueue scatter_queue;
RayQueue shadow_queue;
alloc_queues(pixels_num, input_queue, scatter_queue, shadow_queue, arena);
// Initialize data view
SceneView scene_view = m_scene.view();
FrameBufferView frame_buffer_view = m_scene.m_frame_buffer.view();
// Initialize context
PTContext context;
context.m_bounce = 0;
context.m_sample_num = m_sample_num++;
context.m_in_queue = input_queue;
context.m_scatter_queue = scatter_queue;
context.m_shadow_queue = shadow_queue;
// Generate primary rays
generate_primary_rays(context, scene_view, frame_buffer_view);
CUDA_CHECK(cudaDeviceSynchronize());
// Trace ray
{
uint32 in_queue_size;
CUDA_CHECK(cudaMemcpy(&in_queue_size, context.m_in_queue.m_size, sizeof(uint32), cudaMemcpyDeviceToHost));
{
trace(scene_view, in_queue_size, context.m_in_queue.m_rays, context.m_in_queue.m_hits);
CUDA_CHECK(cudaDeviceSynchronize());
}
}
// Bounce loop
for (context.m_bounce = 0;
context.m_bounce < m_options.m_max_path_length;
context.m_bounce++)
{
//printf("%u\n", context.m_bounce);
// Check exist ray number
uint32 in_queue_size;
CUDA_CHECK(cudaMemcpy(&in_queue_size, context.m_in_queue.m_size, sizeof(uint32), cudaMemcpyDeviceToHost));
m_sum_bounce += in_queue_size;
if (in_queue_size == 0)
{
break;
}
// Reset scatter and shadow queue size
cudaMemset(context.m_scatter_queue.m_size, 0, sizeof(uint32));
cudaMemset(context.m_shadow_queue.m_size, 0, sizeof(uint32));
// NEE(with MIS) and Sample BSDF
{
shade_hit(in_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(cudaDeviceSynchronize());
}
// Solve occlude and Accumulate light sampling contribution
uint32 shadow_queue_size;
CUDA_CHECK(cudaMemcpy(&shadow_queue_size, context.m_shadow_queue.m_size, sizeof(uint32), cudaMemcpyDeviceToHost));
{
// Trace shadow ray
trace_shadow(scene_view, shadow_queue_size, context.m_shadow_queue.m_rays, context.m_shadow_queue.m_hits);
CUDA_CHECK(cudaDeviceSynchronize());
// Accumulate light sampling contribution
accumulate_light_sampling_contribution(shadow_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(cudaDeviceSynchronize());
}
// Solve intersect and Accumulate BSDF sampling contribution
uint32 scatter_queue_size;
CUDA_CHECK(cudaMemcpy(&scatter_queue_size, context.m_scatter_queue.m_size, sizeof(uint32), cudaMemcpyDeviceToHost));
{
// Trace ray
trace(scene_view, scatter_queue_size, context.m_scatter_queue.m_rays, context.m_scatter_queue.m_hits);
CUDA_CHECK(cudaDeviceSynchronize());
// Accumulate BSDF sampling contribution
accumulate_BSDF_sampling_contribution(scatter_queue_size, context, scene_view, frame_buffer_view);
CUDA_CHECK(cudaDeviceSynchronize());
}
// Swap scatter and in queue
std::swap(context.m_scatter_queue, context.m_in_queue);
}
// Add sample number
finish_sample(frame_buffer_view);
CUDA_CHECK(cudaDeviceSynchronize());
// Filter for output
if (output)
{
filter(output, frame_buffer_view);
}
CUDA_CHECK(cudaDeviceSynchronize());
}
void PathTracer::render(uint32 num)
{
auto _curTimePoint = std::chrono::steady_clock::now();
init();
for (uint32 i = 0; i < num; i++)
{
fprintf(stderr, "\r%f%%", 100.f * (i + 1) / num);
render();
}
auto curTime = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(curTime - _curTimePoint);
std::cout << "SpendTime = " << duration.count() / 1000 << "s" << std::endl;
output(std::to_string(m_sample_num) + "spp.png");
exit(0);
}
void PathTracer::output(const std::string& filename)
{
m_scene.m_frame_buffer.output(filename);
}
void PathTracer::zoom(float d)
{
m_scene.m_camera.zoom(d);
m_reset = true;
}
void PathTracer::translate(float x, float y)
{
m_scene.m_camera.translate(x, y);
m_reset = true;
}
void PathTracer::rotate(float yaw, float pitch)
{
m_scene.m_camera.rotate(yaw, pitch);
m_reset = true;
}
void PathTracer::reset()
{
m_sample_num = 0;
m_sum_bounce = 0;
m_scene.m_frame_buffer.clear();
}
void PathTracer::resize(uint32 width, uint32 height)
{
m_scene.m_frame_buffer.resize(width, height);
m_scene.m_camera.resize(width, height);
m_scene.m_camera.updateAspectRation((float)(width) / height);
initQueue();
m_reset = true;
}
int2 PathTracer::getResolution() const
{
return m_scene.m_frame_buffer.getResolution();
}
|