hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
003ed954dde3b8250cee90a421cb1624c1c85bad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/cuda_check_numerics_kernel_observer.h"
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__device__ bool IsNotFinite(T x) {
return !isfinite(x);
}
template<>
__device__ bool IsNotFinite<half>(half x) {
return (__hisinf(x) || __hisnan(x));
}
template<typename T>
__global__ void HasNotFiniteGpuKernel(const int64_t n, const T* x, volatile bool* has_not_finite) {
if (*has_not_finite) { return; }
CUDA_1D_KERNEL_LOOP_T(int64_t, i, n) {
if (IsNotFinite(x[i])) {
*has_not_finite = true;
return;
}
}
}
template<typename T>
bool HasNotFinite(ep::Stream* stream, const int64_t elem_cnt, const T* data_ptr,
bool* has_not_finite_host, bool* has_not_finite_device) {
OF_CUDA_CHECK(hipMemsetAsync(has_not_finite_device, 0, sizeof(bool),
stream->As<ep::CudaStream>()->cuda_stream()));
hipLaunchKernelGGL(( HasNotFiniteGpuKernel<T>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(), elem_cnt, data_ptr, has_not_finite_device);
OF_CUDA_CHECK(hipMemcpyAsync(has_not_finite_host, has_not_finite_device, sizeof(bool),
hipMemcpyDefault, stream->As<ep::CudaStream>()->cuda_stream()));
OF_CUDA_CHECK(hipStreamSynchronize(stream->As<ep::CudaStream>()->cuda_stream()));
return *has_not_finite_host;
}
bool HasNotFiniteGpu(ep::Stream* stream, const Blob* blob, bool* has_not_finite_host,
bool* has_not_finite_device) {
const DataType dtype = blob->data_type();
const int64_t elem_cnt = blob->shape().elem_cnt();
if (dtype == kFloat) {
return HasNotFinite<float>(stream, elem_cnt, blob->dptr<float>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kDouble) {
return HasNotFinite<double>(stream, elem_cnt, blob->dptr<double>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kFloat16) {
return HasNotFinite<half>(stream, elem_cnt, blob->dptr<half>(), has_not_finite_host,
has_not_finite_device);
} else {
return false;
}
}
} // namespace
CudaCheckNumericsKernelObserver::CudaCheckNumericsKernelObserver()
: has_not_finite_host_(nullptr), has_not_finite_device_(nullptr) {
OF_CUDA_CHECK(hipGetDevice(&device_id_));
OF_CUDA_CHECK(hipHostMalloc(&has_not_finite_host_, sizeof(bool)));
OF_CUDA_CHECK(hipMalloc(&has_not_finite_device_, sizeof(bool)));
}
CudaCheckNumericsKernelObserver::~CudaCheckNumericsKernelObserver() {
CudaCurrentDeviceGuard guard(device_id_);
OF_CUDA_CHECK(hipHostFree(has_not_finite_host_));
OF_CUDA_CHECK(hipFree(has_not_finite_device_));
}
void CudaCheckNumericsKernelObserver::DidForwardDataContent(KernelContext* ctx,
const Kernel* kernel) {
for (const auto& obn : kernel->op_attribute().output_bns()) {
Blob* blob = ctx->BnInOp2Blob(obn);
if (blob != nullptr) {
bool has_not_finite =
HasNotFiniteGpu(ctx->stream(), blob, has_not_finite_host_, has_not_finite_device_);
CHECK(!has_not_finite) << kernel->op_conf().name() << " : " << obn << " has nan or inf";
}
}
}
} // namespace oneflow
| 003ed954dde3b8250cee90a421cb1624c1c85bad.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/kernel/cuda_check_numerics_kernel_observer.h"
#include "oneflow/core/kernel/kernel.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__device__ bool IsNotFinite(T x) {
return !isfinite(x);
}
template<>
__device__ bool IsNotFinite<half>(half x) {
return (__hisinf(x) || __hisnan(x));
}
template<typename T>
__global__ void HasNotFiniteGpuKernel(const int64_t n, const T* x, volatile bool* has_not_finite) {
if (*has_not_finite) { return; }
CUDA_1D_KERNEL_LOOP_T(int64_t, i, n) {
if (IsNotFinite(x[i])) {
*has_not_finite = true;
return;
}
}
}
template<typename T>
bool HasNotFinite(ep::Stream* stream, const int64_t elem_cnt, const T* data_ptr,
bool* has_not_finite_host, bool* has_not_finite_device) {
OF_CUDA_CHECK(cudaMemsetAsync(has_not_finite_device, 0, sizeof(bool),
stream->As<ep::CudaStream>()->cuda_stream()));
HasNotFiniteGpuKernel<T>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(elem_cnt, data_ptr, has_not_finite_device);
OF_CUDA_CHECK(cudaMemcpyAsync(has_not_finite_host, has_not_finite_device, sizeof(bool),
cudaMemcpyDefault, stream->As<ep::CudaStream>()->cuda_stream()));
OF_CUDA_CHECK(cudaStreamSynchronize(stream->As<ep::CudaStream>()->cuda_stream()));
return *has_not_finite_host;
}
bool HasNotFiniteGpu(ep::Stream* stream, const Blob* blob, bool* has_not_finite_host,
bool* has_not_finite_device) {
const DataType dtype = blob->data_type();
const int64_t elem_cnt = blob->shape().elem_cnt();
if (dtype == kFloat) {
return HasNotFinite<float>(stream, elem_cnt, blob->dptr<float>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kDouble) {
return HasNotFinite<double>(stream, elem_cnt, blob->dptr<double>(), has_not_finite_host,
has_not_finite_device);
} else if (dtype == kFloat16) {
return HasNotFinite<half>(stream, elem_cnt, blob->dptr<half>(), has_not_finite_host,
has_not_finite_device);
} else {
return false;
}
}
} // namespace
CudaCheckNumericsKernelObserver::CudaCheckNumericsKernelObserver()
: has_not_finite_host_(nullptr), has_not_finite_device_(nullptr) {
OF_CUDA_CHECK(cudaGetDevice(&device_id_));
OF_CUDA_CHECK(cudaMallocHost(&has_not_finite_host_, sizeof(bool)));
OF_CUDA_CHECK(cudaMalloc(&has_not_finite_device_, sizeof(bool)));
}
CudaCheckNumericsKernelObserver::~CudaCheckNumericsKernelObserver() {
CudaCurrentDeviceGuard guard(device_id_);
OF_CUDA_CHECK(cudaFreeHost(has_not_finite_host_));
OF_CUDA_CHECK(cudaFree(has_not_finite_device_));
}
void CudaCheckNumericsKernelObserver::DidForwardDataContent(KernelContext* ctx,
const Kernel* kernel) {
for (const auto& obn : kernel->op_attribute().output_bns()) {
Blob* blob = ctx->BnInOp2Blob(obn);
if (blob != nullptr) {
bool has_not_finite =
HasNotFiniteGpu(ctx->stream(), blob, has_not_finite_host_, has_not_finite_device_);
CHECK(!has_not_finite) << kernel->op_conf().name() << " : " << obn << " has nan or inf";
}
}
}
} // namespace oneflow
|
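The row above captures the two main rewrites hipify performs: runtime API renames (cudaMemsetAsync -> hipMemsetAsync, cudaMallocHost -> hipHostMalloc, cudaFreeHost -> hipHostFree) and the triple-chevron kernel launch becoming hipLaunchKernelGGL. A minimal sketch of the launch mapping, using a hypothetical scale kernel, is:
// Hypothetical kernel, shown only to illustrate the launch-syntax rewrite.
__global__ void scale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { data[i] *= factor; }
}
// CUDA form (as in the .cu column):
//   scale<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
// HIP form emitted by hipify (as in the .hip column):
//   hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, stream, d_data, 2.0f, n);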
9a37a094b1f4316e8dae484ccb564f0c24a1ea25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
} | 9a37a094b1f4316e8dae484ccb564f0c24a1ea25.cu | #include "includes.h"
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
} |
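vecAdd above is device code only; a minimal host-side driver sketch, assuming a 256-thread block size and omitting error checking, allocates device buffers, copies the inputs, launches the kernel, and copies the result back:
// Illustrative host driver (assumption: each input array holds n floats).
void runVecAdd(const float* h_a, const float* h_b, float* h_c, int n) {
  float *d_a, *d_b, *d_c;
  size_t bytes = (size_t)n * sizeof(float);
  cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
  cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
  int blockSize = 256;
  int gridSize = (n + blockSize - 1) / blockSize;
  vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);  // hipify: hipLaunchKernelGGL(vecAdd, ...)
  cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
}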
8da76bf87c130bea5b037caff6e7b5b4ec77bc44.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/cont/RuntimeDeviceTracker.h>
#include <vtkm/cont/cuda/DeviceAdapterCuda.h>
#include <vtkm/worklet/DispatcherMapField.h>
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/cont/testing/Testing.h>
#include <cmath>
#include <iostream>
#include <limits>
#include <random>
#include <vector>
VTKM_THIRDPARTY_PRE_INCLUDE
#include "hiprand/hiprand_kernel.h"
VTKM_THIRDPARTY_POST_INCLUDE
namespace
{
struct TriggerICE : public vtkm::worklet::WorkletMapField
{
using ControlSignature = void(FieldIn, FieldIn, FieldOut);
using ExecutionSignature = _3(_1, _2, WorkIndex);
#ifdef VTKM_CUDA_DEVICE_PASS
template <class ValueType>
__device__ ValueType operator()(const ValueType& bad,
const ValueType& sane,
const vtkm::Id sequenceId) const
{
hiprandState_t state;
//Each thread uses same seed but different sequence numbers
hiprand_init(42, sequenceId, 0, &state);
int signBad = vtkm::SignBit(bad);
int signGood = vtkm::SignBit(bad);
vtkm::Vec<ValueType, 3> coord = { vtkm::Abs(bad * sane),
bad * sane + (ValueType)signBad,
bad * sane + (ValueType)signGood };
for (int i = 0; i < 10; ++i)
{
for (int j = 0; j < 3; ++j)
{
if (vtkm::IsNan(coord[j]))
{
coord[j] = hiprand_normal(&state) * 5.0f;
coord[j] = vtkm::Sqrt(vtkm::Dot(coord, coord));
if (coord[j] <= 1.0f)
{
coord[j] += 1.0f;
}
}
if (vtkm::IsInf(coord[j]))
{
coord[j] = hiprand_normal(&state) * 8.0f;
coord[j] = vtkm::Tan(vtkm::Cos(vtkm::Dot(coord, coord)));
}
}
}
return coord[0] * 4.0f + coord[1] * 4.0f + coord[2] * 4.0f;
}
#else
template <class ValueType>
ValueType operator()(const ValueType& bad, const ValueType& sane, const vtkm::Id sequenceId) const
{
return bad + sane * static_cast<ValueType>(sequenceId);
}
#endif
};
//-----------------------------------------------------------------------------
template <typename Device>
void RunEdgeCases()
{
std::cout << "Testing complicated worklets that can cause NVCC to ICE." << std::endl;
//When running CUDA on unsupported hardware we find that IsInf, IsNan, and
//SignBit can cause the CUDA compiler to crash. This test is a consistent
//way to detect this.
//
//The way it works is we generate all kinds of nasty floating point values
//such as signaling Nan, quiet Nan, other Nans, +Inf, -Inf, -0, +0, a collection of
//denormal numbers, and the min and max float values
//and then a random collection of values from normal float space. We combine this
//array which we will call 'bad' with another input array which we will call 'sane'.
//We then execute a worklet that takes values stored in 'bad' and 'sane' that does
//some computation that takes into account the results of IsInf, IsNan, and
//SignBit
const vtkm::Id desired_size = 2048;
std::vector<float> sanevalues;
std::vector<float> badvalues = { std::numeric_limits<float>::signaling_NaN(),
std::numeric_limits<float>::quiet_NaN(),
std::nanf("1"),
std::nanf("4200042"),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity() * -1,
0.0f,
-0.0f,
std::numeric_limits<float>::denorm_min(),
std::nextafter(std::numeric_limits<float>::min(), 0.0f),
std::numeric_limits<float>::denorm_min() *
(1 + std::numeric_limits<float>::epsilon()),
std::nextafter(std::numeric_limits<float>::min(), 0.0f) *
(1 + std::numeric_limits<float>::epsilon()),
std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::min(),
std::numeric_limits<float>::max() };
const std::size_t bad_size = badvalues.size();
const vtkm::Id bad_size_as_id = static_cast<vtkm::Id>(bad_size);
badvalues.reserve(desired_size);
sanevalues.reserve(desired_size);
//construct a random number generator
std::mt19937 rng;
std::uniform_real_distribution<float> range(-1.0f, 1.0f);
// now add in some random numbers to the bad values
for (std::size_t i = 0; i < desired_size - bad_size; ++i)
{
badvalues.push_back(range(rng));
}
for (std::size_t i = 0; i < desired_size; ++i)
{
sanevalues.push_back(range(rng));
}
auto bad = vtkm::cont::make_ArrayHandle(badvalues, vtkm::CopyFlag::On);
auto sane = vtkm::cont::make_ArrayHandle(sanevalues, vtkm::CopyFlag::On);
decltype(sane) result;
vtkm::worklet::DispatcherMapField<TriggerICE> dispatcher;
dispatcher.SetDevice(Device());
dispatcher.Invoke(bad, sane, result);
auto portal = result.ReadPortal();
//the first 6 values should be nan
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(0)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(1)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(2)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(3)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(4)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(5)), "Value should be NaN.");
for (vtkm::Id i = bad_size_as_id; i < desired_size; ++i)
{ //The rest of the values shouldn't be Nan or Inf
auto v = portal.Get(i);
const bool valid = !vtkm::IsNan(v) && !vtkm::IsInf(v);
VTKM_TEST_ASSERT(valid, "value shouldn't be NaN or INF");
}
}
} //namespace
int UnitTestCudaMathEdgeCases(int argc, char* argv[])
{
auto& tracker = vtkm::cont::GetRuntimeDeviceTracker();
tracker.ForceDevice(vtkm::cont::DeviceAdapterTagCuda{});
return vtkm::cont::testing::Testing::Run(
RunEdgeCases<vtkm::cont::DeviceAdapterTagCuda>, argc, argv);
}
| 8da76bf87c130bea5b037caff6e7b5b4ec77bc44.cu | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//============================================================================
#include <vtkm/cont/RuntimeDeviceTracker.h>
#include <vtkm/cont/cuda/DeviceAdapterCuda.h>
#include <vtkm/worklet/DispatcherMapField.h>
#include <vtkm/worklet/WorkletMapField.h>
#include <vtkm/cont/testing/Testing.h>
#include <cmath>
#include <iostream>
#include <limits>
#include <random>
#include <vector>
VTKM_THIRDPARTY_PRE_INCLUDE
#include "curand_kernel.h"
VTKM_THIRDPARTY_POST_INCLUDE
namespace
{
struct TriggerICE : public vtkm::worklet::WorkletMapField
{
using ControlSignature = void(FieldIn, FieldIn, FieldOut);
using ExecutionSignature = _3(_1, _2, WorkIndex);
#ifdef VTKM_CUDA_DEVICE_PASS
template <class ValueType>
__device__ ValueType operator()(const ValueType& bad,
const ValueType& sane,
const vtkm::Id sequenceId) const
{
curandState_t state;
//Each thread uses same seed but different sequence numbers
curand_init(42, sequenceId, 0, &state);
int signBad = vtkm::SignBit(bad);
int signGood = vtkm::SignBit(bad);
vtkm::Vec<ValueType, 3> coord = { vtkm::Abs(bad * sane),
bad * sane + (ValueType)signBad,
bad * sane + (ValueType)signGood };
for (int i = 0; i < 10; ++i)
{
for (int j = 0; j < 3; ++j)
{
if (vtkm::IsNan(coord[j]))
{
coord[j] = curand_normal(&state) * 5.0f;
coord[j] = vtkm::Sqrt(vtkm::Dot(coord, coord));
if (coord[j] <= 1.0f)
{
coord[j] += 1.0f;
}
}
if (vtkm::IsInf(coord[j]))
{
coord[j] = curand_normal(&state) * 8.0f;
coord[j] = vtkm::Tan(vtkm::Cos(vtkm::Dot(coord, coord)));
}
}
}
return coord[0] * 4.0f + coord[1] * 4.0f + coord[2] * 4.0f;
}
#else
template <class ValueType>
ValueType operator()(const ValueType& bad, const ValueType& sane, const vtkm::Id sequenceId) const
{
return bad + sane * static_cast<ValueType>(sequenceId);
}
#endif
};
//-----------------------------------------------------------------------------
template <typename Device>
void RunEdgeCases()
{
std::cout << "Testing complicated worklets that can cause NVCC to ICE." << std::endl;
//When running CUDA on unsupported hardware we find that IsInf, IsNan, and
//SignBit can cause the CUDA compiler to crash. This test is a consistent
//way to detect this.
//
//The way it works is we generate all kinds of nasty floating point values
//such as signaling Nan, quiet Nan, other Nans, +Inf, -Inf, -0, +0, a collection of
//denormal numbers, and the min and max float values
//and then a random collection of values from normal float space. We combine this
//array which we will call 'bad' with another input array which we will call 'sane'.
//We then execute a worklet that takes values stored in 'bad' and 'sane' that does
//some computation that takes into account the results of IsInf, IsNan, and
//SignBit
const vtkm::Id desired_size = 2048;
std::vector<float> sanevalues;
std::vector<float> badvalues = { std::numeric_limits<float>::signaling_NaN(),
std::numeric_limits<float>::quiet_NaN(),
std::nanf("1"),
std::nanf("4200042"),
std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::infinity() * -1,
0.0f,
-0.0f,
std::numeric_limits<float>::denorm_min(),
std::nextafter(std::numeric_limits<float>::min(), 0.0f),
std::numeric_limits<float>::denorm_min() *
(1 + std::numeric_limits<float>::epsilon()),
std::nextafter(std::numeric_limits<float>::min(), 0.0f) *
(1 + std::numeric_limits<float>::epsilon()),
std::numeric_limits<float>::lowest(),
std::numeric_limits<float>::min(),
std::numeric_limits<float>::max() };
const std::size_t bad_size = badvalues.size();
const vtkm::Id bad_size_as_id = static_cast<vtkm::Id>(bad_size);
badvalues.reserve(desired_size);
sanevalues.reserve(desired_size);
//construct a random number generator
std::mt19937 rng;
std::uniform_real_distribution<float> range(-1.0f, 1.0f);
// now add in some random numbers to the bad values
for (std::size_t i = 0; i < desired_size - bad_size; ++i)
{
badvalues.push_back(range(rng));
}
for (std::size_t i = 0; i < desired_size; ++i)
{
sanevalues.push_back(range(rng));
}
auto bad = vtkm::cont::make_ArrayHandle(badvalues, vtkm::CopyFlag::On);
auto sane = vtkm::cont::make_ArrayHandle(sanevalues, vtkm::CopyFlag::On);
decltype(sane) result;
vtkm::worklet::DispatcherMapField<TriggerICE> dispatcher;
dispatcher.SetDevice(Device());
dispatcher.Invoke(bad, sane, result);
auto portal = result.ReadPortal();
//the first 6 values should be nan
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(0)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(1)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(2)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(3)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(4)), "Value should be NaN.");
VTKM_TEST_ASSERT(vtkm::IsNan(portal.Get(5)), "Value should be NaN.");
for (vtkm::Id i = bad_size_as_id; i < desired_size; ++i)
{ //The rest of the values shouldn't be Nan or Inf
auto v = portal.Get(i);
const bool valid = !vtkm::IsNan(v) && !vtkm::IsInf(v);
VTKM_TEST_ASSERT(valid, "value shouldn't be NaN or INF");
}
}
} //namespace
int UnitTestCudaMathEdgeCases(int argc, char* argv[])
{
auto& tracker = vtkm::cont::GetRuntimeDeviceTracker();
tracker.ForceDevice(vtkm::cont::DeviceAdapterTagCuda{});
return vtkm::cont::testing::Testing::Run(
RunEdgeCases<vtkm::cont::DeviceAdapterTagCuda>, argc, argv);
}
|
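Besides the launch syntax, the row above exercises the RNG library rename: curandState_t/curand_init/curand_normal in the .cu column become hiprandState_t/hiprand_init/hiprand_normal in the .hip column. A minimal per-thread RNG sketch under that mapping, with a hypothetical fillNormal kernel, is:
// Hypothetical kernel (requires #include <curand_kernel.h>); each thread seeds its
// own generator with a distinct sequence id.
__global__ void fillNormal(float* out, int n, unsigned long long seed) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  curandState_t state;              // hipify: hiprandState_t
  curand_init(seed, i, 0, &state);  // hipify: hiprand_init(seed, sequence, offset, &state)
  out[i] = curand_normal(&state);   // hipify: hiprand_normal
}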
da24fd3fccf35ecfd000143f83cd7e4306c34f80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "vec_to_mat_kernel.h"
#include "cuMat_config.h"
__global__ void vec_to_mat_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = src[col];
}
}
void vec_to_mat_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
/* launch kernel */
hipLaunchKernelGGL(( vec_to_mat_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n);
hipDeviceSynchronize();
}
| da24fd3fccf35ecfd000143f83cd7e4306c34f80.cu |
#include "vec_to_mat_kernel.h"
#include "cuMat_config.h"
__global__ void vec_to_mat_kernel (const float * __restrict__ src,
float * __restrict__ dst, int m, int n){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if (row < m && col < n){
dst[row * n + col] = src[col];
}
}
void vec_to_mat_kernel_exec(const float *src, float *dst, int m, int n){
/* specified block and grid size */
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid((n+block.x-1)/block.x, (m+block.y-1)/block.y);
/* launch kernel */
vec_to_mat_kernel<<<grid, block>>>(src, dst, m, n);
cudaThreadSynchronize();
}
|
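vec_to_mat_kernel_exec above broadcasts a length-n source vector into every row of an m x n destination matrix. A call-site sketch, assuming device pointers of the appropriate sizes (m, n, and the buffer names are illustrative), is:
// Illustrative call site; d_src holds n floats, d_dst holds m*n floats (device memory).
float *d_src = nullptr, *d_dst = nullptr;
int m = 128, n = 256;
cudaMalloc(&d_src, n * sizeof(float));               // hipify: hipMalloc
cudaMalloc(&d_dst, (size_t)m * n * sizeof(float));
// ... fill d_src with the row vector to broadcast ...
vec_to_mat_kernel_exec(d_src, d_dst, m, n);          // each of the m rows of d_dst becomes a copy of d_src
cudaFree(d_src); cudaFree(d_dst);                    // hipify: hipFree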
b9ce9df91370697a0a57a7c8afd416227e31a9c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__
void iterateKernel(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor, int* result) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int p = index; p < w * h; p += stride) {
// delinearize
int i = p / w;
int j = p % w;
// convert to complex number
double cx = xOrigin - (2 / zoomFactor) * (1 - 2 * ((double) j / w));
double cy = yOrigin - (2 / zoomFactor) * (1 - 2 * ((double) (i+(w-h)/2) / w));
// do the iterations
double zx = cx;
double zy = cy;
double tx;
double ty;
bool inMandelbrot = true;
for(int k = 0; k < maxIterations; ++ k)
{
if(zx * zx + zy * zy > 4) {
result[i*w+j] = 255 * (1 - (double) k / maxIterations);
inMandelbrot = false;
break;
}
tx = zx * zx - zy * zy + cx;
ty = 2 * zx * zy + cy;
zx = tx;
zy = ty;
}
if(inMandelbrot)
result[i*w+j] = 0;
}
}
extern "C"
int* iterateGPU(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor) {
int* resultOnGPU;
hipMalloc(&resultOnGPU, w * h * sizeof(int));
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
int blockSize = deviceProp.maxThreadsPerBlock;
int numBlocks = (w * h - 1) / blockSize + 1;
hipLaunchKernelGGL(( iterateKernel), dim3(numBlocks), dim3(blockSize), 0, 0, w, h, maxIterations, xOrigin, yOrigin, zoomFactor, resultOnGPU);
hipDeviceSynchronize();
auto result = (int*) malloc(w * h * sizeof(int));
hipMemcpy(result, resultOnGPU, w * h * sizeof(int), hipMemcpyDeviceToHost);
hipFree(resultOnGPU);
return result;
}
| b9ce9df91370697a0a57a7c8afd416227e31a9c6.cu | __global__
void iterateKernel(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor, int* result) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int p = index; p < w * h; p += stride) {
// delinearize
int i = p / w;
int j = p % w;
// convert to complex number
double cx = xOrigin - (2 / zoomFactor) * (1 - 2 * ((double) j / w));
double cy = yOrigin - (2 / zoomFactor) * (1 - 2 * ((double) (i+(w-h)/2) / w));
// do the iterations
double zx = cx;
double zy = cy;
double tx;
double ty;
bool inMandelbrot = true;
for(int k = 0; k < maxIterations; ++ k)
{
if(zx * zx + zy * zy > 4) {
result[i*w+j] = 255 * (1 - (double) k / maxIterations);
inMandelbrot = false;
break;
}
tx = zx * zx - zy * zy + cx;
ty = 2 * zx * zy + cy;
zx = tx;
zy = ty;
}
if(inMandelbrot)
result[i*w+j] = 0;
}
}
extern "C"
int* iterateGPU(int w, int h, int maxIterations, double xOrigin, double yOrigin, double zoomFactor) {
int* resultOnGPU;
cudaMalloc(&resultOnGPU, w * h * sizeof(int));
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
int blockSize = deviceProp.maxThreadsPerBlock;
int numBlocks = (w * h - 1) / blockSize + 1;
iterateKernel<<<numBlocks, blockSize>>>(w, h, maxIterations, xOrigin, yOrigin, zoomFactor, resultOnGPU);
cudaDeviceSynchronize();
auto result = (int*) malloc(w * h * sizeof(int));
cudaMemcpy(result, resultOnGPU, w * h * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(resultOnGPU);
return result;
}
|
1211907bf9d6a94ebb6edd4ff25884cf9813190d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "target_impl.h"
#include <stdio.h>
// Warp ID in the CUDA block
INLINE static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// Lane ID in the CUDA warp.
INLINE static unsigned getLaneId() { return threadIdx.x % WARPSIZE; }
// Return true if this is the first active thread in the warp.
INLINE static bool IsWarpMasterActiveThread() {
unsigned long long Mask = __ACTIVEMASK();
unsigned long long ShNum = WARPSIZE - (GetThreadIdInBlock() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
// Return true if this is the master thread.
INLINE static bool IsMasterThread(bool isSPMDExecutionMode) {
return !isSPMDExecutionMode && GetMasterThreadID() == GetThreadIdInBlock();
}
/// Return the provided size aligned to the size of a pointer.
INLINE static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %u\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS =
teamDescr->RootS(WID, IsMasterThread(isSPMDMode()));
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", (unsigned)InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (unsigned long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(unsigned long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n",
(unsigned long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(unsigned long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(unsigned long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ACTIVEMASK();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %u\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (unsigned long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (unsigned long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n",
(unsigned long long)CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n",
(unsigned long long)CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n",
(unsigned long long)RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", (unsigned)ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(unsigned long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(unsigned long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(unsigned long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
// Clean up any old slot that we may still have. The slot producers, do
// not eliminate them because that may be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(unsigned long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread(isSPMDMode())
? *SavedSharedSlot
: DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ACTIVEMASK();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n",
(unsigned)CurActive, (unsigned)ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n",
(unsigned long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n",
(unsigned long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n",
(unsigned long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %u\n", SourceWID);
void * volatile P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
INLINE static void data_sharing_init_stack_common() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
for (int WID = 0; WID < WARPSIZE; WID++) {
__kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
}
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called only by the MASTER thread of each
// team in non-SPMD mode.
EXTERN void __kmpc_data_sharing_init_stack() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
data_sharing_init_stack_common();
omptarget_nvptx_globalArgs.Init();
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called in SPMD mode only.
EXTERN void __kmpc_data_sharing_init_stack_spmd() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
if (threadIdx.x == 0)
data_sharing_init_stack_common();
__threadfence_block();
}
INLINE static void* data_sharing_push_stack_common(size_t PushSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
// Only warp active master threads manage the stack.
bool IsWarpMaster = (GetThreadIdInBlock() % WARPSIZE) == 0;
// Add worst-case padding to DataSize so that future stack allocations are
// correctly aligned.
const size_t Alignment = 8;
PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment;
// Frame pointer must be visible to all workers in the same warp.
const unsigned WID = getWarpId();
void *FrameP = 0;
int32_t CurActive = __ACTIVEMASK();
if (IsWarpMaster) {
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
__kmpc_data_sharing_slot *NewSlot = 0;
size_t NewSize = PushSize;
// Allocate at least the default size for each type of slot.
// Master is a special case and even though there is only one thread,
// it can share more things with the workers. For uniformity, it uses
// the full size of a worker warp slot.
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size;
if (DefaultSlotSize > NewSize)
NewSize = DefaultSlotSize;
NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc(
sizeof(__kmpc_data_sharing_slot) + NewSize,
"Global memory slot allocation.");
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[0] + NewSize;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[0] + PushSize;
// The frame pointer always points to the beginning of the frame.
FrameP = DataSharingState.FramePtr[WID] = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = DataSharingState.FramePtr[WID] = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
}
// Get address from lane 0.
int *FP = (int *)&FrameP;
FP[0] = __kmpc_impl_shfl_sync(CurActive, FP[0], 0);
if (sizeof(FrameP) == 8)
FP[1] = __kmpc_impl_shfl_sync(CurActive, FP[1], 0);
return FrameP;
}
EXTERN void *__kmpc_data_sharing_coalesced_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
return data_sharing_push_stack_common(DataSize);
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
EXTERN void *__kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
// Compute the total memory footprint of the requested data.
// The master thread requires a stack only for itself. A worker
// thread (which at this point is a warp master) will require
// space for the variables of each thread in the warp,
// i.e. one DataSize chunk per warp lane.
// TODO: change WARPSIZE to the number of active threads in the warp.
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread(isSPMDMode()))
? DataSize
: WARPSIZE * DataSize;
// Compute the start address of the frame of each thread in the warp.
uintptr_t FrameStartAddress =
(uintptr_t) data_sharing_push_stack_common(PushSize);
FrameStartAddress += (uintptr_t) (getLaneId() * DataSize);
return (void *)FrameStartAddress;
}
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
__threadfence_block();
if (GetThreadIdInBlock() % WARPSIZE == 0) {
unsigned WID = getWarpId();
// Current slot
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
// Pointer to next available stack.
void *&StackP = DataSharingState.StackPtr[WID];
// Pop the frame.
StackP = FrameStart;
// If the current slot is empty, we need to free the slot after the
// pop.
bool SlotEmpty = (StackP == &SlotP->Data[0]);
if (SlotEmpty && SlotP->Prev) {
// Before removing the slot we need to reset StackP.
StackP = SlotP->PrevSlotStackPtr;
// Remove the slot.
SlotP = SlotP->Prev;
SafeFree(SlotP->Next, "Free slot.");
SlotP->Next = 0;
}
}
}
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean-up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// This function is used to init static memory manager. This manager is used to
// manage statically allocated global memory. This memory is allocated by the
// compiler and used to correctly implement globalization of the variables in
// target, teams and distribute regions.
EXTERN void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
const void *buf, size_t size,
int16_t is_shared,
const void **frame) {
if (is_shared) {
*frame = buf;
return;
}
if (isSPMDExecutionMode) {
if (GetThreadIdInBlock() == 0) {
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
}
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
return;
}
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
__threadfence();
}
EXTERN void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
int16_t is_shared) {
if (is_shared)
return;
if (isSPMDExecutionMode) {
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
if (GetThreadIdInBlock() == 0) {
omptarget_nvptx_simpleMemoryManager.Release();
}
return;
}
__threadfence();
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
omptarget_nvptx_simpleMemoryManager.Release();
}
| 1211907bf9d6a94ebb6edd4ff25884cf9813190d.cu | //===----- data_sharing.cu - NVPTX OpenMP debug utilities -------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of data sharing environments.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "target_impl.h"
#include <stdio.h>
// Warp ID in the CUDA block
INLINE static unsigned getWarpId() { return threadIdx.x / WARPSIZE; }
// Lane ID in the CUDA warp.
INLINE static unsigned getLaneId() { return threadIdx.x % WARPSIZE; }
// Return true if this is the first active thread in the warp.
INLINE static bool IsWarpMasterActiveThread() {
unsigned long long Mask = __ACTIVEMASK();
unsigned long long ShNum = WARPSIZE - (GetThreadIdInBlock() % WARPSIZE);
unsigned long long Sh = Mask << ShNum;
// Truncate Sh to the 32 lower bits
return (unsigned)Sh == 0;
}
// Return true if this is the master thread.
INLINE static bool IsMasterThread(bool isSPMDExecutionMode) {
return !isSPMDExecutionMode && GetMasterThreadID() == GetThreadIdInBlock();
}
/// Return the provided size aligned to the size of a pointer.
INLINE static size_t AlignVal(size_t Val) {
const size_t Align = (size_t)sizeof(void *);
if (Val & (Align - 1)) {
Val += Align;
Val &= ~(Align - 1);
}
return Val;
}
#define DSFLAG 0
#define DSFLAG_INIT 0
#define DSPRINT(_flag, _str, _args...) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x, _args);*/ \
} \
}
#define DSPRINT0(_flag, _str) \
{ \
if (_flag) { \
/*printf("(%d,%d) -> " _str, blockIdx.x, threadIdx.x);*/ \
} \
}
// Initialize the shared data structures. This is expected to be called for the
// master thread and warp masters. \param RootS: A pointer to the root of the
// data sharing stack. \param InitialDataSize: The initial size of the data in
// the slot.
EXTERN void
__kmpc_initialize_data_sharing_environment(__kmpc_data_sharing_slot *rootS,
size_t InitialDataSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
DSPRINT0(DSFLAG_INIT,
"Entering __kmpc_initialize_data_sharing_environment\n");
unsigned WID = getWarpId();
DSPRINT(DSFLAG_INIT, "Warp ID: %u\n", WID);
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
__kmpc_data_sharing_slot *RootS =
teamDescr->RootS(WID, IsMasterThread(isSPMDMode()));
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
// We don't need to initialize the frame and active threads.
DSPRINT(DSFLAG_INIT, "Initial data size: %08x \n", (unsigned)InitialDataSize);
DSPRINT(DSFLAG_INIT, "Root slot at: %016llx \n", (unsigned long long)RootS);
DSPRINT(DSFLAG_INIT, "Root slot data-end at: %016llx \n",
(unsigned long long)RootS->DataEnd);
DSPRINT(DSFLAG_INIT, "Root slot next at: %016llx \n",
(unsigned long long)RootS->Next);
DSPRINT(DSFLAG_INIT, "Shared slot ptr at: %016llx \n",
(unsigned long long)DataSharingState.SlotPtr[WID]);
DSPRINT(DSFLAG_INIT, "Shared stack ptr at: %016llx \n",
(unsigned long long)DataSharingState.StackPtr[WID]);
DSPRINT0(DSFLAG_INIT, "Exiting __kmpc_initialize_data_sharing_environment\n");
}
EXTERN void *__kmpc_data_sharing_environment_begin(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
size_t SharingDataSize, size_t SharingDefaultDataSize,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_begin\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
unsigned WID = getWarpId();
unsigned CurActiveThreads = __ACTIVEMASK();
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Save current slot/stack values.\n");
// Save the current values.
*SavedSharedSlot = SlotP;
*SavedSharedStack = StackP;
*SavedSharedFrame = FrameP;
*SavedActiveThreads = ActiveT;
DSPRINT(DSFLAG, "Warp ID: %u\n", WID);
DSPRINT(DSFLAG, "Saved slot ptr at: %016llx \n", (unsigned long long)SlotP);
DSPRINT(DSFLAG, "Saved stack ptr at: %016llx \n", (unsigned long long)StackP);
DSPRINT(DSFLAG, "Saved frame ptr at: %016llx \n", (long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
// Only the warp active master needs to grow the stack.
if (IsWarpMasterActiveThread()) {
// Save the current active threads.
ActiveT = CurActiveThreads;
// Make sure we use aligned sizes to avoid rematerialization of data.
SharingDataSize = AlignVal(SharingDataSize);
// FIXME: The default data size can be assumed to be aligned?
SharingDefaultDataSize = AlignVal(SharingDefaultDataSize);
// Check if we have room for the data in the current slot.
const uintptr_t CurrentStartAddress = (uintptr_t)StackP;
const uintptr_t CurrentEndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequiredEndAddress =
CurrentStartAddress + (uintptr_t)SharingDataSize;
DSPRINT(DSFLAG, "Data Size %016llx\n", (unsigned long long)SharingDataSize);
DSPRINT(DSFLAG, "Default Data Size %016llx\n",
(unsigned long long)SharingDefaultDataSize);
DSPRINT(DSFLAG, "Current Start Address %016llx\n",
(unsigned long long)CurrentStartAddress);
DSPRINT(DSFLAG, "Current End Address %016llx\n",
(unsigned long long)CurrentEndAddress);
DSPRINT(DSFLAG, "Required End Address %016llx\n",
(unsigned long long)RequiredEndAddress);
DSPRINT(DSFLAG, "Active Threads %08x\n", (unsigned)ActiveT);
// If we require a new slot, allocate it and initialize it (or attempt to
// reuse one). Also, set the shared stack and slot pointers to the new
// place. If we do not need to grow the stack, just adapt the stack and
// frame pointers.
if (CurrentEndAddress < RequiredEndAddress) {
size_t NewSize = (SharingDataSize > SharingDefaultDataSize)
? SharingDataSize
: SharingDefaultDataSize;
__kmpc_data_sharing_slot *NewSlot = 0;
// Attempt to reuse an existing slot.
if (__kmpc_data_sharing_slot *ExistingSlot = SlotP->Next) {
uintptr_t ExistingSlotSize = (uintptr_t)ExistingSlot->DataEnd -
(uintptr_t)(&ExistingSlot->Data[0]);
if (ExistingSlotSize >= NewSize) {
DSPRINT(DSFLAG, "Reusing stack slot %016llx\n",
(unsigned long long)ExistingSlot);
NewSlot = ExistingSlot;
} else {
DSPRINT(DSFLAG, "Cleaning up -failed reuse - %016llx\n",
(unsigned long long)SlotP->Next);
free(ExistingSlot);
}
}
if (!NewSlot) {
NewSlot = (__kmpc_data_sharing_slot *)malloc(
sizeof(__kmpc_data_sharing_slot) + NewSize);
DSPRINT(DSFLAG, "New slot allocated %016llx (data size=%016llx)\n",
(unsigned long long)NewSlot, NewSize);
}
NewSlot->Next = 0;
NewSlot->DataEnd = &NewSlot->Data[NewSize];
SlotP->Next = NewSlot;
SlotP = NewSlot;
StackP = &NewSlot->Data[SharingDataSize];
FrameP = &NewSlot->Data[0];
} else {
// Clean up any old slot that we may still have. The slot producers, do
// not eliminate them because that may be used to return data.
if (SlotP->Next) {
DSPRINT(DSFLAG, "Cleaning up - old not required - %016llx\n",
(unsigned long long)SlotP->Next);
free(SlotP->Next);
SlotP->Next = 0;
}
FrameP = StackP;
StackP = (void *)RequiredEndAddress;
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_begin\n");
// All the threads in this warp get the frame they should work with.
return FrameP;
}
EXTERN void __kmpc_data_sharing_environment_end(
__kmpc_data_sharing_slot **SavedSharedSlot, void **SavedSharedStack,
void **SavedSharedFrame, int32_t *SavedActiveThreads,
int32_t IsEntryPoint) {
DSPRINT0(DSFLAG, "Entering __kmpc_data_sharing_environment_end\n");
unsigned WID = getWarpId();
if (IsEntryPoint) {
if (IsWarpMasterActiveThread()) {
DSPRINT0(DSFLAG, "Doing clean up\n");
// The master thread cleans the saved slot, because this is an environment
// only for the master.
__kmpc_data_sharing_slot *S = IsMasterThread(isSPMDMode())
? *SavedSharedSlot
: DataSharingState.SlotPtr[WID];
if (S->Next) {
free(S->Next);
S->Next = 0;
}
}
DSPRINT0(DSFLAG, "Exiting Exiting __kmpc_data_sharing_environment_end\n");
return;
}
int32_t CurActive = __ACTIVEMASK();
// Only the warp master can restore the stack and frame information, and only
// if there are no other threads left behind in this environment (i.e. the
// warp diverged and returns in different places). This only works if we
// assume that threads will converge right after the call site that started
// the environment.
if (IsWarpMasterActiveThread()) {
int32_t &ActiveT = DataSharingState.ActiveThreads[WID];
DSPRINT0(DSFLAG, "Before restoring the stack\n");
// Zero the bits in the mask. If it is still different from zero, then we
// have other threads that will return after the current ones.
ActiveT &= ~CurActive;
DSPRINT(DSFLAG, "Active threads: %08x; New mask: %08x\n",
(unsigned)CurActive, (unsigned)ActiveT);
if (!ActiveT) {
// No other active threads? Great, lets restore the stack.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
void * volatile &FrameP = DataSharingState.FramePtr[WID];
SlotP = *SavedSharedSlot;
StackP = *SavedSharedStack;
FrameP = *SavedSharedFrame;
ActiveT = *SavedActiveThreads;
DSPRINT(DSFLAG, "Restored slot ptr at: %016llx \n",
(unsigned long long)SlotP);
DSPRINT(DSFLAG, "Restored stack ptr at: %016llx \n",
(unsigned long long)StackP);
DSPRINT(DSFLAG, "Restored frame ptr at: %016llx \n",
(unsigned long long)FrameP);
DSPRINT(DSFLAG, "Active threads: %08x \n", (unsigned)ActiveT);
}
}
// FIXME: Need to see the impact of doing it here.
__threadfence_block();
DSPRINT0(DSFLAG, "Exiting __kmpc_data_sharing_environment_end\n");
return;
}
EXTERN void *
__kmpc_get_data_sharing_environment_frame(int32_t SourceThreadID,
int16_t IsOMPRuntimeInitialized) {
DSPRINT0(DSFLAG, "Entering __kmpc_get_data_sharing_environment_frame\n");
// If the runtime has been elided, use __shared__ memory for master-worker
// data sharing. We're reusing the statically allocated data structure
// that is used for standard data sharing.
if (!IsOMPRuntimeInitialized)
return (void *)&DataSharingState;
// Get the frame used by the requested thread.
unsigned SourceWID = SourceThreadID / WARPSIZE;
DSPRINT(DSFLAG, "Source warp: %u\n", SourceWID);
void * volatile P = DataSharingState.FramePtr[SourceWID];
DSPRINT0(DSFLAG, "Exiting __kmpc_get_data_sharing_environment_frame\n");
return P;
}
////////////////////////////////////////////////////////////////////////////////
// Runtime functions for trunk data sharing scheme.
////////////////////////////////////////////////////////////////////////////////
INLINE static void data_sharing_init_stack_common() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
omptarget_nvptx_TeamDescr *teamDescr =
&omptarget_nvptx_threadPrivateContext->TeamContext();
for (int WID = 0; WID < WARPSIZE; WID++) {
__kmpc_data_sharing_slot *RootS = teamDescr->GetPreallocatedSlotAddr(WID);
DataSharingState.SlotPtr[WID] = RootS;
DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0];
}
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called only by the MASTER thread of each
// team in non-SPMD mode.
EXTERN void __kmpc_data_sharing_init_stack() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
data_sharing_init_stack_common();
omptarget_nvptx_globalArgs.Init();
}
// Initialize data sharing data structure. This function needs to be called
// once at the beginning of a data sharing context (coincides with the kernel
// initialization). This function is called in SPMD mode only.
EXTERN void __kmpc_data_sharing_init_stack_spmd() {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Runtime must be initialized.");
// This function initializes the stack pointer with the pointer to the
// statically allocated shared memory slots. The size of a shared memory
// slot is pre-determined to be 256 bytes.
if (threadIdx.x == 0)
data_sharing_init_stack_common();
__threadfence_block();
}
INLINE static void* data_sharing_push_stack_common(size_t PushSize) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
// Only warp active master threads manage the stack.
bool IsWarpMaster = (GetThreadIdInBlock() % WARPSIZE) == 0;
// Add worst-case padding to PushSize so that future stack allocations are
// correctly aligned.
const size_t Alignment = 8;
PushSize = (PushSize + (Alignment - 1)) / Alignment * Alignment;
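// Illustrative arithmetic for the round-up above (example values only): a
// 20-byte request becomes (20 + 7) / 8 * 8 = 24 bytes, while an already
// aligned 16-byte request stays at 16.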
// Frame pointer must be visible to all workers in the same warp.
const unsigned WID = getWarpId();
void *FrameP = 0;
int32_t CurActive = __ACTIVEMASK();
if (IsWarpMaster) {
// SlotP will point to either the shared memory slot or an existing
// global memory slot.
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
void *&StackP = DataSharingState.StackPtr[WID];
// Check if we have room for the data in the current slot.
const uintptr_t StartAddress = (uintptr_t)StackP;
const uintptr_t EndAddress = (uintptr_t)SlotP->DataEnd;
const uintptr_t RequestedEndAddress = StartAddress + (uintptr_t)PushSize;
// If we requested more data than there is room for in the rest
// of the slot then we need to either re-use the next slot, if one exists,
// or create a new slot.
if (EndAddress < RequestedEndAddress) {
__kmpc_data_sharing_slot *NewSlot = 0;
size_t NewSize = PushSize;
// Allocate at least the default size for each type of slot.
// Master is a special case and even though there is only one thread,
// it can share more things with the workers. For uniformity, it uses
// the full size of a worker warp slot.
size_t DefaultSlotSize = DS_Worker_Warp_Slot_Size;
if (DefaultSlotSize > NewSize)
NewSize = DefaultSlotSize;
NewSlot = (__kmpc_data_sharing_slot *) SafeMalloc(
sizeof(__kmpc_data_sharing_slot) + NewSize,
"Global memory slot allocation.");
NewSlot->Next = 0;
NewSlot->Prev = SlotP;
NewSlot->PrevSlotStackPtr = StackP;
NewSlot->DataEnd = &NewSlot->Data[0] + NewSize;
// Make previous slot point to the newly allocated slot.
SlotP->Next = NewSlot;
// The current slot becomes the new slot.
SlotP = NewSlot;
// The stack pointer always points to the next free stack frame.
StackP = &NewSlot->Data[0] + PushSize;
// The frame pointer always points to the beginning of the frame.
FrameP = DataSharingState.FramePtr[WID] = &NewSlot->Data[0];
} else {
// Add the data chunk to the current slot. The frame pointer is set to
// point to the start of the new frame held in StackP.
FrameP = DataSharingState.FramePtr[WID] = StackP;
// Reset stack pointer to the requested address.
StackP = (void *)RequestedEndAddress;
}
}
// Get address from lane 0.
int *FP = (int *)&FrameP;
FP[0] = __kmpc_impl_shfl_sync(CurActive, FP[0], 0);
if (sizeof(FrameP) == 8)
FP[1] = __kmpc_impl_shfl_sync(CurActive, FP[1], 0);
return FrameP;
}
EXTERN void *__kmpc_data_sharing_coalesced_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
return data_sharing_push_stack_common(DataSize);
}
// Called at the time of the kernel initialization. This is used to initialize
// the list of references to shared variables and to pre-allocate global storage
// for holding the globalized variables.
//
// By default the globalized variables are stored in global memory. If the
// UseSharedMemory is set to true, the runtime will attempt to use shared memory
// as long as the size requested fits the pre-allocated size.
EXTERN void *__kmpc_data_sharing_push_stack(size_t DataSize,
int16_t UseSharedMemory) {
// Compute the total memory footprint of the requested data.
// The master thread requires a stack only for itself. A worker
// thread (which at this point is a warp master) will require
// space for the variables of each thread in the warp,
// i.e. one DataSize chunk per warp lane.
// TODO: change WARPSIZE to the number of active threads in the warp.
size_t PushSize = (isRuntimeUninitialized() || IsMasterThread(isSPMDMode()))
? DataSize
: WARPSIZE * DataSize;
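// Illustrative sizing (example values only): with DataSize = 16 and
// WARPSIZE = 32, a worker warp master pushes 32 * 16 = 512 bytes, whereas the
// team master in non-SPMD mode pushes only 16; lane 5 of the warp then starts
// its frame at offset 5 * 16 = 80, as computed below.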
// Compute the start address of the frame of each thread in the warp.
uintptr_t FrameStartAddress =
(uintptr_t) data_sharing_push_stack_common(PushSize);
FrameStartAddress += (uintptr_t) (getLaneId() * DataSize);
return (void *)FrameStartAddress;
}
// Pop the stack and free any memory which can be reclaimed.
//
// When the pop operation removes the last global memory slot,
// reclaim all outstanding global memory slots since it is
// likely we have reached the end of the kernel.
EXTERN void __kmpc_data_sharing_pop_stack(void *FrameStart) {
ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
__threadfence_block();
if (GetThreadIdInBlock() % WARPSIZE == 0) {
unsigned WID = getWarpId();
// Current slot
__kmpc_data_sharing_slot *&SlotP = DataSharingState.SlotPtr[WID];
// Pointer to next available stack.
void *&StackP = DataSharingState.StackPtr[WID];
// Pop the frame.
StackP = FrameStart;
// If the current slot is empty, we need to free the slot after the
// pop.
bool SlotEmpty = (StackP == &SlotP->Data[0]);
if (SlotEmpty && SlotP->Prev) {
// Before removing the slot we need to reset StackP.
StackP = SlotP->PrevSlotStackPtr;
// Remove the slot.
SlotP = SlotP->Prev;
SafeFree(SlotP->Next, "Free slot.");
SlotP->Next = 0;
}
}
}
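// Minimal usage sketch (assumed compiler-emitted call pattern, shown only for
// illustration; pushes and pops are strictly LIFO per warp):
// void *Frame = __kmpc_data_sharing_push_stack(Size, /*UseSharedMemory=*/0);
// ... each lane uses its DataSize-sized chunk of the globalized frame ...
// __kmpc_data_sharing_pop_stack(Frame);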
// Begin a data sharing context. Maintain a list of references to shared
// variables. This list of references to shared variables will be passed
// to one or more threads.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_begin_sharing_variables(void ***GlobalArgs, size_t nArgs) {
omptarget_nvptx_globalArgs.EnsureSize(nArgs);
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
// End a data sharing context. There is no need to have a list of refs
// to shared variables because the context in which those variables were
// shared has now ended. This should clean up the list of references only
// without affecting the actual global storage of the variables.
// In L0 data sharing this is called by master thread.
// In L1 data sharing this is called by active warp master thread.
EXTERN void __kmpc_end_sharing_variables() {
omptarget_nvptx_globalArgs.DeInit();
}
// This function will return a list of references to global variables. This
// is how the workers will get a reference to the globalized variable. The
// members of this list will be passed to the outlined parallel function
// preserving the order.
// Called by all workers.
EXTERN void __kmpc_get_shared_variables(void ***GlobalArgs) {
*GlobalArgs = omptarget_nvptx_globalArgs.GetArgs();
}
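// Sketch of the sharing protocol (assumed call sequence, for illustration):
// void **Args;
// __kmpc_begin_sharing_variables(&Args, 2); // master publishes two slots
// Args[0] = &A; Args[1] = &B;
// ... workers: __kmpc_get_shared_variables(&Args); read Args[0], Args[1] ...
// __kmpc_end_sharing_variables(); // master tears the list down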
// This function is used to initialize the static memory manager. This manager is used to
// manage statically allocated global memory. This memory is allocated by the
// compiler and used to correctly implement globalization of the variables in
// target, teams and distribute regions.
EXTERN void __kmpc_get_team_static_memory(int16_t isSPMDExecutionMode,
const void *buf, size_t size,
int16_t is_shared,
const void **frame) {
if (is_shared) {
*frame = buf;
return;
}
if (isSPMDExecutionMode) {
if (GetThreadIdInBlock() == 0) {
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
}
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
return;
}
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
*frame = omptarget_nvptx_simpleMemoryManager.Acquire(buf, size);
__threadfence();
}
EXTERN void __kmpc_restore_team_static_memory(int16_t isSPMDExecutionMode,
int16_t is_shared) {
if (is_shared)
return;
if (isSPMDExecutionMode) {
// FIXME: use __syncthreads instead when the function copy is fixed in LLVM.
__SYNCTHREADS();
if (GetThreadIdInBlock() == 0) {
omptarget_nvptx_simpleMemoryManager.Release();
}
return;
}
__threadfence();
ASSERT0(LT_FUSSY, GetThreadIdInBlock() == GetMasterThreadID(),
"Must be called only in the target master thread.");
omptarget_nvptx_simpleMemoryManager.Release();
}
|
b92cf77654ec546c63746cb95f15a02f84737dbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/open_decoder.h"
#include "hipcub/hipcub.hpp"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
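// Per-lane load width implied by the aliases above (derived from the
// definitions, listed for illustration): Copy_t<float, 32> -> int (one float
// per lane), Copy_t<float, 64> -> int2 (two floats), Copy_t<half, 64> -> int
// (one half2), Copy_t<half, 128> -> int2 (two half2), wider loads -> int4.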
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
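/* The two-level reduction above assumes blockDim.x is a multiple of 32 and at
most 1024 (<= 32 warps): each warp's partial sum is staged in shared[0..31]
and the first warp produces the block-wide sum consumed by thread 0. */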
/* gelu activation */
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
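/* The constant 0.7978845608028654f is sqrt(2/pi); this is the usual tanh
approximation gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). */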
/* gelu activation for half2 */
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
template <typename T>
__global__
void add_bias_gelu(T* out, const T* bias, int m, int n)
{
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
T reg_bias = __ldg(&bias[id % n]);
T val = out[id] + reg_bias;
out[id] = (T)(gelu(val));
}
}
template <>
__global__
void add_bias_gelu(half* out, const half* bias, int m, int n)
{
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
half2 reg_bias = __ldg(&bias_ptr[id % n]);
half2 val = out_ptr[id] + reg_bias;
out_ptr[id] = gelu(val);
}
}
template <typename T>
__global__
void add_bias_relu(T* out, const T* bias, int m, int n)
{
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
T reg_bias = __ldg(&bias[id % n]);
T val = out[id] + reg_bias;
out[id] = (T)(val > 0.0f ? val : 0.0f);
}
}
template <>
__global__
void add_bias_relu(half* out, const half* bias, int m, int n)
{
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
half2 reg_bias = __ldg(&bias_ptr[id % n]);
half2 val = out_ptr[id] + reg_bias;
val.x = val.x > (half)0.0f ? val.x : (half)0.0f;
val.y = val.y > (half)0.0f ? val.y : (half)0.0f;
out_ptr[id] = val;
}
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, int batch_size, int head_num, const int step, const T scalar)
{
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different order of FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
// float overload exists only so the template instantiation compiles; it is never called
template <int size_per_head, int block_sz>
__global__
void masked_attention_kernel_opt_half2(
float* __restrict key_buf, float* __restrict value_buf,
float* __restrict query_buf, const float* __restrict self_Q_bias,
float* __restrict key_cache, const float* __restrict self_K_bias,
float* __restrict value_cache, const float* __restrict self_V_bias,
float* __restrict context_buf, int batch_size, int head_num, const int step, const float scalar) {}
template <int size_per_head, int block_sz>
__global__
void masked_attention_kernel_opt_half2(
half* __restrict key_buf, half* __restrict value_buf,
half* __restrict query_buf, const half* __restrict self_Q_bias,
half* __restrict key_cache, const half* __restrict self_K_bias,
half* __restrict value_cache, const half* __restrict self_V_bias,
half* __restrict context_buf, int batch_size, int head_num, const int step, const half scalar)
{
half2* key_buf_ptr = (half2*)key_buf;
half2* value_buf_ptr = (half2*)value_buf;
half2* query_buf_ptr = (half2*)query_buf;
half2* key_cache_ptr = (half2*)key_cache;
half2* value_cache_ptr = (half2*)value_cache;
const half2* self_Q_bias_ptr = (const half2*)self_Q_bias;
const half2* self_K_bias_ptr = (const half2*)self_K_bias;
const half2* self_V_bias_ptr = (const half2*)self_V_bias;
half2* context_buf_ptr = (half2*)context_buf;
typedef Copy_t<half2, size_per_head/2> copy_t;
const int elems_per_thread = size_per_head / 2 / WARP_SIZE;
union Access_t
{
copy_t v;
half2 x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Half_n_t
{
half2 x[elems_per_thread]; // supported size 1,2,4
} half_n_t;
__shared__ half_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head / 2;
int qkv_bias_id = head_id * size_per_head / 2;
query_buf_ptr = &query_buf_ptr[qkv_id];
key_buf_ptr = &key_buf_ptr[qkv_id];
value_buf_ptr = &value_buf_ptr[qkv_id];
self_K_bias_ptr = &self_K_bias_ptr[qkv_bias_id];
key_cache_ptr = &key_cache_ptr[qkv_id];
self_Q_bias_ptr = &self_Q_bias_ptr[qkv_bias_id];
self_V_bias_ptr = &self_V_bias_ptr[qkv_bias_id];
value_cache_ptr = &value_cache_ptr[qkv_id];
context_buf_ptr = &context_buf_ptr[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf_ptr + lane_id);
key_buf_r.v = *((copy_t *)key_buf_ptr + lane_id);
bias_r.v = *((copy_t *)self_Q_bias_ptr + lane_id);
half2 qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = __hadd2(query_buf_r.x[i], bias_r.x[i]);
}
//offset for each step
int offset = batch_size * head_num * size_per_head / 2;
bias_r.v = *((copy_t *) self_K_bias_ptr + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache_ptr[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = __hadd2(key_buf_r.x[i], bias_r.x[i]);
}
*((copy_t *)&key_cache_ptr[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
half2 val2 = __hmul2(key_val_r.x[i], qb_r[i]);
val = val + (float)((val2.x + val2.y) * scalar);
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different order of FP32 summation
half2 sum_r[elems_per_thread];
for(int i = 0; i < elems_per_thread; i++)
{
sum_r[i].x = (half)0.f;
sum_r[i].y = (half)0.f;
}
bias_r.v = *((copy_t *) self_V_bias_ptr + lane_id);
value_buf_r.v = *((copy_t *)value_buf_ptr + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache_ptr[ite * offset] + lane_id);
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = __hadd2(value_buf_r.x[i], bias_r.x[i]);
}
*((copy_t *)&value_cache_ptr[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
half2 logit2_val;
logit2_val.x = (half)logits[ite];
logit2_val.y = (half)logits[ite];
sum_r[i] = __hadd2(sum_r[i], __hmul2(value_val_r.x[i], logit2_val));
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = __hadd2(sum_r[i], sq[j * WARP_SIZE + tid].x[i]);
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf_ptr + lane_id) = value_val_r.v;
}
}
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads(); //try to remove
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
__global__
void masked_attention_kernel_v2(T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
int warp_size = 32;
int offset = batch_size * head_num * size_per_head;
int warp_ite = size_per_head / warp_size;
T qk = (T)0.0f;
//each warp process one step
int step_id = threadIdx.x >> 5;
if(step_id < step)
{
for(int wite = 0; wite < warp_ite; ++wite)
{
T key = key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head
+ tid % warp_size + wite * warp_size];
//for the last step, we should update K + bias_K to the cache
if(step_id == step - 1)
{
key += self_K_bias[bid * head_num * size_per_head + head_id * size_per_head +
tid % warp_size + wite * warp_size];
key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head
+ tid % warp_size + wite * warp_size] = key;
}
qk += key * sq[tid % warp_size + wite * warp_size];
}
qk = warpReduceSum(qk * scalar);
if(threadIdx.x % warp_size == 0)
{
logits[step_id] = qk;
printf("step_id %d %f\n", step_id, qk);
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update V + bias_V to the cache
if(ite == step - 1)
{
value += self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, hipStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
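// cond equals size_per_head only when ATTENION_OPT is enabled; otherwise it is
// 0 and control falls through to the generic kernel in the default branch.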
switch (cond)
{
case 32:
hipLaunchKernelGGL(( masked_attention_kernel_opt<32, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
case 64:
if(sizeof(T) == 2)
hipLaunchKernelGGL(( masked_attention_kernel_opt_half2<64, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
hipLaunchKernelGGL(( masked_attention_kernel_opt<64, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
batch_size, head_num, step, scalar);
break;
case 128:
if(sizeof(T) == 2)
hipLaunchKernelGGL(( masked_attention_kernel_opt_half2<128, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
hipLaunchKernelGGL(( masked_attention_kernel_opt<128, block_sz, T>), dim3(grid), dim3(block_sz), sizeof(float)*step, stream,
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//suppose size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
hipLaunchKernelGGL(( masked_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, batch_size,
head_num, size_per_head, step, scalar);
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::masked_multi_head_attention(
const DataType_* from_tensor,
DataType_* key_cache_,
DataType_* value_cache_,
DataType_* decoder_output,
const int step)
{
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
if(is_fuse_QKV == true)
{
check_cuda_error(hipblasGemmBatchedEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
(const void* const*) qkv_kernel_, AType_, n,
(const void* const*) qkv_input_, BType_, k,
&beta,
(void* const*)qkv_buf_, CType_, n,
3,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[4])));
}
else
{
key_buf_ = key_cache_ + (step - 1) * m * n;
value_buf_ = value_cache_ + (step - 1) * m * n;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.query_weight.kernel , AType_, n,
from_tensor, BType_, k,
&beta,
query_buf_, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.key_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
key_buf_, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.value_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
value_buf_, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
}
masked_attention_dispatch<DataType_>(
key_buf_, value_buf_,
query_buf_, param_.self_attention.query_weight.bias,
key_cache_, param_.self_attention.key_weight.bias,
value_cache_, param_.self_attention.value_weight.bias,
context_buf_, batch_size_,
head_num_, size_per_head_, step, param_.stream);
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.attention_output_weight.kernel, AType_, n,
context_buf_, BType_, k,
&beta,
decoder_output, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
}
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // used to store the logits for steps [0, step)
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef hipcub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef hipcub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename hipcub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int key_value_id = bid * (seq_len * head_num * size_per_head) +
head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = hipcub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, hipcub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces a discrepancy because of the different order of FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add bias to value memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only needs to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, we should add bias to value memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, int batch_size, int head_num, int size_per_head, int step, int seq_len, hipStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 32, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 64, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
hipLaunchKernelGGL(( cross_attention_kernel_opt<T, 128, block_sz>), dim3(grid), dim3(block_sz), sizeof(float)*seq_len, stream,
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
hipLaunchKernelGGL(( cross_attention_kernel<T>), dim3(grid), dim3(block), shared_size, stream,
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
/* attention with source sentence */
template<OperationType OpType_>
void OpenDecoder<OpType_>::cross_multi_head_attention(
const DataType_* from_tensor,
const DataType_* memory_tensor,
DataType_* key_mem_cache,
DataType_* value_mem_cache,
DataType_* decoder_output,
const int* length,
const int seq_len,
const int step)
{
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
//reuse the query_buf
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.query_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
query_buf_, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
if(step == 1)
{
m *= seq_len;
k = memory_hidden_units_;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.key_weight.kernel, AType_, n,
memory_tensor, BType_, k,
&beta,
key_mem_cache, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1])));
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.value_weight.kernel, AType_, n,
memory_tensor, BType_, k,
&beta,
value_mem_cache, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1])));
k = hidden_units_;
}
cross_attention_dispatch<DataType_>(
query_buf_, param_.cross_attention.query_weight.bias,
key_mem_cache, param_.cross_attention.key_weight.bias,
value_mem_cache, param_.cross_attention.value_weight.bias,
length, context_buf_, batch_size_,
head_num_, size_per_head_, step, seq_len, param_.stream);
m = batch_size_;
n = head_num_ * size_per_head_;
k = n;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.attention_output_weight.kernel, AType_, n,
context_buf_, BType_, k,
&beta,
decoder_output, CType_, n,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[0])));
}
template <typename T>
__global__
void decoder_norm1_kernel_generalize(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
T* output,
int m, int n)
{
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
local_sum += (float)(__ldg(&input[blockIdx.x * n + i]));
}
mean = blockReduceSum<float>(local_sum);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
float local_var_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float diff = (float)(__ldg(&input[blockIdx.x * n + i])) - s_mean;
local_var_sum += diff * diff;
}
variance = blockReduceSum<float>(local_var_sum);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
for(int i = tid; i < n; i+= blockDim.x)
{
output[blockIdx.x * n + i] =
(T)((( (float)input[blockIdx.x * n + i] - s_mean) * s_variance) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
}
template <typename T>
__global__
void decoder_norm1_kernel(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
T* output,
int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = tid < n ? (float)(__ldg(&input[blockIdx.x * n + tid])) : 0.0f;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < n)
output[blockIdx.x * n + tid] =
(T)(((local_out - s_mean) * s_variance) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void decoder_norm1_kernel(const half* __restrict input,
const half* __restrict gamma,
const half* __restrict beta,
half* output,
int m, int n)
{
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
const half2* input_ptr = (const half2*)input;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
half2* output_ptr = (half2*)output;
float local_out = 0.0f;
int id = blockIdx.x * blockDim.x + tid;
if(tid < blockDim.x)
{
local_out_fp2 = __half22float2(__ldg(&input_ptr[id]));
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
}
mean = blockReduceSum<float>(local_out);
if(tid == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < blockDim.x ?
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean)
: 0.0f);
if(tid == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < blockDim.x)
{
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
output_ptr[id] = __float22half2_rn(local_out_fp2);
}
}
template <typename T>
__global__
void decoder_norm2_kernel_generalize(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
const T* __restrict bias,
T* output, T* norm_output,
int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float local_out = (float)(__ldg(&input[blockIdx.x * n + i]));
local_out += (float)(output[blockIdx.x * n + i]);
local_out += (float)(__ldg(&bias[i]));
output[blockIdx.x * n + i] = (T)local_out;
local_sum += local_out;
}
mean = blockReduceSum<float>(local_sum);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
float local_var_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float diff = (float)(__ldg(&output[blockIdx.x * n + i])) - s_mean;
local_var_sum += diff * diff;
}
variance = blockReduceSum<float>(local_var_sum);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
for(int i = tid; i < n; i+= blockDim.x)
{
norm_output[blockIdx.x * n + i] =
(T)((( (float)output[blockIdx.x * n + i] - s_mean) * s_variance) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
}
template <typename T>
__global__
void decoder_norm2_kernel(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
const T* __restrict bias,
T* output, T* norm_output,
int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
if(tid < n)
{
local_out = (float)(__ldg(&input[blockIdx.x * n + tid]));
local_out += (float)(output[blockIdx.x * n + tid]);
local_out += (float)(__ldg(&bias[tid]));
output[blockIdx.x * n + tid] = (T)local_out;
}
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < n)
norm_output[blockIdx.x * n + tid] =
(T)((local_out - s_mean) * s_variance * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void decoder_norm2_kernel(const half* __restrict input,
const half* __restrict gamma,
const half* __restrict beta,
const half* __restrict bias,
half* output, half* norm_output,
int m, int n)
{
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
const half2* input_ptr = (const half2*)input;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
const half2* bias_ptr = (const half2*)bias;
half2* output_ptr = (half2*)output;
half2* norm_output_ptr = (half2*)norm_output;
float local_out = 0.0f;
int id = blockIdx.x * blockDim.x + tid;
if(tid < blockDim.x)
{
output_ptr[id] = __hadd2(__hadd2(output_ptr[id], __ldg(&input_ptr[id])), __ldg(&bias_ptr[tid]));
local_out_fp2 = __half22float2(output_ptr[id]);
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
}
mean = blockReduceSum<float>(local_out);
if(tid == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < blockDim.x ?
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean)
: 0.0f);
if(tid == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < blockDim.x)
{
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
norm_output_ptr[id] = __float22half2_rn(local_out_fp2);
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::decoder_norm1(
const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
DataType_* output,
int m, int n)
{
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if(n % 32 != 0)
block.x = 1024;
block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x
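/* Example launch shape (illustrative): for n = 1024, block.x stays 1024 in
FP32 but drops to 512 in FP16; the generalized kernel below strides over all
n elements, so the smaller block remains correct. */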
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, output, m, n);
hipLaunchKernelGGL(( decoder_norm1_kernel_generalize<DataType_>), dim3(grid), dim3(block), 0, param_.stream, input, gamma, beta, output, m, n); // For gpt-3
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::decoder_norm2(
const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
const DataType_* bias,
DataType_* output,
DataType_* norm_output,
int m, int n)
{
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if(n % 32 != 0)
block.x = 1024;
block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, bias, output, norm_output, m, n);
hipLaunchKernelGGL(( decoder_norm2_kernel_generalize<DataType_>), dim3(grid), dim3(block), 0, param_.stream, input, gamma, beta, bias, output, norm_output, m, n); // For gpt-3
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::ffn(
const DataType_* input,
DataType_* ffn_inner,
DataType_* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type)
{
int m1 = m, k1 = n, n1 = inner_size;
DataType_ alpha = (DataType_)1.0f;
DataType_ beta = (DataType_)0.0f;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n1, m1, k1,
&alpha,
param_.ffn.intermediate_weight.kernel, AType_, n1,
input, BType_, k1,
&beta,
ffn_inner, CType_, n1,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[2])));
// dim3 grid(min(m1, 65536));
// dim3 block(min(n1 / 4, 1024));
// // TODO remove this limitation
// // assert(block.x <= 1024);
// if(activation_type == ActivationType::RELU)
// hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1);
// else if(activation_type == ActivationType::GELU)
// hipLaunchKernelGGL(( add_bias_gelu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1);
dim3 block(min((int)(n1 / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid(min(m1 * n1 / block.x, 65536));
if(activation_type == ActivationType::RELU)
hipLaunchKernelGGL(( add_bias_relu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));
else if(activation_type == ActivationType::GELU)
hipLaunchKernelGGL(( add_bias_gelu<DataType_>), dim3(grid), dim3(block), 0, param_.stream, ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));
int m2 = m, n2 = n, k2 = inner_size;
check_cuda_error(hipblasGemmEx(param_.cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
n2, m2, k2,
&alpha,
param_.ffn.output_weight.kernel, AType_, n2,
ffn_inner, BType_, k2,
&beta,
output, CType_, n2,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[3])));
}
template <typename T>
__global__
void add_bias_input_kernel(T* output, const T* input, const T* bias, const int m, const int n)
{
// original kernel, which only supports cases of n <= 1024.
int id = blockIdx.x * n + threadIdx.x;
output[id] = output[id] + input[id] + __ldg(&bias[threadIdx.x]);
}
template <typename T>
__global__
void add_bias_input_kernel_generalize(T* output, const T* input, const T* bias, const int m, const int n)
{
  // TODO: generalized for GPT-3.
  // This kernel can run with any block size and grid size,
  // since the hidden dimension of GPT-3 is larger than 1024.
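  // e.g., with n = 12288 (hidden size of the largest GPT-3) and blockDim.x = 1024,
  // blocks_per_row = 12, so block b loads the bias slice for columns
  // (b % 12) * 1024 ... (b % 12) * 1024 + 1023.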
const int bid = blockIdx.x;
const int blocks_per_row = n / blockDim.x;
const int col_index = (bid % blocks_per_row) * blockDim.x + threadIdx.x;
T bias_val = __ldg(&bias[col_index]);
for(int index = bid * blockDim.x + threadIdx.x; index < m * n; index += blockDim.x * gridDim.x)
{
output[index] = output[index] + input[index] + bias_val;
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::add_bias_input(DataType_* output, const DataType_* input, const int m, const int n)
{
dim3 grid(min(m, 65536));
dim3 block(min(n, 1024));
hipLaunchKernelGGL(( add_bias_input_kernel_generalize), dim3(grid), dim3(block), 0, param_.stream, output, input, param_.ffn.output_weight.bias, m, n);
}
template void OpenDecoder<OperationType::FP32>::masked_multi_head_attention(
const float* from_tensor,
float* key_cache,
float* value_cache,
float* decoder_output,
const int step);
template void OpenDecoder<OperationType::FP16>::masked_multi_head_attention(
const half* from_tensor,
half* key_cache,
half* value_cache,
half* decoder_output,
const int step);
template void OpenDecoder<OperationType::FP32>::cross_multi_head_attention(
const float* from_tensor,
const float* memory_tensor,
float* key_mem_cache,
float* value_mem_cache,
float* decoder_output,
const int* length,
const int max_seq_len,
const int step);
template void OpenDecoder<OperationType::FP16>::cross_multi_head_attention(
const half* from_tensor,
const half* memory_tensor,
half* key_mem_cache,
half* value_mem_cache,
half* decoder_output,
const int* length,
const int max_seq_len,
const int step);
template void OpenDecoder<OperationType::FP32>::ffn(
const float* input,
float* ffn_inner,
  float* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenDecoder<OperationType::FP16>::ffn(
const half* input,
half* ffn_inner,
  half* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenDecoder<OperationType::FP32>::decoder_norm1(
const float* input,
const float* gamma,
const float* beta,
float* output,
int m, int n);
template void OpenDecoder<OperationType::FP16>::decoder_norm1(
const half* input,
const half* gamma,
const half* beta,
half* output,
int m, int n);
template void OpenDecoder<OperationType::FP32>::decoder_norm2(
const float* input,
const float* gamma,
const float* beta,
const float* bias,
float* output,
float* norm_output,
int m, int n);
template void OpenDecoder<OperationType::FP16>::decoder_norm2(
const half* input,
const half* gamma,
const half* beta,
const half* bias,
half* output,
half* norm_output,
int m, int n);
template void OpenDecoder<OperationType::FP32>::add_bias_input(
float* output,
const float* input,
const int m,
const int n);
template void OpenDecoder<OperationType::FP16>::add_bias_input(
half* output,
const half* input,
const int m,
const int n);
}//namespace FasterTransformer
| b92cf77654ec546c63746cb95f15a02f84737dbb.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/open_decoder.h"
#include "cub/cub.cuh"
namespace fastertransformer{
const int WARP_SIZE = 32;
const bool ATTENION_OPT = true;
const int ATTENTION_BLOCK_SIZE = 256;
///////////////////////////////////////////////////////////////////////////////////////////////////
template <int HALF_ELEMENTS_PER_WARP_LOAD>
using Copy_half_t =
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 32, half,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 64, int,
typename std::conditional<HALF_ELEMENTS_PER_WARP_LOAD == 128, int2, int4
>::type
>::type
>::type;
template <typename T, int ELEMENTS_PER_WARP_LOAD>
using Copy_t = Copy_half_t<sizeof(T) / sizeof(half) * ELEMENTS_PER_WARP_LOAD>;
///////////////////////////////////////////////////////////////////////////////////////////////////
/**
masked multi-head attention
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
/* gelu activation */
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f * (1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
/* gelu activation for half2 */
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f * (1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
template <typename T>
__global__
void add_bias_gelu(T* out, const T* bias, int m, int n)
{
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
T reg_bias = __ldg(&bias[id % n]);
T val = out[id] + reg_bias;
out[id] = (T)(gelu(val));
}
}
template <>
__global__
void add_bias_gelu(half* out, const half* bias, int m, int n)
{
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
half2 reg_bias = __ldg(&bias_ptr[id % n]);
half2 val = out_ptr[id] + reg_bias;
out_ptr[id] = gelu(val);
}
}
template <typename T>
__global__
void add_bias_relu(T* out, const T* bias, int m, int n)
{
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
T reg_bias = __ldg(&bias[id % n]);
T val = out[id] + reg_bias;
out[id] = (T)(val > 0.0f ? val : 0.0f);
}
}
template <>
__global__
void add_bias_relu(half* out, const half* bias, int m, int n)
{
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int id = blockIdx.x * blockDim.x + threadIdx.x; id < m * n; id += blockDim.x * gridDim.x)
{
half2 reg_bias = __ldg(&bias_ptr[id % n]);
half2 val = out_ptr[id] + reg_bias;
val.x = val.x > (half)0.0f ? val.x : (half)0.0f;
val.y = val.y > (half)0.0f ? val.y : (half)0.0f;
out_ptr[id] = val;
}
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
// __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get maxx in each warp
if(lane == 0) // record in-warp maxx by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)-1e20f;
val = warpReduceMax(val);
return val;
}
template <int size_per_head, int block_sz, typename T>
__global__
void masked_attention_kernel_opt(
T* __restrict key_buf, T* __restrict value_buf,
T* __restrict query_buf, const T* __restrict self_Q_bias,
T* __restrict key_cache, const T* __restrict self_K_bias,
T* __restrict value_cache, const T* __restrict self_V_bias,
T* __restrict context_buf, int batch_size, int head_num, const int step, const T scalar)
{
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
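  // Vectorized per-warp layout: each of the 32 lanes loads
  // elems_per_thread = size_per_head / 32 contiguous elements with a single copy_t
  // access, so one warp reads a full size_per_head vector; warps then stride over
  // the decoding steps.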
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
T x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // use to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head;
int qkv_bias_id = head_id * size_per_head;
query_buf = &query_buf[qkv_id];
key_buf = &key_buf[qkv_id];
value_buf = &value_buf[qkv_id];
self_K_bias = &self_K_bias[qkv_bias_id];
key_cache = &key_cache[qkv_id];
self_Q_bias = &self_Q_bias[qkv_bias_id];
self_V_bias = &self_V_bias[qkv_bias_id];
value_cache = &value_cache[qkv_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
key_buf_r.v = *((copy_t *)key_buf + lane_id);
bias_r.v = *((copy_t *)self_Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = batch_size * head_num * size_per_head;
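  // offset is the stride between consecutive decoding steps in key_cache /
  // value_cache, whose layout is [step][batch * head][size_per_head].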
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * (float)scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) self_V_bias + lane_id);
value_buf_r.v = *((copy_t *)value_buf + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = (float)value_buf_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)value_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + tid].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf + lane_id) = value_val_r.v;
}
}
// only use for compile
template <int size_per_head, int block_sz>
__global__
void masked_attention_kernel_opt_half2(
float* __restrict key_buf, float* __restrict value_buf,
float* __restrict query_buf, const float* __restrict self_Q_bias,
float* __restrict key_cache, const float* __restrict self_K_bias,
float* __restrict value_cache, const float* __restrict self_V_bias,
float* __restrict context_buf, int batch_size, int head_num, const int step, const float scalar) {}
template <int size_per_head, int block_sz>
__global__
void masked_attention_kernel_opt_half2(
half* __restrict key_buf, half* __restrict value_buf,
half* __restrict query_buf, const half* __restrict self_Q_bias,
half* __restrict key_cache, const half* __restrict self_K_bias,
half* __restrict value_cache, const half* __restrict self_V_bias,
half* __restrict context_buf, int batch_size, int head_num, const int step, const half scalar)
{
half2* key_buf_ptr = (half2*)key_buf;
half2* value_buf_ptr = (half2*)value_buf;
half2* query_buf_ptr = (half2*)query_buf;
half2* key_cache_ptr = (half2*)key_cache;
half2* value_cache_ptr = (half2*)value_cache;
const half2* self_Q_bias_ptr = (const half2*)self_Q_bias;
const half2* self_K_bias_ptr = (const half2*)self_K_bias;
const half2* self_V_bias_ptr = (const half2*)self_V_bias;
half2* context_buf_ptr = (half2*)context_buf;
typedef Copy_t<half2, size_per_head/2> copy_t;
const int elems_per_thread = size_per_head / 2 / WARP_SIZE;
union Access_t
{
copy_t v;
half2 x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Half_n_t
{
half2 x[elems_per_thread]; // supported size 1,2,4
} half_n_t;
__shared__ half_n_t sq[block_sz];
extern __shared__ float logits[]; // use to store the logits from [0~step]
const int tid = threadIdx.x;
const int warp_num = block_sz / WARP_SIZE;
const int bid = blockIdx.x;
const int head_id = blockIdx.x % head_num;
const int warp_id = tid / WARP_SIZE; // warp_id in block
const int lane_id = tid % WARP_SIZE; // lane_id in warp
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
int qkv_id = bid * size_per_head / 2;
int qkv_bias_id = head_id * size_per_head / 2;
query_buf_ptr = &query_buf_ptr[qkv_id];
key_buf_ptr = &key_buf_ptr[qkv_id];
value_buf_ptr = &value_buf_ptr[qkv_id];
self_K_bias_ptr = &self_K_bias_ptr[qkv_bias_id];
key_cache_ptr = &key_cache_ptr[qkv_id];
self_Q_bias_ptr = &self_Q_bias_ptr[qkv_bias_id];
self_V_bias_ptr = &self_V_bias_ptr[qkv_bias_id];
value_cache_ptr = &value_cache_ptr[qkv_id];
context_buf_ptr = &context_buf_ptr[qkv_id];
Access_t bias_r, query_buf_r;
Access_t key_val_r, key_buf_r;
Access_t value_val_r, value_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf_ptr + lane_id);
key_buf_r.v = *((copy_t *)key_buf_ptr + lane_id);
bias_r.v = *((copy_t *)self_Q_bias_ptr + lane_id);
half2 qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = __hadd2(query_buf_r.x[i], bias_r.x[i]);
}
//offset for each step
int offset = batch_size * head_num * size_per_head / 2;
bias_r.v = *((copy_t *) self_K_bias + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache_ptr[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = __hadd2(key_buf_r.x[i], bias_r.x[i]);
}
*((copy_t *)&key_cache_ptr[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
half2 val2 = __hmul2(key_val_r.x[i], qb_r[i]);
val = val + (float)((val2.x + val2.y) * scalar);
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < step; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < step; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
half2 sum_r[elems_per_thread];
for(int i = 0; i < elems_per_thread; i++)
{
sum_r[i].x = (half)0.f;
sum_r[i].y = (half)0.f;
}
bias_r.v = *((copy_t *) self_V_bias_ptr + lane_id);
value_buf_r.v = *((copy_t *)value_buf_ptr + lane_id);
for(int ite = warp_id; ite < step; ite += warp_num)
{
value_val_r.v = *((copy_t *)&value_cache_ptr[ite * offset] + lane_id);
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = __hadd2(value_buf_r.x[i], bias_r.x[i]);
}
*((copy_t *)&value_cache_ptr[ite * offset] + lane_id) = value_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
half2 logit2_val;
logit2_val.x = (half)logits[ite];
logit2_val.y = (half)logits[ite];
sum_r[i] = __hadd2(sum_r[i], __hmul2(value_val_r.x[i], logit2_val));
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (warp_id == 0)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = __hadd2(sum_r[i], sq[j * WARP_SIZE + tid].x[i]);
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
value_val_r.x[i] = sum_r[i];
}
if (warp_id == 0)
{
*((copy_t *)context_buf_ptr + lane_id) = value_val_r.v;
}
}
template <typename T>
__global__
void masked_attention_kernel(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
//offset for each step
int offset = batch_size * head_num * size_per_head;
for(int ite = 0; ite < step; ++ite)
{
T key = tid < size_per_head ? key_cache[ite * offset + qkv_id] : (T)0.0f;
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1 && tid < size_per_head)
{
key = key_buf[qkv_id] + self_K_bias[qkv_bias_id];
key_cache[ite * offset + qkv_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads(); //try to remove
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
value = value_buf[qkv_id] + self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
__global__
void masked_attention_kernel_v2(T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, const T scalar)
{
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + self_Q_bias[qkv_bias_id];
__syncthreads();
int warp_size = 32;
int offset = batch_size * head_num * size_per_head;
int warp_ite = size_per_head / warp_size;
T qk = (T)0.0f;
//each warp process one step
int step_id = threadIdx.x >> 5;
if(step_id < step)
{
for(int wite = 0; wite < warp_ite; ++wite)
{
T key = key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head
+ tid % warp_size + wite * warp_size];
//for the last step, we should update K + bias_K to the cache
if(step_id == step - 1)
{
key += self_K_bias[bid * head_num * size_per_head + head_id * size_per_head +
tid % warp_size + wite * warp_size];
key_cache[step_id * offset + bid * head_num * size_per_head + head_id * size_per_head
+ tid % warp_size + wite * warp_size] = key;
}
qk += key * sq[tid % warp_size + wite * warp_size];
}
qk = warpReduceSum(qk * scalar);
if(threadIdx.x % warp_size == 0)
{
logits[step_id] = qk;
printf("step_id %d %f\n", step_id, qk);
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < step ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < step ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val;
__syncthreads();
if(tid < step)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < step; ++ite)
{
T value = value_cache[ite * offset + qkv_id];
//for the last step, we should update K + bias_K to the cache
if(ite == step - 1)
{
value += self_V_bias[qkv_bias_id];
value_cache[ite * offset + qkv_id] = value;
}
sum += value * logits[ite];
}
context_buf[qkv_id] = sum;
}
}
template <typename T>
void masked_attention_dispatch(
T* key_buf, T* value_buf,
T* query_buf, const T* self_Q_bias,
T* key_cache, const T* self_K_bias, T* value_cache, const T* self_V_bias,
T* context_buf, int batch_size, int head_num, int size_per_head, const int step, cudaStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
T scalar = (T)(1.f / sqrtf(size_per_head * 1.0f));
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
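  // Use the optimized kernels only when ATTENION_OPT is enabled and size_per_head
  // is 32, 64 or 128; any other value falls through to the generic
  // masked_attention_kernel in the default branch.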
switch (cond)
{
case 32:
masked_attention_kernel_opt<32, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
case 64:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<64, block_sz><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<64, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf,
batch_size, head_num, step, scalar);
break;
case 128:
if(sizeof(T) == 2)
masked_attention_kernel_opt_half2<128, block_sz><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
else
masked_attention_kernel_opt<128, block_sz, T><<<grid, block_sz, sizeof(float)*step, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias, key_cache, self_K_bias, value_cache, self_V_bias, context_buf,
batch_size, head_num, step, scalar);
break;
default:
// default path
int block_size = 128;
//suppose size_per_head <= 128
if(step <= 64)
block_size = 64;
else if(step <= 128 && step > size_per_head)
block_size = 128;
else if(step > 128 && step <= 256)
block_size = 256;
else if(step > 256 && step <= 512)
block_size = 512;
else
block_size = 1024;
if((int)block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
T scalar = 1 / sqrtf(size_per_head * 1.0f);
int shared_size = sizeof(T) * (size_per_head + step);
masked_attention_kernel<T><<<grid, block, shared_size, stream>>>(
key_buf, value_buf,
query_buf, self_Q_bias,
key_cache, self_K_bias,
value_cache, self_V_bias,
context_buf, batch_size,
head_num, size_per_head, step, scalar);
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::masked_multi_head_attention(
const DataType_* from_tensor,
DataType_* key_cache_,
DataType_* value_cache_,
DataType_* decoder_output,
const int step)
{
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
if(is_fuse_QKV == true)
{
check_cuda_error(cublasGemmBatchedEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
(const void* const*) qkv_kernel_, AType_, n,
(const void* const*) qkv_input_, BType_, k,
&beta,
(void* const*)qkv_buf_, CType_, n,
3,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[4])));
}
else
{
key_buf_ = key_cache_ + (step - 1) * m * n;
value_buf_ = value_cache_ + (step - 1) * m * n;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.query_weight.kernel , AType_, n,
from_tensor, BType_, k,
&beta,
query_buf_, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.key_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
key_buf_, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.value_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
value_buf_, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}
masked_attention_dispatch<DataType_>(
key_buf_, value_buf_,
query_buf_, param_.self_attention.query_weight.bias,
key_cache_, param_.self_attention.key_weight.bias,
value_cache_, param_.self_attention.value_weight.bias,
context_buf_, batch_size_,
head_num_, size_per_head_, step, param_.stream);
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.self_attention.attention_output_weight.kernel, AType_, n,
context_buf_, BType_, k,
&beta,
decoder_output, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}
template <typename T, int size_per_head, int block_sz>
__global__
void cross_attention_kernel_opt(
T* __restrict query_buf, const T* __restrict Q_bias,
T* __restrict key_cache, const T* __restrict K_bias,
T* __restrict value_cache, const T* __restrict V_bias,
const int* length_per_sample, T* __restrict context_buf,
int batch_size, int head_num, const int step, const int seq_len, const float scalar)
{
typedef Copy_t<T, size_per_head> copy_t;
const int elems_per_thread = size_per_head / WARP_SIZE;
union Access_t
{
copy_t v;
T x[elems_per_thread]; // supported size 1,2,4
};
typedef struct Float_n_t
{
float x[elems_per_thread]; // supported size 1,2,4
} float_n_t;
__shared__ float_n_t sq[block_sz];
extern __shared__ float logits[]; // use to store the logits from [0~step]
const int warp_id = threadIdx.x / WARP_SIZE;
const int warp_num = block_sz / WARP_SIZE;
typedef cub::BlockReduce<float, block_sz> MaxValBlockReduce;
typedef cub::BlockReduce<float, block_sz> BlockReduce;
__shared__ typename MaxValBlockReduce::TempStorage max_val_block_temp_storage;
__shared__ typename BlockReduce::TempStorage block_temp_storage;
__shared__ typename cub::WarpReduce<float>::TempStorage temp_storage[warp_num];
const int tid = threadIdx.x;
const int bid = blockIdx.x / head_num;
const int head_id = blockIdx.x % head_num;
int length = __ldg(&length_per_sample[bid]);
const int lane_id = tid % WARP_SIZE;
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head;
int qkv_bias_id = head_id * size_per_head;
int key_value_id = bid * (seq_len * head_num * size_per_head) +
+ head_id * size_per_head;
query_buf = &query_buf[qkv_id];
K_bias = &K_bias[qkv_bias_id];
key_cache = &key_cache[key_value_id];
Q_bias = &Q_bias[qkv_bias_id];
V_bias = &V_bias[qkv_bias_id];
value_cache = &value_cache[key_value_id];
context_buf = &context_buf[qkv_id];
Access_t bias_r, key_val_r, query_buf_r;
// each warp will have its own copy of sq
query_buf_r.v = *((copy_t *)query_buf + lane_id);
bias_r.v = *((copy_t *)Q_bias + lane_id);
float qb_r[elems_per_thread];
for (int i = 0; i < elems_per_thread; ++i)
{
qb_r[i] = (float)query_buf_r.x[i] + (float)bias_r.x[i];
}
//offset for each step
int offset = head_num * size_per_head;
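  // Here offset is the stride between consecutive source positions in the memory
  // K/V caches, whose layout is [batch][seq_len][head][size_per_head].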
bias_r.v = *((copy_t *) K_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&key_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only need to be updated at the first step.
if (step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&key_cache[ite * offset] + lane_id) = key_val_r.v;
}
float val = 0.f;
for (int i = 0; i < elems_per_thread; i++)
{
val = val + (float)key_val_r.x[i] * qb_r[i] * scalar;
}
float qk = cub::WarpReduce<float>(temp_storage[warp_id]).Sum(val);
if (lane_id == 0)
{
logits[ite] = qk;
}
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = -1e20f;
for(int i = tid; i < length; i += blockDim.x)
local_i = max(local_i, logits[i]);
float max_val = MaxValBlockReduce(max_val_block_temp_storage).Reduce(local_i, cub::Max());
if(tid == 0)
s_max_val = max_val;
__syncthreads();
float local_o = 0.0f;
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = __expf(logits[i] - s_max_val);
local_o += logits[i];
}
float val = BlockReduce(block_temp_storage).Sum(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
float s_sum_inverse = __fdividef(1.0f, s_sum);
for(int i = tid; i < length; i += blockDim.x)
{
logits[i] = logits[i] * s_sum_inverse;
}
__syncthreads();
// This optimization introduces discrepancy because of different order in FP32 summation
float sum_r[elems_per_thread] = {0.f};
bias_r.v = *((copy_t *) V_bias + lane_id);
for(int ite = warp_id; ite < length; ite += warp_num)
{
key_val_r.v = *((copy_t *)&value_cache[ite * offset] + lane_id);
//For the first step, we should add bias to key memory cache.
if(step == 1)
{
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = (float)key_val_r.x[i] + (float)bias_r.x[i];
}
*((copy_t *)&value_cache[ite * offset] + lane_id) = key_val_r.v;
}
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] += (float)key_val_r.x[i] * logits[ite];
}
}
for (int i = 0; i < elems_per_thread; i++)
{
sq[warp_id * WARP_SIZE + lane_id].x[i] = sum_r[i];
}
__syncthreads();
if (threadIdx.x < WARP_SIZE)
{
#pragma unroll
for (int j = 1; j < warp_num; j++)
{
for (int i = 0; i < elems_per_thread; ++i)
{
sum_r[i] = sum_r[i] + (float)sq[j * WARP_SIZE + threadIdx.x].x[i];
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < elems_per_thread; i++)
{
key_val_r.x[i] = sum_r[i];
}
if (threadIdx.x < WARP_SIZE)
{
*((copy_t *)context_buf + lane_id) = key_val_r.v;
}
}
template<typename T>
__global__
void cross_attention_kernel(
T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias,
T* value_cache, const T* V_bias,
const int* length_per_sample, T* context_buf,
int batch_size, int head_num, int size_per_head, int step, const int seq_len, const T scalar)
{
int tid = threadIdx.x;
int bid = blockIdx.x / head_num;
int head_id = blockIdx.x % head_num;
extern __shared__ __align__(sizeof(T)) unsigned s_buf[];
T* sq = reinterpret_cast<T *>(s_buf);
T* logits = reinterpret_cast<T *>(&sq[size_per_head]);
int length = __ldg(&length_per_sample[bid]);
int qkv_id = bid * head_num * size_per_head + head_id * size_per_head + tid;
int qkv_bias_id = head_id * size_per_head + tid;
if(tid < size_per_head)
sq[tid] = query_buf[qkv_id] + Q_bias[qkv_bias_id];
__syncthreads();
for(int ite = 0; ite < length; ++ite)
{
int key_id = bid * (seq_len * head_num * size_per_head) + ite * (head_num * size_per_head)
+ head_id * size_per_head + tid;
T key = tid < size_per_head ? key_cache[key_id] : (T)(0.0f);
//For the first step, we should add bias to key memory cache.
//The KV memory cache only need to be updated at the first step.
if(step == 1 && tid < size_per_head)
{
key += K_bias[head_id * size_per_head + tid];
key_cache[key_id] = key;
}
T val = (tid < size_per_head) ? key * sq[tid] * scalar : (T)(0.0f);
T qk = blockReduceSum(val);
if(threadIdx.x == 0)
logits[ite] = qk;
__syncthreads(); //try to remove
}
__syncthreads();
__shared__ float s_max_val, s_sum;
float local_i = tid < length ? (float)logits[tid] : -1e20f;
float max_val = blockReduceMax<float>(local_i);
if(tid == 0)
s_max_val = max_val;
__syncthreads();
local_i -= s_max_val;
float local_o = tid < length ? __expf(local_i) : 0.0f;
float val = blockReduceSum<float>(local_o);
if(tid == 0)
s_sum = val + 1e-6;
__syncthreads();
if(tid < length)
logits[tid] = local_o / s_sum;
__syncthreads();
if(tid < size_per_head)
{
T sum = (T)0.0f;
for(int ite = 0; ite < length; ++ite)
{
int value_id = bid * seq_len * head_num * size_per_head + ite * head_num * size_per_head
+ head_id * size_per_head + tid;
T value = value_cache[value_id];
//for the first step, we should add bias to key memory cache
if(step == 1)
{
value += V_bias[head_id * size_per_head + tid];
value_cache[value_id] = value;
}
sum += value * logits[ite];
}
context_buf[bid * head_num * size_per_head + head_id * size_per_head + tid] = sum;
}
}
template <typename T>
void cross_attention_dispatch(T* query_buf, const T* Q_bias,
T* key_cache, const T* K_bias, T* value_cache, const T* V_bias, const int* length,
T* context_buf, int batch_size, int head_num, int size_per_head, int step, int seq_len, cudaStream_t stream)
{
const int block_sz = ATTENTION_BLOCK_SIZE;
float scalar = 1.f / sqrtf(size_per_head * 1.0f);
dim3 grid(batch_size * head_num);
int cond = size_per_head * ((ATTENION_OPT)? 1:0);
switch (cond)
{
case 32:
cross_attention_kernel_opt<T, 32, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
case 64:
cross_attention_kernel_opt<T, 64, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
case 128:
cross_attention_kernel_opt<T, 128, block_sz><<<grid, block_sz, sizeof(float)*seq_len, stream>>>(
query_buf, Q_bias, key_cache, K_bias, value_cache, V_bias, length, context_buf,
batch_size, head_num, step, seq_len, scalar);
break;
default:
// default path
int block_size = 128;
if(seq_len <= 64)
block_size = 64;
else if(seq_len <= 128 && seq_len > size_per_head)
block_size = 128;
else if(seq_len > 128 && seq_len <= 256)
block_size = 256;
else if(seq_len > 256 && seq_len <= 512)
block_size = 512;
else
block_size = 1024;
if(block_size < size_per_head)
block_size = size_per_head;
assert(block_size <= 1024);
dim3 block(block_size);
int shared_size = sizeof(T) * (size_per_head + seq_len);
cross_attention_kernel<T><<<grid, block, shared_size, stream>>>(
query_buf, Q_bias,
key_cache, K_bias,
value_cache, V_bias,
length, context_buf,
batch_size,
head_num, size_per_head, step, seq_len, scalar);
}
}
/* attention with source sentence */
template<OperationType OpType_>
void OpenDecoder<OpType_>::cross_multi_head_attention(
const DataType_* from_tensor,
const DataType_* memory_tensor,
DataType_* key_mem_cache,
DataType_* value_mem_cache,
DataType_* decoder_output,
const int* length,
const int seq_len,
const int step)
{
int m = batch_size_;
int n = hidden_units_;
int k = hidden_units_;
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
//reuse the query_buf
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.query_weight.kernel, AType_, n,
from_tensor, BType_, k,
&beta,
query_buf_, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
if(step == 1)
{
m *= seq_len;
k = memory_hidden_units_;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.key_weight.kernel, AType_, n,
memory_tensor, BType_, k,
&beta,
key_mem_cache, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[1])));
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.value_weight.kernel, AType_, n,
memory_tensor, BType_, k,
&beta,
value_mem_cache, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[1])));
k = hidden_units_;
}
cross_attention_dispatch<DataType_>(
query_buf_, param_.cross_attention.query_weight.bias,
key_mem_cache, param_.cross_attention.key_weight.bias,
value_mem_cache, param_.cross_attention.value_weight.bias,
length, context_buf_, batch_size_,
head_num_, size_per_head_, step, seq_len, param_.stream);
m = batch_size_;
n = head_num_ * size_per_head_;
k = n;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param_.cross_attention.attention_output_weight.kernel, AType_, n,
context_buf_, BType_, k,
&beta,
decoder_output, CType_, n,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[0])));
}
template <typename T>
__global__
void decoder_norm1_kernel_generalize(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
T* output,
int m, int n)
{
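  // Generalized LayerNorm: one block per row (m rows total), with threads striding
  // over all n columns. Two block-wide reductions compute the mean and variance,
  // then each element is normalized and scaled with gamma/beta (eps = 1e-6).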
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
local_sum += (float)(__ldg(&input[blockIdx.x * n + i]));
}
mean = blockReduceSum<float>(local_sum);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
float local_var_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float diff = (float)(__ldg(&input[blockIdx.x * n + i])) - s_mean;
local_var_sum += diff * diff;
}
variance = blockReduceSum<float>(local_var_sum);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
for(int i = tid; i < n; i+= blockDim.x)
{
output[blockIdx.x * n + i] =
(T)((( (float)input[blockIdx.x * n + i] - s_mean) * s_variance) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
}
template <typename T>
__global__
void decoder_norm1_kernel(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
T* output,
int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = tid < n ? (float)(__ldg(&input[blockIdx.x * n + tid])) : 0.0f;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < n)
output[blockIdx.x * n + tid] =
(T)(((local_out - s_mean) * s_variance) * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void decoder_norm1_kernel(const half* __restrict input,
const half* __restrict gamma,
const half* __restrict beta,
half* output,
int m, int n)
{
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
const half2* input_ptr = (const half2*)input;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
half2* output_ptr = (half2*)output;
float local_out = 0.0f;
int id = blockIdx.x * blockDim.x + tid;
if(tid < blockDim.x)
{
local_out_fp2 = __half22float2(__ldg(&input_ptr[id]));
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
}
mean = blockReduceSum<float>(local_out);
if(tid == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < blockDim.x ?
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean)
: 0.0f);
if(tid == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < blockDim.x)
{
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
output_ptr[id] = __float22half2_rn(local_out_fp2);
}
}
template <typename T>
__global__
void decoder_norm2_kernel_generalize(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
const T* __restrict bias,
T* output, T* norm_output,
int m, int n)
{
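  // Fused residual add + bias + LayerNorm: output += input + bias (kept for the
  // next residual connection), and norm_output = LayerNorm(output) with gamma/beta.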
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float local_out = (float)(__ldg(&input[blockIdx.x * n + i]));
local_out += (float)(output[blockIdx.x * n + i]);
local_out += (float)(__ldg(&bias[i]));
output[blockIdx.x * n + i] = (T)local_out;
local_sum += local_out;
}
mean = blockReduceSum<float>(local_sum);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
float local_var_sum = 0.0f;
for(int i = tid; i < n; i+= blockDim.x)
{
float diff = (float)(__ldg(&output[blockIdx.x * n + i])) - s_mean;
local_var_sum += diff * diff;
}
variance = blockReduceSum<float>(local_var_sum);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
for(int i = tid; i < n; i+= blockDim.x)
{
norm_output[blockIdx.x * n + i] =
(T)((( (float)output[blockIdx.x * n + i] - s_mean) * s_variance) * (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
}
template <typename T>
__global__
void decoder_norm2_kernel(const T* __restrict input,
const T* __restrict gamma,
const T* __restrict beta,
const T* __restrict bias,
T* output, T* norm_output,
int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
if(tid < n)
{
local_out = (float)(__ldg(&input[blockIdx.x * n + tid]));
local_out += (float)(output[blockIdx.x * n + tid]);
local_out += (float)(__ldg(&bias[tid]));
output[blockIdx.x * n + tid] = (T)local_out;
}
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < n ? (local_out - s_mean) * (local_out - s_mean) : 0.0f);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < n)
norm_output[blockIdx.x * n + tid] =
(T)((local_out - s_mean) * s_variance * (float)(__ldg(&gamma[tid])) + (float)(__ldg(&beta[tid])));
}
template <>
__global__
void decoder_norm2_kernel(const half* __restrict input,
const half* __restrict gamma,
const half* __restrict beta,
const half* __restrict bias,
half* output, half* norm_output,
int m, int n)
{
const int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
const half2* input_ptr = (const half2*)input;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
const half2* bias_ptr = (const half2*)bias;
half2* output_ptr = (half2*)output;
half2* norm_output_ptr = (half2*)norm_output;
float local_out = 0.0f;
int id = blockIdx.x * blockDim.x + tid;
if(tid < blockDim.x)
{
output_ptr[id] = __hadd2(__hadd2(output_ptr[id], __ldg(&input_ptr[id])), __ldg(&bias_ptr[tid]));
local_out_fp2 = __half22float2(output_ptr[id]);
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
}
mean = blockReduceSum<float>(local_out);
if(tid == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>(tid < blockDim.x ?
(local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean) + (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean)
: 0.0f);
if(tid == 0)
s_variance = rsqrtf(variance / n + 1e-6);
__syncthreads();
if(tid < blockDim.x)
{
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x = (local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y = (local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
norm_output_ptr[id] = __float22half2_rn(local_out_fp2);
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::decoder_norm1(
const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
DataType_* output,
int m, int n)
{
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if(n % 32 != 0)
block.x = 1024;
block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm1_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, output, m, n);
decoder_norm1_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, output, m, n); // For gpt-3
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::decoder_norm2(
const DataType_* input,
const DataType_* gamma,
const DataType_* beta,
const DataType_* bias,
DataType_* output,
DataType_* norm_output,
int m, int n)
{
dim3 grid(m);
dim3 block(min(n, 1024));
/* For general cases, n is equal to hidden_units, e.g., 512/1024.
Since we have warp shuffle inside the code, block.x % 32 should be 0.
*/
if(n % 32 != 0)
block.x = 1024;
block.x = block.x / (4 / sizeof(DataType_)); // if using half, only need half of block.x
/* should pay attention to the rsqrt precision*/
// assert(block.x <= 1024);
// decoder_norm2_kernel<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, bias, output, norm_output, m, n);
decoder_norm2_kernel_generalize<DataType_><<<grid, block, 0, param_.stream>>>(input, gamma, beta, bias, output, norm_output, m, n); // For gpt-3
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::ffn(
const DataType_* input,
DataType_* ffn_inner,
DataType_* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type)
{
int m1 = m, k1 = n, n1 = inner_size;
DataType_ alpha = (DataType_)1.0f;
DataType_ beta = (DataType_)0.0f;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n1, m1, k1,
&alpha,
param_.ffn.intermediate_weight.kernel, AType_, n1,
input, BType_, k1,
&beta,
ffn_inner, CType_, n1,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[2])));
// dim3 grid(min(m1, 65536));
// dim3 block(min(n1 / 4, 1024));
// // TODO remove this limitation
// // assert(block.x <= 1024);
// if(activation_type == ActivationType::RELU)
// add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1);
// else if(activation_type == ActivationType::GELU)
// add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1);
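  // Launch config for the fused bias + activation kernels below: threads cover the
  // m1 x n1 activations via a grid-stride loop, and for FP16 the kernels operate on
  // half2, so the column count passed in is n1 / (4 / sizeof(DataType_))
  // (n1 for float, n1 / 2 for half).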
dim3 block(min((int)(n1 / 4 / (4 / sizeof(DataType_))), 1024));
dim3 grid(min(m1 * n1 / block.x, 65536));
if(activation_type == ActivationType::RELU)
add_bias_relu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));
else if(activation_type == ActivationType::GELU)
add_bias_gelu<DataType_><<<grid, block, 0, param_.stream>>>(ffn_inner, param_.ffn.intermediate_weight.bias, m1, n1 / (4 / sizeof(DataType_)));
int m2 = m, n2 = n, k2 = inner_size;
check_cuda_error(cublasGemmEx(param_.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n2, m2, k2,
&alpha,
param_.ffn.output_weight.kernel, AType_, n2,
ffn_inner, BType_, k2,
&beta,
output, CType_, n2,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[3])));
}
template <typename T>
__global__
void add_bias_input_kernel(T* output, const T* input, const T* bias, const int m, const int n)
{
// original kernel, which only supports cases of n <= 1024.
int id = blockIdx.x * n + threadIdx.x;
output[id] = output[id] + input[id] + __ldg(&bias[threadIdx.x]);
}
template <typename T>
__global__
void add_bias_input_kernel_generalize(T* output, const T* input, const T* bias, const int m, const int n)
{
  // TODO: generalized for GPT-3.
  // This kernel can run with any block size and grid size,
  // since the hidden dimension of GPT-3 is larger than 1024.
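  // e.g., with n = 12288 (hidden size of the largest GPT-3) and blockDim.x = 1024,
  // blocks_per_row = 12, so block b loads the bias slice for columns
  // (b % 12) * 1024 ... (b % 12) * 1024 + 1023.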
const int bid = blockIdx.x;
const int blocks_per_row = n / blockDim.x;
const int col_index = (bid % blocks_per_row) * blockDim.x + threadIdx.x;
T bias_val = __ldg(&bias[col_index]);
for(int index = bid * blockDim.x + threadIdx.x; index < m * n; index += blockDim.x * gridDim.x)
{
output[index] = output[index] + input[index] + bias_val;
}
}
template<OperationType OpType_>
void OpenDecoder<OpType_>::add_bias_input(DataType_* output, const DataType_* input, const int m, const int n)
{
dim3 grid(min(m, 65536));
dim3 block(min(n, 1024));
add_bias_input_kernel_generalize<<<grid, block, 0, param_.stream>>>(output, input, param_.ffn.output_weight.bias, m, n);
}
template void OpenDecoder<OperationType::FP32>::masked_multi_head_attention(
const float* from_tensor,
float* key_cache,
float* value_cache,
float* decoder_output,
const int step);
template void OpenDecoder<OperationType::FP16>::masked_multi_head_attention(
const half* from_tensor,
half* key_cache,
half* value_cache,
half* decoder_output,
const int step);
template void OpenDecoder<OperationType::FP32>::cross_multi_head_attention(
const float* from_tensor,
const float* memory_tensor,
float* key_mem_cache,
float* value_mem_cache,
float* decoder_output,
const int* length,
const int max_seq_len,
const int step);
template void OpenDecoder<OperationType::FP16>::cross_multi_head_attention(
const half* from_tensor,
const half* memory_tensor,
half* key_mem_cache,
half* value_mem_cache,
half* decoder_output,
const int* length,
const int max_seq_len,
const int step);
template void OpenDecoder<OperationType::FP32>::ffn(
const float* input,
float* ffn_inner,
  float* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenDecoder<OperationType::FP16>::ffn(
const half* input,
half* ffn_inner,
  half* output,
const int m,
const int inner_size,
const int n,
ActivationType activation_type);
template void OpenDecoder<OperationType::FP32>::decoder_norm1(
const float* input,
const float* gamma,
const float* beta,
float* output,
int m, int n);
template void OpenDecoder<OperationType::FP16>::decoder_norm1(
const half* input,
const half* gamma,
const half* beta,
half* output,
int m, int n);
template void OpenDecoder<OperationType::FP32>::decoder_norm2(
const float* input,
const float* gamma,
const float* beta,
const float* bias,
float* output,
float* norm_output,
int m, int n);
template void OpenDecoder<OperationType::FP16>::decoder_norm2(
const half* input,
const half* gamma,
const half* beta,
const half* bias,
half* output,
half* norm_output,
int m, int n);
template void OpenDecoder<OperationType::FP32>::add_bias_input(
float* output,
const float* input,
const int m,
const int n);
template void OpenDecoder<OperationType::FP16>::add_bias_input(
half* output,
const half* input,
const int m,
const int n);
}//namespace FasterTransformer
|
14119151f23e1b71d1c3c591e84a45cac3eee1e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
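        // mish(x) = x * tanh(softplus(x)). Since 1 - exp(-softplus(x)) == sigmoid(x),
        // the gradient computed below is
        // d/dx mish(x) = tanh(sp) + x * (1 - tanh(sp)^2) * sigmoid(x).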
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
} | 14119151f23e1b71d1c3c591e84a45cac3eee1e9.cu | #include "includes.h"
__device__ float softplus_kernel(float x, float threshold = 20) {
if (x > threshold) return x; // too large
else if (x < -threshold) return expf(x); // too small
return log1pf(expf(x));
//return logf(expf(x) + 1);
}
__global__ void gradient_array_mish_kernel(int n, float *activation_input_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
const float MISH_THRESHOLD = 20.0f;
// implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80
// implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
// log1p(x) == log(x + 1)
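        // mish(x) = x * tanh(softplus(x)). Since 1 - exp(-softplus(x)) == sigmoid(x),
        // the gradient computed below is
        // d/dx mish(x) = tanh(sp) + x * (1 - tanh(sp)^2) * sigmoid(x).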
const float inp = activation_input_gpu[i];
const float sp = softplus_kernel(inp, MISH_THRESHOLD);
const float grad_sp = -expm1f(-sp);
//const float grad_sp = 1 - expf(-sp);
const float tsp = tanh(sp);
const float grad_tsp = (1 - tsp*tsp) * grad_sp;
const float grad = inp * grad_tsp + tsp;
delta[i] *= grad;
//float x = activation_input[i];
//float d = 2 * expf(x) + expf(2 * x) + 2;
//float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
//float derivative = expf(x) * w / (d * d);
//delta[i] *= derivative;
}
} |
e0a2850d4fab0988e241acb3d995826c6da0be7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include "CImg.h"
#include "timer.hpp"
using namespace cimg_library;
using ColorType = unsigned char;
const int BLUR_BLOCK_SIDE = 16;
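// Converts the interleaved RGBRGB... layout produced by permute_axes("cxyz") into
// planar RR...GG...BB... storage: channel c of pixel i is read from src[3*i + c]
// and written to dst[i + c * numPixels].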
__global__ void unweaveKernel(ColorType* src, ColorType* dst, int numPixels)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = 3 * i;
if (i < numPixels)
{
int stride = numPixels;
dst[i] = src[j];
dst[i + stride] = src[j + 1];
dst[i + 2 * stride] = src[j + 2];
}
}
__constant__ float c_mask[5];
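// 5-tap binomial blur mask [1 4 6 4 1] / 16, stored in constant memory and filled
// from the host via hipMemcpyToSymbol before the blur kernels are launched.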
__global__ void blurKernel(ColorType* src, ColorType* blurred, int width, int height, bool vertPass)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
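    // The per-channel shared-memory tiles below are sized 16 x (16 + 4): the extra
    // 4 rows (vertical pass) or columns (horizontal pass) hold the 2-pixel halo
    // required on each side by the 5-tap mask; halo loads are clamped to the image
    // border further down.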
__shared__ ColorType cachedR[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
__shared__ ColorType cachedG[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
__shared__ ColorType cachedB[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
if (ix < width && iy < height)
{
int stride = width * height;
int tx = threadIdx.x;
int ty = threadIdx.y;
if (vertPass)
{
int index = 2 * blockDim.x + tx + ty * blockDim.x;
cachedR[index] = src[ix + iy * width];
cachedG[index] = src[ix + iy * width + stride];
cachedB[index] = src[ix + iy * width + 2*stride];
if (ty < 2 || ty >= blockDim.y - 2)
{
int haloIndex = tx + ty * blockDim.x;
int cy = iy;
if (ty < 2)
{
cy -= 2;
}
else if (ty >= blockDim.y - 2)
{
haloIndex += 4 * blockDim.x;
cy += 2;
}
cy = max(0, min(height - 1, cy));
cachedR[haloIndex] = src[ix + cy * width];
cachedG[haloIndex] = src[ix + cy * width + stride];
cachedB[haloIndex] = src[ix + cy * width + 2 * stride];
}
}
else
{
int index = tx + 2 + ty * (4 + blockDim.x);
cachedR[index] = src[ix + iy * width];
cachedG[index] = src[ix + iy * width + stride];
cachedB[index] = src[ix + iy * width + 2 * stride];
if (tx < 2 || tx >= blockDim.x - 2)
{
int haloIndex = index;
int cx = ix;
if (tx < 2)
{
haloIndex -= 2;
cx -= 2;
}
else
{
haloIndex += 2;
cx += 2;
}
cx = max(0, min(width - 1, cx));
cachedR[haloIndex] = src[cx + iy * width];
cachedG[haloIndex] = src[cx + iy * width + stride];
cachedB[haloIndex] = src[cx + iy * width + 2 * stride];
}
}
__syncthreads();
float sumR = 0.f;
float sumG = 0.f;
float sumB = 0.f;
for (int m = 0; m < 5; m++)
{
int index;
if (vertPass)
{
index = 2 * blockDim.x + tx + (ty + m - 2) * blockDim.x;
}
else
{
index = tx + m + ty * (4 + blockDim.x);
}
float currMask = c_mask[m];
sumR += currMask * cachedR[index];
sumG += currMask * cachedG[index];
sumB += currMask * cachedB[index];
}
int i = ix + iy * width;
blurred[i] = sumR;
blurred[i+stride] = sumG;
blurred[i+2*stride] = sumB;
}
}
int main()
{
Timer t;
const char* file = "cake.ppm";
CImg<ColorType> image(file);
int width = image.width();
int height = image.height();
// CImg already stores image data as RR...GG...BB...
// this function call changes the layout to RGBRGBRGB...
// but the image can't be displayed now. The later call to the same function
// restores the image to a displayable state.
// source: https://www.codefull.org/2014/11/cimg-does-not-store-pixels-in-the-interleaved-format/
image.permute_axes("cxyz");
t.restart();
float mask[5];
mask[0] = mask[4] = 1.f / 16.f;
mask[1] = mask[3] = 4.f / 16.f;
mask[2] = 6.f / 16.f;
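// 5-tap binomial mask [1 4 6 4 1] / 16; it is applied in a vertical pass and
// then a horizontal pass as a separable approximation of a Gaussian blur.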
size_t imgMemSize = sizeof(ColorType)*image.size();
size_t maskMemSize = sizeof(float) * 5;
ColorType* d_interleavedImg = nullptr;
ColorType* d_img = nullptr;
ColorType* d_blurImg = nullptr;
hipMalloc((void**)&d_interleavedImg, imgMemSize);
hipMalloc((void**)&d_img, imgMemSize);
hipMalloc((void**)&d_blurImg, imgMemSize);
hipMemcpy(d_interleavedImg, image.data(), imgMemSize, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_mask, mask, maskMemSize);
int size = image.size() / 3;
int unweaveBlockSize = 1024;
int unweaveNumBlocks = ::ceil(double(size) / unweaveBlockSize);
hipLaunchKernelGGL(( unweaveKernel), dim3(unweaveNumBlocks), dim3(unweaveBlockSize), 0, 0, d_interleavedImg, d_img, size);
image.permute_axes("yzcx");
//hipDeviceSynchronize();
dim3 blurBlockSize(BLUR_BLOCK_SIDE, BLUR_BLOCK_SIDE);
dim3 blurNumBlocks(::ceil(double(width) / blurBlockSize.x),
::ceil(double(height) / blurBlockSize.y));
hipLaunchKernelGGL(( blurKernel), dim3(blurNumBlocks), dim3(blurBlockSize), 0, 0, d_img, d_blurImg, width, height, true);
std::swap(d_img, d_blurImg);
hipLaunchKernelGGL(( blurKernel), dim3(blurNumBlocks), dim3(blurBlockSize), 0, 0, d_img, d_blurImg, width, height, false);
//hipDeviceSynchronize();
hipMemcpy(image.data(), d_blurImg, imgMemSize, hipMemcpyDeviceToHost);
std::cout << "elapsed: " << t.elapsed() << "\n";
/*
CImg<ColorType> original(file);
CImgDisplay display1(original, "original");
CImgDisplay display2(image, "blurred");
while (true)
{
display1.wait();
display2.wait();
}
*/
return 0;
} | e0a2850d4fab0988e241acb3d995826c6da0be7e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include "CImg.h"
#include "timer.hpp"
using namespace cimg_library;
using ColorType = unsigned char;
const int BLUR_BLOCK_SIDE = 16;
__global__ void unweaveKernel(ColorType* src, ColorType* dst, int numPixels)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = 3 * i;
if (i < numPixels)
{
int stride = numPixels;
dst[i] = src[j];
dst[i + stride] = src[j + 1];
dst[i + 2 * stride] = src[j + 2];
}
}
__constant__ float c_mask[5];
__global__ void blurKernel(ColorType* src, ColorType* blurred, int width, int height, bool vertPass)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
__shared__ ColorType cachedR[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
__shared__ ColorType cachedG[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
__shared__ ColorType cachedB[BLUR_BLOCK_SIDE*(BLUR_BLOCK_SIDE + 4)];
if (ix < width && iy < height)
{
int stride = width * height;
int tx = threadIdx.x;
int ty = threadIdx.y;
if (vertPass)
{
int index = 2 * blockDim.x + tx + ty * blockDim.x;
cachedR[index] = src[ix + iy * width];
cachedG[index] = src[ix + iy * width + stride];
cachedB[index] = src[ix + iy * width + 2*stride];
if (ty < 2 || ty >= blockDim.y - 2)
{
int haloIndex = tx + ty * blockDim.x;
int cy = iy;
if (ty < 2)
{
cy -= 2;
}
else if (ty >= blockDim.y - 2)
{
haloIndex += 4 * blockDim.x;
cy += 2;
}
cy = max(0, min(height - 1, cy));
cachedR[haloIndex] = src[ix + cy * width];
cachedG[haloIndex] = src[ix + cy * width + stride];
cachedB[haloIndex] = src[ix + cy * width + 2 * stride];
}
}
else
{
int index = tx + 2 + ty * (4 + blockDim.x);
cachedR[index] = src[ix + iy * width];
cachedG[index] = src[ix + iy * width + stride];
cachedB[index] = src[ix + iy * width + 2 * stride];
if (tx < 2 || tx >= blockDim.x - 2)
{
int haloIndex = index;
int cx = ix;
if (tx < 2)
{
haloIndex -= 2;
cx -= 2;
}
else
{
haloIndex += 2;
cx += 2;
}
cx = max(0, min(width - 1, cx));
cachedR[haloIndex] = src[cx + iy * width];
cachedG[haloIndex] = src[cx + iy * width + stride];
cachedB[haloIndex] = src[cx + iy * width + 2 * stride];
}
}
__syncthreads();
float sumR = 0.f;
float sumG = 0.f;
float sumB = 0.f;
for (int m = 0; m < 5; m++)
{
int index;
if (vertPass)
{
index = 2 * blockDim.x + tx + (ty + m - 2) * blockDim.x;
}
else
{
index = tx + m + ty * (4 + blockDim.x);
}
float currMask = c_mask[m];
sumR += currMask * cachedR[index];
sumG += currMask * cachedG[index];
sumB += currMask * cachedB[index];
}
int i = ix + iy * width;
blurred[i] = sumR;
blurred[i+stride] = sumG;
blurred[i+2*stride] = sumB;
}
}
int main()
{
Timer t;
const char* file = "cake.ppm";
CImg<ColorType> image(file);
int width = image.width();
int height = image.height();
// CImg already stores image data as RR...GG...BB...
// this function call changes the layout to RGBRGBRGB...
// but the image can't be displayed now. The later call to the same function
// restores the image to a displayable state.
// source: https://www.codefull.org/2014/11/cimg-does-not-store-pixels-in-the-interleaved-format/
image.permute_axes("cxyz");
t.restart();
float mask[5];
mask[0] = mask[4] = 1.f / 16.f;
mask[1] = mask[3] = 4.f / 16.f;
mask[2] = 6.f / 16.f;
size_t imgMemSize = sizeof(ColorType)*image.size();
size_t maskMemSize = sizeof(float) * 5;
ColorType* d_interleavedImg = nullptr;
ColorType* d_img = nullptr;
ColorType* d_blurImg = nullptr;
cudaMalloc((void**)&d_interleavedImg, imgMemSize);
cudaMalloc((void**)&d_img, imgMemSize);
cudaMalloc((void**)&d_blurImg, imgMemSize);
cudaMemcpy(d_interleavedImg, image.data(), imgMemSize, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_mask, mask, maskMemSize);
int size = image.size() / 3;
int unweaveBlockSize = 1024;
int unweaveNumBlocks = std::ceil(double(size) / unweaveBlockSize);
unweaveKernel<<<unweaveNumBlocks, unweaveBlockSize>>>(d_interleavedImg, d_img, size);
image.permute_axes("yzcx");
//cudaDeviceSynchronize();
dim3 blurBlockSize(BLUR_BLOCK_SIDE, BLUR_BLOCK_SIDE);
dim3 blurNumBlocks(std::ceil(double(width) / blurBlockSize.x),
std::ceil(double(height) / blurBlockSize.y));
blurKernel<<<blurNumBlocks, blurBlockSize>>>(d_img, d_blurImg, width, height, true);
std::swap(d_img, d_blurImg);
blurKernel<<<blurNumBlocks, blurBlockSize>>>(d_img, d_blurImg, width, height, false);
//cudaDeviceSynchronize();
cudaMemcpy(image.data(), d_blurImg, imgMemSize, cudaMemcpyDeviceToHost);
std::cout << "elapsed: " << t.elapsed() << "\n";
/*
CImg<ColorType> original(file);
CImgDisplay display1(original, "original");
CImgDisplay display2(image, "blurred");
while (true)
{
display1.wait();
display2.wait();
}
*/
return 0;
} |
c188e73ab526cebd2a8068fd6e5cdf1932660956.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#include <raft/distance/detail/pairwise_matrix/dispatch_sm80.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
hipStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_exp_distance_op, double, double, double, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
| c188e73ab526cebd2a8068fd6e5cdf1932660956.cu | /*
* Copyright (c) 2021-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by dispatch_00_generate.py
*
* Make changes there and run in this directory:
*
* > python dispatch_00_generate.py
*
*/
#include <raft/core/operators.hpp> // raft::identity_op
#include <raft/distance/detail/distance_ops/all_ops.cuh> // ops::*
#include <raft/distance/detail/pairwise_matrix/dispatch-inl.cuh> // dispatch
#include <raft/distance/detail/pairwise_matrix/dispatch_sm60.cuh>
#include <raft/distance/detail/pairwise_matrix/dispatch_sm80.cuh>
#define instantiate_raft_distance_detail_pairwise_matrix_dispatch( \
OpT, DataT, AccT, OutT, FinOpT, IdxT) \
template void raft::distance::detail:: \
pairwise_matrix_dispatch<OpT<DataT, AccT, IdxT>, DataT, AccT, OutT, FinOpT, IdxT>( \
OpT<DataT, AccT, IdxT> distance_op, \
IdxT m, \
IdxT n, \
IdxT k, \
const DataT* x, \
const DataT* y, \
const DataT* x_norm, \
const DataT* y_norm, \
OutT* out, \
FinOpT fin_op, \
cudaStream_t stream, \
bool is_row_major)
instantiate_raft_distance_detail_pairwise_matrix_dispatch(
raft::distance::detail::ops::l2_exp_distance_op, double, double, double, raft::identity_op, int);
#undef instantiate_raft_distance_detail_pairwise_matrix_dispatch
|
dfcfed8349c28d5db1a89142c78dc773be085a3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <cstdio>
using namespace std;
#define SIZE 1024 * 1024
const int N = 1024;
float h_A[SIZE];
float h_B[SIZE];
float h_C[SIZE];
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
float tmpSum = 0;
if (row < N && col < N) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < N; i++)
tmpSum += A[row * N + i] * B[i * N + col];
}
C[row * N + col] = tmpSum;
}
void matrixMultiplication(float *A, float *B, float *C, int N){
// declare the number of blocks per grid and the number of threads per block
// use 1 to 1024 threads per block
dim3 threadsPerBlock(1, 1);
dim3 blocksPerGrid(1024, 1024);
hipLaunchKernelGGL(( matrixMultiplicationKernel), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, A, B, C, N);
}
int main(void)
{
float *d_A, *d_B, *d_C;
for(int i = 0; i < N; i++) {
h_A[i * N + i] = 1;
h_B[i * N + i] = 2;
}
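// h_A is the identity matrix and h_B is 2*I, so the printed h_C should be
// 2 on the diagonal and 0 elsewhere.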
hipMalloc((void **) &d_A, SIZE * sizeof(float));
hipMalloc((void **) &d_B, SIZE * sizeof(float));
hipMalloc((void **) &d_C, SIZE * sizeof(float));
hipMemcpy(d_A, h_A, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
matrixMultiplication(d_A, d_B, d_C, N);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, SIZE * sizeof(float), hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout << h_C[i * N + j] << " ";
}
cout << endl;
}
printf("Time for the kernel: %fms\n", time);
return 0;
}
| dfcfed8349c28d5db1a89142c78dc773be085a3f.cu | #include <math.h>
#include <iostream>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <cstdio>
using namespace std;
#define SIZE 1024 * 1024
const int N = 1024;
float h_A[SIZE];
float h_B[SIZE];
float h_C[SIZE];
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
float tmpSum = 0;
if (row < N && col < N) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < N; i++)
tmpSum += A[row * N + i] * B[i * N + col];
}
C[row * N + col] = tmpSum;
}
void matrixMultiplication(float *A, float *B, float *C, int N){
// declare the number of blocks per grid and the number of threads per block
// use 1 to 1024 threads per block
dim3 threadsPerBlock(1, 1);
dim3 blocksPerGrid(1024, 1024);
matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N);
}
int main(void)
{
float *d_A, *d_B, *d_C;
for(int i = 0; i < N; i++) {
h_A[i * N + i] = 1;
h_B[i * N + i] = 2;
}
cudaMalloc((void **) &d_A, SIZE * sizeof(float));
cudaMalloc((void **) &d_B, SIZE * sizeof(float));
cudaMalloc((void **) &d_C, SIZE * sizeof(float));
cudaMemcpy(d_A, h_A, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
matrixMultiplication(d_A, d_B, d_C, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout << h_C[i * N + j] << " ";
}
cout << endl;
}
printf("Time for the kernel: %fms\n", time);
return 0;
}
|
52025def7d30ca064fd75c54c6b4f2d3ed8bbc68.hip | // !!! This is a file automatically generated by hipify!!!
// Ref:https://github.com/PacktPublishing/Hands-On-GPU-Accelerated-Computer-Vision-with-OpenCV-and-CUDA/blob/master/Chapter2/02_variable_addition_reference.cu
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void gpuAdd(int* d_a, int* d_b, int* d_c) {
*d_c = *d_a + *d_b;
}
int main() {
int h_a, h_b, h_c;
int *d_a, *d_b, *d_c;
h_a = 1;
h_b = 4;
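// Expected result: h_c == 5 (1 + 4) once the kernel has run and the copy back completes.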
hipMalloc((void**)&d_a, sizeof(int));
hipMalloc((void**)&d_b, sizeof(int));
hipMalloc((void**)&d_c, sizeof(int));
hipMemcpy(d_a, &h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, &h_b, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpuAdd), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&h_c, d_c, sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);

return 0;
} | 52025def7d30ca064fd75c54c6b4f2d3ed8bbc68.cu | // Ref:https://github.com/PacktPublishing/Hands-On-GPU-Accelerated-Computer-Vision-with-OpenCV-and-CUDA/blob/master/Chapter2/02_variable_addition_reference.cu
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void gpuAdd(int* d_a, int* d_b, int* d_c) {
*d_c = *d_a + *d_b;
}
int main() {
int h_a, h_b, h_c;
int *d_a, *d_b, *d_c;
h_a = 1;
h_b = 4;
cudaMalloc((void**)&d_a, sizeof(int));
cudaMalloc((void**)&d_b, sizeof(int));
cudaMalloc((void**)&d_c, sizeof(int));
cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &h_b, sizeof(int), cudaMemcpyHostToDevice);
gpuAdd<<<1, 1>>>(d_a, d_b, d_c);
cudaMemcpy(&h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
4bf609717bcf314128f0021b2a533a3f3d1a5d1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
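// One thread handles one element; the i < lengthA check makes it safe to round the grid size up.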
extern "C"
{
__global__ void vmultbang_32(const int lengthA, const float alpha, const float *a, const float *b, float *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
c[i] = alpha*a[i] * b[i];
}
}
} | 4bf609717bcf314128f0021b2a533a3f3d1a5d1f.cu | // filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C"
{
__global__ void vmultbang_32(const int lengthA, const float alpha, const float *a, const float *b, float *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
c[i] = alpha*a[i] * b[i];
}
}
} |
7e97a74b5a5b239d3d094d56b01309d033b1a38a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GaussianKernelSimple.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
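// For each of the first matrix_len sizes and all 20 block shapes the kernel gets
// one synchronized launch plus 10 warm-up launches before 1000 launches are timed.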
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint8_t *src = NULL;
hipMalloc(&src, XSIZE*YSIZE);
uint8_t *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GaussianKernelSimple), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,width,height,step);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GaussianKernelSimple), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,width,height,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GaussianKernelSimple), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,width,height,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7e97a74b5a5b239d3d094d56b01309d033b1a38a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GaussianKernelSimple.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const uint8_t *src = NULL;
cudaMalloc(&src, XSIZE*YSIZE);
uint8_t *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GaussianKernelSimple<<<gridBlock,threadBlock>>>(src,dst,width,height,step);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GaussianKernelSimple<<<gridBlock,threadBlock>>>(src,dst,width,height,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GaussianKernelSimple<<<gridBlock,threadBlock>>>(src,dst,width,height,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0632aaa8cc5b406c8efaec6ec4428f375436be5e.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <hip/hip_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <vector>
#include <ctime>
#include <iomanip>
#include "Runner.h"
#define BLOCK_DIM 1024
//#define LINES 4096
#define LINES 8196
#define ITEM_SIZE 4096
#define LINE_SIZE 7 * ITEM_SIZE
#define EMPTY_CHAR 205
#define RESULT_VALUES 5
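// Every parsed line yields RESULT_VALUES ints: Id, URL verdict, URL length,
// UserId and Agent length; these are the columns of the output CSV.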
hipDeviceProp_t deviceProp = hipDeviceProp_t();
using namespace std;
vector<string> split(const string& str, const string& delim)
{
vector<string> tokens;
size_t prev = 0, pos = 0;
do
{
pos = str.find(delim, prev);
if (pos == string::npos) pos = str.length();
string token = str.substr(prev, pos - prev);
if (!token.empty()) tokens.push_back(token);
prev = pos + delim.length();
} while (pos < str.length() && prev < str.length());
return tokens;
}
ifstream* openFile(const char* file)
{
ifstream* stream = new ifstream();
stream->open(file);
return stream;
}
ofstream* writeFile(const char* file)
{
ofstream* stream = new ofstream();
// stream->open(file, ofstream::app); // Append
stream->open(file, ofstream::trunc); // Rewrite
return stream;
}
void closeFile(ifstream* stream)
{
if (stream->is_open())
stream->close();
}
void closeFile(ofstream* stream)
{
if (stream->is_open())
stream->close();
}
unsigned int skipFile(ifstream* stream, const unsigned int lines)
{
if (!stream->is_open())
throw exception("Stream is not open!");
string line;
bool success = true;
unsigned int i = 0;
for (; i < lines && success; i++)
{
success = !getline(*stream, line).eof();
}
return i;
}
unsigned int readFile(ifstream* stream, unsigned char* data, const unsigned int lines)
{
if (!stream->is_open())
throw exception("Stream is not open!");
string line;
bool success = true;
unsigned int len = 0;
for (unsigned int i = 0; i < lines && success; i++)
{
success = !getline(*stream, line).eof();
if (success)
{
memcpy(&data[i * LINE_SIZE], line.c_str(), line.size() * sizeof(char));
len++;
}
}
return len;
}
void appendFile(ofstream* stream, int* data, const unsigned int lines)
{
if (lines == 0)
return;
if (!stream->is_open())
throw exception("Stream is not open!");
string write;
for (unsigned int i = 0; i < lines; i++)
{
unsigned int o = i * RESULT_VALUES;
for (unsigned int j = 0; j < RESULT_VALUES - 1; j++)
{
write += to_string(data[o + j]) + ",";
}
write += to_string(data[o + RESULT_VALUES - 1]) + "\n";
}
stream->write(write.c_str(), write.size());
}
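// evalUrls writes a verdict into results[offset * RESULT_VALUES + 1]:
// -2 = URL contains ".php", ".asp" or "&" (treated as dangerous),
// 1 = matches an entry in the safe list, 0 = matches the neutral list,
// -1 = no match in either list.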
__global__ void evalUrls(
unsigned char* urls,
unsigned int* urls_lenghts,
unsigned char* safe,
unsigned char* neutral,
unsigned int urls_lenght,
unsigned int safe_lenght,
unsigned int neutral_lenght,
int* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if(offset < urls_lenght)
{
char ok = -2;
int o = offset * ITEM_SIZE;
int len = urls_lenghts[offset];
bool php = false, asp = false, amp = false;
const char* sphp = ".php", * sasp = ".asp", * samp = "&";
int pphp = 0, pasp = 0, pamp = 0;
// dangerous chars
for (int j = 0; j < len; j++) {
char c = urls[o + j];
php = sphp[pphp] == c;
pphp = php ? pphp + 1 : 0;
asp = sasp[pasp] == c;
pasp = asp ? pasp + 1 : 0;
amp = samp[pamp] == c;
pamp = amp ? pamp + 1 : 0;
if (pphp == 4 || pasp == 4 || pamp == 1)
break;
/*if (c <= 0)
{
php = asp = amp = false;
break;
}*/
}
php = pphp == 4;
asp = pasp == 4;
amp = pamp == 1;
if (!(php || asp || amp))
{
ok = -1;
// safe urls
for (int i = 0; ok == -1 && i < safe_lenght; i++)
{
int off = i * ITEM_SIZE;
bool eq = true;
for (int j = 0; eq && j < len; j++)
eq = safe[off + j] == urls[o + j];
eq = eq && safe[off + len] == EMPTY_CHAR;
ok = (eq ? 1 : -1);
}
// neutral urls
for (int i = 0; ok == -1 && i < neutral_lenght; i++)
{
int off = i * ITEM_SIZE;
bool eq = true;
for (int j = 0; eq && j < len; j++)
eq = neutral[off + j] == urls[o + j];
eq = eq && neutral[off + len] == EMPTY_CHAR;
ok = (eq ? 0 : -1);
}
}
results[offset * RESULT_VALUES + 1] = ok;
//offset += skip;
}
}
__global__ void evalAgents(
char* agents,
const unsigned long agents_lenght,
int* length)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if(offset < agents_lenght)
{
int o = offset * 4096;
// todo
//offset += skip;
}
}
__global__ void prepareData(
unsigned char* lines,
const unsigned long lines_lenght,
unsigned int* ids,
unsigned char* urls,
unsigned int* urls_length,
tm* createds,
unsigned int* user_ids,
unsigned char* ips,
unsigned int* ips_length,
unsigned char* agents,
unsigned int* agents_length,
int* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if (offset < lines_lenght)
{
int ol = offset * LINE_SIZE;
int oi = offset * ITEM_SIZE;
int or = offset * RESULT_VALUES;
char spliter = ',';
char zero = '0';
char nine = '9';
char escape = '"';
int val = 0;
int started = 0;
// Parse line
unsigned int i = 0;
unsigned char c = 0;
unsigned char oc = 0;
// get Id
for (; i < LINE_SIZE && c != spliter; i++)
{
c = lines[ol + i];
val = c >= zero && c <= nine ? val * 10 + c - zero : val;
}
ids[offset] = val;
results[or ] = val;
val = 0;
c = 0;
// get Url
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
urls[oi + i - started] = c;
else if (c == escape)
started = i+1;
}
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
urls[oi + j] = EMPTY_CHAR;*/
urls_length[offset] = i - started - 2;
results[or + 2] = i - started - 2;
c = 0;
started = 0;
// get Date
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
// ToDo
}
c = 0;
// get User Id
for (; i < LINE_SIZE && c != spliter; i++)
{
c = lines[ol + i];
val = c >= zero && c <= nine ? val * 10 + c - zero : val;
}
user_ids[offset] = val;
results[or + 3] = val;
c = 0;
// get Ip
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
ips[oi + i - started] = c;
else if (c == escape)
started = i + 1;
}
if (started == 0)
{
/*for (unsigned int j = 0; j < ITEM_SIZE; j++)
ips[oi + j] = EMPTY_CHAR;*/
ips_length[offset] = 0;
}
else
{
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
ips[oi + j] = EMPTY_CHAR;*/
ips_length[offset] = i - started - 2;
}
c = 0;
started = 0;
// get Agent
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
agents[oi + i - started] = c;
else if (c == escape)
started = i + 1;
}
if (started == 0)
{
/*for (unsigned int j = 0; j < ITEM_SIZE; j++)
agents[oi + j] = EMPTY_CHAR;*/
agents_length[offset] = 0;
results[or + 4] = 0;
}
else
{
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
agents[oi + j] = EMPTY_CHAR;*/
agents_length[offset] = i - started - 2;
results[or + 4] = i - started - 2;
}
//offset += skip;
}
}
char* mallocCharVec(vector<char*>* src)
{
char* dst;
checkCudaErrors(hipMalloc(&dst, src->size() * ITEM_SIZE * sizeof(char)));
char* hd = (char*)malloc(src->size() * ITEM_SIZE * sizeof(char));
for (int i = 0; i < src->size(); i++)
memcpy(&(hd[i * ITEM_SIZE]), src->at(i), ITEM_SIZE);
checkCudaErrors(hipMemcpy(dst, hd, src->size() * ITEM_SIZE * sizeof(char), hipMemcpyHostToDevice));
delete hd;
return dst;
}
unsigned char* fileToCuda(const char* file_name, unsigned int* length)
{
// Read file to RAM
auto file = openFile(file_name);
unsigned char* data = (unsigned char*)malloc(LINES * ITEM_SIZE);
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(file, data, LINES);
sum += lines;
if (lines >= LINES)
data = (unsigned char*)realloc(data, sum * ITEM_SIZE);
}
closeFile(file);
delete file;
*length = sum;
// Move from RAM to GPU
unsigned char* cuda_array;
checkCudaErrors(hipMalloc((void**)&cuda_array, sum * ITEM_SIZE));
checkCudaErrors(hipMemcpy(cuda_array, data, sum * ITEM_SIZE, hipMemcpyHostToDevice));
// Free RAM
free(data);
return cuda_array;
}
void computeVectors()
{
// Load safe urls
unsigned int safe_length;
unsigned char* d_safe = fileToCuda("D:/Documents/Projekty/Škola/PA2/project/assets/safe_urls.txt", &safe_length);
//checkDeviceMatrix(d_safe, ITEM_SIZE, 20, 60, "%d ");
// Load neutral urls
unsigned int neutral_length;
unsigned char* d_neutral = fileToCuda("D:/Documents/Projekty/Škola/PA2/project/assets/neutral_urls.txt", &neutral_length);
// create stream
hipStream_t streamA, streamB;
checkCudaErrors(hipStreamCreate(&streamA));
checkCudaErrors(hipStreamCreate(&streamB));
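// Streams A and B double-buffer the pipeline: while one batch is parsed and
// evaluated on the GPU, the host reads the next batch from disk and appends
// the previously completed results to the output CSV.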
// Host allocations
unsigned char* dataA;
checkCudaErrors(hipHostMalloc((void**)&dataA, LINES * LINE_SIZE, hipHostMallocDefault));
unsigned char* dataB;
checkCudaErrors(hipHostMalloc((void**)&dataB, LINES * LINE_SIZE, hipHostMallocDefault));
int* h_dataA;
checkCudaErrors(hipHostMalloc((void**)&h_dataA, LINES * RESULT_VALUES * sizeof(int), hipHostMallocDefault));
int* h_dataB;
checkCudaErrors(hipHostMalloc((void**)&h_dataB, LINES * RESULT_VALUES * sizeof(int), hipHostMallocDefault));
// Cuda allocations
unsigned char* d_linesA;
unsigned char* d_linesB;
checkCudaErrors(hipMalloc((void**)&d_linesA, LINES * LINE_SIZE));
checkCudaErrors(hipMalloc((void**)&d_linesB, LINES * LINE_SIZE));
// Cuda parsed data
unsigned int* d_idsA;
unsigned int* d_user_idsA;
unsigned char* d_urlsA;
unsigned char* d_ipsA;
unsigned char* d_agentsA;
unsigned int* d_urls_lengthA;
unsigned int* d_ips_lengthA;
unsigned int* d_agents_lengthA;
tm* d_createdsA;
checkCudaErrors(hipMalloc((void**)&d_idsA, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_user_idsA, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_urlsA, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_ipsA, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_agentsA, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_createdsA, LINES * sizeof(tm)));
checkCudaErrors(hipMalloc((void**)&d_urls_lengthA, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_ips_lengthA, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_agents_lengthA, LINES * sizeof(unsigned int)));
unsigned int* d_idsB;
unsigned int* d_user_idsB;
unsigned char* d_urlsB;
unsigned char* d_ipsB;
unsigned char* d_agentsB;
unsigned int* d_urls_lengthB;
unsigned int* d_ips_lengthB;
unsigned int* d_agents_lengthB;
tm* d_createdsB;
checkCudaErrors(hipMalloc((void**)&d_idsB, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_user_idsB, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_urlsB, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_ipsB, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_agentsB, LINES * ITEM_SIZE));
checkCudaErrors(hipMalloc((void**)&d_createdsB, LINES * sizeof(tm)));
checkCudaErrors(hipMalloc((void**)&d_urls_lengthB, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_ips_lengthB, LINES * sizeof(unsigned int)));
checkCudaErrors(hipMalloc((void**)&d_agents_lengthB, LINES * sizeof(unsigned int)));
// Cuda results
int* d_resultsA;
int* d_resultsB;
// Id, Url Danger, Url len, UserId, Agent Length
checkCudaErrors(hipMalloc((void**)&d_resultsA, LINES * RESULT_VALUES * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_resultsB, LINES * RESULT_VALUES * sizeof(int)));
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
auto results = writeFile("D:/Documents/Projekty/Škola/PA2/project/assets/results.csv");
// Skip/Add header
skipFile(requests, 1);
if (results->is_open())
results->write("Id,UrlDanger,UrlLen,UserId,AgentLen\n", 36);
clock_t start = clock();
// Stream FILE -> RAM -> GPU -> RAM -> FILE
int lines = LINES;
int linesA = 0;
int linesB = 0;
int dim = 0;
int sum = 0;
while (lines >= LINES)
{
// ----------------- Stream A ----------------------------------
lines = readFile(requests, dataA, LINES);
dim = (lines + BLOCK_DIM - 1) / BLOCK_DIM;
checkCudaErrors(hipStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);
/*hipStreamSynchronize(streamA);
appendFile(results, h_dataA, linesA);*/
//checkDeviceMatrix(d_resultsA, RESULT_VALUES * sizeof(int), 10, 5, "%d ");
//checkDeviceMatrix(dataA, LINE_SIZE * sizeof(char), 10, 40, "%c");
//checkDeviceMatrix(d_idsA, sizeof(int), 10, 1, "%d");
//checkHostMatrix(h_dataA, RESULT_VALUES * sizeof(int), 10, 5, "%d ");
hipMemcpyAsync(d_linesA, dataA, lines * LINE_SIZE, hipMemcpyHostToDevice, streamA);
hipLaunchKernelGGL(( prepareData), dim3(dim), dim3(BLOCK_DIM), 0, streamA, d_linesA, lines, d_idsA, d_urlsA, d_urls_lengthA, d_createdsA, d_user_idsA, d_ipsA, d_ips_lengthA, d_agentsA, d_agents_lengthA, d_resultsA);
hipLaunchKernelGGL(( evalUrls), dim3(dim), dim3(BLOCK_DIM), 0, streamA, d_urlsA, d_urls_lengthA, d_safe, d_neutral, lines, safe_length, neutral_length, d_resultsA);
hipMemcpyAsync(h_dataA, d_resultsA, lines * RESULT_VALUES * sizeof(int), hipMemcpyDeviceToHost, streamA);
linesA = lines;
sum += lines;
//// ----------------- Stream B ----------------------------------
lines = readFile(requests, dataB, LINES);
dim = (lines + BLOCK_DIM - 1) / BLOCK_DIM;
checkCudaErrors(hipStreamSynchronize(streamA));
appendFile(results, h_dataA, linesA);
/*checkCudaErrors(hipStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);*/
checkCudaErrors(hipMemcpyAsync(d_linesB, dataB, lines * LINE_SIZE, hipMemcpyHostToDevice, streamB));
hipLaunchKernelGGL(( prepareData), dim3(dim), dim3(BLOCK_DIM), 0, streamB, d_linesB, lines, d_idsB, d_urlsB, d_urls_lengthB, d_createdsB, d_user_idsB, d_ipsB, d_ips_lengthB, d_agentsB, d_agents_lengthB, d_resultsB);
hipLaunchKernelGGL(( evalUrls), dim3(dim), dim3(BLOCK_DIM), 0, streamB, d_urlsB, d_urls_lengthB, d_safe, d_neutral, lines, safe_length, neutral_length, d_resultsB);
checkCudaErrors(hipMemcpyAsync(h_dataB, d_resultsB, lines * RESULT_VALUES * sizeof(int), hipMemcpyDeviceToHost, streamB));
linesB = lines;
sum += lines;
printf("\r%d", sum);
}
printf("\n");
// Append last data
/*checkCudaErrors(hipStreamSynchronize(streamA));
appendFile(results, h_dataA, linesA);
checkCudaErrors(hipStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);*/
closeFile(requests);
delete requests;
closeFile(results);
delete results;
hipStreamSynchronize(streamA);
hipStreamSynchronize(streamB);
hipStreamDestroy(streamA);
hipStreamDestroy(streamB);
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
hipFree(d_linesA);
hipFree(d_linesB);
hipFree(d_idsA);
hipFree(d_user_idsA);
hipFree(d_urlsA);
hipFree(d_ipsA);
hipFree(d_agentsA);
hipFree(d_urls_lengthA);
hipFree(d_ips_lengthA);
hipFree(d_agents_lengthA);
hipFree(d_createdsA);
hipFree(d_idsB);
hipFree(d_user_idsB);
hipFree(d_urlsB);
hipFree(d_ipsB);
hipFree(d_agentsB);
hipFree(d_urls_lengthB);
hipFree(d_ips_lengthB);
hipFree(d_agents_lengthB);
hipFree(d_createdsB);
hipHostFree(dataA);
hipHostFree(dataB);
hipHostFree(h_dataA);
hipHostFree(h_dataB);
}
void readCPUBenchmark()
{
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
unsigned char* data;
hipHostMalloc((void**)&data, LINES * LINE_SIZE, hipHostMallocDefault);
clock_t start = clock();
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(requests, data, LINES);
sum += lines;
}
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
closeFile(requests);
delete requests;
}
void readCopyCPUBenchmark()
{
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
// create stream
hipStream_t streamA, streamB;
hipStreamCreate(&streamA);
hipStreamCreate(&streamB);
unsigned char* dataA;
hipHostMalloc((void**)&dataA, LINES * LINE_SIZE, hipHostMallocDefault);
unsigned char* dataB;
hipHostMalloc((void**)&dataB, LINES * LINE_SIZE, hipHostMallocDefault);
unsigned char* d_linesA;
hipMalloc((void**)&d_linesA, LINE_SIZE);
unsigned char* d_linesB;
hipMalloc((void**)&d_linesB, LINE_SIZE);
clock_t start = clock();
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(requests, dataA, LINES);
hipMemcpyAsync(d_linesA, dataA, lines * LINE_SIZE, hipMemcpyHostToDevice, streamA);
sum += lines;
lines = readFile(requests, dataB, LINES);
hipMemcpyAsync(d_linesB, dataB, lines * LINE_SIZE, hipMemcpyHostToDevice, streamB);
sum += lines;
}
hipStreamSynchronize(streamA);
hipStreamSynchronize(streamB);
hipStreamDestroy(streamA);
hipStreamDestroy(streamB);
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
hipFree(d_linesA);
hipFree(d_linesB);
hipHostFree(dataA);
hipHostFree(dataB);
closeFile(requests);
delete requests;
}
int main(int argc, char* argv[])
{
initializeCUDA(deviceProp);
//readCPUBenchmark();
//readCopyCPUBenchmark();
computeVectors();
}
| 0632aaa8cc5b406c8efaec6ec4428f375436be5e.cu | // includes, cuda
#include <cuda_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <vector>
#include <ctime>
#include <iomanip>
#include "Runner.h"
#define BLOCK_DIM 1024
//#define LINES 4096
#define LINES 8196
#define ITEM_SIZE 4096
#define LINE_SIZE 7 * ITEM_SIZE
#define EMPTY_CHAR 205
#define RESULT_VALUES 5
cudaDeviceProp deviceProp = cudaDeviceProp();
using namespace std;
vector<string> split(const string& str, const string& delim)
{
vector<string> tokens;
size_t prev = 0, pos = 0;
do
{
pos = str.find(delim, prev);
if (pos == string::npos) pos = str.length();
string token = str.substr(prev, pos - prev);
if (!token.empty()) tokens.push_back(token);
prev = pos + delim.length();
} while (pos < str.length() && prev < str.length());
return tokens;
}
ifstream* openFile(const char* file)
{
ifstream* stream = new ifstream();
stream->open(file);
return stream;
}
ofstream* writeFile(const char* file)
{
ofstream* stream = new ofstream();
// stream->open(file, ofstream::app); // Append
stream->open(file, ofstream::trunc); // Rewrite
return stream;
}
void closeFile(ifstream* stream)
{
if (stream->is_open())
stream->close();
}
void closeFile(ofstream* stream)
{
if (stream->is_open())
stream->close();
}
unsigned int skipFile(ifstream* stream, const unsigned int lines)
{
if (!stream->is_open())
throw exception("Stream is not open!");
string line;
bool success = true;
unsigned int i = 0;
for (; i < lines && success; i++)
{
success = !getline(*stream, line).eof();
}
return i;
}
unsigned int readFile(ifstream* stream, unsigned char* data, const unsigned int lines)
{
if (!stream->is_open())
throw exception("Stream is not open!");
string line;
bool success = true;
unsigned int len = 0;
for (unsigned int i = 0; i < lines && success; i++)
{
success = !getline(*stream, line).eof();
if (success)
{
memcpy(&data[i * LINE_SIZE], line.c_str(), line.size() * sizeof(char));
len++;
}
}
return len;
}
void appendFile(ofstream* stream, int* data, const unsigned int lines)
{
if (lines == 0)
return;
if (!stream->is_open())
throw exception("Stream is not open!");
string write;
for (unsigned int i = 0; i < lines; i++)
{
unsigned int o = i * RESULT_VALUES;
for (unsigned int j = 0; j < RESULT_VALUES - 1; j++)
{
write += to_string(data[o + j]) + ",";
}
write += to_string(data[o + RESULT_VALUES - 1]) + "\n";
}
stream->write(write.c_str(), write.size());
}
__global__ void evalUrls(
unsigned char* urls,
unsigned int* urls_lenghts,
unsigned char* safe,
unsigned char* neutral,
unsigned int urls_lenght,
unsigned int safe_lenght,
unsigned int neutral_lenght,
int* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if(offset < urls_lenght)
{
char ok = -2;
int o = offset * ITEM_SIZE;
int len = urls_lenghts[offset];
bool php = false, asp = false, amp = false;
const char* sphp = ".php", * sasp = ".asp", * samp = "&";
int pphp = 0, pasp = 0, pamp = 0;
// dangerous chars
for (int j = 0; j < len; j++) {
char c = urls[o + j];
php = sphp[pphp] == c;
pphp = php ? pphp + 1 : 0;
asp = sasp[pasp] == c;
pasp = asp ? pasp + 1 : 0;
amp = samp[pamp] == c;
pamp = amp ? pamp + 1 : 0;
if (pphp == 4 || pasp == 4 || pamp == 1)
break;
/*if (c <= 0)
{
php = asp = amp = false;
break;
}*/
}
php = pphp == 4;
asp = pasp == 4;
amp = pamp == 1;
if (!(php || asp || amp))
{
ok = -1;
// safe urls
for (int i = 0; ok == -1 && i < safe_lenght; i++)
{
int off = i * ITEM_SIZE;
bool eq = true;
for (int j = 0; eq && j < len; j++)
eq = safe[off + j] == urls[o + j];
eq = eq && safe[off + len] == EMPTY_CHAR;
ok = (eq ? 1 : -1);
}
// neutral urls
for (int i = 0; ok == -1 && i < neutral_lenght; i++)
{
int off = i * ITEM_SIZE;
bool eq = true;
for (int j = 0; eq && j < len; j++)
eq = neutral[off + j] == urls[o + j];
eq = eq && neutral[off + len] == EMPTY_CHAR;
ok = (eq ? 0 : -1);
}
}
results[offset * RESULT_VALUES + 1] = ok;
//offset += skip;
}
}
__global__ void evalAgents(
char* agents,
const unsigned long agents_lenght,
int* length)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if(offset < agents_lenght)
{
int o = offset * 4096;
// todo
//offset += skip;
}
}
__global__ void prepareData(
unsigned char* lines,
const unsigned long lines_lenght,
unsigned int* ids,
unsigned char* urls,
unsigned int* urls_length,
tm* createds,
unsigned int* user_ids,
unsigned char* ips,
unsigned int* ips_length,
unsigned char* agents,
unsigned int* agents_length,
int* results)
{
unsigned int offset = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int skip = blockDim.x * gridDim.x;
if (offset < lines_lenght)
{
int ol = offset * LINE_SIZE;
int oi = offset * ITEM_SIZE;
int or = offset * RESULT_VALUES;
char spliter = ',';
char zero = '0';
char nine = '9';
char escape = '"';
int val = 0;
int started = 0;
// Parse line
unsigned int i = 0;
unsigned char c = 0;
unsigned char oc = 0;
// get Id
for (; i < LINE_SIZE && c != spliter; i++)
{
c = lines[ol + i];
val = c >= zero && c <= nine ? val * 10 + c - zero : val;
}
ids[offset] = val;
results[or ] = val;
val = 0;
c = 0;
// get Url
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
urls[oi + i - started] = c;
else if (c == escape)
started = i+1;
}
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
urls[oi + j] = EMPTY_CHAR;*/
urls_length[offset] = i - started - 2;
results[or + 2] = i - started - 2;
c = 0;
started = 0;
// get Date
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
// ToDo
}
c = 0;
// get User Id
for (; i < LINE_SIZE && c != spliter; i++)
{
c = lines[ol + i];
val = c >= zero && c <= nine ? val * 10 + c - zero : val;
}
user_ids[offset] = val;
results[or + 3] = val;
c = 0;
// get Ip
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
ips[oi + i - started] = c;
else if (c == escape)
started = i + 1;
}
if (started == 0)
{
/*for (unsigned int j = 0; j < ITEM_SIZE; j++)
ips[oi + j] = EMPTY_CHAR;*/
ips_length[offset] = 0;
}
else
{
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
ips[oi + j] = EMPTY_CHAR;*/
ips_length[offset] = i - started - 2;
}
c = 0;
started = 0;
// get Agent
for (; i < LINE_SIZE && !(oc == escape && c == spliter); i++)
{
oc = c;
c = lines[ol + i];
if (started > 0 && c != spliter)
agents[oi + i - started] = c;
else if (c == escape)
started = i + 1;
}
if (started == 0)
{
/*for (unsigned int j = 0; j < ITEM_SIZE; j++)
agents[oi + j] = EMPTY_CHAR;*/
agents_length[offset] = 0;
results[or + 4] = 0;
}
else
{
/*for (unsigned int j = i - started - 2; j < ITEM_SIZE; j++)
agents[oi + j] = EMPTY_CHAR;*/
agents_length[offset] = i - started - 2;
results[or + 4] = i - started - 2;
}
//offset += skip;
}
}
char* mallocCharVec(vector<char*>* src)
{
char* dst;
checkCudaErrors(cudaMalloc(&dst, src->size() * ITEM_SIZE * sizeof(char)));
char* hd = (char*)malloc(src->size() * ITEM_SIZE * sizeof(char));
for (int i = 0; i < src->size(); i++)
memcpy(&(hd[i * ITEM_SIZE]), src->at(i), ITEM_SIZE);
checkCudaErrors(cudaMemcpy(dst, hd, src->size() * ITEM_SIZE * sizeof(char), cudaMemcpyHostToDevice));
delete hd;
return dst;
}
unsigned char* fileToCuda(const char* file_name, unsigned int* length)
{
// Read file to RAM
auto file = openFile(file_name);
unsigned char* data = (unsigned char*)malloc(LINES * ITEM_SIZE);
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(file, data, LINES);
sum += lines;
if (lines >= LINES)
data = (unsigned char*)realloc(data, sum * ITEM_SIZE);
}
closeFile(file);
delete file;
*length = sum;
// Move from RAM to GPU
unsigned char* cuda_array;
checkCudaErrors(cudaMalloc((void**)&cuda_array, sum * ITEM_SIZE));
checkCudaErrors(cudaMemcpy(cuda_array, data, sum * ITEM_SIZE, cudaMemcpyHostToDevice));
// Free RAM
free(data);
return cuda_array;
}
void computeVectors()
{
// Load safe urls
unsigned int safe_length;
unsigned char* d_safe = fileToCuda("D:/Documents/Projekty/Škola/PA2/project/assets/safe_urls.txt", &safe_length);
//checkDeviceMatrix(d_safe, ITEM_SIZE, 20, 60, "%d ");
// Load neutral urls
unsigned int neutral_length;
unsigned char* d_neutral = fileToCuda("D:/Documents/Projekty/Škola/PA2/project/assets/neutral_urls.txt", &neutral_length);
// create stream
cudaStream_t streamA, streamB;
checkCudaErrors(cudaStreamCreate(&streamA));
checkCudaErrors(cudaStreamCreate(&streamB));
// Host allocations
unsigned char* dataA;
checkCudaErrors(cudaHostAlloc((void**)&dataA, LINES * LINE_SIZE, cudaHostAllocDefault));
unsigned char* dataB;
checkCudaErrors(cudaHostAlloc((void**)&dataB, LINES * LINE_SIZE, cudaHostAllocDefault));
int* h_dataA;
checkCudaErrors(cudaHostAlloc((void**)&h_dataA, LINES * RESULT_VALUES * sizeof(int), cudaHostAllocDefault));
int* h_dataB;
checkCudaErrors(cudaHostAlloc((void**)&h_dataB, LINES * RESULT_VALUES * sizeof(int), cudaHostAllocDefault));
// Cuda allocations
unsigned char* d_linesA;
unsigned char* d_linesB;
checkCudaErrors(cudaMalloc((void**)&d_linesA, LINES * LINE_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_linesB, LINES * LINE_SIZE));
// Cuda parsed data
unsigned int* d_idsA;
unsigned int* d_user_idsA;
unsigned char* d_urlsA;
unsigned char* d_ipsA;
unsigned char* d_agentsA;
unsigned int* d_urls_lengthA;
unsigned int* d_ips_lengthA;
unsigned int* d_agents_lengthA;
tm* d_createdsA;
checkCudaErrors(cudaMalloc((void**)&d_idsA, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_user_idsA, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_urlsA, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_ipsA, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_agentsA, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_createdsA, LINES * sizeof(tm)));
checkCudaErrors(cudaMalloc((void**)&d_urls_lengthA, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_ips_lengthA, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_agents_lengthA, LINES * sizeof(unsigned int)));
unsigned int* d_idsB;
unsigned int* d_user_idsB;
unsigned char* d_urlsB;
unsigned char* d_ipsB;
unsigned char* d_agentsB;
unsigned int* d_urls_lengthB;
unsigned int* d_ips_lengthB;
unsigned int* d_agents_lengthB;
tm* d_createdsB;
checkCudaErrors(cudaMalloc((void**)&d_idsB, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_user_idsB, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_urlsB, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_ipsB, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_agentsB, LINES * ITEM_SIZE));
checkCudaErrors(cudaMalloc((void**)&d_createdsB, LINES * sizeof(tm)));
checkCudaErrors(cudaMalloc((void**)&d_urls_lengthB, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_ips_lengthB, LINES * sizeof(unsigned int)));
checkCudaErrors(cudaMalloc((void**)&d_agents_lengthB, LINES * sizeof(unsigned int)));
// Cuda results
int* d_resultsA;
int* d_resultsB;
// Id, Url Danger, Url len, UserId, Agent Length
checkCudaErrors(cudaMalloc((void**)&d_resultsA, LINES * RESULT_VALUES * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_resultsB, LINES * RESULT_VALUES * sizeof(int)));
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
auto results = writeFile("D:/Documents/Projekty/Škola/PA2/project/assets/results.csv");
// Skip/Add header
skipFile(requests, 1);
if (results->is_open())
results->write("Id,UrlDanger,UrlLen,UserId,AgentLen\n", 36);
clock_t start = clock();
// Stream FILE -> RAM -> GPU -> RAM -> FILE
int lines = LINES;
int linesA = 0;
int linesB = 0;
int dim = 0;
int sum = 0;
while (lines >= LINES)
{
// ----------------- Stream A ----------------------------------
lines = readFile(requests, dataA, LINES);
dim = (lines + BLOCK_DIM - 1) / BLOCK_DIM;
checkCudaErrors(cudaStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);
/*cudaStreamSynchronize(streamA);
appendFile(results, h_dataA, linesA);*/
//checkDeviceMatrix(d_resultsA, RESULT_VALUES * sizeof(int), 10, 5, "%d ");
//checkDeviceMatrix(dataA, LINE_SIZE * sizeof(char), 10, 40, "%c");
//checkDeviceMatrix(d_idsA, sizeof(int), 10, 1, "%d");
//checkHostMatrix(h_dataA, RESULT_VALUES * sizeof(int), 10, 5, "%d ");
cudaMemcpyAsync(d_linesA, dataA, lines * LINE_SIZE, cudaMemcpyHostToDevice, streamA);
prepareData<<<dim, BLOCK_DIM, 0, streamA>>>(d_linesA, lines, d_idsA, d_urlsA, d_urls_lengthA, d_createdsA, d_user_idsA, d_ipsA, d_ips_lengthA, d_agentsA, d_agents_lengthA, d_resultsA);
evalUrls<<<dim, BLOCK_DIM, 0, streamA>>>(d_urlsA, d_urls_lengthA, d_safe, d_neutral, lines, safe_length, neutral_length, d_resultsA);
cudaMemcpyAsync(h_dataA, d_resultsA, lines * RESULT_VALUES * sizeof(int), cudaMemcpyDeviceToHost, streamA);
linesA = lines;
sum += lines;
//// ----------------- Stream B ----------------------------------
lines = readFile(requests, dataB, LINES);
dim = (lines + BLOCK_DIM - 1) / BLOCK_DIM;
checkCudaErrors(cudaStreamSynchronize(streamA));
appendFile(results, h_dataA, linesA);
/*checkCudaErrors(cudaStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);*/
checkCudaErrors(cudaMemcpyAsync(d_linesB, dataB, lines * LINE_SIZE, cudaMemcpyHostToDevice, streamB));
prepareData<<<dim, BLOCK_DIM, 0, streamB>>>(d_linesB, lines, d_idsB, d_urlsB, d_urls_lengthB, d_createdsB, d_user_idsB, d_ipsB, d_ips_lengthB, d_agentsB, d_agents_lengthB, d_resultsB);
evalUrls<<<dim, BLOCK_DIM, 0, streamB>>>(d_urlsB, d_urls_lengthB, d_safe, d_neutral, lines, safe_length, neutral_length, d_resultsB);
checkCudaErrors(cudaMemcpyAsync(h_dataB, d_resultsB, lines * RESULT_VALUES * sizeof(int), cudaMemcpyDeviceToHost, streamB));
linesB = lines;
sum += lines;
printf("\r%d", sum);
}
printf("\n");
// Append last data
/*checkCudaErrors(cudaStreamSynchronize(streamA));
appendFile(results, h_dataA, linesA);
checkCudaErrors(cudaStreamSynchronize(streamB));
appendFile(results, h_dataB, linesB);*/
closeFile(requests);
delete requests;
closeFile(results);
delete results;
cudaStreamSynchronize(streamA);
cudaStreamSynchronize(streamB);
cudaStreamDestroy(streamA);
cudaStreamDestroy(streamB);
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
cudaFree(d_linesA);
cudaFree(d_linesB);
cudaFree(d_idsA);
cudaFree(d_user_idsA);
cudaFree(d_urlsA);
cudaFree(d_ipsA);
cudaFree(d_agentsA);
cudaFree(d_urls_lengthA);
cudaFree(d_ips_lengthA);
cudaFree(d_agents_lengthA);
cudaFree(d_createdsA);
cudaFree(d_idsB);
cudaFree(d_user_idsB);
cudaFree(d_urlsB);
cudaFree(d_ipsB);
cudaFree(d_agentsB);
cudaFree(d_urls_lengthB);
cudaFree(d_ips_lengthB);
cudaFree(d_agents_lengthB);
cudaFree(d_createdsB);
cudaFreeHost(dataA);
cudaFreeHost(dataB);
cudaFreeHost(h_dataA);
cudaFreeHost(h_dataB);
}
void readCPUBenchmark()
{
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
unsigned char* data;
cudaHostAlloc((void**)&data, LINES * LINE_SIZE, cudaHostAllocDefault);
clock_t start = clock();
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(requests, data, LINES);
sum += lines;
}
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
closeFile(requests);
delete requests;
}
void readCopyCPUBenchmark()
{
auto requests = openFile("D:/Documents/Projekty/Škola/PA2/project/assets/requestlog.csv");
// create stream
cudaStream_t streamA, streamB;
cudaStreamCreate(&streamA);
cudaStreamCreate(&streamB);
unsigned char* dataA;
cudaHostAlloc((void**)&dataA, LINES * LINE_SIZE, cudaHostAllocDefault);
unsigned char* dataB;
cudaHostAlloc((void**)&dataB, LINES * LINE_SIZE, cudaHostAllocDefault);
unsigned char* d_linesA;
cudaMalloc((void**)&d_linesA, LINE_SIZE);
unsigned char* d_linesB;
cudaMalloc((void**)&d_linesB, LINE_SIZE);
clock_t start = clock();
int lines = LINES;
int sum = 0;
while (lines >= LINES)
{
lines = readFile(requests, dataA, LINES);
cudaMemcpyAsync(d_linesA, dataA, lines * LINE_SIZE, cudaMemcpyHostToDevice, streamA);
sum += lines;
lines = readFile(requests, dataB, LINES);
cudaMemcpyAsync(d_linesB, dataB, lines * LINE_SIZE, cudaMemcpyHostToDevice, streamB);
sum += lines;
}
cudaStreamSynchronize(streamA);
cudaStreamSynchronize(streamB);
cudaStreamDestroy(streamA);
cudaStreamDestroy(streamB);
printf("Done in %.2f s\n", (std::clock() - start) / (double)CLOCKS_PER_SEC);
printf("%d Lines readed\n", sum);
cudaFree(d_linesA);
cudaFree(d_linesB);
cudaFreeHost(dataA);
cudaFreeHost(dataB);
closeFile(requests);
delete requests;
}
int main(int argc, char* argv[])
{
initializeCUDA(deviceProp);
//readCPUBenchmark();
//readCopyCPUBenchmark();
computeVectors();
}
|
adcd853d5985ad439040ec7605de8a39ef0b96e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
//M = AllocateMatrix(rand()%1024, rand()%1024, 1);
//N = AllocateMatrix(M.width, rand() % 1024, 1);
M = AllocateMatrix(1024, 1024, 1);
N = AllocateMatrix(1024, 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 3)
{
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
//printf("the size of M is:\nheight=%d\nwidth=%d\n",M.height, M.width);
//printf("the size of N is:\nheight=%d\nwidth=%d\n",N.height, N.width);
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements,P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
//int blky = (P.height%TILED_WIDTH==0)?P.height/TILED_WIDTH:P.height/TILED_WIDTH+1;
//int blkx = (P.width%TILED_WIDTH==0)?P.width/TILED_WIDTH:P.width/TILED_WIDTH+1;
int blky = P.height/TILED_WIDTH;
int blkx = P.width/TILED_WIDTH;
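// Editorial note (not in the original source): the plain integer division above
// drops any partial tile, so P.height and P.width are assumed to be multiples
// of TILED_WIDTH (true for the 1024x1024 default case); the commented-out
// lines show the ceiling variant for arbitrary sizes.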
dim3 block2D(blkx, blky);
dim3 thread2D(TILED_WIDTH, TILED_WIDTH);
hipEvent_t start, stop;
float elapsedTime=0.0f;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(block2D),dim3(thread2D), 0, 0, Md, Nd, Pd);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is:%f\n",elapsedTime);
printf("The tiled size is:%d\n",TILED_WIDTH);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
//Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX);
}
return M;
}
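/* Editorial example (not part of the original source), illustrating the init
parameter of AllocateMatrix together with the device helpers above:
Matrix A = AllocateMatrix(256, 256, 1); // 256x256 host matrix, random values
Matrix Ad = AllocateDeviceMatrix(A); // device buffer of the same shape
CopyToDeviceMatrix(Ad, A); // upload
FreeDeviceMatrix(&Ad);
FreeMatrix(&A); */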
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equal to M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
| adcd853d5985ad439040ec7605de8a39ef0b96e1.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
//M = AllocateMatrix(rand()%1024, rand()%1024, 1);
//N = AllocateMatrix(M.width, rand() % 1024, 1);
M = AllocateMatrix(1024, 1024, 1);
N = AllocateMatrix(1024, 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = NULL; //(int*)malloc(3 * sizeof(int));
unsigned int data_read = 3;
cutReadFilei(argv[1], ¶ms, &data_read, true);
if(data_read != 3)
{
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
errorM = ReadFile(&M, argv[2]);
errorN = ReadFile(&N, argv[3]);
if(errorM || errorN )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
//printf("the size of M is:\nheight=%d\nwidth=%d\n",M.height, M.width);
//printf("the size of N is:\nheight=%d\nwidth=%d\n",N.height, N.width);
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// in this case check if the result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements,P.height*P.width, 0.001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
//int blky = (P.height%TILED_WIDTH==0)?P.height/TILED_WIDTH:P.height/TILED_WIDTH+1;
//int blkx = (P.width%TILED_WIDTH==0)?P.width/TILED_WIDTH:P.width/TILED_WIDTH+1;
int blky = P.height/TILED_WIDTH;
int blkx = P.width/TILED_WIDTH;
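// Editorial note (not in the original source): the plain integer division above
// drops any partial tile, so P.height and P.width are assumed to be multiples
// of TILED_WIDTH (true for the 1024x1024 default case); the commented-out
// lines show the ceiling variant for arbitrary sizes.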
dim3 block2D(blkx, blky);
dim3 thread2D(TILED_WIDTH, TILED_WIDTH);
cudaEvent_t start, stop;
float elapsedTime=0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch the device computation threads!
MatrixMulKernel<<<block2D,thread2D>>>(Md, Nd, Pd);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("The execution time of GPU is:%f\n",elapsedTime);
printf("The tiled size is:%d\n",TILED_WIDTH);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
//Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a host matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX);
}
return M;
}
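/* Editorial example (not part of the original source), illustrating the init
parameter of AllocateMatrix together with the device helpers above:
Matrix A = AllocateMatrix(256, 256, 1); // 256x256 host matrix, random values
Matrix Ad = AllocateDeviceMatrix(A); // device buffer of the same shape
CopyToDeviceMatrix(Ad, A); // upload
FreeDeviceMatrix(&Ad);
FreeMatrix(&A); */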
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns zero if the number of elements read is
// equal to M.height * M.width, and 1 otherwise
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height*M->width;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return (data_read != (M->height * M->width));
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
|
98c8d720251cc70ebaf20953a569c820d1895699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include<random>
using namespace std;
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
//printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
//printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
// printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
//printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
}
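/* Editorial example (not part of the original source): a worked trace of the
exclusive Blelloch scan above, assuming n = 8, to illustrate the two phases.
input: [3 1 7 0 4 1 6 3]
up-sweep (reduce): [3 4 7 7 4 5 6 9] -> [3 4 7 11 4 5 6 14] -> [3 4 7 11 4 5 6 25]
clear last element: [3 4 7 11 4 5 6 0]
down-sweep: [3 4 7 0 4 5 6 11] -> [3 0 7 4 4 11 6 16] -> [0 3 4 11 11 15 16 22]
The result is the exclusive prefix sum of the input. */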
float * getData(int size){
float *data = new float [size];
for (int i = 0; i < size; i++){
data[i] = std::rand()%10;
}
return data;
}
void displayData(float * data,int size){
for (int i= 0; i < size; i++){
printf("%f\n", data[i]);
}
}
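// Editorial addition (not in the original source): a hypothetical sequential
// reference, exclusiveScanCPU, computing the same exclusive prefix sum on the
// host; it could be called from main() to spot-check the kernel output.
void exclusiveScanCPU(const float* in, float* out, int size){
float running = 0.0f;
for (int i = 0; i < size; i++){
out[i] = running; // exclusive: element i gets the sum of in[0..i-1]
running += in[i];
}
}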
int main(){
int size = 128;
float *input;
float *output = new float[size];
float *inp_dev, *out_dev;
input = getData(size);
// displayData(input, size);
hipMalloc(&inp_dev, size*sizeof(float));
hipMalloc(&out_dev, size*sizeof(float));
hipError_t cudaStatus = hipMemcpy(inp_dev, input, size*sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
hipMemset(out_dev, 0, size*sizeof(float));
prescan << <1, size/2, size*sizeof(float) >> >(out_dev,inp_dev, size);
hipDeviceSynchronize();
hipMemcpy(output, out_dev, size*sizeof(float), hipMemcpyDeviceToHost);
cout << "\nFinal Output\n";
displayData(output, size);
Error:
getchar();
return cudaStatus;
} | 98c8d720251cc70ebaf20953a569c820d1895699.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include<random>
using namespace std;
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[]; // allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[2 * thid] = g_idata[2 * thid]; // load input into shared memory
temp[2 * thid + 1] = g_idata[2 * thid + 1];
//printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
//printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2 * thid + 1) - 1;
int bi = offset*(2 * thid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[2 * thid] = temp[2 * thid]; // write results to device memory
g_odata[2 * thid + 1] = temp[2 * thid + 1];
// printf("%d - %f - %f \n", thid, g_odata[2 * thid], g_odata[2 * thid + 1]);
//printf("%d - %f - %f \n", thid, g_idata[2 * thid], g_idata[2 * thid + 1]);
}
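/* Editorial example (not part of the original source): a worked trace of the
exclusive Blelloch scan above, assuming n = 8, to illustrate the two phases.
input: [3 1 7 0 4 1 6 3]
up-sweep (reduce): [3 4 7 7 4 5 6 9] -> [3 4 7 11 4 5 6 14] -> [3 4 7 11 4 5 6 25]
clear last element: [3 4 7 11 4 5 6 0]
down-sweep: [3 4 7 0 4 5 6 11] -> [3 0 7 4 4 11 6 16] -> [0 3 4 11 11 15 16 22]
The result is the exclusive prefix sum of the input. */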
float * getData(int size){
float *data = new float [size];
for (int i = 0; i < size; i++){
data[i] = std::rand()%10;
}
return data;
}
void displayData(float * data,int size){
for (int i= 0; i < size; i++){
printf("%f\n", data[i]);
}
}
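// Editorial addition (not in the original source): a hypothetical sequential
// reference, exclusiveScanCPU, computing the same exclusive prefix sum on the
// host; it could be called from main() to spot-check the kernel output.
void exclusiveScanCPU(const float* in, float* out, int size){
float running = 0.0f;
for (int i = 0; i < size; i++){
out[i] = running; // exclusive: element i gets the sum of in[0..i-1]
running += in[i];
}
}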
int main(){
int size = 128;
float *input;
float *output = new float[size];
float *inp_dev, *out_dev;
input = getData(size);
// displayData(input, size);
cudaMalloc(&inp_dev, size*sizeof(float));
cudaMalloc(&out_dev, size*sizeof(float));
cudaError_t cudaStatus = cudaMemcpy(inp_dev, input, size*sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaMemset(out_dev, 0, size*sizeof(float));
prescan << <1, size/2, size*sizeof(float) >> >(out_dev,inp_dev, size);
cudaDeviceSynchronize();
cudaMemcpy(output, out_dev, size*sizeof(float), cudaMemcpyDeviceToHost);
cout << "\nFinal Output\n";
displayData(output, size);
Error:
getchar();
return cudaStatus;
} |
57fd1a991c95c6caa9a049f2356305dcf680fff9.hip | // !!! This is a file automatically generated by hipify!!!
/************************************************************************
MEX code for Multiple Spin Pool Exchange (ME) spin discrete evolution using IPP or Framewave and
parallel GPU computation (CUDA) written by Fang Liu ([email protected]).
************************************************************************/
/* system header */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
/* MEX header */
#include <mex.h>
#include "matrix.h"
/* nVIDIA CUDA header */
#include <hip/hip_runtime.h>
/* OpenMP header*/
#include <omp.h>
/* Intel IPP header */
#ifdef IPP
#include <ipp.h>
#endif
/* AMD Framewave header */
#ifdef FW
#include <fwSignal.h>
#include <fwBase.h>
#define Ipp32f Fw32f
#define ippAlgHintFast fwAlgHintFast
#define ippsMalloc_32f fwsMalloc_32f
#define ippsFree fwsFree
#define ippsZero_32f fwsZero_32f
#define ippsZero_64f fwsZero_64f
#define ippsSum_32f fwsSum_32f
#define ippsCopy_32f fwsCopy_32f
#define ippsAddC_32f fwsAddC_32f
#define ippsAddC_32f_I fwsAddC_32f_I
#define ippsAdd_32f fwsAdd_32f
#define ippsAdd_32f_I fwsAdd_32f_I
#define ippsMulC_32f fwsMulC_32f
#define ippsMulC_32f_I fwsMulC_32f_I
#define ippsMul_32f fwsMul_32f
#define ippsMul_32f_I fwsMul_32f_I
#define ippsDiv_32f fwsDiv_32f
#define ippsDivC_32f fwsDivC_32f
#define ippsInv_32f_A24 fwsInv_32f_A24
#define ippsThreshold_LT_32f_I fwsThreshold_LT_32f_I
#define ippsExp_32f_I fwsExp_32f_I
#define ippsArctan_32f fwsArctan_32f
#define ippsSqr_32f fwsSqr_32f
#define ippsSqr_32f_I fwsSqr_32f_I
#define ippsSqrt_32f_I fwsSqrt_32f_I
#define ippsSin_32f_A24 fwsSin_32f_A24
#define ippsCos_32f_A24 fwsCos_32f_A24
#define ippsPolarToCart_32f fwsPolarToCart_32f
#define ippsCartToPolar_32f fwsCartToPolar_32f
#endif
/* for fixing error : identifier "IUnknown" is undefined" */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#if defined(_WIN32) || defined(_WIN64)
#include <windows.h>
#endif
#define PI 3.14159265359 /* pi constant */
/* includes CUDA kernel */
#include "BlochKernelME.cuh"
// extern "C" bool mxUnshareArray(mxArray *array_ptr, bool noDeepCopy);
/* MEX entry function */
void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[])
{
/* pointers for VObj */
double *Gyro;
int SpinMxNum, SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, SpinMxDimNum;
const mwSize *SpinMxDims;
float *Mz, *My, *Mx, *Rho, *T1, *T2, *K;
/* pointers for VMag */
float *dB0, *dWRnd, *Gzgrid, *Gygrid, *Gxgrid;
/* pointers for VCoi */
float *RxCoilx, *RxCoily, *TxCoilmg, *TxCoilpe;
double *RxCoilDefault, *TxCoilDefault;
/* pointers for VCtl */
double *CS;
int *TRNum, *MaxThreadNum, ThreadNum, *RunMode;
int *ActiveThreadNum;
int *GPUIndex;
/* pointers for VSeq */
double *utsLine, *tsLine, *rfAmpLine, *rfPhaseLine, *rfFreqLine, *rfCoilLine, *GzAmpLine, *GyAmpLine, *GxAmpLine, *ADCLine, *ExtLine, *flagsLine;
/* pointers for VVar */
double *t, *dt, *rfAmp, *rfPhase, *rfFreq, *rfCoil, *rfRef, *GzAmp, *GyAmp, *GxAmp, *ADC, *Ext, *KzTmp, *KyTmp, *KxTmp, *gpuFetch;
int *utsi, *rfi, *Gzi, *Gyi, *Gxi, *ADCi, *Exti, *TRCount;
/* pointers for VSig */
double *Sx, *Sy, *Kz, *Ky, *Kx, *Muts;
float *Mxs, *Mys, *Mzs;
double *p_Sx, *p_Sy;
/* loop control */
int i=0, j=0, s=0, Signali=0, Signalptr=0, PreSignalLen=0, SignalLen=0, SBufferLen=0, Typei, RxCoili, TxCoili;
int MaxStep, MaxutsStep, MaxrfStep, MaxGzStep, MaxGyStep, MaxGxStep, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, *SignalNum;
double flag[6];
/* IPP or FW buffer */
Ipp32f buffer, *Sxbuffer, *Sybuffer;
/* function status */
int ExtCall;
/* GPU execution sequence */
std::vector<float> g_Sig;
/* force breaking Copy-on-Write in Matlab */
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VObj")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VMag")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VCoi")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VCtl")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VSeq")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VVar")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VSig")), true);
/* assign pointers */
Gyro = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Gyro"));
Mz = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
My = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "My"));
Mx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mx"));
Rho = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Rho"));
T1 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T1"));
T2 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T2"));
K = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "K"));
SpinNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "SpinNum"));
TypeNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "TypeNum"));
dB0 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dB0"));
dWRnd = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dWRnd"));
Gzgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gzgrid"));
Gygrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gygrid"));
Gxgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gxgrid"));
TxCoilmg = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilmg"));
TxCoilpe = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilpe"));
RxCoilx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilx"));
RxCoily = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoily"));
TxCoilNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilNum"));
RxCoilNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilNum"));
TxCoilDefault = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilDefault"));
RxCoilDefault = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilDefault"));
CS = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "CS"));
TRNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "TRNum"));
RunMode = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "RunMode"));
MaxThreadNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "MaxThreadNum"));
ActiveThreadNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "ActiveThreadNum"));
GPUIndex = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "GPUIndex"));
utsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "utsLine"));
tsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "tsLine"));
rfAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfAmpLine"));
rfPhaseLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfPhaseLine"));
rfFreqLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfFreqLine"));
rfCoilLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfCoilLine"));
GzAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GzAmpLine"));
GyAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GyAmpLine"));
GxAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GxAmpLine"));
ADCLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "ADCLine"));
ExtLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "ExtLine"));
flagsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "flagsLine"));
MaxStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "tsLine"));
MaxutsStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "utsLine"));
MaxrfStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfAmpLine"));
MaxGzStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GzAmpLine"));
MaxGyStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GyAmpLine"));
MaxGxStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GxAmpLine"));
t = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "t"));
dt = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "dt"));
rfAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfAmp"));
rfPhase = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfPhase"));
rfFreq = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfFreq"));
rfCoil = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfCoil"));
rfRef = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfRef"));
GzAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GzAmp"));
GyAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GyAmp"));
GxAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GxAmp"));
ADC = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADC"));
Ext = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ext"));
KzTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kz"));
KyTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ky"));
KxTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kx"));
gpuFetch = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "gpuFetch"));
utsi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "utsi"));
rfi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfi"));
Gzi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gzi"));
Gyi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gyi"));
Gxi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gxi"));
ADCi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADCi"));
Exti = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Exti"));
TRCount = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "TRCount"));
Sy = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Sy"));
Sx = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Sx"));
Kz = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Kz"));
Ky = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Ky"));
Kx = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Kx"));
Mzs = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Mz"));
Mys = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "My"));
Mxs = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Mx"));
Muts = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Muts"));
SignalNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "SignalNum"));
/* get size of spin matrix */
SpinMxDimNum = mxGetNumberOfDimensions(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
SpinMxDims = (mwSize*) mxCalloc(SpinMxDimNum, sizeof(mwSize));
SpinMxDims = mxGetDimensions(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
SpinMxRowNum = SpinMxDims[0];
SpinMxColNum = SpinMxDims[1];
SpinMxNum = SpinMxDims[0] * SpinMxDims[1];
if (SpinMxDimNum == 2){
SpinMxSliceNum = 1;
}else{
SpinMxSliceNum = SpinMxDims[2];
}
/* choose selected GPU */
if( hipSuccess != hipSetDevice(*GPUIndex)){
mexPrintf( "\n%s", hipGetErrorString(hipGetLastError()));
return;
}
/* set GPU grid & block configuration*/
hipDeviceProp_t deviceProp;
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess != hipGetDeviceProperties(&deviceProp, *GPUIndex)){
mexPrintf( "\n%s", hipGetErrorString(hipGetLastError()));
return;
}
dim3 dimGridImg(SpinMxColNum,1,1);
dim3 dimBlockImg(1,SpinMxRowNum,1);
for (i=SpinMxColNum - 1; i >= deviceProp.multiProcessorCount; i--){
if ( SpinMxNum % i == 0 ){
if (SpinMxNum/i > deviceProp.maxThreadsPerBlock) break;
if ((SpinMxNum/i)*63 > deviceProp.regsPerBlock) break; // 63 registers per thread for current kernel
dimGridImg.x = i;
dimBlockImg.y = SpinMxNum/i;
}
}
i=0;
/* increase heep size for malloc in kernel */
/*
size_t heap_size;
hipDeviceGetLimit(&heap_size, hipLimitMallocHeapSize);
hipDeviceSetLimit(hipLimitMallocHeapSize, 128 * 1024 * 1024);
*/
/* allocate device memory for matrices */
float *d_Mz = NULL;
hipMalloc( (void**) &d_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_Mz, Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_My = NULL;
hipMalloc( (void**) &d_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_My, My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_Mx = NULL;
hipMalloc( (void**) &d_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_Mx, Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_dWRnd = NULL;
hipMalloc( (void**) &d_dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_dWRnd, dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_Rho = NULL;
hipMalloc( (void**) &d_Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_Rho, Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_T1 = NULL;
hipMalloc( (void**) &d_T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_T1, T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_T2 = NULL;
hipMalloc( (void**) &d_T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_T2, T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_K = NULL;
hipMalloc( (void**) &d_K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float)) ;
hipMemcpy( d_K, K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_Gzgrid = NULL;
hipMalloc( (void**) &d_Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
hipMemcpy( d_Gzgrid, Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_Gygrid = NULL;
hipMalloc( (void**) &d_Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
hipMemcpy( d_Gygrid, Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_Gxgrid = NULL;
hipMalloc( (void**) &d_Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
hipMemcpy( d_Gxgrid, Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_dB0 = NULL;
hipMalloc( (void**) &d_dB0, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
hipMemcpy( d_dB0, dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_TxCoilmg = NULL;
hipMalloc( (void**) &d_TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float)) ;
hipMemcpy( d_TxCoilmg, TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_TxCoilpe = NULL;
hipMalloc( (void**) &d_TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float)) ;
hipMemcpy( d_TxCoilpe, TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_RxCoilx = NULL;
hipMalloc( (void**) &d_RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float)) ;
hipMemcpy( d_RxCoilx, RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyHostToDevice ) ;
float *d_RxCoily = NULL;
hipMalloc( (void**) &d_RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float)) ;
hipMemcpy( d_RxCoily, RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyHostToDevice ) ;
double *d_CS = NULL;
hipMalloc( (void**) &d_CS, *TypeNum * sizeof(double)) ;
hipMemcpy( d_CS, CS, *TypeNum * sizeof(double), hipMemcpyHostToDevice ) ;
/* allocate device memory for GPU execution sequence*/
float *d_Sig = NULL;
hipMalloc( (void**) &d_Sig, (5+3*(*TxCoilNum)) * MaxutsStep * sizeof(float)) ;
/* allocate device memory according to RunMode */
float *d_b_Mz = NULL;
float *d_b_My = NULL;
float *d_b_Mx = NULL;
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
hipMalloc( (void**) &d_b_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
hipMalloc( (void**) &d_b_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
hipMalloc( (void**) &d_b_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
/* zero buffer */
hipMemset(d_b_Mz, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
hipMemset(d_b_My, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
hipMemset(d_b_Mx, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
break;
default:
hipMalloc( (void**) &d_b_Mz, 1 * sizeof(float)) ;
hipMalloc( (void**) &d_b_My, 1 * sizeof(float)) ;
hipMalloc( (void**) &d_b_Mx, 1 * sizeof(float)) ;
break;
}
/* allocate device memory for buffers
*tempMx
*tempMy
*tempMz
*ExpdtT2
*ExpdtT1
*M0dtT1
*/
float *d_Buffer = NULL;
hipMalloc( (void**) &d_Buffer, SpinMxNum * (*TypeNum) * 6 * sizeof(float)) ;
/* set CPU signal buffer */
Sxbuffer = ippsMalloc_32f(SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum));
/* allocate device memory for buffering acquired signal */
float *d_Sx = NULL;
hipMalloc( (void**) &d_Sx, SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
float *d_Sy = NULL;
hipMalloc( (void**) &d_Sy, SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
/* start simulator execution loop */
mexPrintf("------ Current active GPU device : %s ------\n", &deviceProp.name[0]);
mexPrintf("TR Counts: %d of %d\n", 1, *TRNum);
while (i < MaxStep){
/* check MR sequence pulse flag */
flag[0]=0;
flag[1]=0;
flag[2]=0;
flag[3]=0;
flag[4]=0;
flag[5]=0;
if (tsLine[i]!=tsLine[i+1]){
flag[0]+=flagsLine[i*6];
flag[1]+=flagsLine[i*6+1];
flag[2]+=flagsLine[i*6+2];
flag[3]+=flagsLine[i*6+3];
flag[4]+=flagsLine[i*6+4];
flag[5]+=flagsLine[i*6+5];
i++;
}
else{
flag[0]+=flagsLine[i*6];
flag[1]+=flagsLine[i*6+1];
flag[2]+=flagsLine[i*6+2];
flag[3]+=flagsLine[i*6+3];
flag[4]+=flagsLine[i*6+4];
flag[5]+=flagsLine[i*6+5];
while (tsLine[i]==tsLine[i+1]){
flag[0]+=flagsLine[(i+1)*6];
flag[1]+=flagsLine[(i+1)*6+1];
flag[2]+=flagsLine[(i+1)*6+2];
flag[3]+=flagsLine[(i+1)*6+3];
flag[4]+=flagsLine[(i+1)*6+4];
flag[5]+=flagsLine[(i+1)*6+5];
i++;
if (i==MaxStep-1){
break;
}
}
i++;
}
/* update pulse status */
*t = *(utsLine + *utsi);
*dt = *(utsLine + (int)min(*utsi+1, MaxutsStep-1))-*(utsLine + *utsi);
*utsi = (int)*utsi+1; /* how to address the end point of the sequence? */
g_Sig.push_back((float)*dt);
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
if (*dt > 0)
*(Muts+*utsi) = *(Muts+*utsi-1) + *dt;
else if (*dt < 0)
*(Muts+*utsi) = *(Muts+*utsi-1);
break;
}
if (flag[0]>=1 ){ /* update rfAmp, rfPhase, rfFreq, rfCoil for multiple rf lines */
for (j = 0; j < flag[0]; j++){
*rfCoil = *(rfCoilLine+ *rfi);
TxCoili = (int)(*rfCoil);
s = *rfi + 1;
while (s < MaxrfStep){
if (*rfCoil == *(rfCoilLine + s)){
if (fabs(*(rfAmpLine+ *rfi)) <= fabs(*(rfAmpLine + s)))
*(rfAmp + TxCoili - 1)= *(rfAmpLine+ *rfi);
else
*(rfAmp + TxCoili - 1)= *(rfAmpLine+ s);
if (fabs(*(rfPhaseLine+ *rfi)) <= fabs(*(rfPhaseLine + s)))
*(rfPhase + TxCoili - 1)= *(rfPhaseLine+ *rfi);
else
*(rfPhase + TxCoili - 1)= *(rfPhaseLine+ s);
if (fabs(*(rfFreqLine+ *rfi)) <= fabs(*(rfFreqLine + s)))
*(rfFreq + TxCoili - 1)= *(rfFreqLine+ *rfi);
else
*(rfFreq + TxCoili - 1)= *(rfFreqLine+ s);
break;
}
s++;
}
(*rfi)++;
}
for (j = 0; j < *TxCoilNum; j++){ /* multi-Tx, deal with rfPhase */
if (rfAmp[j]<0){
rfAmp[j]=fabs(rfAmp[j]);
rfPhase[j]=rfPhase[j]+PI;
}
}
}
for (j = 0; j < *TxCoilNum; j++){
g_Sig.push_back((float)rfAmp[j]);
g_Sig.push_back((float)rfPhase[j]);
g_Sig.push_back((float)rfFreq[j]);
}
if (flag[1]==1 ){ /* update GzAmp */
if (fabs(*(GzAmpLine+ *Gzi)) <= fabs(*(GzAmpLine + (int)min(*Gzi+1, MaxGzStep-1))))
*GzAmp = *(GzAmpLine+ *Gzi);
else
*GzAmp = *(GzAmpLine+ *Gzi+1);
(*Gzi)++;
}
g_Sig.push_back((float)*GzAmp);
if (flag[2]==1 ){ /* update GyAmp */
if (fabs(*(GyAmpLine+ *Gyi)) <= fabs(*(GyAmpLine + (int)min(*Gyi+1, MaxGyStep-1))))
*GyAmp = *(GyAmpLine+ *Gyi);
else
*GyAmp = *(GyAmpLine+ *Gyi+1);
(*Gyi)++;
}
g_Sig.push_back((float)*GyAmp);
if (flag[3]==1 ){ /* update GxAmp */
if (fabs(*(GxAmpLine+ *Gxi)) <= fabs(*(GxAmpLine + (int)min(*Gxi+1, MaxGxStep-1))))
*GxAmp = *(GxAmpLine+ *Gxi);
else
*GxAmp = *(GxAmpLine+ *Gxi+1);
(*Gxi)++;
}
g_Sig.push_back((float)*GxAmp);
*ADC = 0; /* avoid ADC overflow */
if (flag[4]==1){ /* update ADC */
*ADC = *(ADCLine+ *ADCi);
(*ADCi)++;
}
g_Sig.push_back((float)*ADC);
if (*ADC == 1){
/* update k-space */
Kz[Signali] += *KzTmp;
Ky[Signali] += *KyTmp;
Kx[Signali] += *KxTmp;
Signali++;
}
/* update Kz, Ky & Kx buffer */
*KzTmp +=(*GzAmp)*(*dt)*(*Gyro/(2*PI));
*KyTmp +=(*GyAmp)*(*dt)*(*Gyro/(2*PI));
*KxTmp +=(*GxAmp)*(*dt)*(*Gyro/(2*PI));
if (flag[5]==1){ /* update Ext */
*Ext = *(ExtLine+ *Exti);
/* execute extended process */
if (*Ext != 0){
if (g_Sig.size() !=0){
/* calculate signal length */
SignalLen = Signali-Signalptr;
/* reset buffer */
if (PreSignalLen!=SignalLen && SignalLen>0){
PreSignalLen = SignalLen;
/* allocate device memory for acquired signal buffer */
hipFree(d_Sx);
hipFree(d_Sy);
hipMalloc( (void**) &d_Sx, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
hipMalloc( (void**) &d_Sy, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
/* zero signal buffer */
hipMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
hipMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
/* set buffer */
ippsFree(Sxbuffer);
ippsFree(Sybuffer);
Sxbuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
}
/* avoid shared memory overflow */
if (g_Sig.size() * sizeof(float) > deviceProp.sharedMemPerBlock){
SBufferLen = 0;
}else{
SBufferLen = g_Sig.size() * sizeof(float);
}
/* upload GPU sequence */
hipMemcpy( d_Sig, &g_Sig[0], g_Sig.size() * sizeof(float), hipMemcpyHostToDevice ) ;
/* call GPU kernel for spin discrete precessing */
hipLaunchKernelGGL(( BlochKernelMEGPU), dim3(dimGridImg), dim3(dimBlockImg), SBufferLen , 0,
(float)*Gyro, d_CS, d_Rho, d_T1, d_T2, d_K, d_Mz, d_My, d_Mx, d_Buffer,
d_dB0, d_dWRnd, d_Gzgrid, d_Gygrid, d_Gxgrid, d_TxCoilmg, d_TxCoilpe, d_RxCoilx, d_RxCoily,
d_Sig, (float)*RxCoilDefault, (float)*TxCoilDefault,
d_Sx, d_Sy, (float)*rfRef, SignalLen, SBufferLen,
*RunMode, *utsi, d_b_Mz, d_b_My, d_b_Mx,
SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, g_Sig.size()/(5+3*(*TxCoilNum)));
hipDeviceSynchronize();
g_Sig.clear();
Signalptr = Signali; /* shift signal array pointer */
}
/* signal acquisition */
if (SignalLen>0){
/* get Sx, Sy buffer from GPU */
hipMemcpy( Sybuffer, d_Sy, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost ) ;
hipMemcpy( Sxbuffer, d_Sx, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost ) ;
/* sum MR signal via openMP */
for (Typei = 0; Typei < *TypeNum; Typei++){
for (RxCoili = 0; RxCoili < *RxCoilNum; RxCoili++){ /* signal acquisition per Rx coil */
#pragma omp parallel
{
#pragma omp for private(j, s, p_Sx, p_Sy, buffer)
for (j=0; j < SignalLen; j++){
if (j==0){
*ActiveThreadNum = omp_get_num_threads();
}
s=Signali-SignalLen+j;
p_Sx = Sx + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
p_Sy = Sy + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
ippsSum_32f(&Sxbuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sx = (double)buffer;
ippsSum_32f(&Sybuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sy = (double)buffer;
}
}
}
}
/* zero signal buffer */
hipMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
hipMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
}
/* fetch GPU data? */
ExtCall = mexEvalString("DoGPUFetch");
if (ExtCall){
mexErrMsgTxt("Extended process encounters ERROR!");
return;
}
if (*gpuFetch !=0){
/* fetch data from GPU */
hipMemcpy( Mz, d_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( My, d_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Mx, d_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( dWRnd, d_dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Rho, d_Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( T1, d_T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( T2, d_T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( K, d_K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Gzgrid, d_Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Gygrid, d_Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Gxgrid, d_Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( dB0, d_dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( TxCoilmg, d_TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( TxCoilpe, d_TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( RxCoilx, d_RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( RxCoily, d_RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyDeviceToHost );
}
/* execute extended process */
ExtCall = mexEvalString("DoExtPlugin");
if (ExtCall){
mexErrMsgTxt("Extended process encounters ERROR!");
return;
}
/* update pointers, avoid pointer change between Matlab and Mex call */
t = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "t"));
dt = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "dt"));
rfAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfAmp"));
rfPhase = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfPhase"));
rfFreq = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfFreq"));
rfCoil = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfCoil"));
rfRef = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfRef"));
GzAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GzAmp"));
GyAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GyAmp"));
GxAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GxAmp"));
ADC = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADC"));
Ext = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ext"));
KzTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kz"));
KyTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ky"));
KxTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kx"));
gpuFetch = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "gpuFetch"));
utsi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "utsi"));
rfi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfi"));
Gzi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gzi"));
Gyi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gyi"));
Gxi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gxi"));
ADCi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADCi"));
Exti = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Exti"));
TRCount = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "TRCount"));
if (*gpuFetch !=0){
*gpuFetch =0;
/* update pointers, avoid pointer change between Matlab and Mex call */
Mz = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
My = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "My"));
Mx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mx"));
Rho = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Rho"));
T1 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T1"));
T2 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T2"));
K = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "K"));
dWRnd = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dWRnd"));
dB0 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dB0"));
Gzgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gzgrid"));
Gygrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gygrid"));
Gxgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gxgrid"));
TxCoilmg = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilmg"));
TxCoilpe = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilpe"));
RxCoilx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilx"));
RxCoily = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoily"));
/* send data back to GPU */
hipMemcpy( d_Mz, Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_My, My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_Mx, Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_dWRnd, dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_Rho, Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_T1, T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_T2, T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_K, K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_Gzgrid, Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_Gygrid, Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_Gxgrid, Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_dB0, dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_TxCoilmg, TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_TxCoilpe, TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_RxCoilx, RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_RxCoily, RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), hipMemcpyHostToDevice );
}
}
(*Exti)++;
}
if (flag[0]+flag[1]+flag[2]+flag[3]+flag[4]+flag[5] == 0){ /* reset VVar */
ippsZero_64f(rfAmp, *TxCoilNum);
ippsZero_64f(rfPhase, *TxCoilNum);
ippsZero_64f(rfFreq, *TxCoilNum);
*GzAmp = 0;
*GyAmp = 0;
*GxAmp = 0;
*ADC = 0;
*Ext = 0;
}
/* check TR point & end of time point */
if (*dt <= 0){
if (g_Sig.size() !=0){
/* calculate signal length */
SignalLen = Signali-Signalptr;
/* reset buffer if needed */
if (PreSignalLen!=SignalLen && SignalLen>0){
PreSignalLen = SignalLen;
/* allocate device memory for acquired signal buffer */
hipFree(d_Sx);
hipFree(d_Sy);
hipMalloc( (void**) &d_Sx, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
hipMalloc( (void**) &d_Sy, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
/* zero signal buffer */
hipMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
hipMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
/* set buffer */
ippsFree(Sxbuffer);
ippsFree(Sybuffer);
Sxbuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
}
/* avoid shared memory overflow */
if (g_Sig.size() * sizeof(float) > deviceProp.sharedMemPerBlock){
SBufferLen = 0;
}else{
SBufferLen = g_Sig.size() * sizeof(float);
}
/* upload GPU sequence */
hipMemcpy( d_Sig, &g_Sig[0], g_Sig.size() * sizeof(float), hipMemcpyHostToDevice ) ;
/* call GPU kernel for spin discrete precessing */
hipLaunchKernelGGL(( BlochKernelMEGPU), dim3(dimGridImg), dim3(dimBlockImg), SBufferLen , 0,
(float)*Gyro, d_CS, d_Rho, d_T1, d_T2, d_K, d_Mz, d_My, d_Mx, d_Buffer,
d_dB0, d_dWRnd, d_Gzgrid, d_Gygrid, d_Gxgrid, d_TxCoilmg, d_TxCoilpe, d_RxCoilx, d_RxCoily,
d_Sig, (float)*RxCoilDefault, (float)*TxCoilDefault,
d_Sx, d_Sy, (float)*rfRef, SignalLen, SBufferLen,
*RunMode, *utsi, d_b_Mz, d_b_My, d_b_Mx,
SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, g_Sig.size()/(5+3*(*TxCoilNum)));
hipDeviceSynchronize(); /* stabilize simulation */
g_Sig.clear();
Signalptr = Signali;
}
/* signal acquisition */
if (SignalLen>0){
/* get Sx, Sy buffer from GPU */
hipMemcpy( Sybuffer, d_Sy, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost ) ;
hipMemcpy( Sxbuffer, d_Sx, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), hipMemcpyDeviceToHost ) ;
/* sum MR signal via openMP */
for (Typei = 0; Typei < *TypeNum; Typei++){
for (RxCoili = 0; RxCoili < *RxCoilNum; RxCoili++){ /* signal acquisition per Rx coil */
#pragma omp parallel
{
#pragma omp for private(j, s, p_Sx, p_Sy, buffer)
for (j=0; j < SignalLen; j++){
if (j==0){
*ActiveThreadNum = omp_get_num_threads();
}
s=Signali-SignalLen+j;
p_Sx = Sx + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
p_Sy = Sy + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
ippsSum_32f(&Sxbuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sx = (double)buffer;
ippsSum_32f(&Sybuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sy = (double)buffer;
}
}
}
}
/* zero signal buffer */
hipMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
hipMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
}
if (*dt < 0){
(*TRCount)++;
mexPrintf("TR Counts: %d of %d\n", *TRCount, *TRNum);
}
}
}
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
/* Get Mx, My & Mz from GPU */
hipMemcpy( Mzs, d_b_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), hipMemcpyDeviceToHost ) ;
hipMemcpy( Mys, d_b_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), hipMemcpyDeviceToHost ) ;
hipMemcpy( Mxs, d_b_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), hipMemcpyDeviceToHost ) ;
break;
}
/* free GPU memory */
hipFree(d_Mz);
hipFree(d_My);
hipFree(d_Mx);
hipFree(d_Buffer);
hipFree(d_dWRnd);
hipFree(d_Rho);
hipFree(d_T1);
hipFree(d_T2);
hipFree(d_K);
hipFree(d_Gzgrid);
hipFree(d_Gygrid);
hipFree(d_Gxgrid);
hipFree(d_dB0);
hipFree(d_TxCoilmg);
hipFree(d_TxCoilpe);
hipFree(d_RxCoilx);
hipFree(d_RxCoily);
hipFree(d_CS);
hipFree(d_Sig);
hipFree(d_Sx);
hipFree(d_Sy);
hipFree(d_b_Mz);
hipFree(d_b_My);
hipFree(d_b_Mx);
/* reset device, may slow down subsequent startup due to initialization */
// hipDeviceReset();
}
| 57fd1a991c95c6caa9a049f2356305dcf680fff9.cu |
/************************************************************************
MEX code for Multiple Spin Pool Exchange (ME) spin discrete evolution using IPP or Framewave and
parallel GPU computation (CUDA) written by Fang Liu ([email protected]).
************************************************************************/
/* system header */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
/* MEX header */
#include <mex.h>
#include "matrix.h"
/* nVIDIA CUDA header */
#include <cuda.h>
/* OpenMP header*/
#include <omp.h>
/* Intel IPP header */
#ifdef IPP
#include <ipp.h>
#endif
/* AMD Framewave header */
#ifdef FW
#include <fwSignal.h>
#include <fwBase.h>
#define Ipp32f Fw32f
#define ippAlgHintFast fwAlgHintFast
#define ippsMalloc_32f fwsMalloc_32f
#define ippsFree fwsFree
#define ippsZero_32f fwsZero_32f
#define ippsZero_64f fwsZero_64f
#define ippsSum_32f fwsSum_32f
#define ippsCopy_32f fwsCopy_32f
#define ippsAddC_32f fwsAddC_32f
#define ippsAddC_32f_I fwsAddC_32f_I
#define ippsAdd_32f fwsAdd_32f
#define ippsAdd_32f_I fwsAdd_32f_I
#define ippsMulC_32f fwsMulC_32f
#define ippsMulC_32f_I fwsMulC_32f_I
#define ippsMul_32f fwsMul_32f
#define ippsMul_32f_I fwsMul_32f_I
#define ippsDiv_32f fwsDiv_32f
#define ippsDivC_32f fwsDivC_32f
#define ippsInv_32f_A24 fwsInv_32f_A24
#define ippsThreshold_LT_32f_I fwsThreshold_LT_32f_I
#define ippsExp_32f_I fwsExp_32f_I
#define ippsArctan_32f fwsArctan_32f
#define ippsSqr_32f fwsSqr_32f
#define ippsSqr_32f_I fwsSqr_32f_I
#define ippsSqrt_32f_I fwsSqrt_32f_I
#define ippsSin_32f_A24 fwsSin_32f_A24
#define ippsCos_32f_A24 fwsCos_32f_A24
#define ippsPolarToCart_32f fwsPolarToCart_32f
#define ippsCartToPolar_32f fwsCartToPolar_32f
#endif
/* for fixing error: identifier "IUnknown" is undefined */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#endif
#if defined(_WIN32) || defined(_WIN64)
#include <windows.h>
#endif
#define PI 3.14159265359 /* pi constant */
/* includes CUDA kernel */
#include "BlochKernelME.cuh"
// extern "C" bool mxUnshareArray(mxArray *array_ptr, bool noDeepCopy);
/* MEX entry function */
void mexFunction(int nlhs, mxArray *plhs[],int nrhs, const mxArray *prhs[])
{
/* pointers for VObj */
double *Gyro;
int SpinMxNum, SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, SpinMxDimNum;
const mwSize *SpinMxDims;
float *Mz, *My, *Mx, *Rho, *T1, *T2, *K;
/* pointers for VMag */
float *dB0, *dWRnd, *Gzgrid, *Gygrid, *Gxgrid;
/* pointers for VCoi */
float *RxCoilx, *RxCoily, *TxCoilmg, *TxCoilpe;
double *RxCoilDefault, *TxCoilDefault;
/* pointers for VCtl */
double *CS;
int *TRNum, *MaxThreadNum, ThreadNum, *RunMode;
int *ActiveThreadNum;
int *GPUIndex;
/* pointers for VSeq */
double *utsLine, *tsLine, *rfAmpLine, *rfPhaseLine, *rfFreqLine, *rfCoilLine, *GzAmpLine, *GyAmpLine, *GxAmpLine, *ADCLine, *ExtLine, *flagsLine;
/* pointers for VVar */
double *t, *dt, *rfAmp, *rfPhase, *rfFreq, *rfCoil, *rfRef, *GzAmp, *GyAmp, *GxAmp, *ADC, *Ext, *KzTmp, *KyTmp, *KxTmp, *gpuFetch;
int *utsi, *rfi, *Gzi, *Gyi, *Gxi, *ADCi, *Exti, *TRCount;
/* pointers for VSig */
double *Sx, *Sy, *Kz, *Ky, *Kx, *Muts;
float *Mxs, *Mys, *Mzs;
double *p_Sx, *p_Sy;
/* loop control */
int i=0, j=0, s=0, Signali=0, Signalptr=0, PreSignalLen=0, SignalLen=0, SBufferLen=0, Typei, RxCoili, TxCoili;
int MaxStep, MaxutsStep, MaxrfStep, MaxGzStep, MaxGyStep, MaxGxStep, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, *SignalNum;
double flag[6];
/* IPP or FW buffer */
Ipp32f buffer, *Sxbuffer, *Sybuffer;
/* function status */
int ExtCall;
/* GPU execution sequence */
std::vector<float> g_Sig;
/* force breaking Copy-on-Write in Matlab */
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VObj")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VMag")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VCoi")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VCtl")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VSeq")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VVar")), true);
mxUnshareArray(const_cast<mxArray *>(mexGetVariablePtr("global", "VSig")), true);
/* assign pointers */
Gyro = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Gyro"));
Mz = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
My = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "My"));
Mx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mx"));
Rho = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Rho"));
T1 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T1"));
T2 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T2"));
K = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "K"));
SpinNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "SpinNum"));
TypeNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "TypeNum"));
dB0 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dB0"));
dWRnd = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dWRnd"));
Gzgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gzgrid"));
Gygrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gygrid"));
Gxgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gxgrid"));
TxCoilmg = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilmg"));
TxCoilpe = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilpe"));
RxCoilx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilx"));
RxCoily = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoily"));
TxCoilNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilNum"));
RxCoilNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilNum"));
TxCoilDefault = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilDefault"));
RxCoilDefault = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilDefault"));
CS = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "CS"));
TRNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "TRNum"));
RunMode = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "RunMode"));
MaxThreadNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "MaxThreadNum"));
ActiveThreadNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "ActiveThreadNum"));
GPUIndex = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCtl"), 0, "GPUIndex"));
utsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "utsLine"));
tsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "tsLine"));
rfAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfAmpLine"));
rfPhaseLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfPhaseLine"));
rfFreqLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfFreqLine"));
rfCoilLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfCoilLine"));
GzAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GzAmpLine"));
GyAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GyAmpLine"));
GxAmpLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GxAmpLine"));
ADCLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "ADCLine"));
ExtLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "ExtLine"));
flagsLine = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "flagsLine"));
MaxStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "tsLine"));
MaxutsStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "utsLine"));
MaxrfStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "rfAmpLine"));
MaxGzStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GzAmpLine"));
MaxGyStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GyAmpLine"));
MaxGxStep = mxGetNumberOfElements(mxGetField(mexGetVariablePtr("global", "VSeq"), 0, "GxAmpLine"));
t = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "t"));
dt = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "dt"));
rfAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfAmp"));
rfPhase = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfPhase"));
rfFreq = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfFreq"));
rfCoil = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfCoil"));
rfRef = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfRef"));
GzAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GzAmp"));
GyAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GyAmp"));
GxAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GxAmp"));
ADC = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADC"));
Ext = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ext"));
KzTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kz"));
KyTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ky"));
KxTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kx"));
gpuFetch = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "gpuFetch"));
utsi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "utsi"));
rfi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfi"));
Gzi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gzi"));
Gyi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gyi"));
Gxi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gxi"));
ADCi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADCi"));
Exti = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Exti"));
TRCount = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "TRCount"));
Sy = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Sy"));
Sx = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Sx"));
Kz = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Kz"));
Ky = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Ky"));
Kx = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Kx"));
Mzs = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Mz"));
Mys = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "My"));
Mxs = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Mx"));
Muts = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "Muts"));
SignalNum = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VSig"), 0, "SignalNum"));
/* get size of spin matrix */
SpinMxDimNum = mxGetNumberOfDimensions(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
SpinMxDims = (mwSize*) mxCalloc(SpinMxDimNum, sizeof(mwSize));
SpinMxDims = mxGetDimensions(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
SpinMxRowNum = SpinMxDims[0];
SpinMxColNum = SpinMxDims[1];
SpinMxNum = SpinMxDims[0] * SpinMxDims[1];
if (SpinMxDimNum == 2){
SpinMxSliceNum = 1;
}else{
SpinMxSliceNum = SpinMxDims[2];
}
/* select the requested GPU device */
if( cudaSuccess != cudaSetDevice(*GPUIndex)){
mexPrintf( "\n%s", cudaGetErrorString(cudaGetLastError()));
return;
}
/* set GPU grid & block configuration*/
cudaDeviceProp deviceProp;
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess != cudaGetDeviceProperties(&deviceProp, *GPUIndex)){
mexPrintf( "\n%s", cudaGetErrorString(cudaGetLastError()));
return;
}
dim3 dimGridImg(SpinMxColNum,1,1);
dim3 dimBlockImg(1,SpinMxRowNum,1);
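/* search for a grid/block factorization of the spin matrix: shrink the number of blocks
(keeping at least one block per multiprocessor) while growing threads per block, and stop
before maxThreadsPerBlock or the per-block register budget (~63 registers/thread) is exceeded */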
for (i=SpinMxColNum - 1; i >= deviceProp.multiProcessorCount; i--){
if ( SpinMxNum % i == 0 ){
if (SpinMxNum/i > deviceProp.maxThreadsPerBlock) break;
if ((SpinMxNum/i)*63 > deviceProp.regsPerBlock) break; // 63 registers per thread for current kernel
dimGridImg.x = i;
dimBlockImg.y = SpinMxNum/i;
}
}
i=0;
/* increase heap size for malloc in kernel */
/*
size_t heap_size;
cudaDeviceGetLimit(&heap_size, cudaLimitMallocHeapSize);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * 1024 * 1024);
*/
/* allocate device memory for matrices */
float *d_Mz = NULL;
cudaMalloc( (void**) &d_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_Mz, Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_My = NULL;
cudaMalloc( (void**) &d_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_My, My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_Mx = NULL;
cudaMalloc( (void**) &d_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_Mx, Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_dWRnd = NULL;
cudaMalloc( (void**) &d_dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_dWRnd, dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_Rho = NULL;
cudaMalloc( (void**) &d_Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_Rho, Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_T1 = NULL;
cudaMalloc( (void**) &d_T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_T1, T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_T2 = NULL;
cudaMalloc( (void**) &d_T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_T2, T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_K = NULL;
cudaMalloc( (void**) &d_K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float)) ;
cudaMemcpy( d_K, K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_Gzgrid = NULL;
cudaMalloc( (void**) &d_Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
cudaMemcpy( d_Gzgrid, Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_Gygrid = NULL;
cudaMalloc( (void**) &d_Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
cudaMemcpy( d_Gygrid, Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_Gxgrid = NULL;
cudaMalloc( (void**) &d_Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
cudaMemcpy( d_Gxgrid, Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_dB0 = NULL;
cudaMalloc( (void**) &d_dB0, SpinMxNum * SpinMxSliceNum * sizeof(float)) ;
cudaMemcpy( d_dB0, dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_TxCoilmg = NULL;
cudaMalloc( (void**) &d_TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float)) ;
cudaMemcpy( d_TxCoilmg, TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_TxCoilpe = NULL;
cudaMalloc( (void**) &d_TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float)) ;
cudaMemcpy( d_TxCoilpe, TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_RxCoilx = NULL;
cudaMalloc( (void**) &d_RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float)) ;
cudaMemcpy( d_RxCoilx, RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
float *d_RxCoily = NULL;
cudaMalloc( (void**) &d_RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float)) ;
cudaMemcpy( d_RxCoily, RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyHostToDevice ) ;
double *d_CS = NULL;
cudaMalloc( (void**) &d_CS, *TypeNum * sizeof(double)) ;
cudaMemcpy( d_CS, CS, *TypeNum * sizeof(double), cudaMemcpyHostToDevice ) ;
/* allocate device memory for GPU execution sequence*/
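/* each uniform time step is packed as 5 + 3*TxCoilNum floats:
dt, {rfAmp, rfPhase, rfFreq} per Tx coil, GzAmp, GyAmp, GxAmp, ADC flag */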
float *d_Sig = NULL;
cudaMalloc( (void**) &d_Sig, (5+3*(*TxCoilNum)) * MaxutsStep * sizeof(float)) ;
/* allocate device memory according to RunMode */
float *d_b_Mz = NULL;
float *d_b_My = NULL;
float *d_b_Mx = NULL;
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
cudaMalloc( (void**) &d_b_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
cudaMalloc( (void**) &d_b_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
cudaMalloc( (void**) &d_b_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float));
/* zero buffer */
cudaMemset(d_b_Mz, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
cudaMemset(d_b_My, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
cudaMemset(d_b_Mx, 0 ,SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float)); /* only work for 0 */
break;
default:
cudaMalloc( (void**) &d_b_Mz, 1 * sizeof(float)) ;
cudaMalloc( (void**) &d_b_My, 1 * sizeof(float)) ;
cudaMalloc( (void**) &d_b_Mx, 1 * sizeof(float)) ;
break;
}
/* allocate device memory for buffers
*tempMx
*tempMy
*tempMz
*ExpdtT2
*ExpdtT1
*M0dtT1
*/
float *d_Buffer = NULL;
cudaMalloc( (void**) &d_Buffer, SpinMxNum * (*TypeNum) * 6 * sizeof(float)) ;
/* set CPU signal buffer */
Sxbuffer = ippsMalloc_32f(SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum));
/* allocate device memory for buffering acquired signal */
float *d_Sx = NULL;
cudaMalloc( (void**) &d_Sx, SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
float *d_Sy = NULL;
cudaMalloc( (void**) &d_Sy, SpinMxNum * PreSignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
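/* PreSignalLen is still 0 here, so d_Sx/d_Sy (and the host Sxbuffer/Sybuffer above) start as
empty placeholders; they are re-allocated once the first readout length is known */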
/* start simulator execution loop */
mexPrintf("------ Current active GPU device : %s ------\n", &deviceProp.name[0]);
mexPrintf("TR Counts: %d of %d\n", 1, *TRNum);
while (i < MaxStep){
/* check MR sequence pulse flag */
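/* sequence lines sharing the same time stamp have their flags summed, so one uniform
time step handles all simultaneous events */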
flag[0]=0;
flag[1]=0;
flag[2]=0;
flag[3]=0;
flag[4]=0;
flag[5]=0;
if (tsLine[i]!=tsLine[i+1]){
flag[0]+=flagsLine[i*6];
flag[1]+=flagsLine[i*6+1];
flag[2]+=flagsLine[i*6+2];
flag[3]+=flagsLine[i*6+3];
flag[4]+=flagsLine[i*6+4];
flag[5]+=flagsLine[i*6+5];
i++;
}
else{
flag[0]+=flagsLine[i*6];
flag[1]+=flagsLine[i*6+1];
flag[2]+=flagsLine[i*6+2];
flag[3]+=flagsLine[i*6+3];
flag[4]+=flagsLine[i*6+4];
flag[5]+=flagsLine[i*6+5];
while (tsLine[i]==tsLine[i+1]){
flag[0]+=flagsLine[(i+1)*6];
flag[1]+=flagsLine[(i+1)*6+1];
flag[2]+=flagsLine[(i+1)*6+2];
flag[3]+=flagsLine[(i+1)*6+3];
flag[4]+=flagsLine[(i+1)*6+4];
flag[5]+=flagsLine[(i+1)*6+5];
i++;
if (i==MaxStep-1){
break;
}
}
i++;
}
/* update pulse status */
*t = *(utsLine + *utsi);
*dt = *(utsLine + (int)min(*utsi+1, MaxutsStep-1))-*(utsLine + *utsi);
*utsi = (int)*utsi+1; /* how to address the end point of the sequence? */
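/* a non-positive dt later triggers execution of the queued GPU sequence and signal readout;
a negative dt additionally advances the TR counter */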
g_Sig.push_back((float)*dt);
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
if (*dt > 0)
*(Muts+*utsi) = *(Muts+*utsi-1) + *dt;
else if (*dt < 0)
*(Muts+*utsi) = *(Muts+*utsi-1);
break;
}
if (flag[0]>=1 ){ /* update rfAmp, rfPhase, rfFreq, rfCoil for multiple rf lines */
for (j = 0; j < flag[0]; j++){
*rfCoil = *(rfCoilLine+ *rfi);
TxCoili = (int)(*rfCoil);
s = *rfi + 1;
while (s < MaxrfStep){
if (*rfCoil == *(rfCoilLine + s)){
if (fabs(*(rfAmpLine+ *rfi)) <= fabs(*(rfAmpLine + s)))
*(rfAmp + TxCoili - 1)= *(rfAmpLine+ *rfi);
else
*(rfAmp + TxCoili - 1)= *(rfAmpLine+ s);
if (fabs(*(rfPhaseLine+ *rfi)) <= fabs(*(rfPhaseLine + s)))
*(rfPhase + TxCoili - 1)= *(rfPhaseLine+ *rfi);
else
*(rfPhase + TxCoili - 1)= *(rfPhaseLine+ s);
if (fabs(*(rfFreqLine+ *rfi)) <= fabs(*(rfFreqLine + s)))
*(rfFreq + TxCoili - 1)= *(rfFreqLine+ *rfi);
else
*(rfFreq + TxCoili - 1)= *(rfFreqLine+ s);
break;
}
s++;
}
(*rfi)++;
}
for (j = 0; j < *TxCoilNum; j++){ /* multi-Tx, deal with rfPhase */
if (rfAmp[j]<0){
rfAmp[j]=fabs(rfAmp[j]);
rfPhase[j]=rfPhase[j]+PI;
}
}
}
for (j = 0; j < *TxCoilNum; j++){
g_Sig.push_back((float)rfAmp[j]);
g_Sig.push_back((float)rfPhase[j]);
g_Sig.push_back((float)rfFreq[j]);
}
if (flag[1]==1 ){ /* update GzAmp */
if (fabs(*(GzAmpLine+ *Gzi)) <= fabs(*(GzAmpLine + (int)min(*Gzi+1, MaxGzStep-1))))
*GzAmp = *(GzAmpLine+ *Gzi);
else
*GzAmp = *(GzAmpLine+ *Gzi+1);
(*Gzi)++;
}
g_Sig.push_back((float)*GzAmp);
if (flag[2]==1 ){ /* update GyAmp */
if (fabs(*(GyAmpLine+ *Gyi)) <= fabs(*(GyAmpLine + (int)min(*Gyi+1, MaxGyStep-1))))
*GyAmp = *(GyAmpLine+ *Gyi);
else
*GyAmp = *(GyAmpLine+ *Gyi+1);
(*Gyi)++;
}
g_Sig.push_back((float)*GyAmp);
if (flag[3]==1 ){ /* update GxAmp */
if (fabs(*(GxAmpLine+ *Gxi)) <= fabs(*(GxAmpLine + (int)min(*Gxi+1, MaxGxStep-1))))
*GxAmp = *(GxAmpLine+ *Gxi);
else
*GxAmp = *(GxAmpLine+ *Gxi+1);
(*Gxi)++;
}
g_Sig.push_back((float)*GxAmp);
*ADC = 0; /* avoid ADC overflow */
if (flag[4]==1){ /* update ADC */
*ADC = *(ADCLine+ *ADCi);
(*ADCi)++;
}
g_Sig.push_back((float)*ADC);
if (*ADC == 1){
/* update k-space */
Kz[Signali] += *KzTmp;
Ky[Signali] += *KyTmp;
Kx[Signali] += *KxTmp;
Signali++;
}
/* update Kz, Ky & Kx buffer */
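/* KxTmp/KyTmp/KzTmp integrate the running k-space position Gyro/(2*PI)*sum(G*dt);
the current value is added into Kx/Ky/Kz at every ADC sample */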
*KzTmp +=(*GzAmp)*(*dt)*(*Gyro/(2*PI));
*KyTmp +=(*GyAmp)*(*dt)*(*Gyro/(2*PI));
*KxTmp +=(*GxAmp)*(*dt)*(*Gyro/(2*PI));
if (flag[5]==1){ /* update Ext */
*Ext = *(ExtLine+ *Exti);
/* execute extended process */
if (*Ext != 0){
if (g_Sig.size() !=0){
/* calculate signal length */
SignalLen = Signali-Signalptr;
/* reset buffer */
if (PreSignalLen!=SignalLen && SignalLen>0){
PreSignalLen = SignalLen;
/* allocate device memory for acquired signal buffer */
cudaFree(d_Sx);
cudaFree(d_Sy);
cudaMalloc( (void**) &d_Sx, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
cudaMalloc( (void**) &d_Sy, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
/* zero signal buffer */
cudaMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
cudaMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
/* set buffer */
ippsFree(Sxbuffer);
ippsFree(Sybuffer);
Sxbuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
}
/* avoid shared memory overflow */
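/* SBufferLen is passed as dynamic shared memory at launch so the kernel can stage the
uploaded sequence per block; SBufferLen = 0 presumably makes the kernel fall back to
reading d_Sig from global memory */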
if (g_Sig.size() * sizeof(float) > deviceProp.sharedMemPerBlock){
SBufferLen = 0;
}else{
SBufferLen = g_Sig.size() * sizeof(float);
}
/* upload GPU sequence */
cudaMemcpy( d_Sig, &g_Sig[0], g_Sig.size() * sizeof(float), cudaMemcpyHostToDevice ) ;
/* call GPU kernel for spin discrete precessing */
BlochKernelMEGPU<<< dimGridImg, dimBlockImg, SBufferLen >>>
((float)*Gyro, d_CS, d_Rho, d_T1, d_T2, d_K, d_Mz, d_My, d_Mx, d_Buffer,
d_dB0, d_dWRnd, d_Gzgrid, d_Gygrid, d_Gxgrid, d_TxCoilmg, d_TxCoilpe, d_RxCoilx, d_RxCoily,
d_Sig, (float)*RxCoilDefault, (float)*TxCoilDefault,
d_Sx, d_Sy, (float)*rfRef, SignalLen, SBufferLen,
*RunMode, *utsi, d_b_Mz, d_b_My, d_b_Mx,
SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, g_Sig.size()/(5+3*(*TxCoilNum)));
cudaThreadSynchronize();
g_Sig.clear();
Signalptr = Signali; /* shift signal array pointer */
}
/* signal acquisition */
if (SignalLen>0){
/* get Sx, Sy buffer from GPU */
cudaMemcpy( Sybuffer, d_Sy, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( Sxbuffer, d_Sx, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost ) ;
/* sum MR signal via openMP */
for (Typei = 0; Typei < *TypeNum; Typei++){
for (RxCoili = 0; RxCoili < *RxCoilNum; RxCoili++){ /* signal acquisition per Rx coil */
#pragma omp parallel
{
#pragma omp for private(j, s, p_Sx, p_Sy, buffer)
for (j=0; j < SignalLen; j++){
if (j==0){
*ActiveThreadNum = omp_get_num_threads();
}
s=Signali-SignalLen+j;
p_Sx = Sx + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
p_Sy = Sy + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
ippsSum_32f(&Sxbuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sx = (double)buffer;
ippsSum_32f(&Sybuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sy = (double)buffer;
}
}
}
}
/* zero signal buffer */
cudaMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
cudaMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
}
/* fetch GPU data? */
ExtCall = mexEvalString("DoGPUFetch");
if (ExtCall){
mexErrMsgTxt("Extended process encounters ERROR!");
return;
}
if (*gpuFetch !=0){
/* fetch data from GPU */
cudaMemcpy( Mz, d_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( My, d_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Mx, d_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( dWRnd, d_dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Rho, d_Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( T1, d_T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( T2, d_T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( K, d_K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Gzgrid, d_Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Gygrid, d_Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Gxgrid, d_Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( dB0, d_dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( TxCoilmg, d_TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( TxCoilpe, d_TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( RxCoilx, d_RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( RxCoily, d_RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyDeviceToHost );
}
/* execute extended process */
ExtCall = mexEvalString("DoExtPlugin");
if (ExtCall){
mexErrMsgTxt("Extended process encounters ERROR!");
return;
}
/* update pointers, avoid pointer change between Matlab and Mex call */
t = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "t"));
dt = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "dt"));
rfAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfAmp"));
rfPhase = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfPhase"));
rfFreq = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfFreq"));
rfCoil = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfCoil"));
rfRef = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfRef"));
GzAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GzAmp"));
GyAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GyAmp"));
GxAmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "GxAmp"));
ADC = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADC"));
Ext = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ext"));
KzTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kz"));
KyTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Ky"));
KxTmp = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Kx"));
gpuFetch = (double*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "gpuFetch"));
utsi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "utsi"));
rfi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "rfi"));
Gzi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gzi"));
Gyi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gyi"));
Gxi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Gxi"));
ADCi = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "ADCi"));
Exti = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "Exti"));
TRCount = (int*) mxGetData(mxGetField(mexGetVariablePtr("global", "VVar"), 0, "TRCount"));
if (*gpuFetch !=0){
*gpuFetch =0;
/* update pointers, avoid pointer change between Matlab and Mex call */
Mz = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mz"));
My = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "My"));
Mx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Mx"));
Rho = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "Rho"));
T1 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T1"));
T2 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "T2"));
K = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VObj"), 0, "K"));
dWRnd = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dWRnd"));
dB0 = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "dB0"));
Gzgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gzgrid"));
Gygrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gygrid"));
Gxgrid = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VMag"), 0, "Gxgrid"));
TxCoilmg = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilmg"));
TxCoilpe = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "TxCoilpe"));
RxCoilx = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoilx"));
RxCoily = (float*) mxGetData(mxGetField(mexGetVariablePtr("global", "VCoi"), 0, "RxCoily"));
/* send data back to GPU */
cudaMemcpy( d_Mz, Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_My, My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_Mx, Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_dWRnd, dWRnd, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_Rho, Rho, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_T1, T1, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_T2, T2, SpinMxNum * SpinMxSliceNum * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_K, K, SpinMxNum * SpinMxSliceNum * (*TypeNum) * (*TypeNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_Gzgrid, Gzgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_Gygrid, Gygrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_Gxgrid, Gxgrid, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_dB0, dB0, SpinMxNum * SpinMxSliceNum * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_TxCoilmg, TxCoilmg, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_TxCoilpe, TxCoilpe, SpinMxNum * SpinMxSliceNum * (*TxCoilNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_RxCoilx, RxCoilx, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_RxCoily, RxCoily, SpinMxNum * SpinMxSliceNum * (*RxCoilNum) * sizeof(float), cudaMemcpyHostToDevice );
}
}
(*Exti)++;
}
if (flag[0]+flag[1]+flag[2]+flag[3]+flag[4]+flag[5] == 0){ /* reset VVar */
ippsZero_64f(rfAmp, *TxCoilNum);
ippsZero_64f(rfPhase, *TxCoilNum);
ippsZero_64f(rfFreq, *TxCoilNum);
*GzAmp = 0;
*GyAmp = 0;
*GxAmp = 0;
*ADC = 0;
*Ext = 0;
}
/* check TR point & end of time point */
if (*dt <= 0){
if (g_Sig.size() !=0){
/* calculate signal length */
SignalLen = Signali-Signalptr;
/* reset buffer if needed */
if (PreSignalLen!=SignalLen && SignalLen>0){
PreSignalLen = SignalLen;
/* allocate device memory for acquired signal buffer */
cudaFree(d_Sx);
cudaFree(d_Sy);
cudaMalloc( (void**) &d_Sx, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
cudaMalloc( (void**) &d_Sy, SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)) ;
/* zero signal buffer */
cudaMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
cudaMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
/* set buffer */
ippsFree(Sxbuffer);
ippsFree(Sybuffer);
Sxbuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
Sybuffer = ippsMalloc_32f(SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum));
}
/* avoid shared memory overflow */
if (g_Sig.size() * sizeof(float) > deviceProp.sharedMemPerBlock){
SBufferLen = 0;
}else{
SBufferLen = g_Sig.size() * sizeof(float);
}
/* upload GPU sequence */
cudaMemcpy( d_Sig, &g_Sig[0], g_Sig.size() * sizeof(float), cudaMemcpyHostToDevice ) ;
/* call GPU kernel for spin discrete precessing */
BlochKernelMEGPU<<< dimGridImg, dimBlockImg, SBufferLen >>>
((float)*Gyro, d_CS, d_Rho, d_T1, d_T2, d_K, d_Mz, d_My, d_Mx, d_Buffer,
d_dB0, d_dWRnd, d_Gzgrid, d_Gygrid, d_Gxgrid, d_TxCoilmg, d_TxCoilpe, d_RxCoilx, d_RxCoily,
d_Sig, (float)*RxCoilDefault, (float)*TxCoilDefault,
d_Sx, d_Sy, (float)*rfRef, SignalLen, SBufferLen,
*RunMode, *utsi, d_b_Mz, d_b_My, d_b_Mx,
SpinMxColNum, SpinMxRowNum, SpinMxSliceNum, *SpinNum, *TypeNum, *TxCoilNum, *RxCoilNum, g_Sig.size()/(5+3*(*TxCoilNum)));
cudaThreadSynchronize(); /* stabilize simulation */
g_Sig.clear();
Signalptr = Signali;
}
/* signal acquisition */
if (SignalLen>0){
/* get Sx, Sy buffer from GPU */
cudaMemcpy( Sybuffer, d_Sy, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( Sxbuffer, d_Sx, SpinMxNum * SignalLen * (*RxCoilNum) * (*TypeNum) * sizeof(float), cudaMemcpyDeviceToHost ) ;
/* sum MR signal via openMP */
for (Typei = 0; Typei < *TypeNum; Typei++){
for (RxCoili = 0; RxCoili < *RxCoilNum; RxCoili++){ /* signal acquisition per Rx coil */
#pragma omp parallel
{
#pragma omp for private(j, s, p_Sx, p_Sy, buffer)
for (j=0; j < SignalLen; j++){
if (j==0){
*ActiveThreadNum = omp_get_num_threads();
}
s=Signali-SignalLen+j;
p_Sx = Sx + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
p_Sy = Sy + (Typei*(*RxCoilNum)*(*SignalNum)+RxCoili*(*SignalNum)+s);
ippsSum_32f(&Sxbuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sx = (double)buffer;
ippsSum_32f(&Sybuffer[Typei * (SpinMxNum * SignalLen * (*RxCoilNum)) + RxCoili * (SpinMxNum * SignalLen) + j*SpinMxNum], SpinMxNum, &buffer, ippAlgHintFast);
*p_Sy = (double)buffer;
}
}
}
}
/* zero signal buffer */
cudaMemset(d_Sx, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
cudaMemset(d_Sy, 0 ,SpinMxNum * SignalLen * (*TypeNum) * (*RxCoilNum) * sizeof(float)); /* only work for 0 */
}
if (*dt < 0){
(*TRCount)++;
mexPrintf("TR Counts: %d of %d\n", *TRCount, *TRNum);
}
}
}
switch (*RunMode){
case 1: /* spin rotation simulation & rf simulation */
/* Get Mx, My & Mz from GPU */
cudaMemcpy( Mzs, d_b_Mz, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( Mys, d_b_My, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), cudaMemcpyDeviceToHost ) ;
cudaMemcpy( Mxs, d_b_Mx, SpinMxNum * SpinMxSliceNum * (*SpinNum) * (*TypeNum) * MaxutsStep * sizeof(float), cudaMemcpyDeviceToHost ) ;
break;
}
/* free GPU memory */
cudaFree(d_Mz);
cudaFree(d_My);
cudaFree(d_Mx);
cudaFree(d_Buffer);
cudaFree(d_dWRnd);
cudaFree(d_Rho);
cudaFree(d_T1);
cudaFree(d_T2);
cudaFree(d_K);
cudaFree(d_Gzgrid);
cudaFree(d_Gygrid);
cudaFree(d_Gxgrid);
cudaFree(d_dB0);
cudaFree(d_TxCoilmg);
cudaFree(d_TxCoilpe);
cudaFree(d_RxCoilx);
cudaFree(d_RxCoily);
cudaFree(d_CS);
cudaFree(d_Sig);
cudaFree(d_Sx);
cudaFree(d_Sy);
cudaFree(d_b_Mz);
cudaFree(d_b_My);
cudaFree(d_b_Mx);
/* reset device, may slow down subsequent startup due to initialization */
// cudaDeviceReset();
}
|
8ea0e13f619fdcc5ddb8a7c955b2eac9981a9355.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*#include "opencv2/core/gpumat.hpp"*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/gpu.hpp"
using namespace cv::gpu;
namespace vibe_gpu
{
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor);
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, hipStream_t stream);
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, hipStream_t stream);
}
namespace vibe_gpu
{
__constant__ int c_nbSamples;
__constant__ int c_reqMatches;
__constant__ int c_radius;
__constant__ int c_subsamplingFactor;
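// ViBe model parameters: nbSamples = background samples kept per pixel,
// reqMatches = matches required to classify a pixel as background,
// radius = colour-distance threshold, subsamplingFactor = 1-in-N chance of a model update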
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor)
{
cudaSafeCall( hipMemcpyToSymbol(c_nbSamples, &nbSamples, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_reqMatches, &reqMatches, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_radius, &radius, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(c_subsamplingFactor, &subsamplingFactor, sizeof(int)) );
}
__device__ __forceinline__ uint nextRand(uint& state)
{
const unsigned int CV_RNG_COEFF = 4164903690U;
state = state * CV_RNG_COEFF + (state >> 16);
return state;
}
__constant__ int c_xoff[9] = {-1, 0, 1, -1, 1, -1, 0, 1, 0};
__constant__ int c_yoff[9] = {-1, -1, -1, 0, 0, 1, 1, 1, 0};
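// offsets of the 8-connected neighbourhood plus the centre pixel (9th entry),
// so chooseRandomNeighbor(count = 9) may also pick the pixel itself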
__device__ __forceinline__ int2 chooseRandomNeighbor(int x, int y, uint& randState, int count = 8)
{
int idx = nextRand(randState) % count;
return make_int2(x + c_xoff[idx], y + c_yoff[idx]);
}
__device__ __forceinline__ uchar cvt(uchar val)
{
return val;
}
__device__ __forceinline__ uchar4 cvt(const uchar3& val)
{
return make_uchar4(val.x, val.y, val.z, 0);
}
__device__ __forceinline__ uchar4 cvt(const uchar4& val)
{
return val;
}
template <typename SrcT, typename SampleT>
__global__ void init(const PtrStepSz<SrcT> frame, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
for (int k = 0; k < c_nbSamples; ++k)
{
int2 np = chooseRandomNeighbor(x, y, localState, 9);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
SrcT pix = frame(np.y, np.x);
samples(k * frame.rows + y, x) = cvt(pix);
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void init_caller(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(init<SrcT, SampleT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( init<SrcT, SampleT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream);
static const func_t funcs[] =
{
0, init_caller<uchar, uchar>, 0, init_caller<uchar3, uchar4>, init_caller<uchar4, uchar4>
};
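// the table is indexed by channel count: 1-, 3- and 4-channel frames are supported
// (3-channel pixels are stored as uchar4 samples); indices 0 and 2 are unused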
funcs[cn](frame, samples, randStates, stream);
}
__device__ __forceinline__ int calcDist(uchar a, uchar b)
{
return ::abs(a - b);
}
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
__device__ __forceinline__ int calcDist(const uchar4& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
template <typename SrcT, typename SampleT>
__global__ void update(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
SrcT imgPix = frame(y, x);
// comparison with the model
int count = 0;
for (int k = 0; (count < c_reqMatches) && (k < c_nbSamples); ++k)
{
SampleT samplePix = samples(k * frame.rows + y, x);
int distance = calcDist(imgPix, samplePix);
if (distance < c_radius)
++count;
}
// pixel classification according to reqMatches
fgmask(y, x) = (uchar) (-(count < c_reqMatches));
if (count >= c_reqMatches)
{
// the pixel belongs to the background
// gets a random number between 0 and subsamplingFactor-1
int randomNumber = nextRand(localState) % c_subsamplingFactor;
// update of the current pixel model
if (randomNumber == 0)
{
// random subsampling
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + y, x) = cvt(imgPix);
}
// update of a neighboring pixel model
randomNumber = nextRand(localState) % c_subsamplingFactor;
if (randomNumber == 0)
{
// random subsampling
// chooses a neighboring pixel randomly
int2 np = chooseRandomNeighbor(x, y, localState);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
// chooses the value to be replaced randomly
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + np.y, np.x) = cvt(imgPix);
}
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void update_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( hipFuncSetCacheConfig(update<SrcT, SampleT>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( update<SrcT, SampleT>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<SrcT>) frame, fgmask, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, hipStream_t stream);
static const func_t funcs[] =
{
0, update_caller<uchar, uchar>, 0, update_caller<uchar3, uchar4>, update_caller<uchar4, uchar4>
};
funcs[cn](frame, fgmask, samples, randStates, stream);
}
}
| 8ea0e13f619fdcc5ddb8a7c955b2eac9981a9355.cu |
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*#include "opencv2/core/gpumat.hpp"*/
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/gpu.hpp"
using namespace cv::gpu;
namespace vibe_gpu
{
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor);
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<unsigned int> randStates, cudaStream_t stream);
}
namespace vibe_gpu
{
__constant__ int c_nbSamples;
__constant__ int c_reqMatches;
__constant__ int c_radius;
__constant__ int c_subsamplingFactor;
void loadConstants(int nbSamples, int reqMatches, int radius, int subsamplingFactor)
{
cudaSafeCall( cudaMemcpyToSymbol(c_nbSamples, &nbSamples, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_reqMatches, &reqMatches, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_radius, &radius, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(c_subsamplingFactor, &subsamplingFactor, sizeof(int)) );
}
__device__ __forceinline__ uint nextRand(uint& state)
{
const unsigned int CV_RNG_COEFF = 4164903690U;
state = state * CV_RNG_COEFF + (state >> 16);
return state;
}
__constant__ int c_xoff[9] = {-1, 0, 1, -1, 1, -1, 0, 1, 0};
__constant__ int c_yoff[9] = {-1, -1, -1, 0, 0, 1, 1, 1, 0};
__device__ __forceinline__ int2 chooseRandomNeighbor(int x, int y, uint& randState, int count = 8)
{
int idx = nextRand(randState) % count;
return make_int2(x + c_xoff[idx], y + c_yoff[idx]);
}
__device__ __forceinline__ uchar cvt(uchar val)
{
return val;
}
__device__ __forceinline__ uchar4 cvt(const uchar3& val)
{
return make_uchar4(val.x, val.y, val.z, 0);
}
__device__ __forceinline__ uchar4 cvt(const uchar4& val)
{
return val;
}
template <typename SrcT, typename SampleT>
__global__ void init(const PtrStepSz<SrcT> frame, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
for (int k = 0; k < c_nbSamples; ++k)
{
int2 np = chooseRandomNeighbor(x, y, localState, 9);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
SrcT pix = frame(np.y, np.x);
samples(k * frame.rows + y, x) = cvt(pix);
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void init_caller(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(init<SrcT, SampleT>, cudaFuncCachePreferL1) );
init<SrcT, SampleT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void init_gpu(PtrStepSzb frame, int cn, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream);
static const func_t funcs[] =
{
0, init_caller<uchar, uchar>, 0, init_caller<uchar3, uchar4>, init_caller<uchar4, uchar4>
};
funcs[cn](frame, samples, randStates, stream);
}
__device__ __forceinline__ int calcDist(uchar a, uchar b)
{
return ::abs(a - b);
}
__device__ __forceinline__ int calcDist(const uchar3& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
__device__ __forceinline__ int calcDist(const uchar4& a, const uchar4& b)
{
return (::abs(a.x - b.x) + ::abs(a.y - b.y) + ::abs(a.z - b.z)) / 3;
}
template <typename SrcT, typename SampleT>
__global__ void update(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStep<SampleT> samples, PtrStep<uint> randStates)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= frame.cols || y >= frame.rows)
return;
uint localState = randStates(y, x);
SrcT imgPix = frame(y, x);
// comparison with the model
int count = 0;
for (int k = 0; (count < c_reqMatches) && (k < c_nbSamples); ++k)
{
SampleT samplePix = samples(k * frame.rows + y, x);
int distance = calcDist(imgPix, samplePix);
if (distance < c_radius)
++count;
}
// pixel classification according to reqMatches
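// -(count < c_reqMatches) evaluates to 0 (background) or -1 (foreground),
// which the uchar cast turns into 0 or 255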
fgmask(y, x) = (uchar) (-(count < c_reqMatches));
if (count >= c_reqMatches)
{
// the pixel belongs to the background
// gets a random number between 0 and subsamplingFactor-1
int randomNumber = nextRand(localState) % c_subsamplingFactor;
// update of the current pixel model
if (randomNumber == 0)
{
// random subsampling
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + y, x) = cvt(imgPix);
}
// update of a neighboring pixel model
randomNumber = nextRand(localState) % c_subsamplingFactor;
if (randomNumber == 0)
{
// random subsampling
// chooses a neighboring pixel randomly
int2 np = chooseRandomNeighbor(x, y, localState);
np.x = ::max(0, ::min(np.x, frame.cols - 1));
np.y = ::max(0, ::min(np.y, frame.rows - 1));
// chooses the value to be replaced randomly
int k = nextRand(localState) % c_nbSamples;
samples(k * frame.rows + np.y, np.x) = cvt(imgPix);
}
}
randStates(y, x) = localState;
}
template <typename SrcT, typename SampleT>
void update_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(update<SrcT, SampleT>, cudaFuncCachePreferL1) );
update<SrcT, SampleT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>) frame, fgmask, (PtrStepSz<SampleT>) samples, randStates);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void update_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb samples, PtrStepSz<uint> randStates, cudaStream_t stream);
static const func_t funcs[] =
{
0, update_caller<uchar, uchar>, 0, update_caller<uchar3, uchar4>, update_caller<uchar4, uchar4>
};
funcs[cn](frame, fgmask, samples, randStates, stream);
}
}
|
f074d63594786114dae62f4608367cc537965d2a.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "BinaryExecution.hpp"
#include "Raster.cuh"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ATAN2(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = atan2(x, y);
}
return;
}
template <typename T>
__global__ void MOD(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = x - x / y;
}
return;
}
template <typename T>
__global__ void LOGICALOR(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = (x || y) ? 1 : 0;
}
return;
}
BinaryExecution::BinaryExecution(int opType, Backend *backend) : Execution(backend) {
mType = opType;
}
BinaryExecution::~BinaryExecution(){
// Do nothing
}
ErrorCode BinaryExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto count = CUDABackend::realSize(outputs[0]);
auto inputS0 = CUDABackend::realSize(inputs[0]);
auto inputS1 = CUDABackend::realSize(inputs[1]);
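    // a realSize of 1 means the input is a scalar; a stride of 0 broadcasts it across every output element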
int s0 = inputS0 == 1 ? 0 : 1;
int s1 = inputS1 == 1 ? 0 : 1;
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
//printf("%d - %d\n", block_num, threads_num);
int size[3] = {1, 1, count};
int stride0[3] = {0, 0, s0};
int stride1[3] = {0, 0, s1};
int stride2[3] = {0, 0, 1};
auto type = outputs[0]->getType();
if (type.code == halide_type_float) {
// Use Half or float
type.bits = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]) * 8;
}
auto computeFunction = [&](Tensor* input0T, Tensor* input1T, Tensor* outputT) {
auto input0 = (uint8_t*)input0T->deviceId();
auto input1 = (uint8_t*)input1T->deviceId();
auto output = (uint8_t*)outputT->deviceId();
BinaryBlit(output, input0, input1, size, stride0, stride1, stride2, type, runtime, mType);
};
computeFunction(inputs[0], inputs[1], outputs[0]);
for (int i=2; i<inputs.size(); ++i) {
computeFunction(outputs[0], inputs[i], outputs[0]);
}
return NO_ERROR;
}
class BinaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_BinaryOp) {
return new BinaryExecution(op->main_as_BinaryOp()->opType(), backend);
}
if (op->type() == OpType_Eltwise) {
switch (op->main_as_Eltwise()->type()) {
case EltwiseType_SUM:
return new BinaryExecution(BinaryOpOperation_ADD, backend);
case EltwiseType_PROD:
return new BinaryExecution(BinaryOpOperation_MUL, backend);
case EltwiseType_MAXIMUM:
return new BinaryExecution(BinaryOpOperation_MAXIMUM, backend);
default:
break;
}
}
return nullptr;
}
};
static CUDACreatorRegister<BinaryCreator> __init(OpType_BinaryOp);
static CUDACreatorRegister<BinaryCreator> __init2(OpType_Eltwise);
}
}
| f074d63594786114dae62f4608367cc537965d2a.cu | #include "BinaryExecution.hpp"
#include "Raster.cuh"
namespace MNN {
namespace CUDA {
template <typename T>
__global__ void ATAN2(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = atan2(x, y);
}
return;
}
template <typename T>
__global__ void MOD(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = x - x / y;
}
return;
}
template <typename T>
__global__ void LOGICALOR(const T *input0, const T* input1, T *output, size_t count, size_t s0, size_t s1) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) {
T x = input0[i * s0];
T y = input1[i * s1];
output[i] = (x || y) ? 1 : 0;
}
return;
}
BinaryExecution::BinaryExecution(int opType, Backend *backend) : Execution(backend) {
mType = opType;
}
BinaryExecution::~BinaryExecution(){
// Do nothing
}
ErrorCode BinaryExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto count = CUDABackend::realSize(outputs[0]);
auto inputS0 = CUDABackend::realSize(inputs[0]);
auto inputS1 = CUDABackend::realSize(inputs[1]);
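    // a realSize of 1 means the input is a scalar; a stride of 0 broadcasts it across every output element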
int s0 = inputS0 == 1 ? 0 : 1;
int s1 = inputS1 == 1 ? 0 : 1;
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
//printf("%d - %d\n", block_num, threads_num);
int size[3] = {1, 1, count};
int stride0[3] = {0, 0, s0};
int stride1[3] = {0, 0, s1};
int stride2[3] = {0, 0, 1};
auto type = outputs[0]->getType();
if (type.code == halide_type_float) {
// Use Half or float
type.bits = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]) * 8;
}
auto computeFunction = [&](Tensor* input0T, Tensor* input1T, Tensor* outputT) {
auto input0 = (uint8_t*)input0T->deviceId();
auto input1 = (uint8_t*)input1T->deviceId();
auto output = (uint8_t*)outputT->deviceId();
BinaryBlit(output, input0, input1, size, stride0, stride1, stride2, type, runtime, mType);
};
computeFunction(inputs[0], inputs[1], outputs[0]);
for (int i=2; i<inputs.size(); ++i) {
computeFunction(outputs[0], inputs[i], outputs[0]);
}
return NO_ERROR;
}
class BinaryCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (op->type() == OpType_BinaryOp) {
return new BinaryExecution(op->main_as_BinaryOp()->opType(), backend);
}
if (op->type() == OpType_Eltwise) {
switch (op->main_as_Eltwise()->type()) {
case EltwiseType_SUM:
return new BinaryExecution(BinaryOpOperation_ADD, backend);
case EltwiseType_PROD:
return new BinaryExecution(BinaryOpOperation_MUL, backend);
case EltwiseType_MAXIMUM:
return new BinaryExecution(BinaryOpOperation_MAXIMUM, backend);
default:
break;
}
}
return nullptr;
}
};
static CUDACreatorRegister<BinaryCreator> __init(OpType_BinaryOp);
static CUDACreatorRegister<BinaryCreator> __init2(OpType_Eltwise);
}
} |
930f11baee83fec738f0f80f9eab157ad63456ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <omp.h>
__global__ void emptyKernel() {
}
/// Invoke `emptyKernel` with given number of blocks and threads/block and
/// report the execution time.
void invokeEmpty(bool synchronize, int numBlocks, int threadsPerBlock) {
double dt = benchmark(100'000, [=]() {
// emptyKernel<<<numBlocks, threadsPerBlock>>>();
CUDA_LAUNCH(emptyKernel, numBlocks, threadsPerBlock);
if (synchronize)
CUDA_CHECK(hipDeviceSynchronize());
});
printf("synchronize=%d blocks=%5d threads/block=%4d iteration=%.1f us\n",
(int)synchronize, numBlocks, threadsPerBlock, 1e6 * dt);
};
/// Run an empty parallel region with `numThreads` threads.
void emptyParallelRegion(int numThreads) {
#pragma omp parallel num_threads(numThreads)
{
// With this command we prevent the compiler from optimizing away the
// whole parallel region.
__asm__ volatile("");
}
}
int main() {
invokeEmpty(false, 1, 1); // Task 1a) #1
invokeEmpty(true, 1, 1); // Task 1a) #2
invokeEmpty(true, 1, 32); // Task 1a) #3
invokeEmpty(true, 1, 1024);
invokeEmpty(true, 32, 1024);
invokeEmpty(true, 1024, 32);
invokeEmpty(true, 32768, 1);
invokeEmpty(true, 32768, 32);
invokeEmpty(true, 32768, 1024);
static constexpr int numThreads = 12;
double dt = benchmark(100'000, []() {
emptyParallelRegion(numThreads);
});
printf("Empty OpenMP parallel region with %d threads --> %.1f us\n",
numThreads, 1e6 * dt);
}
| 930f11baee83fec738f0f80f9eab157ad63456ad.cu | #include "utils.h"
#include <omp.h>
__global__ void emptyKernel() {
}
/// Invoke `emptyKernel` with given number of blocks and threads/block and
/// report the execution time.
void invokeEmpty(bool synchronize, int numBlocks, int threadsPerBlock) {
double dt = benchmark(100'000, [=]() {
// emptyKernel<<<numBlocks, threadsPerBlock>>>();
CUDA_LAUNCH(emptyKernel, numBlocks, threadsPerBlock);
if (synchronize)
CUDA_CHECK(cudaDeviceSynchronize());
});
printf("synchronize=%d blocks=%5d threads/block=%4d iteration=%.1f us\n",
(int)synchronize, numBlocks, threadsPerBlock, 1e6 * dt);
};
/// Run an empty parallel region with `numThreads` threads.
void emptyParallelRegion(int numThreads) {
#pragma omp parallel num_threads(numThreads)
{
// With this command we prevent the compiler from optimizing away the
// whole parallel region.
__asm__ volatile("");
}
}
int main() {
invokeEmpty(false, 1, 1); // Task 1a) #1
invokeEmpty(true, 1, 1); // Task 1a) #2
invokeEmpty(true, 1, 32); // Task 1a) #3
invokeEmpty(true, 1, 1024);
invokeEmpty(true, 32, 1024);
invokeEmpty(true, 1024, 32);
invokeEmpty(true, 32768, 1);
invokeEmpty(true, 32768, 32);
invokeEmpty(true, 32768, 1024);
static constexpr int numThreads = 12;
double dt = benchmark(100'000, []() {
emptyParallelRegion(numThreads);
});
printf("Empty OpenMP parallel region with %d threads --> %.1f us\n",
numThreads, 1e6 * dt);
}
|
ef03e9d43b3f95563ae06f82cf0a3e71b8d54324.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelFlou.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *ptr = NULL;
hipMalloc(&ptr, XSIZE*YSIZE);
unsigned int *debug = NULL;
hipMalloc(&debug, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelFlou), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr,debug);
hipDeviceSynchronize();
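// 10 untimed launches before the timed loop below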
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelFlou), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr,debug);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelFlou), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr,debug);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
| ef03e9d43b3f95563ae06f82cf0a3e71b8d54324.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelFlou.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *ptr = NULL;
cudaMalloc(&ptr, XSIZE*YSIZE);
unsigned int *debug = NULL;
cudaMalloc(&debug, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelFlou<<<gridBlock,threadBlock>>>(ptr,debug);
cudaDeviceSynchronize();
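// 10 untimed launches before the timed loop below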
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelFlou<<<gridBlock,threadBlock>>>(ptr,debug);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelFlou<<<gridBlock,threadBlock>>>(ptr,debug);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c9bced0ba809475422bbfd21c2eb00f64ad0c262.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "randomizePopulation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
hiprandState_t *states = NULL;
hipMalloc(&states, XSIZE*YSIZE);
unsigned char *population = NULL;
hipMalloc(&population, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
randomizePopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, states,population);
hipDeviceSynchronize();
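// 10 untimed launches before the timed loop below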
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
randomizePopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, states,population);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
randomizePopulation), dim3(gridBlock),dim3(threadBlock), 0, 0, states,population);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
| c9bced0ba809475422bbfd21c2eb00f64ad0c262.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "randomizePopulation.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
curandState_t *states = NULL;
cudaMalloc(&states, XSIZE*YSIZE);
unsigned char *population = NULL;
cudaMalloc(&population, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
randomizePopulation<<<gridBlock,threadBlock>>>(states,population);
cudaDeviceSynchronize();
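// 10 untimed launches before the timed loop below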
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
randomizePopulation<<<gridBlock,threadBlock>>>(states,population);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
randomizePopulation<<<gridBlock,threadBlock>>>(states,population);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
285fdee109ef37b58b64284727913da160bcfaa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int BlockSize>
struct TPointHist2OneByte<6, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize> {
using TParent = TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>;
using TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>::Histogram;
__forceinline__ __device__ TPointHist2OneByte(float* buffer)
: TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>(buffer) {
}
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci,
const float* s1,
const float* s2) {
thread_block_tile<16> syncTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
float stat1[N];
float stat2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
stat1[k] = flag ? s2[k] : s1[k];
stat2[k] = flag ? s1[k] : s2[k];
}
float val1[N];
float val2[N];
int offset[N];
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
#pragma unroll
for (int k = 0; k < N; ++k) {
const int bin = (ci[k] >> (24 - (f << 2))) & 255;
const float pass = bin != 64 ? 1.0f : 0.0f;
val1[k] = pass * stat1[k];
val2[k] = pass * stat2[k];
offset[k] = f + 16 * (bin & 62) + 8 * (bin & 1) + flag;
}
const bool writeFirstFlag = threadIdx.x & 8;
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
int shift = flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
offset[k] += shift;
}
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
}
}
static constexpr int MaxBits() {
return 6;
}
__forceinline__ __device__ void Reduce() {
TParent::ReduceToOneWarp();
if (threadIdx.x < 256) {
const int isSecondStat = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Histogram
+ 2048 //warpHistSize
+ 2 * f
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ isSecondStat;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[(inWarpHist << 4)];
sum1 += src[(inWarpHist << 4) + 512];
}
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0] = sum0;
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0 + 32] = sum1;
}
}
__syncthreads();
}
};
DefineHist2Pass(6)
}
| 285fdee109ef37b58b64284727913da160bcfaa3.cu | #include "hist.cuh"
#include "hist_2_one_byte_base.cuh"
#include "tuning_policy_enums.cuh"
#include <cooperative_groups.h>
#include <library/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int BlockSize>
struct TPointHist2OneByte<6, BlockSize> : public TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize> {
using TParent = TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>;
using TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>::Histogram;
__forceinline__ __device__ TPointHist2OneByte(float* buffer)
: TPointHist2OneByteBase<TPointHist2OneByte<6, BlockSize>, BlockSize>(buffer) {
}
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
template <int N>
__forceinline__ __device__ void AddPointsImpl(const ui32* ci,
const float* s1,
const float* s2) {
thread_block_tile<16> syncTile = tiled_partition<16>(this_thread_block());
const bool flag = threadIdx.x & 1;
float stat1[N];
float stat2[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
stat1[k] = flag ? s2[k] : s1[k];
stat2[k] = flag ? s1[k] : s2[k];
}
float val1[N];
float val2[N];
int offset[N];
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
#pragma unroll
for (int k = 0; k < N; ++k) {
const int bin = (ci[k] >> (24 - (f << 2))) & 255;
const float pass = bin != 64 ? 1.0f : 0.0f;
val1[k] = pass * stat1[k];
val2[k] = pass * stat2[k];
offset[k] = f + 16 * (bin & 62) + 8 * (bin & 1) + flag;
}
const bool writeFirstFlag = threadIdx.x & 8;
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val1[k];
}
}
int shift = flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < N; ++k) {
offset[k] += shift;
}
syncTile.sync();
if (writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
syncTile.sync();
if (!writeFirstFlag) {
#pragma unroll
for (int k = 0; k < N; ++k) {
Histogram[offset[k]] += val2[k];
}
}
}
}
static constexpr int MaxBits() {
return 6;
}
__forceinline__ __device__ void Reduce() {
TParent::ReduceToOneWarp();
if (threadIdx.x < 256) {
const int isSecondStat = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Histogram
+ 2048 //warpHistSize
+ 2 * f
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ isSecondStat;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[(inWarpHist << 4)];
sum1 += src[(inWarpHist << 4) + 512];
}
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0] = sum0;
Histogram[maxFoldCount * 4 * isSecondStat + maxFoldCount * f + fold0 + 32] = sum1;
}
}
__syncthreads();
}
};
DefineHist2Pass(6)
}
|
efc9ad63d13ea27f9660f4eba313465564a1ddb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// modified to use only 7 floats for triMem
//1. #define TRIMEMLENGTH 7
//2. in FIMCuda and run_neighbor_check, add initialization of old at the beginning of each iteration
//3. in FIMCuda and run_neighbor_check, s_triMem[tx*TRIMEMLENGTH + 3 + C] = TC after each iteration instead of s_triMem[tx*TRIMEMLENGTH + 6 + C] = TC
//4. in FIMCuda and run_neighbor_check, in the reconcile step, there should be no +3 in fetching the location of triMem
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "meshFIM3dEikonal.h"
#include "Vec.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#ifdef WIN32
#include <io.h>
#define unlink _unlink
#else
#include <unistd.h>
#endif
#include "CUDADefines.h"
#include <time.h>
#include <cutil.h>
extern "C" {
#include <metis.h>
}
/////declaration for cuda kernels///////////////////////////
extern __global__ void run_reduction(bool *con, bool *blockCon, int* ActiveList, int nActiveBlock, int* blockSizes);
extern __global__ void FIMCuda(float3* d_tetMem0, float3* d_tetMem1, float4* d_tetT, float* d_vertT, float* d_speedInv, int* d_vertMem, int* d_vertMemOutside,
int* d_BlockSizes, bool* d_con, int* d_ActiveList,
int m_maxNumInVert, int m_maxVertMappingInside, int m_maxNumOutVertMapping, int nIter);
extern __global__ void CopyOutBack(float4* d_tetT, float* d_vertT, int* d_vertMem, int* d_vertMemOutside, int* d_BlockSizes, int* d_ActiveList, int m_maxNumInVert, int m_maxNumTotalTets, int m_maxVertMappingInside, int m_maxVertMappingOutside);
extern __global__ void run_check_neghbor(float3* d_tetMem0, float3* d_tetMem1, float4* d_tetT, float* d_speedInv, int* d_vertMem, int* d_vertMemOutside,
int* d_BlockSizes, bool* d_con, int* d_ActiveList,
int m_maxNumInVert, int m_maxVertMappingInside, int m_maxNumOutVertMapping);
#if __DEVICE_EMULATION__
bool InitCUDA(bool verbose = false)
{
return true;
}
#else
bool InitCUDA(bool verbose = false)
{
int count = 0;
int i = 0;
hipGetDeviceCount(&count);
if(count == 0)
{
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++)
{
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
{
if(prop.major >= 1)
{
break;
}
}
}
if(i == count)
{
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
hipDeviceProp_t props;
cudaSafeCall(hipSetDevice(0));
cudaSafeCall(hipGetDeviceProperties(&props, 0));
if (verbose) {
printf("Device 0: \"%s\" with Compute %d.%d capability\n", props.name, props.major, props.minor);
printf("CUDA initialized.\n");
}
return true;
}
#endif
/////////////////////////////////////////////////////////////////////////////
//create .mesh file from trimesh faces and call partnmesh function
//to partition and create intermediate mesh.npart.N file and then read this file
void meshFIM3dEikonal::writeVTK(std::vector < std::vector <float> > values)
{
size_t nv = m_meshPtr->vertices.size();
size_t nt = m_meshPtr->tets.size();
for (size_t j = 0; j < values.size(); j++) {
FILE* vtkfile;
std::stringstream ss;
ss << "result" << j << ".vtk";
vtkfile = fopen(ss.str().c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", nv);
for (int i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f %.12f %.12f\n", m_meshPtr->vertices[i][0],
m_meshPtr->vertices[i][1], m_meshPtr->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", nt, nt * 5);
for (int i = 0; i < nt; i++)
{
fprintf(vtkfile, "4 %d %d %d %d\n", m_meshPtr->tets[i][0],
m_meshPtr->tets[i][1], m_meshPtr->tets[i][2], m_meshPtr->tets[i][3]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", nt);
for (int i = 0; i < nt; i++)
{
fprintf(vtkfile, "10\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n",
nv, values.size());
for (int i = 0; i < values[j].size(); i++) {
fprintf(vtkfile, "%.12f\n ", values[j][i]);
}
fclose(vtkfile);
}
}
void meshFIM3dEikonal::GraphPartition_METIS2(int& numBlock, int maxNumBlockVerts, bool verbose)
{
FILE * outf;
outf = fopen("tmp.mesh", "w+");
if(outf == NULL)
{
printf("Cannot open mesh file to write!!!!\n");
exit(1);
}
size_t sz = m_meshPtr->tets.size();
fprintf(outf, "%d 2\n", sz);
for(int i = 0; i < sz; i++)
fprintf(outf, "%d %d %d %d\n", m_meshPtr->tets[i].v[0] + 1,
m_meshPtr->tets[i].v[1] + 1, m_meshPtr->tets[i].v[2] + 1,
m_meshPtr->tets[i].v[3] + 1);
fclose(outf);
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
char outputFileName[512];
char meshfile[] = "tmp.mesh";
if(numBlock == 0)
{
numBlock = static_cast<int>(MAX(numVert / maxNumBlockVerts - 10, 1));
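    // grow the block count until the largest partition holds exactly maxNumBlockVerts vertices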
do
{
numBlock++;
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found!!!!\n");
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i < numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
while(m_maxNumInVert != maxNumBlockVerts);
}
else
{
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile, numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found!!!!\n");
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i < numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
if (verbose)
printf("max num vert is : %d\n", m_maxNumInVert);
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
srand((unsigned)time(NULL));
if (verbose)
printf("numBlock is : %d\n", numBlock);
m_PartitionInVerts.resize(numBlock);
for(int i = 0; i < numVert; i++)
{
m_PartitionInVerts[m_PartitionLabel[i]].push_back(i);
}
unlink("tmp.mesh");
}
void meshFIM3dEikonal::GraphPartition_Square(int squareLength, int squareWidth, int squareHeight, int blockLength, int blockWidth, int blockHeight, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
int numBlockLength = (squareLength / blockLength);
int numBlockWidth = (squareWidth / blockWidth);
int numBlockHeight = (squareHeight / blockHeight);
int numBlock = numBlockLength * numBlockWidth*numBlockHeight;
for(int k = 0; k < squareHeight; k++)
for(int i = 0; i < squareWidth; i++)
for(int j = 0; j < squareLength; j++)
{
int index = k * squareLength * squareWidth + i * squareLength + j;
int k2 = k;
int i2 = i;
int j2 = j;
m_PartitionLabel[index] = (k2 / blockHeight) * numBlockLength * numBlockWidth + (i2 / blockWidth) * numBlockLength + (j2 / blockLength);
}
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
m_BlockSizes[i] = 0;
m_PartitionInVerts.resize(numBlock);
for(int i = 0; i < numVert; i++)
{
m_PartitionInVerts[m_PartitionLabel[i]].push_back(i);
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
if (verbose)
printf("final number of blocks: %d\n", numBlock);
}
std::vector < std::vector < float > > meshFIM3dEikonal::GenerateData(size_t maxIters, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
if(!InitCUDA(verbose))
{
exit(1);
}
float* h_tetMem0;
float* h_tetMem1;
float* h_tetT;
float* h_vertT;
int* h_vertMem;
int* h_vertMemOutside;
bool* h_blockCon;
int* h_BlockSizes;
int* h_BlockLabel;
vector<int> h_ActiveList;
vector<int> h_ActiveListNew;
int* d_ActiveList = 0;
bool* d_con;
bool* d_blockCon;
float3* d_tetMem0;
float3* d_tetMem1;
float4* d_tetT;
float* d_vertT;
float* d_speedInv;
int* d_vertMem;
int* d_vertMemOutside;
int* d_BlockSizes;
GetTetMem(h_tetMem0, h_tetMem1, h_tetT);
GetVertMem(h_vertMem, h_vertMemOutside);
h_vertT = (float*)malloc(sizeof(float)* m_maxNumInVert * m_numBlock);
h_blockCon = (bool*)malloc(sizeof(bool) * m_numBlock);
h_BlockLabel = (int*)malloc(sizeof(int)* m_numBlock);
h_BlockSizes = (int*)malloc(sizeof(int)* m_numBlock);
memset(h_blockCon, 1, sizeof(bool) * m_numBlock);
for(int i = 0; i < m_numBlock; i++) {
h_BlockLabel[i] = FARP;
h_BlockSizes[i] = m_BlockSizes[i];
}
////////////////////initialize the seed points for h_tetT//////////////////////////
if (verbose)
printf("Seed size is %d, source block is %d\n", m_SeedPoints.size(),
m_PartitionLabel.empty()?-1:
(m_PartitionLabel[m_SeedPoints.empty()?0:m_SeedPoints[0]]));
for(int i = 0; i < m_SeedPoints.size(); i++)
{
int seed = m_SeedPoints[i];
int seedBelongToBlock = m_PartitionLabel[seed];
m_ActiveBlocks.insert(m_ActiveBlocks.end(), seedBelongToBlock);
h_blockCon[seedBelongToBlock] = false;
h_BlockLabel[seedBelongToBlock] = ACTIVE;
for(int j = 0; j < m_blockVertMapping[seed].size(); j++)
{
h_tetT[m_blockVertMapping[seed][j]] = 0.0;
}
}
size_t numActive = m_ActiveBlocks.size();
if (verbose)
printf("Active block number is %d.\n", numActive);
h_ActiveList.resize(m_numBlock);
set<int>::iterator activeiter = m_ActiveBlocks.begin();
for(int i = 0; activeiter != m_ActiveBlocks.end(); activeiter++)
h_ActiveList[i++] = *activeiter;
unsigned int timerstart, timerend = 0;
///////////////////////malloc GPU memory/////////////////////////////////
cudaSafeCall((hipMalloc((void**)&d_con, sizeof(bool) * m_numBlock * m_maxNumInVert)));
cudaSafeCall((hipMalloc((void**)&d_tetMem0, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((hipMalloc((void**)&d_tetMem1, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((hipMalloc((void**)&d_tetT, sizeof(float)* 4 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((hipMalloc((void**)&d_vertT, sizeof(float)* m_maxNumInVert * m_numBlock)));
cudaSafeCall( hipMalloc( (void**) &d_speedInv, sizeof(float) * m_maxNumTotalTets * m_numBlock) );
cudaSafeCall((hipMalloc((void**)&d_vertMem, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingInside)));
cudaSafeCall((hipMalloc((void**)&d_vertMemOutside, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingOutside)));
cudaSafeCall((hipMalloc((void**)&d_BlockSizes, sizeof(int)* m_numBlock)));
cudaSafeCall((hipMalloc((void**)&d_blockCon, sizeof(bool) * m_numBlock)));
cudaSafeCall((hipMalloc((void**)&d_ActiveList, sizeof(int)* m_numBlock)));
//////////////////copy to gpu memories///////////////////////////////
cudaSafeCall((hipMemcpy(d_tetMem0, h_tetMem0, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_tetMem1, h_tetMem1, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_tetT, h_tetT, sizeof(float)* 4 * m_maxNumTotalTets * m_numBlock, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_vertMem, h_vertMem, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingInside, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_vertMemOutside, h_vertMemOutside, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingOutside, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_BlockSizes, h_BlockSizes, sizeof(int)* m_numBlock, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemcpy(d_blockCon, h_blockCon, sizeof(bool) * m_numBlock, hipMemcpyHostToDevice)));
cudaSafeCall((hipMemset(d_vertT, 0, sizeof(float)* m_maxNumInVert * m_numBlock)));
size_t nTotalIter = 0;
size_t nIter = 2;
hipFuncSetCacheConfig(FIMCuda, hipFuncCachePreferShared);
hipFuncSetCacheConfig(run_check_neghbor, hipFuncCachePreferShared);
vector< vector<float> > tmp_h_verrT;
vector< vector<float> > tmp_h_verrT2;
tmp_h_verrT.resize(m_numBlock);
tmp_h_verrT2.resize(m_numBlock);
size_t totalIterationNumber = 0;
timerstart = clock();
//the result vector
std::vector< std::vector < float > > result;
m_meshPtr->vertT.resize(numVert);
size_t maxActive = 0;
while(numActive > 0)
{
maxActive = static_cast<int>(MAX(maxActive, numActive));
///////step 1: run solver /////////////////////////////////////
nTotalIter++;
//don't do more than maxIters
if (nTotalIter > maxIters) break;
if (verbose ) {
size_t act = numActive / 3;
for(size_t ab = 0; ab < 60; ab++) {
if (ab < act)
printf("=");
else
printf(" ");
}
printf(" %d Active blocks.\n", numActive);
}
totalIterationNumber += numActive;
dim3 dimGrid(static_cast<int>(numActive), 1);
dim3 dimBlock(m_maxNumTotalTets, 1);
cudaSafeCall(hipMemcpy(d_ActiveList, &h_ActiveList[0], sizeof(int)* m_numBlock, hipMemcpyHostToDevice));
int sharedSize = sizeof(float)* 4 * m_maxNumTotalTets + sizeof(int)* m_maxNumInVert * m_maxVertMappingInside;
(FIMCuda << <dimGrid, dimBlock, sharedSize >> >(d_tetMem0, d_tetMem1, d_tetT, d_vertT, d_speedInv, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_con, d_ActiveList, m_maxNumInVert, m_maxVertMappingInside,
m_maxVertMappingOutside, static_cast<int>(nIter)));
cudaCheckError();
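        // copy the per-tetrahedron travel times back into the per-vertex array for each active block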
dimBlock = dim3(m_maxNumInVert, 1);
CopyOutBack << <dimGrid, dimBlock >> >(d_tetT, d_vertT, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_ActiveList, m_maxNumInVert, m_maxNumTotalTets, m_maxVertMappingInside,
m_maxVertMappingOutside);
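        // reduce the per-vertex convergence flags to a single flag per active block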
dimBlock = dim3(m_maxNumInVert, 1);
run_reduction << <dimGrid, dimBlock >> >(d_con, d_blockCon, d_ActiveList, static_cast<int>(numActive), d_BlockSizes);
cudaSafeCall(hipMemcpy(h_blockCon, d_blockCon, m_numBlock * sizeof(bool), hipMemcpyDeviceToHost));
size_t nOldActiveBlock = numActive;
numActive = 0;
h_ActiveListNew.clear();
for(uint i = 0; i < nOldActiveBlock; i++)
{
uint currBlkIdx = h_ActiveList[i];
h_BlockLabel[currBlkIdx] = FARP;
if(!h_blockCon[currBlkIdx]) // if not converged
{
h_BlockLabel[currBlkIdx] = ACTIVE;
}
}
for(uint i = 0; i < nOldActiveBlock; i++)
{
// check neighbors of current active tile
uint currBlkIdx = h_ActiveList[i];
if(h_blockCon[currBlkIdx]) //converged
{
set<int> nb = m_BlockNeighbor[currBlkIdx];
set<int>::iterator iter;
for(iter = nb.begin(); iter != nb.end(); iter++)
{
int currIdx = *iter;
if(h_BlockLabel[currIdx] == FARP)
{
h_BlockLabel[currIdx] = ACTIVE;
h_ActiveListNew.push_back(currIdx);
}
}
}
}
for(uint i = 0; i < nOldActiveBlock; i++)
{
uint currBlkIdx = h_ActiveList[i];
if(!h_blockCon[currBlkIdx]) // if not converged
{
h_ActiveList[numActive++] = currBlkIdx;
}
}
//////////////////////////////////////////////////////////////////
// 4. run solver only once for neighbor blocks of converged block
// current active list contains active blocks and neighbor blocks of
// any converged blocks
if(h_ActiveListNew.size() > 0)
{
size_t numActiveNew = h_ActiveListNew.size();
cudaSafeCall(hipMemcpy(d_ActiveList, &h_ActiveListNew[0], numActiveNew * sizeof(int), hipMemcpyHostToDevice));
dim3 dimGrid(static_cast<int>(numActiveNew), 1);
dim3 dimBlock(m_maxNumTotalTets, 1);
int sharedSize = sizeof(float4) * m_maxNumTotalTets + sizeof(int)* m_maxNumInVert * m_maxVertMappingInside;
run_check_neghbor << <dimGrid, dimBlock, sharedSize >> >(d_tetMem0, d_tetMem1, d_tetT, d_speedInv, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_con, d_ActiveList, m_maxNumInVert, m_maxVertMappingInside, m_maxVertMappingOutside);
////////////////////////////////////////////////////////////////
// 5. reduction
///////////////////////////////////////////////////////////////
dimGrid = dim3(static_cast<int>(numActiveNew), 1);
dimBlock = dim3(m_maxNumInVert, 1);
run_reduction << <dimGrid, dimBlock >> >(d_con, d_blockCon, d_ActiveList, static_cast<int>(numActiveNew), d_BlockSizes);
//////////////////////////////////////////////////////////////////
// 6. update active list
// read back active volume from the device and add
// active block to active list on the host memory
cudaSafeCall(hipMemcpy(h_blockCon, d_blockCon, m_numBlock * sizeof(bool), hipMemcpyDeviceToHost));
for(uint i = 0; i < h_ActiveListNew.size(); i++)
{
uint currBlkIdx = h_ActiveListNew[i];
if(!h_blockCon[currBlkIdx]) // false : activate block (not converged)
{
h_ActiveList[numActive++] = currBlkIdx;
}
else h_BlockLabel[currBlkIdx] = FARP;
}
}
////////////////////////copy values from each iteration
cudaSafeCall(hipMemcpy(h_vertT, d_vertT,
sizeof(float)* m_maxNumInVert * m_numBlock, hipMemcpyDeviceToHost));
for(int i = 0; i < m_numBlock; i++)
{
for(int j = 0; j < m_PartitionInVerts[i].size(); j++)
{
m_meshPtr->vertT[m_PartitionInVerts[i][j]] =
h_vertT[i * m_maxNumInVert + j];
}
}
result.push_back(m_meshPtr->vertT);
////////////////////////////////END copy
}
cudaSafeCall(hipDeviceSynchronize());
timerend = clock();
double duration = (double)(timerend - timerstart) / CLOCKS_PER_SEC;
if (verbose)
printf("Computing time : %.10lf s\n",duration);
cudaSafeCall(hipDeviceSynchronize());
if (verbose)
printf("num of max active %d\n", maxActive);
if (verbose) {
printf("The iteration number: %d\n", nTotalIter);
printf("The total iteration number: %d\n", totalIterationNumber);
}
cudaSafeCall(hipFree(d_con));
cudaSafeCall(hipFree(d_blockCon));
cudaSafeCall(hipFree(d_BlockSizes));
free(h_blockCon);
free(h_BlockSizes);
return result;
}
void meshFIM3dEikonal::PartitionTets(int numBlock, bool verbose)
{
///////////////////////////////////step 3: partition faces//////////////////////////////////////
if (verbose)
printf("Start PartitionTets ...");
m_PartitionTets.resize(numBlock);
m_PartitionNbTets.resize(numBlock);
size_t numTets = m_meshPtr->tets.size();
size_t numVerts = m_meshPtr->vertices.size();
TetMesh::Tet t;
vector<TetMesh::Tet> virtualTets;
vector<int> virtualTetCnt;
virtualTetCnt.resize(numBlock);
m_PartitionVirtualTets.resize(numBlock);
set<int> labels;
for(int i = 0; i < numTets; i++)
{
t = m_meshPtr->tets[i];
size_t vfCnt = m_meshPtr->tetVirtualTets[i].size();
int obtusevert = t.obtuseV;
if(obtusevert >= 0)
{
virtualTetCnt[m_PartitionLabel[t[obtusevert]]] += static_cast<int>(vfCnt);
m_PartitionVirtualTets[m_PartitionLabel[t[obtusevert]]].insert(
m_PartitionVirtualTets[m_PartitionLabel[t[obtusevert]]].end(),
m_meshPtr->tetVirtualTets[i].begin(), m_meshPtr->tetVirtualTets[i].end());
}
labels.clear();
for(int m = 0; m < 4; m++)
labels.insert(labels.begin(), m_PartitionLabel[t[m]]);
if(labels.size() == 1)
{
m_PartitionTets[*(labels.begin())].push_back(i);
}
else if(labels.size() > 1)
{
set<int>::iterator it = labels.begin();
for(set<int>::iterator it = labels.begin(); it != labels.end(); it++)
{
m_PartitionNbTets[*it].push_back(i);
}
}
else
printf("Error!!\n");
}
vector<int> PartitionToltalTets;
PartitionToltalTets.resize(numBlock);
m_maxNumTotalTets = 0;
for(int j = 0; j < numBlock; j++)
{
PartitionToltalTets[j] = static_cast<int>(m_PartitionTets[j].size() + m_PartitionNbTets[j].size() + virtualTetCnt[j]);
m_maxNumTotalTets = MAX(PartitionToltalTets[j], m_maxNumTotalTets);
}
if (verbose)
printf("m_maxNumTotalTets is %d\n", m_maxNumTotalTets);
//calculate block neighbors.
m_BlockNeighbor.resize(numBlock);
for(int i = 0; i < numVerts; i++)
{
vector<int> nbs = m_meshPtr->neighbors[i];
for(int j = 0; j < nbs.size(); j++)
{
int nb = nbs[j];
if(m_PartitionLabel[nb] != m_PartitionLabel[i])
m_BlockNeighbor[m_PartitionLabel[i]].insert(m_BlockNeighbor[m_PartitionLabel[i]].end(), m_PartitionLabel[nb]);
}
}
if (verbose)
printf("done!\n");
}
bool meshFIM3dEikonal::gettetmem(vector<float>& tetmem, TetMesh::Tet t)
{
bool needswap = false;
tetmem.resize(6);
point A = m_meshPtr->vertices[t[0]];
point B = m_meshPtr->vertices[t[1]];
point C = m_meshPtr->vertices[t[2]];
point D = m_meshPtr->vertices[t[3]];
point AB = B - A;
point AC = C - A;
point AD = D - A;
AC = C - A;
AD = D - A;
point BC = C - B;
point CD = D - C;
point BD = D - B;
tetmem[0] = vMv(AC, t.M, BC);
tetmem[1] = vMv(BC, t.M, CD);
tetmem[2] = vMv(AC, t.M, CD);
tetmem[3] = vMv(AD, t.M, BD);
tetmem[4] = vMv(AC, t.M, AD);
tetmem[5] = vMv(BC, t.M, BD);
return needswap;
}
void meshFIM3dEikonal::GetTetMem(float* &h_tetMem0, float* &h_tetMem1, float* &h_tetT)
{
h_tetMem0 = (float*)malloc(3 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
h_tetMem1 = (float*)malloc(3 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
h_tetT = (float*)malloc(4 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
size_t numTets = m_meshPtr->tets.size();
size_t numVert = m_meshPtr->vertices.size();
m_blockVertMapping.resize(numVert); //for each vertex, store the addresses where it appears in the global triMem array.
TetMesh::Tet t;
for(int i = 0; i < m_numBlock; i++)
{
int blockIdx = i * m_maxNumTotalTets * 3;
size_t numPF = m_PartitionTets[i].size();
for(int j = 0; j < numPF; j++)
{
t = m_meshPtr->tets[m_PartitionTets[i][j]];
vector<float> tetmem;
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
}
}
for(int i = 0; i < m_numBlock; i++)
{
int blockIdx = i * m_maxNumTotalTets * 3;
size_t numPF = m_PartitionTets[i].size();
size_t numPNF = m_PartitionNbTets[i].size();
size_t numPVF = m_PartitionVirtualTets[i].size();
int k = 0;
int l = 0;
for (int j = static_cast<int>(numPF); j < m_maxNumTotalTets; j++)
{
if(j < numPF + numPNF)
{
vector<float> tetmem;
t = m_meshPtr->tets[m_PartitionNbTets[i][k]];
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
k++;
}
else if(j < numPF + numPNF + numPVF)
{
t = m_PartitionVirtualTets[i][l];
vector<float> tetmem;
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
l++;
}
else
{
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
}
}
}
}
void meshFIM3dEikonal::GetVertMem(int* &h_vertMem, int* &h_vertMemOutside)
{
size_t numVert = m_meshPtr->vertices.size();
m_blockVertMappingInside.resize(numVert);
m_blockVertMappingOutside.resize(numVert);
m_maxNumVertMapping = 0;
for(int i = 0; i < m_numBlock; i++)
{
int triIdx = i * TETMEMLENGTH * m_maxNumTotalTets;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
m_maxNumVertMapping = static_cast<int>(MAX(m_maxNumVertMapping, m_blockVertMapping[i%m_blockVertMapping.size()].size()));
vector<int> tmp = m_blockVertMapping[m_PartitionInVerts[i][m]%m_blockVertMapping.size()];
for(int n = 0; n < tmp.size(); n++)
{
if(tmp[n] >= triIdx + 0 && tmp[n] < triIdx + m_maxNumTotalTets * TETMEMLENGTH)
m_blockVertMappingInside[m_PartitionInVerts[i][m]].push_back(tmp[n]);
else
{
m_blockVertMappingOutside[m_PartitionInVerts[i][m]].push_back(tmp[n]);
}
}
}
}
m_maxVertMappingInside = 0;
m_maxVertMappingOutside = 0;
for(int i = 0; i < numVert; i++)
{
m_maxVertMappingInside = static_cast<int>(MAX(m_maxVertMappingInside, (m_blockVertMappingInside[i].size())));
m_maxVertMappingOutside = static_cast<int>(MAX(m_maxVertMappingOutside, (m_blockVertMappingOutside[i].size())));
}
h_vertMem = (int*)malloc(sizeof(int)* m_maxVertMappingInside * m_maxNumInVert * m_numBlock);
for(int i = 0; i < m_numBlock; i++)
{
int vertIdx = i * m_maxVertMappingInside * m_maxNumInVert;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
size_t tmpsize = m_blockVertMappingInside[m_PartitionInVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMem[vertIdx + m * m_maxVertMappingInside + n] = m_blockVertMappingInside[m_PartitionInVerts[i][m]][n];
for(; n < m_maxVertMappingInside; n++)
h_vertMem[vertIdx + m * m_maxVertMappingInside + n] = -1 + i * m_maxNumTotalTets * TETMEMLENGTH;
}
for (size_t m = m_PartitionInVerts[i].size() * m_maxVertMappingInside; m < m_maxNumInVert * m_maxVertMappingInside; m++)
{
h_vertMem[vertIdx + m] = -1 + i * m_maxNumTotalTets*TETMEMLENGTH;
}
}
h_vertMemOutside = (int*)malloc(m_maxNumInVert * m_numBlock * m_maxVertMappingOutside * sizeof(int));
for(int i = 0; i < m_numBlock; i++)
{
int vertIdx = i * m_maxVertMappingOutside * m_maxNumInVert;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
size_t tmpsize = m_blockVertMappingOutside[m_PartitionInVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMemOutside[vertIdx + m * m_maxVertMappingOutside + n] = m_blockVertMappingOutside[m_PartitionInVerts[i][m]][n];
for(; n < m_maxVertMappingOutside; n++)
h_vertMemOutside[vertIdx + m * m_maxVertMappingOutside + n] = -1;
}
for (size_t m = m_PartitionInVerts[i].size() * m_maxVertMappingOutside; m < m_maxNumInVert * m_maxVertMappingOutside; m++)
{
h_vertMemOutside[vertIdx + m] = -1;
}
}
}
| efc9ad63d13ea27f9660f4eba313465564a1ddb0.cu |
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// modified to use only 7 floats for triMem
//1. #define TRIMEMLENGTH 7
//2. in FIMCuda and run_neighbor_check, add initialization of old at the beginning of each iteration
//3. in FIMCuda and run_neighbor_check, s_triMem[tx*TRIMEMLENGTH + 3 + C] = TC after each iteration instead of s_triMem[tx*TRIMEMLENGTH + 6 + C] = TC
//4. in FIMCuda and run_neighbor_check, in the reconcile step, there should be no +3 in fetching the location of triMem
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "meshFIM3dEikonal.h"
#include "Vec.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#ifdef WIN32
#include <io.h>
#define unlink _unlink
#else
#include <unistd.h>
#endif
#include "CUDADefines.h"
#include <time.h>
#include <cutil.h>
extern "C" {
#include <metis.h>
}
/////declaration for cuda kernels///////////////////////////
extern __global__ void run_reduction(bool *con, bool *blockCon, int* ActiveList, int nActiveBlock, int* blockSizes);
extern __global__ void FIMCuda(float3* d_tetMem0, float3* d_tetMem1, float4* d_tetT, float* d_vertT, float* d_speedInv, int* d_vertMem, int* d_vertMemOutside,
int* d_BlockSizes, bool* d_con, int* d_ActiveList,
int m_maxNumInVert, int m_maxVertMappingInside, int m_maxNumOutVertMapping, int nIter);
extern __global__ void CopyOutBack(float4* d_tetT, float* d_vertT, int* d_vertMem, int* d_vertMemOutside, int* d_BlockSizes, int* d_ActiveList, int m_maxNumInVert, int m_maxNumTotalTets, int m_maxVertMappingInside, int m_maxVertMappingOutside);
extern __global__ void run_check_neghbor(float3* d_tetMem0, float3* d_tetMem1, float4* d_tetT, float* d_speedInv, int* d_vertMem, int* d_vertMemOutside,
int* d_BlockSizes, bool* d_con, int* d_ActiveList,
int m_maxNumInVert, int m_maxVertMappingInside, int m_maxNumOutVertMapping);
#if __DEVICE_EMULATION__
bool InitCUDA(bool verbose = false)
{
return true;
}
#else
bool InitCUDA(bool verbose = false)
{
int count = 0;
int i = 0;
cudaGetDeviceCount(&count);
if(count == 0)
{
fprintf(stderr, "There is no device.\n");
return false;
}
for(i = 0; i < count; i++)
{
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{
if(prop.major >= 1)
{
break;
}
}
}
if(i == count)
{
fprintf(stderr, "There is no device supporting CUDA.\n");
return false;
}
cudaDeviceProp props;
cudaSafeCall(cudaSetDevice(0));
cudaSafeCall(cudaGetDeviceProperties(&props, 0));
if (verbose) {
printf("Device 0: \"%s\" with Compute %d.%d capability\n", props.name, props.major, props.minor);
printf("CUDA initialized.\n");
}
return true;
}
#endif
/////////////////////////////////////////////////////////////////////////////
//create .mesh file from trimesh faces and call partnmesh function
//to partition and create intermediate mesh.npart.N file and then read this file
void meshFIM3dEikonal::writeVTK(std::vector < std::vector <float> > values)
{
size_t nv = m_meshPtr->vertices.size();
size_t nt = m_meshPtr->tets.size();
for (size_t j = 0; j < values.size(); j++) {
FILE* vtkfile;
std::stringstream ss;
ss << "result" << j << ".vtk";
vtkfile = fopen(ss.str().c_str(), "w+");
fprintf(vtkfile, "# vtk DataFile Version 3.0\nvtk output\nASCII\nDATASET UNSTRUCTURED_GRID\n");
fprintf(vtkfile, "POINTS %d float\n", nv);
for (int i = 0; i < nv; i++)
{
fprintf(vtkfile, "%.12f %.12f %.12f\n", m_meshPtr->vertices[i][0],
m_meshPtr->vertices[i][1], m_meshPtr->vertices[i][2]);
}
fprintf(vtkfile, "CELLS %d %d\n", nt, nt * 5);
for (int i = 0; i < nt; i++)
{
fprintf(vtkfile, "4 %d %d %d %d\n", m_meshPtr->tets[i][0],
m_meshPtr->tets[i][1], m_meshPtr->tets[i][2], m_meshPtr->tets[i][3]);
}
fprintf(vtkfile, "CELL_TYPES %d\n", nt);
for (int i = 0; i < nt; i++)
{
fprintf(vtkfile, "10\n");
}
fprintf(vtkfile, "POINT_DATA %d\nSCALARS traveltime float 1\nLOOKUP_TABLE default\n",
nv, values.size());
for (int i = 0; i < values[j].size(); i++) {
fprintf(vtkfile, "%.12f\n ", values[j][i]);
}
fclose(vtkfile);
}
}
void meshFIM3dEikonal::GraphPartition_METIS2(int& numBlock, int maxNumBlockVerts, bool verbose)
{
FILE * outf;
outf = fopen("tmp.mesh", "w+");
if(outf == NULL)
{
printf("Cannot open mesh file to write!!!!\n");
exit(1);
}
size_t sz = m_meshPtr->tets.size();
fprintf(outf, "%d 2\n", sz);
for(int i = 0; i < sz; i++)
fprintf(outf, "%d %d %d %d\n", m_meshPtr->tets[i].v[0] + 1,
m_meshPtr->tets[i].v[1] + 1, m_meshPtr->tets[i].v[2] + 1,
m_meshPtr->tets[i].v[3] + 1);
fclose(outf);
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
char outputFileName[512];
char meshfile[] = "tmp.mesh";
if(numBlock == 0)
{
numBlock = static_cast<int>(MAX(numVert / maxNumBlockVerts - 10, 1));
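//repartition with one more block each pass until the largest block holds exactly maxNumBlockVerts vertices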
do
{
numBlock++;
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile,numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found!!!!\n");
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i < numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
while(m_maxNumInVert != maxNumBlockVerts);
}
else
{
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
{
m_BlockSizes[i] = 0;
}
partnmesh(meshfile, numBlock,verbose?1:0);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
FILE* partFile = fopen(outputFileName, "r+");
if(partFile == NULL)
{
printf("NO part file found!!!!\n");
exit(1);
}
for(int i = 0; i < numVert; i++)
{
fscanf(partFile, "%d", &m_PartitionLabel[i]);
}
for(int i = 0; i < numVert; i++)
{
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
if (verbose)
printf("max num vert is : %d\n", m_maxNumInVert);
fclose(partFile);
sprintf(outputFileName, "tmp.mesh.npart.%d", numBlock);
unlink(outputFileName);
sprintf(outputFileName, "tmp.mesh.epart.%d", numBlock);
unlink(outputFileName);
}
srand((unsigned)time(NULL));
if (verbose)
printf("numBlock is : %d\n", numBlock);
m_PartitionInVerts.resize(numBlock);
for(int i = 0; i < numVert; i++)
{
m_PartitionInVerts[m_PartitionLabel[i]].push_back(i);
}
unlink("tmp.mesh");
}
void meshFIM3dEikonal::GraphPartition_Square(int squareLength, int squareWidth, int squareHeight, int blockLength, int blockWidth, int blockHeight, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
m_PartitionLabel.resize(numVert);
int numBlockLength = (squareLength / blockLength);
int numBlockWidth = (squareWidth / blockWidth);
int numBlockHeight = (squareHeight / blockHeight);
int numBlock = numBlockLength * numBlockWidth*numBlockHeight;
for(int k = 0; k < squareHeight; k++)
for(int i = 0; i < squareWidth; i++)
for(int j = 0; j < squareLength; j++)
{
int index = k * squareLength * squareWidth + i * squareLength + j;
int k2 = k;
int i2 = i;
int j2 = j;
m_PartitionLabel[index] = (k2 / blockHeight) * numBlockLength * numBlockWidth + (i2 / blockWidth) * numBlockLength + (j2 / blockLength);
}
m_BlockSizes.resize(numBlock);
for(int i = 0; i < numBlock; i++)
m_BlockSizes[i] = 0;
m_PartitionInVerts.resize(numBlock);
for(int i = 0; i < numVert; i++)
{
m_PartitionInVerts[m_PartitionLabel[i]].push_back(i);
m_BlockSizes[m_PartitionLabel[i]]++;
}
m_maxNumInVert = 0;
for(int i = 0; i < numBlock; i++)
{
m_maxNumInVert = MAX(m_maxNumInVert, m_BlockSizes[i]);
}
if (verbose)
printf("final number of blocks: %d\n", numBlock);
}
std::vector < std::vector < float > > meshFIM3dEikonal::GenerateData(size_t maxIters, bool verbose)
{
size_t numVert = m_meshPtr->vertices.size();
if(!InitCUDA(verbose))
{
exit(1);
}
float* h_tetMem0;
float* h_tetMem1;
float* h_tetT;
float* h_vertT;
int* h_vertMem;
int* h_vertMemOutside;
bool* h_blockCon;
int* h_BlockSizes;
int* h_BlockLabel;
vector<int> h_ActiveList;
vector<int> h_ActiveListNew;
int* d_ActiveList = 0;
bool* d_con;
bool* d_blockCon;
float3* d_tetMem0;
float3* d_tetMem1;
float4* d_tetT;
float* d_vertT;
float* d_speedInv;
int* d_vertMem;
int* d_vertMemOutside;
int* d_BlockSizes;
GetTetMem(h_tetMem0, h_tetMem1, h_tetT);
GetVertMem(h_vertMem, h_vertMemOutside);
h_vertT = (float*)malloc(sizeof(float)* m_maxNumInVert * m_numBlock);
h_blockCon = (bool*)malloc(sizeof(bool) * m_numBlock);
h_BlockLabel = (int*)malloc(sizeof(int)* m_numBlock);
h_BlockSizes = (int*)malloc(sizeof(int)* m_numBlock);
memset(h_blockCon, 1, sizeof(bool) * m_numBlock);
for(int i = 0; i < m_numBlock; i++) {
h_BlockLabel[i] = FARP;
h_BlockSizes[i] = m_BlockSizes[i];
}
////////////////////initialize the seed points for h_tetT//////////////////////////
if (verbose)
printf("Seed size is %d, source block is %d\n", m_SeedPoints.size(),
m_PartitionLabel.empty()?-1:
(m_PartitionLabel[m_SeedPoints.empty()?0:m_SeedPoints[0]]));
for(int i = 0; i < m_SeedPoints.size(); i++)
{
int seed = m_SeedPoints[i];
int seedBelongToBlock = m_PartitionLabel[seed];
m_ActiveBlocks.insert(m_ActiveBlocks.end(), seedBelongToBlock);
h_blockCon[seedBelongToBlock] = false;
h_BlockLabel[seedBelongToBlock] = ACTIVE;
for(int j = 0; j < m_blockVertMapping[seed].size(); j++)
{
h_tetT[m_blockVertMapping[seed][j]] = 0.0;
}
}
size_t numActive = m_ActiveBlocks.size();
if (verbose)
printf("Active block number is %d.\n", numActive);
h_ActiveList.resize(m_numBlock);
set<int>::iterator activeiter = m_ActiveBlocks.begin();
for(int i = 0; activeiter != m_ActiveBlocks.end(); activeiter++)
h_ActiveList[i++] = *activeiter;
clock_t timerstart, timerend = 0;
///////////////////////malloc GPU memory/////////////////////////////////
cudaSafeCall((cudaMalloc((void**)&d_con, sizeof(bool) * m_numBlock * m_maxNumInVert)));
cudaSafeCall((cudaMalloc((void**)&d_tetMem0, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((cudaMalloc((void**)&d_tetMem1, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((cudaMalloc((void**)&d_tetT, sizeof(float)* 4 * m_maxNumTotalTets * m_numBlock)));
cudaSafeCall((cudaMalloc((void**)&d_vertT, sizeof(float)* m_maxNumInVert * m_numBlock)));
cudaSafeCall( cudaMalloc( (void**) &d_speedInv, sizeof(float) * m_maxNumTotalTets * m_numBlock) );
cudaSafeCall((cudaMalloc((void**)&d_vertMem, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingInside)));
cudaSafeCall((cudaMalloc((void**)&d_vertMemOutside, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingOutside)));
cudaSafeCall((cudaMalloc((void**)&d_BlockSizes, sizeof(int)* m_numBlock)));
cudaSafeCall((cudaMalloc((void**)&d_blockCon, sizeof(bool) * m_numBlock)));
cudaSafeCall((cudaMalloc((void**)&d_ActiveList, sizeof(int)* m_numBlock)));
//////////////////copy to gpu memories///////////////////////////////
cudaSafeCall((cudaMemcpy(d_tetMem0, h_tetMem0, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_tetMem1, h_tetMem1, sizeof(float)* 3 * m_maxNumTotalTets * m_numBlock, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_tetT, h_tetT, sizeof(float)* 4 * m_maxNumTotalTets * m_numBlock, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_vertMem, h_vertMem, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingInside, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_vertMemOutside, h_vertMemOutside, sizeof(int)* m_maxNumInVert * m_numBlock * m_maxVertMappingOutside, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_BlockSizes, h_BlockSizes, sizeof(int)* m_numBlock, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemcpy(d_blockCon, h_blockCon, sizeof(bool) * m_numBlock, cudaMemcpyHostToDevice)));
cudaSafeCall((cudaMemset(d_vertT, 0, sizeof(float)* m_maxNumInVert * m_numBlock)));
size_t nTotalIter = 0;
size_t nIter = 2;
cudaFuncSetCacheConfig(FIMCuda, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(run_check_neghbor, cudaFuncCachePreferShared);
vector< vector<float> > tmp_h_verrT;
vector< vector<float> > tmp_h_verrT2;
tmp_h_verrT.resize(m_numBlock);
tmp_h_verrT2.resize(m_numBlock);
size_t totalIterationNumber = 0;
timerstart = clock();
//the result vector
std::vector< std::vector < float > > result;
m_meshPtr->vertT.resize(numVert);
size_t maxActive = 0;
while(numActive > 0)
{
maxActive = static_cast<int>(MAX(maxActive, numActive));
///////step 1: run solver /////////////////////////////////////
nTotalIter++;
//don't do more than maxIters
if (nTotalIter > maxIters) break;
if (verbose ) {
size_t act = numActive / 3;
for(size_t ab = 0; ab < 60; ab++) {
if (ab < act)
printf("=");
else
printf(" ");
}
printf(" %d Active blocks.\n", numActive);
}
totalIterationNumber += numActive;
dim3 dimGrid(static_cast<int>(numActive), 1);
dim3 dimBlock(m_maxNumTotalTets, 1);
cudaSafeCall(cudaMemcpy(d_ActiveList, &h_ActiveList[0], sizeof(int)* m_numBlock, cudaMemcpyHostToDevice));
int sharedSize = sizeof(float)* 4 * m_maxNumTotalTets + sizeof(int)* m_maxNumInVert * m_maxVertMappingInside;
(FIMCuda << <dimGrid, dimBlock, sharedSize >> >(d_tetMem0, d_tetMem1, d_tetT, d_vertT, d_speedInv, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_con, d_ActiveList, m_maxNumInVert, m_maxVertMappingInside,
m_maxVertMappingOutside, static_cast<int>(nIter)));
cudaCheckError();
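///////step 2: copy the updated travel times from tet storage back to the per-vertex array /////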
dimBlock = dim3(m_maxNumInVert, 1);
CopyOutBack << <dimGrid, dimBlock >> >(d_tetT, d_vertT, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_ActiveList, m_maxNumInVert, m_maxNumTotalTets, m_maxVertMappingInside,
m_maxVertMappingOutside);
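///////step 3: reduction to decide which active blocks have converged /////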
dimBlock = dim3(m_maxNumInVert, 1);
run_reduction << <dimGrid, dimBlock >> >(d_con, d_blockCon, d_ActiveList, static_cast<int>(numActive), d_BlockSizes);
cudaSafeCall(cudaMemcpy(h_blockCon, d_blockCon, m_numBlock * sizeof(bool), cudaMemcpyDeviceToHost));
size_t nOldActiveBlock = numActive;
numActive = 0;
h_ActiveListNew.clear();
for(uint i = 0; i < nOldActiveBlock; i++)
{
uint currBlkIdx = h_ActiveList[i];
h_BlockLabel[currBlkIdx] = FARP;
if(!h_blockCon[currBlkIdx]) // if not converged
{
h_BlockLabel[currBlkIdx] = ACTIVE;
}
}
for(uint i = 0; i < nOldActiveBlock; i++)
{
// check neighbors of current active tile
uint currBlkIdx = h_ActiveList[i];
if(h_blockCon[currBlkIdx]) //converged
{
set<int> nb = m_BlockNeighbor[currBlkIdx];
set<int>::iterator iter;
for(iter = nb.begin(); iter != nb.end(); iter++)
{
int currIdx = *iter;
if(h_BlockLabel[currIdx] == FARP)
{
h_BlockLabel[currIdx] = ACTIVE;
h_ActiveListNew.push_back(currIdx);
}
}
}
}
for(uint i = 0; i < nOldActiveBlock; i++)
{
uint currBlkIdx = h_ActiveList[i];
if(!h_blockCon[currBlkIdx]) // if not converged
{
h_ActiveList[numActive++] = currBlkIdx;
}
}
//////////////////////////////////////////////////////////////////
// 4. run solver only once for neighbor blocks of converged block
// current active list contains active blocks and neighbor blocks of
// any converged blocks
if(h_ActiveListNew.size() > 0)
{
size_t numActiveNew = h_ActiveListNew.size();
cudaSafeCall(cudaMemcpy(d_ActiveList, &h_ActiveListNew[0], numActiveNew * sizeof(int), cudaMemcpyHostToDevice));
dim3 dimGrid(static_cast<int>(numActiveNew), 1);
dim3 dimBlock(m_maxNumTotalTets, 1);
int sharedSize = sizeof(float4) * m_maxNumTotalTets + sizeof(int)* m_maxNumInVert * m_maxVertMappingInside;
run_check_neghbor << <dimGrid, dimBlock, sharedSize >> >(d_tetMem0, d_tetMem1, d_tetT, d_speedInv, d_vertMem, d_vertMemOutside,
d_BlockSizes, d_con, d_ActiveList, m_maxNumInVert, m_maxVertMappingInside, m_maxVertMappingOutside);
////////////////////////////////////////////////////////////////
// 5. reduction
///////////////////////////////////////////////////////////////
dimGrid = dim3(static_cast<int>(numActiveNew), 1);
dimBlock = dim3(m_maxNumInVert, 1);
run_reduction << <dimGrid, dimBlock >> >(d_con, d_blockCon, d_ActiveList, static_cast<int>(numActiveNew), d_BlockSizes);
//////////////////////////////////////////////////////////////////
// 6. update active list
// read back active volume from the device and add
// active block to active list on the host memory
cudaSafeCall(cudaMemcpy(h_blockCon, d_blockCon, m_numBlock * sizeof(bool), cudaMemcpyDeviceToHost));
for(uint i = 0; i < h_ActiveListNew.size(); i++)
{
uint currBlkIdx = h_ActiveListNew[i];
if(!h_blockCon[currBlkIdx]) // false : activate block (not converged)
{
h_ActiveList[numActive++] = currBlkIdx;
}
else h_BlockLabel[currBlkIdx] = FARP;
}
}
////////////////////////copy values from each iteration
cudaSafeCall(cudaMemcpy(h_vertT, d_vertT,
sizeof(float)* m_maxNumInVert * m_numBlock, cudaMemcpyDeviceToHost));
for(int i = 0; i < m_numBlock; i++)
{
for(int j = 0; j < m_PartitionInVerts[i].size(); j++)
{
m_meshPtr->vertT[m_PartitionInVerts[i][j]] =
h_vertT[i * m_maxNumInVert + j];
}
}
result.push_back(m_meshPtr->vertT);
////////////////////////////////END copy
}
cudaSafeCall(cudaDeviceSynchronize());
timerend = clock();
double duration = (double)(timerend - timerstart) / CLOCKS_PER_SEC;
if (verbose)
printf("Computing time : %.10lf s\n",duration);
cudaSafeCall(cudaDeviceSynchronize());
if (verbose)
printf("num of max active %d\n", maxActive);
if (verbose) {
printf("The iteration number: %d\n", nTotalIter);
printf("The total iteration number: %d\n", totalIterationNumber);
}
cudaSafeCall(cudaFree(d_con));
cudaSafeCall(cudaFree(d_blockCon));
cudaSafeCall(cudaFree(d_BlockSizes));
free(h_blockCon);
free(h_BlockSizes);
return result;
}
void meshFIM3dEikonal::PartitionTets(int numBlock, bool verbose)
{
///////////////////////////////////step 3: partition faces//////////////////////////////////////
if (verbose)
printf("Start PartitionTets ...");
m_PartitionTets.resize(numBlock);
m_PartitionNbTets.resize(numBlock);
size_t numTets = m_meshPtr->tets.size();
size_t numVerts = m_meshPtr->vertices.size();
TetMesh::Tet t;
vector<TetMesh::Tet> virtualTets;
vector<int> virtualTetCnt;
virtualTetCnt.resize(numBlock);
m_PartitionVirtualTets.resize(numBlock);
set<int> labels;
for(int i = 0; i < numTets; i++)
{
t = m_meshPtr->tets[i];
size_t vfCnt = m_meshPtr->tetVirtualTets[i].size();
int obtusevert = t.obtuseV;
if(obtusevert >= 0)
{
virtualTetCnt[m_PartitionLabel[t[obtusevert]]] += static_cast<int>(vfCnt);
m_PartitionVirtualTets[m_PartitionLabel[t[obtusevert]]].insert(
m_PartitionVirtualTets[m_PartitionLabel[t[obtusevert]]].end(),
m_meshPtr->tetVirtualTets[i].begin(), m_meshPtr->tetVirtualTets[i].end());
}
labels.clear();
for(int m = 0; m < 4; m++)
labels.insert(labels.begin(), m_PartitionLabel[t[m]]);
if(labels.size() == 1)
{
m_PartitionTets[*(labels.begin())].push_back(i);
}
else if(labels.size() > 1)
{
set<int>::iterator it = labels.begin();
for(set<int>::iterator it = labels.begin(); it != labels.end(); it++)
{
m_PartitionNbTets[*it].push_back(i);
}
}
else
printf("Error!!\n");
}
vector<int> PartitionTotalTets;
PartitionTotalTets.resize(numBlock);
m_maxNumTotalTets = 0;
for(int j = 0; j < numBlock; j++)
{
PartitionTotalTets[j] = static_cast<int>(m_PartitionTets[j].size() + m_PartitionNbTets[j].size() + virtualTetCnt[j]);
m_maxNumTotalTets = MAX(PartitionTotalTets[j], m_maxNumTotalTets);
}
if (verbose)
printf("m_maxNumTotalTets is %d\n", m_maxNumTotalTets);
//calculate block neighbors.
m_BlockNeighbor.resize(numBlock);
for(int i = 0; i < numVerts; i++)
{
vector<int> nbs = m_meshPtr->neighbors[i];
for(int j = 0; j < nbs.size(); j++)
{
int nb = nbs[j];
if(m_PartitionLabel[nb] != m_PartitionLabel[i])
m_BlockNeighbor[m_PartitionLabel[i]].insert(m_BlockNeighbor[m_PartitionLabel[i]].end(), m_PartitionLabel[nb]);
}
}
if (verbose)
printf("done!\n");
}
bool meshFIM3dEikonal::gettetmem(vector<float>& tetmem, TetMesh::Tet t)
{
bool needswap = false;
tetmem.resize(6);
point A = m_meshPtr->vertices[t[0]];
point B = m_meshPtr->vertices[t[1]];
point C = m_meshPtr->vertices[t[2]];
point D = m_meshPtr->vertices[t[3]];
point AB = B - A;
point AC = C - A;
point AD = D - A;
point BC = C - B;
point CD = D - C;
point BD = D - B;
tetmem[0] = vMv(AC, t.M, BC);
tetmem[1] = vMv(BC, t.M, CD);
tetmem[2] = vMv(AC, t.M, CD);
tetmem[3] = vMv(AD, t.M, BD);
tetmem[4] = vMv(AC, t.M, AD);
tetmem[5] = vMv(BC, t.M, BD);
return needswap;
}
void meshFIM3dEikonal::GetTetMem(float* &h_tetMem0, float* &h_tetMem1, float* &h_tetT)
{
h_tetMem0 = (float*)malloc(3 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
h_tetMem1 = (float*)malloc(3 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
h_tetT = (float*)malloc(4 * sizeof(float)* m_maxNumTotalTets * m_numBlock);
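//layout per block: each tet occupies three floats in h_tetMem0 and three in h_tetMem1 (the metric terms from gettetmem) plus four travel-time slots (one per corner) in h_tetT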
size_t numTets = m_meshPtr->tets.size();
size_t numVert = m_meshPtr->vertices.size();
m_blockVertMapping.resize(numVert); //for each vertex, store the addresses (offsets into the global h_tetT array) where it appears.
TetMesh::Tet t;
for(int i = 0; i < m_numBlock; i++)
{
int blockIdx = i * m_maxNumTotalTets * 3;
size_t numPF = m_PartitionTets[i].size();
for(int j = 0; j < numPF; j++)
{
t = m_meshPtr->tets[m_PartitionTets[i][j]];
vector<float> tetmem;
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
}
}
for(int i = 0; i < m_numBlock; i++)
{
int blockIdx = i * m_maxNumTotalTets * 3;
size_t numPF = m_PartitionTets[i].size();
size_t numPNF = m_PartitionNbTets[i].size();
size_t numPVF = m_PartitionVirtualTets[i].size();
int k = 0;
int l = 0;
for (int j = static_cast<int>(numPF); j < m_maxNumTotalTets; j++)
{
if(j < numPF + numPNF)
{
vector<float> tetmem;
t = m_meshPtr->tets[m_PartitionNbTets[i][k]];
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
k++;
}
else if(j < numPF + numPNF + numPVF)
{
t = m_PartitionVirtualTets[i][l];
vector<float> tetmem;
bool needswap = gettetmem(tetmem, t);
h_tetMem0[blockIdx + j * 3 + 0] = tetmem[0];
h_tetMem0[blockIdx + j * 3 + 1] = tetmem[1];
h_tetMem0[blockIdx + j * 3 + 2] = tetmem[2];
h_tetMem1[blockIdx + j * 3 + 0] = tetmem[3];
h_tetMem1[blockIdx + j * 3 + 1] = tetmem[4];
h_tetMem1[blockIdx + j * 3 + 2] = tetmem[5];
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
m_blockVertMapping[t[0]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 0);
m_blockVertMapping[t[3]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 3);
if(needswap)
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
}
else
{
m_blockVertMapping[t[1]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 1);
m_blockVertMapping[t[2]%m_blockVertMapping.size()].push_back(i * m_maxNumTotalTets * 4 + j * 4 + 2);
}
l++;
}
else
{
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 0] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 1] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 2] = LARGENUM;
h_tetT[i * m_maxNumTotalTets * 4 + j * 4 + 3] = LARGENUM;
}
}
}
}
void meshFIM3dEikonal::GetVertMem(int* &h_vertMem, int* &h_vertMemOutside)
{
size_t numVert = m_meshPtr->vertices.size();
m_blockVertMappingInside.resize(numVert);
m_blockVertMappingOutside.resize(numVert);
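//split every vertex's h_tetT addresses into those that fall inside its own block's tet range and those owned by neighboring blocks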
m_maxNumVertMapping = 0;
for(int i = 0; i < m_numBlock; i++)
{
int triIdx = i * TETMEMLENGTH * m_maxNumTotalTets;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
m_maxNumVertMapping = static_cast<int>(MAX(m_maxNumVertMapping, m_blockVertMapping[i%m_blockVertMapping.size()].size()));
vector<int> tmp = m_blockVertMapping[m_PartitionInVerts[i][m]%m_blockVertMapping.size()];
for(int n = 0; n < tmp.size(); n++)
{
if(tmp[n] >= triIdx + 0 && tmp[n] < triIdx + m_maxNumTotalTets * TETMEMLENGTH)
m_blockVertMappingInside[m_PartitionInVerts[i][m]].push_back(tmp[n]);
else
{
m_blockVertMappingOutside[m_PartitionInVerts[i][m]].push_back(tmp[n]);
}
}
}
}
m_maxVertMappingInside = 0;
m_maxVertMappingOutside = 0;
for(int i = 0; i < numVert; i++)
{
m_maxVertMappingInside = static_cast<int>(MAX(m_maxVertMappingInside, (m_blockVertMappingInside[i].size())));
m_maxVertMappingOutside = static_cast<int>(MAX(m_maxVertMappingOutside, (m_blockVertMappingOutside[i].size())));
}
h_vertMem = (int*)malloc(sizeof(int)* m_maxVertMappingInside * m_maxNumInVert * m_numBlock);
for(int i = 0; i < m_numBlock; i++)
{
int vertIdx = i * m_maxVertMappingInside * m_maxNumInVert;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
size_t tmpsize = m_blockVertMappingInside[m_PartitionInVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMem[vertIdx + m * m_maxVertMappingInside + n] = m_blockVertMappingInside[m_PartitionInVerts[i][m]][n];
for(; n < m_maxVertMappingInside; n++)
h_vertMem[vertIdx + m * m_maxVertMappingInside + n] = -1 + i * m_maxNumTotalTets * TETMEMLENGTH;
}
for (size_t m = m_PartitionInVerts[i].size() * m_maxVertMappingInside; m < m_maxNumInVert * m_maxVertMappingInside; m++)
{
h_vertMem[vertIdx + m] = -1 + i * m_maxNumTotalTets*TETMEMLENGTH;
}
}
h_vertMemOutside = (int*)malloc(m_maxNumInVert * m_numBlock * m_maxVertMappingOutside * sizeof(int));
for(int i = 0; i < m_numBlock; i++)
{
int vertIdx = i * m_maxVertMappingOutside * m_maxNumInVert;
for(int m = 0; m < m_PartitionInVerts[i].size(); m++)
{
size_t tmpsize = m_blockVertMappingOutside[m_PartitionInVerts[i][m]].size();
int n = 0;
for(; n < tmpsize; n++)
h_vertMemOutside[vertIdx + m * m_maxVertMappingOutside + n] = m_blockVertMappingOutside[m_PartitionInVerts[i][m]][n];
for(; n < m_maxVertMappingOutside; n++)
h_vertMemOutside[vertIdx + m * m_maxVertMappingOutside + n] = -1;
}
for (size_t m = m_PartitionInVerts[i].size() * m_maxVertMappingOutside; m < m_maxNumInVert * m_maxVertMappingOutside; m++)
{
h_vertMemOutside[vertIdx + m] = -1;
}
}
}
|
1c7eec697f96a15e351f82cdc6f8b035219861fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define N 100
__global__ void kernel_1() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_2() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_3() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int n_streams = 3;
hipStream_t *streams = (hipStream_t *)malloc(n_streams * sizeof(hipStream_t));
for (int i = 0 ; i < n_streams; i++) {
hipStreamCreate(&streams[i]);
}
dim3 block(1);
dim3 grid(1);
for (int i = 0; i < n_streams; i++) {
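// each loop iteration issues kernel_1..kernel_3 back-to-back into streams[i]; launches within one stream execute in order, while different streams may run concurrently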
hipLaunchKernelGGL(( kernel_1), dim3(grid), dim3(block), 0, streams[i], );
//}
//for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_2), dim3(grid), dim3(block), 0, streams[i], );
//}
//for (int i = 0; i < n_streams; i++) {
hipLaunchKernelGGL(( kernel_3), dim3(grid), dim3(block), 0, streams[i], );
}
printf("done\n");
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
// CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipDeviceReset());
return 0;
}
| 1c7eec697f96a15e351f82cdc6f8b035219861fc.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define N 100
__global__ void kernel_1() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_2() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_3() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int n_streams = 3;
cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0 ; i < n_streams; i++) {
cudaStreamCreate(&streams[i]);
}
dim3 block(1);
dim3 grid(1);
for (int i = 0; i < n_streams; i++) {
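// each loop iteration issues kernel_1..kernel_3 back-to-back into streams[i]; launches within one stream execute in order, while different streams may run concurrently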
kernel_1<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_2<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_3<<<grid, block, 0, streams[i]>>>();
}
printf("done\n");
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
// CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaDeviceReset());
return 0;
}
|
77e0bc4d20c6331da9f2a9762531d27f6286bae3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Thumbnail_ushort2(hipTextureObject_t ushort2_tex, int *histogram, int src_width, int src_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < src_height && x < src_width)
{
ushort2 pixel = tex2D<ushort2>(ushort2_tex, x, y);
atomicAdd(&histogram[(pixel.x + 128) >> 8], 1);
atomicAdd(&histogram[256 + ((pixel.y + 128) >> 8)], 1);
}
} | 77e0bc4d20c6331da9f2a9762531d27f6286bae3.cu | #include "includes.h"
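// builds two 256-bin histograms (channel x, then channel y at offset 256) over a ushort2 texture; (v + 128) >> 8 buckets each 16-bit sample into one of 256 bins with rounding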
__global__ void Thumbnail_ushort2(cudaTextureObject_t ushort2_tex, int *histogram, int src_width, int src_height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y < src_height && x < src_width)
{
ushort2 pixel = tex2D<ushort2>(ushort2_tex, x, y);
atomicAdd(&histogram[(pixel.x + 128) >> 8], 1);
atomicAdd(&histogram[256 + ((pixel.y + 128) >> 8)], 1);
}
} |
7cf5946720685e3fa03fc723c745def2b1c9457a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
template <typename T>
__global__ void KeNearestNeighborInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T out_pos = out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(in_pos, out_pos);
}
}
template <typename T>
__global__ void KeBilinearInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners, const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
template <typename T>
__global__ void KeBilinearInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const T ratio_h, const T ratio_w,
const bool align_corners, const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5
: ratio_h * out_img_idy;
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5
: ratio_w * out_img_idx;
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T* out_pos = &out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w],
h1lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id],
h1lambda * w1lambda * out_pos[0]);
}
}
template <typename T>
class InterpolateOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* input_data = input->data<T>();
int n = input->dims()[0];
int c = input->dims()[1];
int in_h = input->dims()[2];
int in_w = input->dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale = ctx.Attr<float>("scale");
if (scale > 0) {
out_h = in_h * scale;
out_w = in_w * scale;
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
auto* output_data =
output->mutable_data<T>({n, c, out_h, out_w}, ctx.GetPlace());
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h = 0.f;
float ratio_w = 0.f;
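// ratio_h/ratio_w map output coordinates back onto the input grid; with align_corners the first and last samples of input and output coincide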
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*input, ctx.GetPlace(), output);
return;
}
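// cap the launch at 8 blocks of 512 threads; the kernel covers all n * out_chw output values with a grid-stride loop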
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighborInterpFw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w, align_corners);
} else if ("bilinear" == interp_method) {
hipLaunchKernelGGL(( KeBilinearInterpFw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w, align_corners, align_mode);
}
}
};
template <typename T>
class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* output_grad_data = output_grad->data<T>();
auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
auto& device_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
math::SetConstant<platform::CUDADeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
int n = input_grad->dims()[0];
int c = input_grad->dims()[1];
int in_h = input_grad->dims()[2];
int in_w = input_grad->dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale = ctx.Attr<float>("scale");
if (scale > 0) {
out_h = in_h * scale;
out_w = in_w * scale;
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*output_grad, ctx.GetPlace(), input_grad);
return;
}
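// same capped grid-stride launch as the forward pass; the backward kernels scatter gradients with atomic adds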
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
hipLaunchKernelGGL(( KeNearestNeighborInterpBw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w, align_corners);
} else if ("bilinear" == interp_method) {
hipLaunchKernelGGL(( KeBilinearInterpBw<
T>), dim3(grid_dim), dim3(512), 0, ctx.cuda_device_context().stream(),
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(nearest_interp_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
| 7cf5946720685e3fa03fc723c745def2b1c9457a.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/interpolate_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using framework::Tensor;
template <typename T>
__global__ void KeNearestNeighborInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
out[tid] = in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
}
}
template <typename T>
__global__ void KeNearestNeighborInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = (align_corners)
? static_cast<int>(ratio_h * out_img_idy + 0.5)
: static_cast<int>(ratio_h * out_img_idy);
int out_img_idx = tid % out_img_w;
int in_img_idx = (align_corners)
? static_cast<int>(ratio_w * out_img_idx + 0.5)
: static_cast<int>(ratio_w * out_img_idx);
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T out_pos = out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(in_pos, out_pos);
}
}
template <typename T>
__global__ void KeBilinearInterpFw(
const T* in, const size_t in_img_h, const size_t in_img_w,
const size_t input_h, const size_t input_w, T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const float ratio_h, const float ratio_w,
const bool align_corners, const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = align_flag
? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5)
: static_cast<int>(ratio_h * out_img_idy);
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = align_flag
? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5)
: static_cast<int>(ratio_w * out_img_idx);
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
// bilinear interpolation
out[out_id_h * output_w + out_id_w] =
h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) +
h1lambda * (w2lambda * in_pos[h_id * in_img_w] +
w1lambda * in_pos[h_id * in_img_w + w_id]);
}
}
template <typename T>
__global__ void KeBilinearInterpBw(
T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h,
const size_t input_w, const T* out, const size_t out_img_h,
const size_t out_img_w, const size_t output_h, const size_t output_w,
const size_t num_channels, const T ratio_h, const T ratio_w,
const bool align_corners, const int align_mode) {
int nthreads = output_h * output_w;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
bool align_flag = (align_mode == 0 && !align_corners);
for (; tid < nthreads; tid += stride) {
int out_id_h = tid / output_w;
int out_id_w = tid % output_w;
int in_img_size = input_w / num_channels;
int out_img_size = output_w / num_channels;
int channel_id = out_id_w / out_img_size;
int out_img_idy = (out_id_w % out_img_size) / out_img_w;
int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5
: ratio_h * out_img_idy;
in_img_idy = (in_img_idy > 0) ? in_img_idy : 0;
int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0;
T src_h = ratio_h * (out_img_idy + 0.5) - 0.5;
src_h = (src_h > 0) ? src_h : 0;
T h1lambda =
align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy;
T h2lambda = 1.f - h1lambda;
int out_img_idx = tid % out_img_w;
int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5
: ratio_w * out_img_idx;
in_img_idx = (in_img_idx > 0) ? in_img_idx : 0;
int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0;
T src_w = ratio_w * (out_img_idx + 0.5) - 0.5;
src_w = (src_w > 0) ? src_w : 0;
T w1lambda =
align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx;
T w2lambda = 1.f - w1lambda;
T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size +
in_img_idy * in_img_w + in_img_idx];
const T* out_pos = &out[out_id_h * output_w + out_id_w];
platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w],
h1lambda * w2lambda * out_pos[0]);
platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id],
h1lambda * w1lambda * out_pos[0]);
}
}
template <typename T>
class InterpolateOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
auto* input_data = input->data<T>();
int n = input->dims()[0];
int c = input->dims()[1];
int in_h = input->dims()[2];
int in_w = input->dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale = ctx.Attr<float>("scale");
if (scale > 0) {
out_h = in_h * scale;
out_w = in_w * scale;
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
auto* output_data =
output->mutable_data<T>({n, c, out_h, out_w}, ctx.GetPlace());
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h = 0.f;
float ratio_w = 0.f;
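// ratio_h/ratio_w map output coordinates back onto the input grid; with align_corners the first and last samples of input and output coincide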
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*input, ctx.GetPlace(), output);
return;
}
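// cap the launch at 8 blocks of 512 threads; the kernel covers all n * out_chw output values with a grid-stride loop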
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
KeNearestNeighborInterpFw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w, align_corners);
} else if ("bilinear" == interp_method) {
KeBilinearInterpFw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n,
out_chw, c, ratio_h, ratio_w, align_corners, align_mode);
}
}
};
template <typename T>
class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* output_grad_data = output_grad->data<T>();
auto* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
auto& device_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
math::SetConstant<platform::CUDADeviceContext, T> zero;
zero(device_ctx, input_grad, static_cast<T>(0.0));
int n = input_grad->dims()[0];
int c = input_grad->dims()[1];
int in_h = input_grad->dims()[2];
int in_w = input_grad->dims()[3];
auto interp_method = ctx.Attr<std::string>("interp_method");
int out_h = ctx.Attr<int>("out_h");
int out_w = ctx.Attr<int>("out_w");
float scale = ctx.Attr<float>("scale");
if (scale > 0) {
out_h = in_h * scale;
out_w = in_w * scale;
}
auto out_size = ctx.Input<Tensor>("OutSize");
if (out_size != nullptr) {
Tensor sizes;
framework::TensorCopy(*out_size, platform::CPUPlace(), &sizes);
auto size_data = sizes.data<int>();
out_h = size_data[0];
out_w = size_data[1];
}
bool align_corners = ctx.Attr<bool>("align_corners");
int align_mode = ctx.Attr<int>("align_mode");
int in_hw = in_h * in_w;
int out_hw = out_h * out_w;
int in_chw = c * in_hw;
int out_chw = c * out_hw;
float ratio_h = 0.f;
float ratio_w = 0.f;
if (out_h > 1) {
ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1)
: static_cast<float>(in_h) / out_h;
}
if (out_w > 1) {
ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1)
: static_cast<float>(in_w) / out_w;
}
if (in_h == out_h && in_w == out_w) {
framework::TensorCopy(*output_grad, ctx.GetPlace(), input_grad);
return;
}
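// same capped grid-stride launch as the forward pass; the backward kernels scatter gradients with atomic adds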
int pixelNum = n * out_chw;
int grid_dim = (pixelNum + 512 - 1) / 512;
grid_dim = grid_dim > 8 ? 8 : grid_dim;
if ("nearest" == interp_method) {
KeNearestNeighborInterpBw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w, align_corners);
} else if ("bilinear" == interp_method) {
KeBilinearInterpBw<
T><<<grid_dim, 512, 0, ctx.cuda_device_context().stream()>>>(
input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h,
out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>,
ops::InterpolateOpCUDAKernel<double>,
ops::InterpolateOpCUDAKernel<int>);
REGISTER_OP_CUDA_KERNEL(nearest_interp_grad,
ops::InterpolateGradOpCUDAKernel<float>,
ops::InterpolateGradOpCUDAKernel<double>);
|
94d67ed5778da973c94693f30b20e6593346d0ed.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "mb.cuh"
#include <opencv2/opencv.hpp>
#include <opencv2/cudev.hpp>
#include <hip/hip_runtime.h>
//for __syncthreads()
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif //!(__CUDACC_RTC__)
#include <device_launch_parameters.h>
#include "mb_param.h"
__global__ void myKernel(cv::cudev::PtrStepSz<uchar3> dst) {
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if ((x < dst.cols) && (y < dst.rows)) {
//origin coordinates
double oy = (double)(dst.rows) / 2.0;
double ox = (double)(dst.cols) / 3.0;
//the reference complex number c for this pixel
double nx = ((double)x - 2 * ox) / ox;
double ny = ((double)y - oy) / oy;
//complex number z holding the sequence value
double zx = 0;
double zy = 0;
//whether the sequence converged
double convergenceDecision = true;
//temporaries used for the update step
double tx = 0;
double ty = 0;
//divergence speed (the larger, the faster it diverges)
int n = 0;
for (int i = 0; i <= INF; i++) {
double a = sqrt(zx * zx + zy * zy);
if (a >= LIMIT) {
convergenceDecision = false;
break;
}
tx = zx * zx - zy * zy + nx;
ty = 2 * zx * zy + ny;
zx = tx;
zy = ty;
n = i;
}
//pick a color according to the divergence speed
int r = (n % (INF / 11)) * 20;
int g = (n % (INF / 15)) * 15;
int b = (n % (INF / 19)) * 12;
if (convergenceDecision) {
//if it converged, do nothing (the pixel stays black)
} else {
dst.ptr(y)[x] = make_uchar3(b, g, r);
}
}
//__syncthreads();
}
void createMB(cv::cuda::GpuMat &mat) {
const dim3 block(32, 8);
const dim3 grid(cv::cudev::divUp(mat.cols, block.x), cv::cudev::divUp(mat.rows, block.y));
// launch the custom CUDA kernel (one thread per pixel)
myKernel << <grid, block >> > (mat);
CV_CUDEV_SAFE_CALL(hipGetLastError());
CV_CUDEV_SAFE_CALL(hipDeviceSynchronize());
} | 94d67ed5778da973c94693f30b20e6593346d0ed.cu | #pragma once
#include "mb.cuh"
#include <opencv2/opencv.hpp>
#include <opencv2/cudev.hpp>
#include <cuda_runtime.h>
//for __syncthreads()
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif //!(__CUDACC_RTC__)
#include <device_launch_parameters.h>
#include "mb_param.h"
__global__ void myKernel(cv::cudev::PtrStepSz<uchar3> dst) {
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if ((x < dst.cols) && (y < dst.rows)) {
//origin coordinates
double oy = (double)(dst.rows) / 2.0;
double ox = (double)(dst.cols) / 3.0;
//the reference complex number c for this pixel
double nx = ((double)x - 2 * ox) / ox;
double ny = ((double)y - oy) / oy;
		// Complex number holding the sequence result (z)
double zx = 0;
double zy = 0;
		// Whether the sequence converged
		bool convergenceDecision = true;
		// Temporaries for the computation
double tx = 0;
double ty = 0;
		// Divergence speed (larger = diverges sooner)
int n = 0;
for (int i = 0; i <= INF; i++) {
double a = sqrt(zx * zx + zy * zy);
if (a >= LIMIT) {
convergenceDecision = false;
break;
}
tx = zx * zx - zy * zy + nx;
ty = 2 * zx * zy + ny;
zx = tx;
zy = ty;
n = i;
}
		// Change the color according to the divergence speed
int r = (n % (INF / 11)) * 20;
int g = (n % (INF / 15)) * 15;
int b = (n % (INF / 19)) * 12;
if (convergenceDecision) {
			// If converged, do nothing (the pixel stays black)
} else {
dst.ptr(y)[x] = make_uchar3(b, g, r);
}
}
//__syncthreads();
}
void createMB(cv::cuda::GpuMat &mat) {
const dim3 block(32, 8);
const dim3 grid(cv::cudev::divUp(mat.cols, block.x), cv::cudev::divUp(mat.rows, block.y));
	// Launch the custom CUDA kernel
myKernel << <grid, block >> > (mat);
CV_CUDEV_SAFE_CALL(cudaGetLastError());
CV_CUDEV_SAFE_CALL(cudaDeviceSynchronize());
} |
0c211bd3344f50d065421feb46620fb3e3737c05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
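// get_intervals maps an output index to the start offset of its pooling window:
// alpha = (inputSize - poolSize) / (outputSize - 1) spreads the windows evenly across the
// input, the random sample in [0, 1) jitters each start, and the last window is pinned
// flush with the input edge.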
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
accscalar_t sample,
int64_t index,
int64_t inputSize,
int64_t outputSize,
int64_t poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int64_t>((index + sample) * alpha) - \
static_cast<int64_t>(sample * alpha);
}
}
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
  // Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
void fractional_max_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef output_size,
const Tensor& indices) {
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t ndims = input.ndimension();
if (ndims == 5) {
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
TORCH_CHECK(
outputT == gradOutput.size(dimt),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput time unexpected"
);
TORCH_CHECK(
outputH == gradOutput.size(dimh),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput height unexpected"
);
TORCH_CHECK(
outputW == gradOutput.size(dimw),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput width unexpected"
);
/* resize */
gradInput.resize_as_(input);
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 4) {
gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
outputH, outputW});
indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
outputW});
}
if (gradInput.numel() == 0) {
return;
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3) * gradOutput_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
gradOutput.scalar_type(),
"fractional_max_pool3d_backward_out_frame",
[&] {
hipLaunchKernelGGL(( fractional_max_pool3d_backward_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_.packed_accessor64<scalar_t, 5>(),
gradOutput_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>()
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
}// namespace
TORCH_IMPL_FUNC(fractional_max_pool3d_out_cuda) (
const Tensor& input,
int64_t poolSizeT,
int64_t poolSizeH,
int64_t poolSizeW,
int64_t outputT,
int64_t outputH,
int64_t outputW,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t ndims = input.ndimension();
if (ndims == 5) {
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
hipLaunchKernelGGL(( fractional_max_pool3d_out_frame<scalar_t>)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
);
}
Tensor& fractional_max_pool3d_backward_out_cuda(const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef /*pool_size*/,
IntArrayRef output_size,
const at::Tensor& indices,
at::Tensor& gradInput) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda");
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
output_size,
indices
);
return gradInput;
}
Tensor fractional_max_pool3d_backward_cuda(
const at::Tensor& gradOutput,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput,
input,
output_size,
indices
);
return gradInput;
}
}// native
}// at
| 0c211bd3344f50d065421feb46620fb3e3737c05.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
using namespace at::cuda::detail;
namespace {
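// get_intervals maps an output index to the start offset of its pooling window:
// alpha = (inputSize - poolSize) / (outputSize - 1) spreads the windows evenly across the
// input, the random sample in [0, 1) jitters each start, and the last window is pinned
// flush with the input edge.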
template <typename scalar_t, typename accscalar_t>
__device__ inline int64_t get_intervals(
accscalar_t sample,
int64_t index,
int64_t inputSize,
int64_t outputSize,
int64_t poolSize) {
accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) /
static_cast<accscalar_t>(outputSize - 1);
if (index == outputSize - 1) {
return inputSize - poolSize;
} else {
return static_cast<int64_t>((index + sample) * alpha) - \
static_cast<int64_t>(sample * alpha);
}
}
template <typename scalar_t>
__global__ void fractional_max_pool3d_out_frame(
PackedTensorAccessor64<scalar_t, 5> input,
PackedTensorAccessor64<scalar_t, 5> output,
PackedTensorAccessor64<int64_t, 5> indices,
PackedTensorAccessor64<scalar_t, 3> samples,
int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
// Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < output.size(2) * output.size(3) *
output.size(4)){
int64_t outputT = ourOutputPoint / (output.size(3) *
output.size(4));
int64_t outputH = (ourOutputPoint / output.size(4)) %
output.size(3);
int64_t outputW = ourOutputPoint % output.size(4);
int64_t poolT = get_intervals<scalar_t,accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][0]),
outputT, input.size(2), output.size(2), poolSizeT);
int64_t poolH = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][1]),
outputH, input.size(3), output.size(3), poolSizeH);
int64_t poolW = get_intervals<scalar_t, accscalar_t>(
static_cast<accscalar_t>(samples[batch][plane][2]),
outputW, input.size(4), output.size(4), poolSizeW);
scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound();
int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW;
for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) {
for (int64_t h = poolH; h < poolH + poolSizeH; ++h) {
if(poolSizeW < 2 || poolSizeW > 7) {
for (int64_t w = poolW; w < poolW + poolSizeW; ++w) {
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) *
input.size(4) + h * input.size(4) + w;
maxVal = val;
}
}
} else {
for (int64_t i = 0; i < poolSizeW; ++i) {
int64_t w = i + poolW;
scalar_t val = input[batch][plane][t][h][w];
// for consistency with THNN, favor the first max
if (val > maxVal || at::_isnan(val)) {
maxIndex = t * input.size(3) * input.size(4) +
h * input.size(4) + w;
maxVal = val;
}
}
}
}
}
indices[batch][plane][outputT][outputH][outputW] = maxIndex;
output[batch][plane][outputT][outputH][outputW] = maxVal;
}
}
template <typename scalar_t>
__global__ void fractional_max_pool3d_backward_out_frame(
PackedTensorAccessor64<scalar_t, 5> gradInput,
PackedTensorAccessor64<scalar_t, 5> gradOutput,
PackedTensorAccessor64<int64_t, 5> indices) {
  // Output (t, h, w) point that this thread is responsible for
int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x;
int64_t plane = blockIdx.y;
int64_t batch = blockIdx.z;
// Each thread generates a specific output point
if (ourOutputPoint < gradOutput.size(2) *
gradOutput.size(3) * gradOutput.size(4)) {
int64_t outputW = ourOutputPoint % gradOutput.size(4);
int64_t outputH = (ourOutputPoint / gradOutput.size(4)) %
gradOutput.size(3);
int64_t outputT = ourOutputPoint / (gradOutput.size(3) *
gradOutput.size(4));
int64_t index = indices[batch][plane][outputT][outputH][outputW];
assert(index >= 0);
int64_t inputW = index % gradInput.size(4);
int64_t inputH = (index / gradInput.size(4)) %
gradInput.size(3);
int64_t inputT = index / (gradInput.size(3) *
gradInput.size(4));
assert(inputT < gradInput.size(2));
gpuAtomicAddNoReturn(
&gradInput[batch][plane][inputT][inputH][inputW],
gradOutput[batch][plane][outputT][outputH][outputW]
);
}
}
void fractional_max_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef output_size,
const Tensor& indices) {
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t outputT = output_size[0];
int64_t outputH = output_size[1];
int64_t outputW = output_size[2];
int64_t ndims = input.ndimension();
if (ndims == 5) {
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
TORCH_CHECK(
outputT == gradOutput.size(dimt),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput time unexpected"
);
TORCH_CHECK(
outputH == gradOutput.size(dimh),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput height unexpected"
);
TORCH_CHECK(
outputW == gradOutput.size(dimw),
"fractional_max_pool3d_backward_out_cuda_template(): ",
"gradOutput width unexpected"
);
/* resize */
gradInput.resize_as_(input);
gradInput.zero_();
auto gradInput_ = gradInput;
auto gradOutput_ = gradOutput;
auto indices_ = indices;
if(ndims == 4) {
gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT,
inputH, inputW});
gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT,
outputH, outputW});
indices_ = indices_.reshape({1, indices.size(0), outputT, outputH,
outputW});
}
if (gradInput.numel() == 0) {
return;
}
/* backprop */
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = gradOutput_.size(2) *
gradOutput_.size(3) * gradOutput_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
gradInput_.size(1),
gradInput_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
gradOutput.scalar_type(),
"fractional_max_pool3d_backward_out_frame",
[&] {
fractional_max_pool3d_backward_out_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInput_.packed_accessor64<scalar_t, 5>(),
gradOutput_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>()
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
}// namespace
TORCH_IMPL_FUNC(fractional_max_pool3d_out_cuda) (
const Tensor& input,
int64_t poolSizeT,
int64_t poolSizeH,
int64_t poolSizeW,
int64_t outputT,
int64_t outputH,
int64_t outputW,
const Tensor& randomSamples,
const Tensor& output,
const Tensor& indices
) {
int64_t planeDim = 0;
int64_t dimt = 1;
int64_t dimh = 2;
int64_t dimw = 3;
int64_t ndims = input.ndimension();
if (ndims == 5) {
planeDim++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
int64_t numPlanes = input.size(planeDim);
int64_t inputT = input.size(dimt);
int64_t inputH = input.size(dimh);
int64_t inputW = input.size(dimw);
auto output_ = output;
auto indices_ = indices;
auto input_ = input;
if(ndims == 4) {
output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW});
indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW});
input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW});
}
if (output_.numel() == 0) {
return;
}
// block is limited to 4 warps
// grid handles overflow per each plane
int64_t outputPlaneSize = output_.size(2) *
output_.size(3) * output_.size(4);
dim3 grid(
(outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128)
input_.size(1),
input_.size(0));
dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"fractional_max_pool3d_out_frame",
[&]{
fractional_max_pool3d_out_frame<scalar_t>
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_.packed_accessor64<scalar_t, 5>(),
output_.packed_accessor64<scalar_t, 5>(),
indices_.packed_accessor64<int64_t, 5>(),
randomSamples.packed_accessor64<scalar_t, 3>(),
poolSizeT, poolSizeH, poolSizeW
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
);
}
Tensor& fractional_max_pool3d_backward_out_cuda(const at::Tensor& gradOutput_,
const at::Tensor& input,
IntArrayRef /*pool_size*/,
IntArrayRef output_size,
const at::Tensor& indices,
at::Tensor& gradInput) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda");
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput_,
input,
output_size,
indices
);
return gradInput;
}
Tensor fractional_max_pool3d_backward_cuda(
const at::Tensor& gradOutput,
const at::Tensor& input,
IntArrayRef pool_size,
IntArrayRef output_size,
const at::Tensor& indices) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda");
Tensor gradInput = at::empty({0}, input.options());
fractional_max_pool3d_backward_out_cuda_template(
gradInput,
gradOutput,
input,
output_size,
indices
);
return gradInput;
}
}// native
}// at
|
bed3a731102b8875ccb9f3415bd2b163387d14d8.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<int, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| bed3a731102b8875ccb9f3415bd2b163387d14d8.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
namespace filter
{
template void linearRow<int, float>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
3b2068eb8075091da431291ee290efd9b03dcdd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "activate_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_ROCM)
template <typename T>
__device__ float ActivateValue(T x, int type, float slope) {
// PRelu: 0, Relu: 1, Leaky: 2, Sigmoid: 3, SoftPlus: 4, Tanh: 5
switch (type) {
case 1:
return x * (x > 0);
case 2:
return x > 0 ? x : T(slope * x);
case 3:
return 1 / (1 + expf(-x));
case 4:
return logf(1 + expf(x));
case 5: {
T exp_2x = expf(2 * x);
return (exp_2x - 1) / (exp_2x + 1);
}
default:
return x;
}
}
template <typename T>
__global__ void KernelActivate(T *data, int count, int type, float slope) {
CUDA_KERNEL_LOOP(globalid, count) {
data[globalid] = ActivateValue(data[globalid], type, slope);
}
}
template <typename T>
void Activate(T *data, int count, int type, float slope) {
hipLaunchKernelGGL(( KernelActivate<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0, data, count, type, slope);
CUDA_CHECK(hipPeekAtLastError());
}
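// KernelPRelu applies a channel-wise PReLU: negative inputs are scaled by slope_data[c];
// when the slope is shared across channels, div_factor == channels collapses every channel
// index to 0 so slope_data[0] is used everywhere.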
template <typename T>
__global__ void KernelPRelu(T *data, int count, int channels, int dim,
int div_factor, const T *slope_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int c = (globalid / dim) % channels / div_factor;
T value = data[globalid];
data[globalid] = value > 0 ? value : value * slope_data[c];
}
}
template <typename T>
void PRelu(T *data, const VecInt &in_shape, bool channel_shared,
const T *slope_data) {
int channels = in_shape[1], dim = 1;
for (int i = 2; i < in_shape.size(); ++i) dim *= in_shape[i];
int count = in_shape[0] * channels * dim;
int div_factor = channel_shared ? channels : 1;
hipLaunchKernelGGL(( KernelPRelu<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0, data, count, channels, dim,
div_factor, slope_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void Activate(float *data, int count, int type, float slope);
template void PRelu(float *data, const VecInt &in_shape, bool channel_shared,
const float *slope_data);
#endif
} // namespace Vision
} // namespace Shadow | 3b2068eb8075091da431291ee290efd9b03dcdd8.cu | #include "activate_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_CUDA)
template <typename T>
__device__ float ActivateValue(T x, int type, float slope) {
// PRelu: 0, Relu: 1, Leaky: 2, Sigmoid: 3, SoftPlus: 4, Tanh: 5
switch (type) {
case 1:
return x * (x > 0);
case 2:
return x > 0 ? x : T(slope * x);
case 3:
return 1 / (1 + expf(-x));
case 4:
return logf(1 + expf(x));
case 5: {
T exp_2x = expf(2 * x);
return (exp_2x - 1) / (exp_2x + 1);
}
default:
return x;
}
}
template <typename T>
__global__ void KernelActivate(T *data, int count, int type, float slope) {
CUDA_KERNEL_LOOP(globalid, count) {
data[globalid] = ActivateValue(data[globalid], type, slope);
}
}
template <typename T>
void Activate(T *data, int count, int type, float slope) {
KernelActivate<T><<<GetBlocks(count), NumThreads>>>(data, count, type, slope);
CUDA_CHECK(cudaPeekAtLastError());
}
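// KernelPRelu applies a channel-wise PReLU: negative inputs are scaled by slope_data[c];
// when the slope is shared across channels, div_factor == channels collapses every channel
// index to 0 so slope_data[0] is used everywhere.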
template <typename T>
__global__ void KernelPRelu(T *data, int count, int channels, int dim,
int div_factor, const T *slope_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int c = (globalid / dim) % channels / div_factor;
T value = data[globalid];
data[globalid] = value > 0 ? value : value * slope_data[c];
}
}
template <typename T>
void PRelu(T *data, const VecInt &in_shape, bool channel_shared,
const T *slope_data) {
int channels = in_shape[1], dim = 1;
for (int i = 2; i < in_shape.size(); ++i) dim *= in_shape[i];
int count = in_shape[0] * channels * dim;
int div_factor = channel_shared ? channels : 1;
KernelPRelu<T><<<GetBlocks(count), NumThreads>>>(data, count, channels, dim,
div_factor, slope_data);
CUDA_CHECK(cudaPeekAtLastError());
}
template void Activate(float *data, int count, int type, float slope);
template void PRelu(float *data, const VecInt &in_shape, bool channel_shared,
const float *slope_data);
#endif
} // namespace Vision
} // namespace Shadow |
41696fc4b7c45f2b1aee6cef19069ce687423948.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 2/21/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include "vec_mat_mult_kernel.cu"
#define MIN_NUMBER 1
#define MAX_NUMBER 4
extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix);
Matrix allocate_matrix(int, int, int);
void copy_matrix_to_device(Matrix, const Matrix);
void copy_matrix_from_device(Matrix, const Matrix);
void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix);
void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix);
void print_matrix(const Matrix);
float get_random_number(int, int);
int checkResults(float *, float *, int, float);
int
main(int argc, char** argv) {
struct timeval start, stop;
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0);
Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0);
// compute the vector-matrix multiplication on the CPU for comparison
gettimeofday(&start, NULL);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
gettimeofday(&stop, NULL);
printf("CPU Compute: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
// Perform the vector-matrix multiplication on the GPU using global memory
// Return the results in Y_gpu_1
vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
int size_elements = NUM_ROWS;
int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Perform the vector-matrix multiplication on the GPU using shared memory
// Return the results in Y_gpu_2
vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu_1.elements); Y_gpu_1.elements = NULL;
free(Y_gpu_2.elements); Y_gpu_2.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use global memory
void
vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y)
{
struct timeval start, stop;
//Creating device copies of each matrix
Matrix A_dev;
Matrix X_dev;
Matrix Y_dev;
//Allocating each Matrix onto the GPU
A_dev = allocate_matrix_on_gpu(A);
X_dev = allocate_matrix_on_gpu(X);
Y_dev = allocate_matrix_on_gpu(Y);
	//Copying the two input matrices to the device
copy_matrix_to_device(A_dev, A);
copy_matrix_to_device(X_dev, X);
//Setting the block and grid sizes to be used in the kernel
dim3 dimBlock(512, 1);
dim3 dimGrid(MATRIX_SIZE/dimBlock.x,1);
	//Calling the kernel with the block and grid sizes, passing the device element pointers
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( vec_mat_kernel_naive) , dim3(dimGrid), dim3(dimBlock) , 0, 0, A_dev.elements, X_dev.elements, Y_dev.elements);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
//Getting the output matrix from the gpu
copy_matrix_from_device(Y, Y_dev);
//Freeing up the elements off the GPU
hipFree(A_dev.elements);
hipFree(X_dev.elements);
hipFree(Y_dev.elements);
printf("CUDA Global: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use shared memory
void
vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y)
{
struct timeval start, stop;
Matrix A_dev;
Matrix X_dev;
Matrix Y_dev;
A_dev = allocate_matrix_on_gpu(A);
X_dev = allocate_matrix_on_gpu(X);
Y_dev = allocate_matrix_on_gpu(Y);
copy_matrix_to_device(A_dev, A);
copy_matrix_to_device(X_dev, X);
dim3 dimBlock(16, 16);
dim3 dimGrid(MATRIX_SIZE/dimBlock.x,1);
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( vec_mat_kernel_optimized) , dim3(dimGrid), dim3(dimBlock) , 0, 0, A_dev.elements, X_dev.elements, Y_dev.elements);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
copy_matrix_from_device(Y, Y_dev);
hipFree(A_dev.elements);
hipFree(X_dev.elements);
hipFree(Y_dev.elements);
printf("CUDA Shared: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M)
{
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M)
{
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
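// Compares the GPU result against the CPU reference element-wise using relative error,
// reports the maximum relative error observed, and returns 1 if every element is within
// the threshold.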
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
| 41696fc4b7c45f2b1aee6cef19069ce687423948.cu | /* Vector-matrix multiplication: Y = A * X.
* Host code.
* Author: Naga Kandasamy
* Date: 2/21/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include "vec_mat_mult_kernel.cu"
#define MIN_NUMBER 1
#define MAX_NUMBER 4
extern "C" void compute_gold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix allocate_matrix_on_gpu(const Matrix);
Matrix allocate_matrix(int, int, int);
void copy_matrix_to_device(Matrix, const Matrix);
void copy_matrix_from_device(Matrix, const Matrix);
void vec_mat_mult_on_device_using_global_memory(const Matrix, const Matrix, Matrix);
void vec_mat_mult_on_device_using_shared_memory(const Matrix, const Matrix, Matrix);
void print_matrix(const Matrix);
float get_random_number(int, int);
int checkResults(float *, float *, int, float);
int
main(int argc, char** argv) {
struct timeval start, stop;
// Matrices for the program
Matrix A; // N x N matrix
Matrix X; // N x 1 vector
Matrix Y_cpu, Y_gpu_1, Y_gpu_2; // N x 1 vector
// Initialize the random number generator with a seed value
srand(time(NULL));
// Check command line arguments
if(argc > 1){
printf("Error. This program accepts no arguments. \n");
exit(0);
}
// Allocate and initialize the matrices
A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); // Create a random N x N matrix
X = allocate_matrix(MATRIX_SIZE, 1, 1); // Create a random N x 1 vector
Y_cpu = allocate_matrix(MATRIX_SIZE, 1, 0); // Allocate memory for the output vectors
Y_gpu_1 = allocate_matrix(MATRIX_SIZE, 1, 0);
Y_gpu_2 = allocate_matrix(MATRIX_SIZE, 1, 0);
// compute the vector-matrix multiplication on the CPU for comparison
gettimeofday(&start, NULL);
compute_gold(Y_cpu.elements, A.elements, X.elements, A.num_rows, A.num_columns);
gettimeofday(&stop, NULL);
printf("CPU Compute: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
// Perform the vector-matrix multiplication on the GPU using global memory
// Return the results in Y_gpu_1
vec_mat_mult_on_device_using_global_memory(A, X, Y_gpu_1);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
int size_elements = NUM_ROWS;
int res = checkResults(Y_cpu.elements, Y_gpu_1.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Perform the vector-matrix multiplication on the GPU using shared memory
// Return the results in Y_gpu_2
vec_mat_mult_on_device_using_shared_memory(A, X, Y_gpu_2);
// check if the device result is equivalent to the expected solution
printf("Checking against reference result. \n");
res = checkResults(Y_cpu.elements, Y_gpu_2.elements, size_elements, 0.0001);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// Free host matrices
free(A.elements); A.elements = NULL;
free(X.elements); X.elements = NULL;
free(Y_cpu.elements); Y_cpu.elements = NULL;
free(Y_gpu_1.elements); Y_gpu_1.elements = NULL;
free(Y_gpu_2.elements); Y_gpu_2.elements = NULL;
return 0;
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use global memory
void
vec_mat_mult_on_device_using_global_memory(const Matrix A, const Matrix X, Matrix Y)
{
struct timeval start, stop;
//Creating device copies of each matrix
Matrix A_dev;
Matrix X_dev;
Matrix Y_dev;
//Allocating each Matrix onto the GPU
A_dev = allocate_matrix_on_gpu(A);
X_dev = allocate_matrix_on_gpu(X);
Y_dev = allocate_matrix_on_gpu(Y);
	//Copying the two input matrices to the device
copy_matrix_to_device(A_dev, A);
copy_matrix_to_device(X_dev, X);
//Setting the block and grid sizes to be used in the kernel
dim3 dimBlock(512, 1);
dim3 dimGrid(MATRIX_SIZE/dimBlock.x,1);
	//Calling the kernel with the block and grid sizes, passing the device element pointers
gettimeofday(&start, NULL);
vec_mat_kernel_naive <<< dimGrid, dimBlock >>> (A_dev.elements, X_dev.elements, Y_dev.elements);
cudaThreadSynchronize();
gettimeofday(&stop, NULL);
//Getting the output matrix from the gpu
copy_matrix_from_device(Y, Y_dev);
//Freeing up the elements off the GPU
cudaFree(A_dev.elements);
cudaFree(X_dev.elements);
cudaFree(Y_dev.elements);
printf("CUDA Global: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
}
// Complete the functionality of vector-matrix multiplication using the GPU
// Kernel should use shared memory
void
vec_mat_mult_on_device_using_shared_memory(const Matrix A, const Matrix X, Matrix Y)
{
struct timeval start, stop;
Matrix A_dev;
Matrix X_dev;
Matrix Y_dev;
A_dev = allocate_matrix_on_gpu(A);
X_dev = allocate_matrix_on_gpu(X);
Y_dev = allocate_matrix_on_gpu(Y);
copy_matrix_to_device(A_dev, A);
copy_matrix_to_device(X_dev, X);
dim3 dimBlock(16, 16);
dim3 dimGrid(MATRIX_SIZE/dimBlock.x,1);
gettimeofday(&start, NULL);
vec_mat_kernel_optimized <<< dimGrid, dimBlock >>> (A_dev.elements, X_dev.elements, Y_dev.elements);
cudaThreadSynchronize();
gettimeofday(&stop, NULL);
copy_matrix_from_device(Y, Y_dev);
cudaFree(A_dev.elements);
cudaFree(X_dev.elements);
cudaFree(Y_dev.elements);
printf("CUDA Shared: %fs \n",(float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));
}
// Allocate a device matrix of same size as M.
Matrix
allocate_matrix_on_gpu(const Matrix M)
{
Matrix Mdevice = M;
int size = M.num_rows * M.num_columns * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix
allocate_matrix(int num_rows, int num_columns, int init)
{
Matrix M;
M.num_columns = M.pitch = num_columns;
M.num_rows = num_rows;
int size = M.num_rows * M.num_columns;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < size; i++){
if(init == 0) M.elements[i] = 0;
else
M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER);
}
return M;
}
// Copy a host matrix to a device matrix.
void
copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.num_rows * Mhost.num_columns * sizeof(float);
Mdevice.num_rows = Mhost.num_rows;
Mdevice.num_columns = Mhost.num_columns;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void
copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost);
}
// Prints the matrix out to screen
void
print_matrix(const Matrix M)
{
for(unsigned int i = 0; i < M.num_rows; i++){
for(unsigned int j = 0; j < M.num_columns; j++)
printf("%f ", M.elements[i*M.num_columns + j]);
printf("\n");
}
printf("\n");
}
// Returns a random floating-point number between the specified min and max values
float
get_random_number(int min, int max){
return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX)));
}
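// Compares the GPU result against the CPU reference element-wise using relative error,
// reports the maximum relative error observed, and returns 1 if every element is within
// the threshold.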
int
checkResults(float *reference, float *gpu_result, int num_elements, float threshold)
{
int checkMark = 1;
float epsilon = 0.0;
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){
checkMark = 0;
break;
}
for(int i = 0; i < num_elements; i++)
if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){
epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]);
}
printf("Max epsilon = %f. \n", epsilon);
return checkMark;
}
|
6615e277b63bb5d24260548a3c3c041655e013db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2\opencv.hpp>
#include <thrust\device_vector.h>
#include <thrust\copy.h>
#include <ctime>
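// transformKernel one-hot encodes a grayscale class mask: thread i reads the class id of
// pixel (r, c), clamps out-of-range ids to class 0, and sets the matching channel in the
// row-major (rows x cols x nClasses) label buffer to 1.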
__global__ void transformKernel(unsigned char * mask, unsigned char * label, int cols, int size, int nClasses)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
{
int r = i / cols;
int c = i % cols;
unsigned char plane = mask[i];
unsigned char channel = plane >= nClasses ? 0 : plane;
int idx = channel + c * nClasses + r * cols * nClasses;
label[idx] = 1;
}
}
using namespace std;
using namespace cv;
const String keys = {
"{help h usage ? | | print this message }"
"{@image1 | | image1 for compare }"
"{@classes | 2 | number of classes }"
};
int convertMask(Mat& img, int nClasses, bool debug, bool onGpu)
{
vector<uchar> imgArray;
vector<vector<vector<uchar>>> labels3d(img.rows);
// Convert to a 3D representation
for (int i = 0; i < img.rows; i++)
{
labels3d[i].resize(img.cols);
for (int j = 0; j < img.cols; j++)
{
labels3d[i][j].resize(nClasses);
}
}
if (debug)
{
cout << "Classses: " << nClasses << endl;
cout << "Size: " << img.rows * img.cols << endl;
cout << "Rows: " << img.rows << endl << "Cols: " << img.cols << endl;
}
if (onGpu)
{
// copy from Mat -> vector
if (img.isContinuous())
{
imgArray.assign(img.datastart, img.dataend);
}
else
{
for (int i = 0; i < img.rows; i++)
{
imgArray.insert(imgArray.end(), img.ptr<uchar>(i), img.ptr<uchar>(i) + img.cols);
}
}
// Use Thrust to allocate device memory because it's easier
thrust::device_vector<uchar> d_mask(imgArray);
thrust::device_vector<uchar> d_label(d_mask.size() * nClasses, 0U);
int blockSize = 256;
int gridSize = (d_mask.size() + blockSize - 1) / blockSize;
if (debug)
{
cout << "Grid: " << gridSize << endl << "Block: " << blockSize << endl;
}
uchar * ptrMask = thrust::raw_pointer_cast(d_mask.data());
uchar * ptrLabels = thrust::raw_pointer_cast(d_label.data());
transformKernel << <gridSize, blockSize >> > (ptrMask, ptrLabels, img.cols, img.rows * img.cols, nClasses);
hipError_t cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
return 1;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kernel\n", cudaStatus);
return 1;
}
//Need to copy results back
thrust::host_vector<uchar> labels(d_label);
if (debug)
{
vector<uchar> bar;
auto it = std::copy_if(labels.begin(), labels.end(), std::back_inserter(bar), [](int i) {return i > 0; });
if (bar.size() != labels.size() / 2)
{
cout << "Kernel error";
}
cout << "Kernel completed successfully" << endl;
}
for (int i = 0; i < img.rows * img.cols * nClasses; i++)
{
int classs = i % nClasses;
int col = (i / nClasses) % img.cols;
int row = i / nClasses / img.cols;
labels3d[row][col][classs] = labels[i];
}
}
else
{
for (int i = 0; i < img.rows; i++)
{
for (int j = 0; j < img.cols; j++)
{
int classs = img.at<uchar>(i,j) < nClasses ? img.at<uchar>(i, j) : 0;
labels3d[i][j][classs] = 1;
}
}
}
return 0;
}
int main(int argc, char* argv[])
{
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String imgFile = parser.get<String>(0);
int nClasses = parser.get<int>(1);
Mat img = imread(imgFile, IMREAD_GRAYSCALE);
int sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096 };
auto l = convertMask(img, nClasses, true, true);
for (auto s : sizes)
{
Mat im;
resize(img, im, Size(s, s));
// GPU
double duration = 0;
int n = 5;
for (int i = 0; i < n; i++)
{
int start = clock();
convertMask(im, nClasses, false, true);
int stop = clock();
duration += (stop - start) / double(CLOCKS_PER_SEC) * 1000;
}
double durationCpu = 0;
// CPU
for (int i = 0; i < n; i++)
{
int start = clock();
convertMask(im, nClasses, false, false);
int stop = clock();
durationCpu += (stop - start) / double(CLOCKS_PER_SEC) * 1000;
}
cout << "Size: " << s << endl << "\tGPU: " << duration / n << endl << "\tCPU " << durationCpu / n << endl;
}
}
| 6615e277b63bb5d24260548a3c3c041655e013db.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <opencv2\opencv.hpp>
#include <thrust\device_vector.h>
#include <thrust\copy.h>
#include <ctime>
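// transformKernel one-hot encodes a grayscale class mask: thread i reads the class id of
// pixel (r, c), clamps out-of-range ids to class 0, and sets the matching channel in the
// row-major (rows x cols x nClasses) label buffer to 1.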
__global__ void transformKernel(unsigned char * mask, unsigned char * label, int cols, int size, int nClasses)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size)
{
int r = i / cols;
int c = i % cols;
unsigned char plane = mask[i];
unsigned char channel = plane >= nClasses ? 0 : plane;
int idx = channel + c * nClasses + r * cols * nClasses;
label[idx] = 1;
}
}
using namespace std;
using namespace cv;
const String keys = {
"{help h usage ? | | print this message }"
"{@image1 | | image1 for compare }"
"{@classes | 2 | number of classes }"
};
int convertMask(Mat& img, int nClasses, bool debug, bool onGpu)
{
vector<uchar> imgArray;
vector<vector<vector<uchar>>> labels3d(img.rows);
// Convert to a 3D representation
for (int i = 0; i < img.rows; i++)
{
labels3d[i].resize(img.cols);
for (int j = 0; j < img.cols; j++)
{
labels3d[i][j].resize(nClasses);
}
}
if (debug)
{
cout << "Classses: " << nClasses << endl;
cout << "Size: " << img.rows * img.cols << endl;
cout << "Rows: " << img.rows << endl << "Cols: " << img.cols << endl;
}
if (onGpu)
{
// copy from Mat -> vector
if (img.isContinuous())
{
imgArray.assign(img.datastart, img.dataend);
}
else
{
for (int i = 0; i < img.rows; i++)
{
imgArray.insert(imgArray.end(), img.ptr<uchar>(i), img.ptr<uchar>(i) + img.cols);
}
}
// Use Thrust to allocate device memory because it's easier
thrust::device_vector<uchar> d_mask(imgArray);
thrust::device_vector<uchar> d_label(d_mask.size() * nClasses, 0U);
int blockSize = 256;
int gridSize = (d_mask.size() + blockSize - 1) / blockSize;
if (debug)
{
cout << "Grid: " << gridSize << endl << "Block: " << blockSize << endl;
}
uchar * ptrMask = thrust::raw_pointer_cast(d_mask.data());
uchar * ptrLabels = thrust::raw_pointer_cast(d_label.data());
transformKernel << <gridSize, blockSize >> > (ptrMask, ptrLabels, img.cols, img.rows * img.cols, nClasses);
cudaError_t cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
return 1;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernel\n", cudaStatus);
return 1;
}
//Need to copy results back
thrust::host_vector<uchar> labels(d_label);
if (debug)
{
vector<uchar> bar;
auto it = std::copy_if(labels.begin(), labels.end(), std::back_inserter(bar), [](int i) {return i > 0; });
if (bar.size() != labels.size() / 2)
{
cout << "Kernel error";
}
cout << "Kernel completed successfully" << endl;
}
for (int i = 0; i < img.rows * img.cols * nClasses; i++)
{
int classs = i % nClasses;
int col = (i / nClasses) % img.cols;
int row = i / nClasses / img.cols;
labels3d[row][col][classs] = labels[i];
}
}
else
{
for (int i = 0; i < img.rows; i++)
{
for (int j = 0; j < img.cols; j++)
{
int classs = img.at<uchar>(i,j) < nClasses ? img.at<uchar>(i, j) : 0;
labels3d[i][j][classs] = 1;
}
}
}
return 0;
}
int main(int argc, char* argv[])
{
CommandLineParser parser(argc, argv, keys);
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
String imgFile = parser.get<String>(0);
int nClasses = parser.get<int>(1);
Mat img = imread(imgFile, IMREAD_GRAYSCALE);
int sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096 };
auto l = convertMask(img, nClasses, true, true);
for (auto s : sizes)
{
Mat im;
resize(img, im, Size(s, s));
// GPU
double duration = 0;
int n = 5;
for (int i = 0; i < n; i++)
{
int start = clock();
convertMask(im, nClasses, false, true);
int stop = clock();
duration += (stop - start) / double(CLOCKS_PER_SEC) * 1000;
}
double durationCpu = 0;
// CPU
for (int i = 0; i < n; i++)
{
int start = clock();
convertMask(im, nClasses, false, false);
int stop = clock();
durationCpu += (stop - start) / double(CLOCKS_PER_SEC) * 1000;
}
cout << "Size: " << s << endl << "\tGPU: " << duration / n << endl << "\tCPU " << durationCpu / n << endl;
}
}
|
c62439bf4e7984643824db57bf0dc425d8855bfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace operators {
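// GenAnchors writes one (xmin, ymin, xmax, ymax) anchor per thread: each box is centered on
// the feature-map cell given by stride and offset, with its width and height derived from
// the anchor's aspect ratio and anchor size.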
template <typename T>
__global__ void GenAnchors(T* out, const T* aspect_ratios, const int ar_num,
const T* anchor_sizes, const int as_num,
const T* stride, const int sd_num, const int height,
const int width, const T offset) {
int num_anchors = as_num * ar_num;
int box_num = height * width * num_anchors;
CUDA_KERNEL_LOOP(i, box_num) {
int h_idx = i / (num_anchors * width);
int w_idx = (i / num_anchors) % width;
T stride_width = stride[0];
T stride_height = stride[1];
T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1);
T y_ctr = (h_idx * stride_height) + offset * (stride_height - 1);
T area, area_ratios;
T base_w, base_h;
T scale_w, scale_h;
T anchor_width, anchor_height;
int anch_idx = i % num_anchors;
int ar_idx = anch_idx / as_num;
int as_idx = anch_idx % as_num;
T aspect_ratio = aspect_ratios[ar_idx];
T anchor_size = anchor_sizes[as_idx];
area = stride_width * stride_height;
area_ratios = area / aspect_ratio;
base_w = round(sqrt(area_ratios));
base_h = round(base_w * aspect_ratio);
scale_w = anchor_size / stride_width;
scale_h = anchor_size / stride_height;
anchor_width = scale_w * base_w;
anchor_height = scale_h * base_h;
T xmin = (x_ctr - .5f * (anchor_width - 1));
T ymin = (y_ctr - .5f * (anchor_height - 1));
T xmax = (x_ctr + .5f * (anchor_width - 1));
T ymax = (y_ctr + .5f * (anchor_height - 1));
reinterpret_cast<float4*>(out)[i] = make_float4(xmin, ymin, xmax, ymax);
}
}
template <typename T>
__global__ void SetVariance(T* out, const T* var, const int vnum,
const int num) {
CUDA_KERNEL_LOOP(i, num) { out[i] = var[i % vnum]; }
}
template <typename T>
class AnchorGeneratorOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* anchors = ctx.Output<paddle::framework::Tensor>("Anchors");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto anchor_sizes = ctx.Attr<std::vector<float>>("anchor_sizes");
auto aspect_ratios = ctx.Attr<std::vector<float>>("aspect_ratios");
auto stride = ctx.Attr<std::vector<float>>("stride");
auto variances = ctx.Attr<std::vector<float>>("variances");
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto width = input->dims()[3];
auto height = input->dims()[2];
int num_anchors = aspect_ratios.size() * anchor_sizes.size();
int box_num = width * height * num_anchors;
int block = 512;
int grid = (box_num + block - 1) / block;
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
anchors->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor ar;
framework::TensorFromVector(aspect_ratios, ctx.device_context(), &ar);
framework::Tensor as;
framework::TensorFromVector(anchor_sizes, ctx.device_context(), &as);
framework::Tensor sd;
framework::TensorFromVector(stride, ctx.device_context(), &sd);
hipLaunchKernelGGL(( GenAnchors<T>), dim3(grid), dim3(block), 0, stream,
anchors->data<T>(), ar.data<T>(), aspect_ratios.size(), as.data<T>(),
anchor_sizes.size(), sd.data<T>(), stride.size(), height, width,
offset);
framework::Tensor v;
framework::TensorFromVector(variances, ctx.device_context(), &v);
grid = (box_num * 4 + block - 1) / block;
hipLaunchKernelGGL(( SetVariance<T>), dim3(grid), dim3(block), 0, stream, vars->data<T>(), v.data<T>(),
variances.size(), box_num * 4);
}
};  // class AnchorGeneratorOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(anchor_generator,
ops::AnchorGeneratorOpCUDAKernel<float>,
ops::AnchorGeneratorOpCUDAKernel<double>);
| c62439bf4e7984643824db57bf0dc425d8855bfd.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/anchor_generator_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void GenAnchors(T* out, const T* aspect_ratios, const int ar_num,
const T* anchor_sizes, const int as_num,
const T* stride, const int sd_num, const int height,
const int width, const T offset) {
int num_anchors = as_num * ar_num;
int box_num = height * width * num_anchors;
CUDA_KERNEL_LOOP(i, box_num) {
int h_idx = i / (num_anchors * width);
int w_idx = (i / num_anchors) % width;
T stride_width = stride[0];
T stride_height = stride[1];
T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1);
T y_ctr = (h_idx * stride_height) + offset * (stride_height - 1);
T area, area_ratios;
T base_w, base_h;
T scale_w, scale_h;
T anchor_width, anchor_height;
int anch_idx = i % num_anchors;
int ar_idx = anch_idx / as_num;
int as_idx = anch_idx % as_num;
T aspect_ratio = aspect_ratios[ar_idx];
T anchor_size = anchor_sizes[as_idx];
area = stride_width * stride_height;
area_ratios = area / aspect_ratio;
base_w = round(sqrt(area_ratios));
base_h = round(base_w * aspect_ratio);
scale_w = anchor_size / stride_width;
scale_h = anchor_size / stride_height;
anchor_width = scale_w * base_w;
anchor_height = scale_h * base_h;
T xmin = (x_ctr - .5f * (anchor_width - 1));
T ymin = (y_ctr - .5f * (anchor_height - 1));
T xmax = (x_ctr + .5f * (anchor_width - 1));
T ymax = (y_ctr + .5f * (anchor_height - 1));
reinterpret_cast<float4*>(out)[i] = make_float4(xmin, ymin, xmax, ymax);
}
}
template <typename T>
__global__ void SetVariance(T* out, const T* var, const int vnum,
const int num) {
CUDA_KERNEL_LOOP(i, num) { out[i] = var[i % vnum]; }
}
template <typename T>
class AnchorGeneratorOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* anchors = ctx.Output<paddle::framework::Tensor>("Anchors");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto anchor_sizes = ctx.Attr<std::vector<float>>("anchor_sizes");
auto aspect_ratios = ctx.Attr<std::vector<float>>("aspect_ratios");
auto stride = ctx.Attr<std::vector<float>>("stride");
auto variances = ctx.Attr<std::vector<float>>("variances");
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto width = input->dims()[3];
auto height = input->dims()[2];
int num_anchors = aspect_ratios.size() * anchor_sizes.size();
int box_num = width * height * num_anchors;
int block = 512;
int grid = (box_num + block - 1) / block;
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
anchors->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor ar;
framework::TensorFromVector(aspect_ratios, ctx.device_context(), &ar);
framework::Tensor as;
framework::TensorFromVector(anchor_sizes, ctx.device_context(), &as);
framework::Tensor sd;
framework::TensorFromVector(stride, ctx.device_context(), &sd);
GenAnchors<T><<<grid, block, 0, stream>>>(
anchors->data<T>(), ar.data<T>(), aspect_ratios.size(), as.data<T>(),
anchor_sizes.size(), sd.data<T>(), stride.size(), height, width,
offset);
framework::Tensor v;
framework::TensorFromVector(variances, ctx.device_context(), &v);
grid = (box_num * 4 + block - 1) / block;
SetVariance<T><<<grid, block, 0, stream>>>(vars->data<T>(), v.data<T>(),
variances.size(), box_num * 4);
}
};  // class AnchorGeneratorOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(anchor_generator,
ops::AnchorGeneratorOpCUDAKernel<float>,
ops::AnchorGeneratorOpCUDAKernel<double>);
|
103d38eba97b74ff71a5d249b19017b1640d0bbd.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/bn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* const_bottom_data = bottom[0]->gpu_data();
const Dtype* const_top_data = top[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
const Dtype* shift_data = this->blobs_[1]->gpu_data();
// ---------- mean subtraction ---------- //
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), const_bottom_data,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), ex_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// sync statistics
if ( sync_forward_ ){
// first, sync EX
caffe_copy(channels_, ex_.gpu_data(), statistics_all_.mutable_gpu_data());
P2PSync<Dtype>* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ];
Blob<Dtype> statistics_child(1, channels_, 1, 1);
for ( int i = 0; i < p2p->children().size(); ++i ){
#ifdef _WIN64
Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
#else
Blob<Dtype>* s_c_ogpu = NULL;
while ( !p2p->dataQueue().try_pop(&s_c_ogpu) )
;
#endif
//Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
CUDA_CHECK(hipMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), hipMemcpyDeviceToDevice, hipStreamDefault));
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data());
}
if ( p2p->parent() ){
p2p->parent()->dataQueue().push(&statistics_all_);
#ifdef _WIN64
Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
#else
Blob<Dtype>* statistics_final = NULL;
while ( !p2p->dataQueue().try_pop(&statistics_final) )
;
#endif
//Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
CUDA_CHECK(hipMemcpyAsync(ex_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), hipMemcpyDeviceToDevice, hipStreamDefault));
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
}
else {
caffe_gpu_scal<Dtype>(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data());
caffe_copy(channels_, statistics_all_.gpu_data(), ex_.mutable_gpu_data());
}
for ( int i = 0; i < p2p->children().size(); ++i ){
p2p->children()[ i ]->dataQueue().push(&ex_);
}
}
// save history mean
caffe_gpu_axpby(ex_.count(), Dtype(1) - decay_, ex_.gpu_data(), decay_,
this->blobs_[2]->mutable_gpu_data());
}
if (this->phase_ == TEST && moving_average_) {
// use moving average mean
caffe_copy(ex_.count(), this->blobs_[ 2 ]->gpu_data(), ex_.mutable_gpu_data());
}
// put mean blob into buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), ex_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(-1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
  // subtract mean
caffe_gpu_add(buffer_blob_.count(), const_bottom_data, buffer_blob_.gpu_data(), top_data);
// ---------- variance normalization ---------- //
// add by yu liu
// calculate EX2
caffe_gpu_powx(bottom[ 0 ]->count(), const_bottom_data, Dtype(2), buffer_blob_.mutable_gpu_data());
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / ( height_ * width_ )), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
if ( sync_forward_ ){
// second, sync EX2
caffe_copy(channels_, batch_statistic_.gpu_data(), statistics_all_.mutable_gpu_data());
P2PSync<Dtype>* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ];
Blob<Dtype> statistics_child(1, channels_, 1, 1);
for ( int i = 0; i < p2p->children().size(); ++i ){
#ifdef _WIN64
Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
#else // Linux support
Blob<Dtype>* s_c_ogpu = NULL;
while ( !p2p->dataQueue().try_pop(&s_c_ogpu) )
;
#endif
//Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
CUDA_CHECK(hipMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), hipMemcpyDeviceToDevice, hipStreamDefault));
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data());
}
if ( p2p->parent() ){
p2p->parent()->dataQueue().push(&statistics_all_);
#ifdef _WIN64
Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
#else // Linux support
Blob<Dtype>* statistics_final = NULL;
while ( !p2p->dataQueue().try_pop(&statistics_final) )
;
#endif
//Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
CUDA_CHECK(hipMemcpyAsync(dx_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), hipMemcpyDeviceToDevice, hipStreamDefault));
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
}
else {
caffe_gpu_scal<Dtype>(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data());
Blob<Dtype> e2x_(1, channels_, 1, 1);
caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data());
caffe_gpu_sub<Dtype>(ex_.count(), statistics_all_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data());
}
for ( int i = 0; i < p2p->children().size(); ++i ){
p2p->children()[ i ]->dataQueue().push(&dx_);
}
}
else{
Blob<Dtype> e2x_(1, channels_, 1, 1);
caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data());
caffe_gpu_sub<Dtype>(ex_.count(), batch_statistic_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data());
}
// original dx
//// put the squares of X - mean into buffer_blob_
//caffe_gpu_powx(buffer_blob_.count(), const_top_data, Dtype(2), buffer_blob_.mutable_gpu_data());
//// statistic across spatial
//caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), buffer_blob_.gpu_data(),
// spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
//// statistic across batch
//caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
// batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// save history variance
caffe_gpu_axpby(dx_.count(), Dtype(1) - decay_, dx_.gpu_data(), decay_,
this->blobs_[3]->mutable_gpu_data());
}
if (this->phase_ == TEST && moving_average_) {
// use moving average variance
caffe_copy(dx_.count(), this->blobs_[ 3 ]->gpu_data(), dx_.mutable_gpu_data());
}
// add eps
caffe_gpu_add_scalar(dx_.count(), var_eps_, dx_.mutable_gpu_data());
// std
caffe_gpu_powx(dx_.count(), dx_.gpu_data(), Dtype(0.5),
batch_statistic_.mutable_gpu_data());
// put std blob into buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
// variance normalization
caffe_gpu_div(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
// ---------- save x_norm and x_std ---------- //
caffe_copy(buffer_blob_.count(), const_top_data, x_norm_.mutable_gpu_data());
caffe_copy(batch_statistic_.count(), batch_statistic_.gpu_data(), x_std_.mutable_gpu_data());
// ---------- scale ---------- //
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_mul(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
// ---------- shift ---------- //
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), shift_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_add(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void BNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* const_bottom_diff = bottom[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const_top_diff = top[0]->gpu_diff();
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
// ---------- gradient w.r.t. scale ---------- //
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_top_diff, buffer_blob_.mutable_gpu_data());
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), scale_diff);
// ---------- gradient w.r.t. shift ---------- //
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_top_diff,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), shift_diff);
// ---------- gradient w.r.t. to bottom blob ---------- //
// put scale * top_diff to buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_mul(buffer_blob_.count(), const_top_diff, buffer_blob_.gpu_data(), buffer_blob_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// use new top diff for computation
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), buffer_blob_.gpu_data(), bottom_diff);
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_bottom_diff,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
bottom_diff);
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_bottom_diff, bottom_diff);
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(1),
bottom_diff);
caffe_gpu_axpby(buffer_blob_.count(), Dtype(1), buffer_blob_.gpu_data(), Dtype(-1. / (num_ * height_ * width_)),
bottom_diff);
}
if (this->phase_ == TEST && moving_average_) {
// use moving average variance
caffe_copy(buffer_blob_.count(), buffer_blob_.gpu_data(), bottom_diff);
}
// variance normalization
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), x_std_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_div(buffer_blob_.count(), const_bottom_diff, buffer_blob_.gpu_data(), bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLayer);
} // namespace caffe
| 103d38eba97b74ff71a5d249b19017b1640d0bbd.cu | #include <algorithm>
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/bn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* const_bottom_data = bottom[0]->gpu_data();
const Dtype* const_top_data = top[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
const Dtype* shift_data = this->blobs_[1]->gpu_data();
// ---------- mean subtraction ---------- //
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), const_bottom_data,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), ex_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// sync statistics
if ( sync_forward_ ){
// first, sync EX
caffe_copy(channels_, ex_.gpu_data(), statistics_all_.mutable_gpu_data());
P2PSync<Dtype>* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ];
Blob<Dtype> statistics_child(1, channels_, 1, 1);
for ( int i = 0; i < p2p->children().size(); ++i ){
#ifdef _WIN64
Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
#else
Blob<Dtype>* s_c_ogpu = NULL;
while ( !p2p->dataQueue().try_pop(&s_c_ogpu) )
;
#endif
//Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
CUDA_CHECK(cudaMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault));
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data());
}
if ( p2p->parent() ){
p2p->parent()->dataQueue().push(&statistics_all_);
#ifdef _WIN64
Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
#else
Blob<Dtype>* statistics_final = NULL;
while ( !p2p->dataQueue().try_pop(&statistics_final) )
;
#endif
//Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
CUDA_CHECK(cudaMemcpyAsync(ex_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault));
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
}
else {
caffe_gpu_scal<Dtype>(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data());
caffe_copy(channels_, statistics_all_.gpu_data(), ex_.mutable_gpu_data());
}
for ( int i = 0; i < p2p->children().size(); ++i ){
p2p->children()[ i ]->dataQueue().push(&ex_);
}
}
// save history mean
caffe_gpu_axpby(ex_.count(), Dtype(1) - decay_, ex_.gpu_data(), decay_,
this->blobs_[2]->mutable_gpu_data());
}
if (this->phase_ == TEST && moving_average_) {
// use moving average mean
caffe_copy(ex_.count(), this->blobs_[ 2 ]->gpu_data(), ex_.mutable_gpu_data());
}
// put mean blob into buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), ex_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(-1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
  // subtract mean
caffe_gpu_add(buffer_blob_.count(), const_bottom_data, buffer_blob_.gpu_data(), top_data);
// ---------- variance normalization ---------- //
// add by yu liu
// calculate EX2
caffe_gpu_powx(bottom[ 0 ]->count(), const_bottom_data, Dtype(2), buffer_blob_.mutable_gpu_data());
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / ( height_ * width_ )), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
if ( sync_forward_ ){
// second, sync EX2
caffe_copy(channels_, batch_statistic_.gpu_data(), statistics_all_.mutable_gpu_data());
P2PSync<Dtype>* p2p = this->callbacks()[ 0 ]->callbacks()[ 0 ]->p2p()[ 0 ];
Blob<Dtype> statistics_child(1, channels_, 1, 1);
for ( int i = 0; i < p2p->children().size(); ++i ){
#ifdef _WIN64
Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
#else // Linux support
Blob<Dtype>* s_c_ogpu = NULL;
while ( !p2p->dataQueue().try_pop(&s_c_ogpu) )
;
#endif
//Blob<Dtype>* s_c_ogpu = p2p->dataQueue().pop();
CUDA_CHECK(cudaMemcpyAsync(statistics_child.mutable_gpu_data(), s_c_ogpu->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault));
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
caffe_gpu_add(channels_, statistics_child.gpu_data(), statistics_all_.gpu_data(), statistics_all_.mutable_gpu_data());
}
if ( p2p->parent() ){
p2p->parent()->dataQueue().push(&statistics_all_);
#ifdef _WIN64
Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
#else // Linux support
Blob<Dtype>* statistics_final = NULL;
while ( !p2p->dataQueue().try_pop(&statistics_final) )
;
#endif
//Blob<Dtype>* statistics_final = p2p->dataQueue().pop();
CUDA_CHECK(cudaMemcpyAsync(dx_.mutable_gpu_data(), statistics_final->gpu_data(), channels_*sizeof( Dtype ), cudaMemcpyDeviceToDevice, cudaStreamDefault));
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
}
else {
caffe_gpu_scal<Dtype>(channels_, Dtype(1.0 / Caffe::solver_count()), statistics_all_.mutable_gpu_data());
Blob<Dtype> e2x_(1, channels_, 1, 1);
caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data());
caffe_gpu_sub<Dtype>(ex_.count(), statistics_all_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data());
}
for ( int i = 0; i < p2p->children().size(); ++i ){
p2p->children()[ i ]->dataQueue().push(&dx_);
}
}
else{
Blob<Dtype> e2x_(1, channels_, 1, 1);
caffe_gpu_powx(ex_.count(), ex_.gpu_data(), Dtype(2), e2x_.mutable_gpu_data());
caffe_gpu_sub<Dtype>(ex_.count(), batch_statistic_.gpu_data(), e2x_.gpu_data(), dx_.mutable_gpu_data());
}
// original dx
//// put the squares of X - mean into buffer_blob_
//caffe_gpu_powx(buffer_blob_.count(), const_top_data, Dtype(2), buffer_blob_.mutable_gpu_data());
//// statistic across spatial
//caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1. / (height_ * width_)), buffer_blob_.gpu_data(),
// spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
//// statistic across batch
//caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1. / num_), spatial_statistic_.gpu_data(),
// batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// save history variance
caffe_gpu_axpby(dx_.count(), Dtype(1) - decay_, dx_.gpu_data(), decay_,
this->blobs_[3]->mutable_gpu_data());
}
if (this->phase_ == TEST && moving_average_) {
// use moving average variance
caffe_copy(dx_.count(), this->blobs_[ 3 ]->gpu_data(), dx_.mutable_gpu_data());
}
// add eps
caffe_gpu_add_scalar(dx_.count(), var_eps_, dx_.mutable_gpu_data());
// std
caffe_gpu_powx(dx_.count(), dx_.gpu_data(), Dtype(0.5),
batch_statistic_.mutable_gpu_data());
// put std blob into buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
// variance normalization
caffe_gpu_div(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
// ---------- save x_norm and x_std ---------- //
caffe_copy(buffer_blob_.count(), const_top_data, x_norm_.mutable_gpu_data());
caffe_copy(batch_statistic_.count(), batch_statistic_.gpu_data(), x_std_.mutable_gpu_data());
// ---------- scale ---------- //
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_mul(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
// ---------- shift ---------- //
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), shift_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_add(buffer_blob_.count(), const_top_data, buffer_blob_.gpu_data(), top_data);
}
template <typename Dtype>
void BNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* const_bottom_diff = bottom[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* const_top_diff = top[0]->gpu_diff();
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff();
const Dtype* scale_data = this->blobs_[0]->gpu_data();
// ---------- gradient w.r.t. scale ---------- //
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_top_diff, buffer_blob_.mutable_gpu_data());
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), scale_diff);
// ---------- gradient w.r.t. shift ---------- //
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_top_diff,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), shift_diff);
// ---------- gradient w.r.t. to bottom blob ---------- //
// put scale * top_diff to buffer_blob_
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_mul(buffer_blob_.count(), const_top_diff, buffer_blob_.gpu_data(), buffer_blob_.mutable_gpu_data());
if (this->phase_ == TRAIN) {
// use new top diff for computation
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), buffer_blob_.gpu_data(), bottom_diff);
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_bottom_diff,
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
bottom_diff);
caffe_gpu_mul(buffer_blob_.count(), x_norm_.gpu_data(), const_bottom_diff, bottom_diff);
// statistic across spatial
caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), buffer_blob_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data());
// statistic across batch
caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(),
batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(1),
bottom_diff);
caffe_gpu_axpby(buffer_blob_.count(), Dtype(1), buffer_blob_.gpu_data(), Dtype(-1. / (num_ * height_ * width_)),
bottom_diff);
}
if (this->phase_ == TEST && moving_average_) {
// use moving average variance
caffe_copy(buffer_blob_.count(), buffer_blob_.gpu_data(), bottom_diff);
}
// variance normalization
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1),
batch_sum_multiplier_.gpu_data(), x_std_.gpu_data(), Dtype(0),
spatial_statistic_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1),
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0),
buffer_blob_.mutable_gpu_data());
caffe_gpu_div(buffer_blob_.count(), const_bottom_diff, buffer_blob_.gpu_data(), bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(BNLayer);
} // namespace caffe
|
ee1716acaebe47f0833ea93896d64af4543b54b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
/*! Block size used for CUDA kernel launch*/
#define blockSize 1024
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
int nextPowerOf2(int n) {
int p = 1;
if (n && !(n & (n - 1))) {
return n;
}
while (p < n) {
p <<= 1;
}
return p;
}
__global__ void kernUpsweep(int n, int d, int *odata, int incr, int twod) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//also return if index is not a multiple of the incr
if (index >= n || (index) % incr != 0) {
return;
}
            //if we reached here, index must be a multiple of incr (2^(d+1))
odata[index + incr - 1] += odata[index + twod - 1];
odata[n - 1] = 0;
}
__global__ void kernDownsweep(int n, int d, int *odata, int incr, int twod) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//also return if index is not a multiple of the incr
if (index >= n || (index) % incr != 0) {
return;
}
            //if we reached here, index must be a multiple of incr (2^(d+1))
int t = odata[index + twod - 1];
odata[index + twod - 1] = odata[index + incr - 1];
odata[index + incr - 1] += t;
}
__global__ void kernMapToBoolean(int n, int *mask, int *idata) {
//dev_odata contains idata
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) {
return;
}
if (idata[index] != 0) {
mask[index] = 1;
}
else {
mask[index] = 0;
}
}
__global__ void kernScatter(int n, int *mask, int *odata, int *odata2, int *idata) {
//odata now contains scan result
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n){
return;
}
int shouldInclude = mask[index];
if (shouldInclude) {
int newIdx = odata2[index];
odata[newIdx] = idata[index];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int malloc_size = nextPowerOf2(n);
//CUDA Malloc buffers
int *dev_odata;
hipMalloc((void**)&dev_odata, malloc_size * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int max_level = ilog2ceil(n);
int incr = 0;
int twod = 0;
//Copy idata into dev_odata
hipMemcpy(dev_odata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_odata failed!");
timer().startGpuTimer();
//Upsweep
for (int d = 0; d < max_level; d++) {
incr = pow(2, d + 1);
twod = pow(2, d);
hipLaunchKernelGGL(( kernUpsweep), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, malloc_size, d, dev_odata, incr, twod);
}
//Downsweep
for (int d = max_level-1; d >= 0; d--) {
incr = pow(2, d + 1);
twod = pow(2, d);
hipLaunchKernelGGL(( kernDownsweep), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, malloc_size, d, dev_odata, incr, twod);
}
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
//Free Memory
hipFree(dev_odata);
}
void scan_notimer(int n, int malloc_size, int *dev_odata) {
//Odata contains mask info
dim3 fullBlocksPerGrid((malloc_size + blockSize - 1) / blockSize);
int max_level = ilog2ceil(n);
int incr = 0;
int twod = 0;
//Upsweep
for (int d = 0; d < max_level; d++) {
incr = pow(2, d + 1);
twod = pow(2, d);
hipLaunchKernelGGL(( kernUpsweep), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, malloc_size, d, dev_odata, incr, twod);
}
//Downsweep
for (int d = max_level-1; d >= 0; d--) {
incr = pow(2, d + 1);
twod = pow(2, d);
hipLaunchKernelGGL(( kernDownsweep), dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, malloc_size, d, dev_odata, incr, twod);
}
}
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int malloc_size = nextPowerOf2(n);
//CUDA Malloc buffers
int *dev_odata;
int *dev_odata2;
int *dev_idata;
int *dev_mask;
hipMalloc((void**)&dev_odata, (malloc_size+1) * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMalloc((void**)&dev_odata2, (malloc_size+1) * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMalloc((void**)&dev_idata, malloc_size * sizeof(int));
checkCUDAError("hipMalloc dev_in failed!");
hipMalloc((void**)&dev_mask, malloc_size * sizeof(int));
checkCUDAError("hipMalloc dev_temp failed!");
//Memcpy idata into dev_odata for starters
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_idata failed!");
hipMemcpy(dev_odata, dev_idata, n * sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAError("hipMemcpy dev_odata failed!");
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
timer().startGpuTimer();
//1: Compute mask (Temporary Array)
hipLaunchKernelGGL(( kernMapToBoolean), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_odata, dev_idata);
//2: Exclusive Scan on TempArray
hipMemcpy(dev_mask, dev_odata, n * sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAError("hipMemcpy dev_odata failed!");
scan_notimer(n, malloc_size, dev_odata);
            //2.5: Get count from the exclusive-scan result in dev_odata
int tempcount[1];
hipMemcpy(&tempcount, dev_odata + n - 1, 1 * sizeof(int), hipMemcpyDeviceToHost);
int count = idata[n - 1] == 0 ? tempcount[0] : tempcount[0] + 1;
//3: Scatter (dev_odata now contains scan info)
hipMemcpy(dev_odata2, dev_odata, n * sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAError("hipMemcpy dev_odata failed!");
hipLaunchKernelGGL(( kernScatter), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, n, dev_mask, dev_odata, dev_odata2, dev_idata);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, (count) * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_mask);
hipFree(dev_odata);
hipFree(dev_odata2);
hipFree(dev_idata);
return count;
}
}
}
| ee1716acaebe47f0833ea93896d64af4543b54b9.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
/*! Block size used for CUDA kernel launch*/
#define blockSize 1024
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
int nextPowerOf2(int n) {
int p = 1;
if (n && !(n & (n - 1))) {
return n;
}
while (p < n) {
p <<= 1;
}
return p;
}
__global__ void kernUpsweep(int n, int d, int *odata, int incr, int twod) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//also return if index is not a multiple of the incr
if (index >= n || (index) % incr != 0) {
return;
}
            //if we reached here, index must be a multiple of incr (2^(d+1))
odata[index + incr - 1] += odata[index + twod - 1];
odata[n - 1] = 0;
}
__global__ void kernDownsweep(int n, int d, int *odata, int incr, int twod) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
//also return if index is not a multiple of the incr
if (index >= n || (index) % incr != 0) {
return;
}
            //if we reached here, index must be a multiple of incr (2^(d+1))
int t = odata[index + twod - 1];
odata[index + twod - 1] = odata[index + incr - 1];
odata[index + incr - 1] += t;
}
__global__ void kernMapToBoolean(int n, int *mask, int *idata) {
//dev_odata contains idata
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n) {
return;
}
if (idata[index] != 0) {
mask[index] = 1;
}
else {
mask[index] = 0;
}
}
__global__ void kernScatter(int n, int *mask, int *odata, int *odata2, int *idata) {
//odata now contains scan result
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n){
return;
}
int shouldInclude = mask[index];
if (shouldInclude) {
int newIdx = odata2[index];
odata[newIdx] = idata[index];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int malloc_size = nextPowerOf2(n);
//CUDA Malloc buffers
int *dev_odata;
cudaMalloc((void**)&dev_odata, malloc_size * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
int max_level = ilog2ceil(n);
int incr = 0;
int twod = 0;
//Copy idata into dev_odata
cudaMemcpy(dev_odata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_odata failed!");
timer().startGpuTimer();
//Upsweep
for (int d = 0; d < max_level; d++) {
incr = pow(2, d + 1);
twod = pow(2, d);
kernUpsweep<<<fullBlocksPerGrid, blockSize >>>(malloc_size, d, dev_odata, incr, twod);
}
//Downsweep
for (int d = max_level-1; d >= 0; d--) {
incr = pow(2, d + 1);
twod = pow(2, d);
kernDownsweep<<<fullBlocksPerGrid, blockSize >>>(malloc_size, d, dev_odata, incr, twod);
}
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
//Free Memory
cudaFree(dev_odata);
}
void scan_notimer(int n, int malloc_size, int *dev_odata) {
//Odata contains mask info
dim3 fullBlocksPerGrid((malloc_size + blockSize - 1) / blockSize);
int max_level = ilog2ceil(n);
int incr = 0;
int twod = 0;
//Upsweep
for (int d = 0; d < max_level; d++) {
incr = pow(2, d + 1);
twod = pow(2, d);
kernUpsweep<<<fullBlocksPerGrid, blockSize >>>(malloc_size, d, dev_odata, incr, twod);
}
//Downsweep
for (int d = max_level-1; d >= 0; d--) {
incr = pow(2, d + 1);
twod = pow(2, d);
kernDownsweep<<<fullBlocksPerGrid, blockSize >>>(malloc_size, d, dev_odata, incr, twod);
}
}
void printArray(int n, int *a, bool abridged = false) {
printf(" [ ");
for (int i = 0; i < n; i++) {
if (abridged && i + 2 == 15 && n > 16) {
i = n - 2;
printf("... ");
}
printf("%3d ", a[i]);
}
printf("]\n");
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int malloc_size = nextPowerOf2(n);
//CUDA Malloc buffers
int *dev_odata;
int *dev_odata2;
int *dev_idata;
int *dev_mask;
cudaMalloc((void**)&dev_odata, (malloc_size+1) * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMalloc((void**)&dev_odata2, (malloc_size+1) * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMalloc((void**)&dev_idata, malloc_size * sizeof(int));
checkCUDAError("cudaMalloc dev_in failed!");
cudaMalloc((void**)&dev_mask, malloc_size * sizeof(int));
checkCUDAError("cudaMalloc dev_temp failed!");
//Memcpy idata into dev_odata for starters
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_idata failed!");
cudaMemcpy(dev_odata, dev_idata, n * sizeof(int), cudaMemcpyDeviceToDevice);
checkCUDAError("cudaMemcpy dev_odata failed!");
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
timer().startGpuTimer();
//1: Compute mask (Temporary Array)
kernMapToBoolean<<<fullBlocksPerGrid, blockSize>>>(n, dev_odata, dev_idata);
//2: Exclusive Scan on TempArray
cudaMemcpy(dev_mask, dev_odata, n * sizeof(int), cudaMemcpyDeviceToDevice);
checkCUDAError("cudaMemcpy dev_odata failed!");
scan_notimer(n, malloc_size, dev_odata);
            //2.5: Get count from the exclusive-scan result in dev_odata
int tempcount[1];
cudaMemcpy(&tempcount, dev_odata + n - 1, 1 * sizeof(int), cudaMemcpyDeviceToHost);
int count = idata[n - 1] == 0 ? tempcount[0] : tempcount[0] + 1;
//3: Scatter (dev_odata now contains scan info)
cudaMemcpy(dev_odata2, dev_odata, n * sizeof(int), cudaMemcpyDeviceToDevice);
checkCUDAError("cudaMemcpy dev_odata failed!");
kernScatter<<<fullBlocksPerGrid, blockSize>>>(n, dev_mask, dev_odata, dev_odata2, dev_idata);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, (count) * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_mask);
cudaFree(dev_odata);
cudaFree(dev_odata2);
cudaFree(dev_idata);
return count;
}
}
}
|
c3463885d5a2374606c652ea4faaae8de8269814.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/types.h>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void PSROIPoolForward(
const T* bottom_data,
const T spatial_scale,
const int num_rois,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int group_size,
const int output_dim,
T* top_data,
int* mapping_channel,
hipStream_t stream)
{
const long output_size = output_dim * pooled_height * pooled_width * num_rois;
const long nthreads = output_size;
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
T roi_start_w = bottom_rois[1] * spatial_scale;
T roi_start_h = bottom_rois[2] * spatial_scale;
T roi_end_w = (bottom_rois[3] + 1) * spatial_scale;
T roi_end_h = (bottom_rois[4] + 1) * spatial_scale;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
// skip invalid rois
if(roi_width <= 0 || roi_height <= 0)
{
continue;
}
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
out_sum += bottom_data[bottom_index];
}
}
float bin_area = (hend - hstart) * (wend - wstart);
//top_data[index] = nthreads;
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSROIPoolBackward(const T* top_diff,
const int* mapping_channel,
const int batch_size,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_width,
const int pooled_height,
const int output_dim,
T* bottom_diff,
const T* bottom_rois,
hipStream_t stream)
{
const long output_size = output_dim * pooled_height * pooled_width * num_rois;
const long nthreads = output_size;
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
T roi_start_w = bottom_rois[1] * spatial_scale;
T roi_start_h = bottom_rois[2] * spatial_scale;
T roi_end_w = (bottom_rois[3] + 1) * spatial_scale;
T roi_end_h = (bottom_rois[4] + 1) * spatial_scale;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
// skip invalid rois
if(roi_width <= 0 || roi_height <= 0)
{
continue;
}
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
float bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int bottom_index = h * width + w;
//caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
namespace cvpods{
at::Tensor psroi_pooling_forward_cuda(
at::Tensor& features,
at::Tensor& rois,
at::Tensor& mapping_channel,
const int pooled_height,
const int pooled_width,
const float spatial_scale,
const int group_size,
const int output_dim)
{
int* mapping_channel_out = mapping_channel.contiguous().data_ptr<int>();
//Get # of Rois
int num_rois = rois.size(0);
int size_rois = rois.size(1);
AT_ASSERTM(size_rois == 5, "rois channels must be 5");
at::Tensor output = at::zeros({num_rois, output_dim, pooled_height, pooled_width}, features.options());
int data_height = features.size(2);
int data_width = features.size(3);
int num_channels = features.size(1);
const int kThreadsPerBlock = 1024;
const long output_size = (long)num_rois * pooled_height * pooled_width * num_channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(features.scalar_type(), "PSROIPoolForward", [&] {
scalar_t* data_in = features.contiguous().data_ptr<scalar_t>();
scalar_t* rois_in = rois.contiguous().data_ptr<scalar_t>();
scalar_t* output_out = output.contiguous().data_ptr<scalar_t>();
// call the gpu kernel for psroi_pooling
hipLaunchKernelGGL(( PSROIPoolForward<scalar_t>), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
data_in, spatial_scale, num_rois,
data_height, data_width, num_channels,
pooled_height, pooled_width, rois_in,
group_size, output_dim,
output_out, mapping_channel_out, stream
);
});
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
printf("error in psroi_pooling_forward_cuda: %s\n", hipGetErrorString(err));
exit(-1);
}
return output;
}
at::Tensor psroi_pooling_backward_cuda(
at::Tensor& top_grad,
at::Tensor& rois,
at::Tensor& mapping_channel,
const int batch_size,
const int bottom_dim,
const int bottom_height,
const int bottom_width,
const float spatial_scale)
{
int output_dim = top_grad.size(1);
int pooled_height = top_grad.size(2);
int pooled_width = top_grad.size(3);
at::Tensor bottom_grad = at::zeros({batch_size, bottom_dim, bottom_height, bottom_width}, top_grad.options());
// Number of ROIs
int num_rois = rois.size(0);
int size_rois = rois.size(1);
AT_ASSERTM(size_rois == 5, "rois channels must be 5");
int* mapping_channel_flat = mapping_channel.contiguous().data_ptr<int>();
const int kThreadsPerBlock = 1024;
const long output_size = (long)output_dim * pooled_height * pooled_width * num_rois;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(top_grad.scalar_type(), "PSROIPoolBackward", [&] {
scalar_t* top_grad_flat = top_grad.contiguous().data_ptr<scalar_t>();
scalar_t* rois_flat = rois.contiguous().data_ptr<scalar_t>();
scalar_t* bottom_grad_flat = bottom_grad.contiguous().data_ptr<scalar_t>();
// call the gpu kernel for psroi_pooling
hipLaunchKernelGGL(( PSROIPoolBackward<scalar_t>), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream,
top_grad_flat, mapping_channel_flat,
batch_size, num_rois, spatial_scale, bottom_dim,
bottom_height, bottom_width, pooled_width,
pooled_height, output_dim,
bottom_grad_flat, rois_flat, stream);
});
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
printf("error in psroi_pooling_backward_cuda: %s\n", hipGetErrorString(err));
exit(-1);
}
return bottom_grad;
}
}
| c3463885d5a2374606c652ea4faaae8de8269814.cu | #include <torch/types.h>
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void PSROIPoolForward(
const T* bottom_data,
const T spatial_scale,
const int num_rois,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int group_size,
const int output_dim,
T* top_data,
int* mapping_channel,
cudaStream_t stream)
{
const long output_size = output_dim * pooled_height * pooled_width * num_rois;
const long nthreads = output_size;
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
T roi_start_w = bottom_rois[1] * spatial_scale;
T roi_start_h = bottom_rois[2] * spatial_scale;
T roi_end_w = (bottom_rois[3] + 1) * spatial_scale;
T roi_end_h = (bottom_rois[4] + 1) * spatial_scale;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
// skip invalid rois
if(roi_width <= 0 || roi_height <= 0)
{
continue;
}
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
bottom_data += (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
out_sum += bottom_data[bottom_index];
}
}
float bin_area = (hend - hstart) * (wend - wstart);
//top_data[index] = nthreads;
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSROIPoolBackward(const T* top_diff,
const int* mapping_channel,
const int batch_size,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_width,
const int pooled_height,
const int output_dim,
T* bottom_diff,
const T* bottom_rois,
cudaStream_t stream)
{
const long output_size = output_dim * pooled_height * pooled_width * num_rois;
const long nthreads = output_size;
CUDA_1D_KERNEL_LOOP(index, nthreads)
{
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
T roi_start_w = bottom_rois[1] * spatial_scale;
T roi_start_h = bottom_rois[2] * spatial_scale;
T roi_end_w = (bottom_rois[3] + 1) * spatial_scale;
T roi_end_h = (bottom_rois[4] + 1) * spatial_scale;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
// skip invalid rois
if(roi_width <= 0 || roi_height <= 0)
{
continue;
}
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
float bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int bottom_index = h * width + w;
//caffe_gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
atomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
namespace cvpods{
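// Host-side wrappers: allocate the pooled output / input-gradient tensors,
// dispatch on the floating-point dtype, and launch the kernels above on the
// current CUDA stream, checking for launch errors afterwards.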
at::Tensor psroi_pooling_forward_cuda(
at::Tensor& features,
at::Tensor& rois,
at::Tensor& mapping_channel,
const int pooled_height,
const int pooled_width,
const float spatial_scale,
const int group_size,
const int output_dim)
{
int* mapping_channel_out = mapping_channel.contiguous().data_ptr<int>();
//Get # of Rois
int num_rois = rois.size(0);
int size_rois = rois.size(1);
AT_ASSERTM(size_rois == 5, "rois channels must be 5");
at::Tensor output = at::zeros({num_rois, output_dim, pooled_height, pooled_width}, features.options());
int data_height = features.size(2);
int data_width = features.size(3);
int num_channels = features.size(1);
const int kThreadsPerBlock = 1024;
const long output_size = (long)num_rois * pooled_height * pooled_width * num_channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(features.scalar_type(), "PSROIPoolForward", [&] {
scalar_t* data_in = features.contiguous().data_ptr<scalar_t>();
scalar_t* rois_in = rois.contiguous().data_ptr<scalar_t>();
scalar_t* output_out = output.contiguous().data_ptr<scalar_t>();
// call the gpu kernel for psroi_pooling
PSROIPoolForward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
data_in, spatial_scale, num_rois,
data_height, data_width, num_channels,
pooled_height, pooled_width, rois_in,
group_size, output_dim,
output_out, mapping_channel_out, stream
);
});
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
printf("error in psroi_pooling_forward_cuda: %s\n", cudaGetErrorString(err));
exit(-1);
}
return output;
}
at::Tensor psroi_pooling_backward_cuda(
at::Tensor& top_grad,
at::Tensor& rois,
at::Tensor& mapping_channel,
const int batch_size,
const int bottom_dim,
const int bottom_height,
const int bottom_width,
const float spatial_scale)
{
int output_dim = top_grad.size(1);
int pooled_height = top_grad.size(2);
int pooled_width = top_grad.size(3);
at::Tensor bottom_grad = at::zeros({batch_size, bottom_dim, bottom_height, bottom_width}, top_grad.options());
// Number of ROIs
int num_rois = rois.size(0);
int size_rois = rois.size(1);
AT_ASSERTM(size_rois == 5, "rois channels must be 5");
int* mapping_channel_flat = mapping_channel.contiguous().data_ptr<int>();
const int kThreadsPerBlock = 1024;
const long output_size = (long)output_dim * pooled_height * pooled_width * num_rois;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(top_grad.scalar_type(), "PSROIPoolBackward", [&] {
scalar_t* top_grad_flat = top_grad.contiguous().data_ptr<scalar_t>();
scalar_t* rois_flat = rois.contiguous().data_ptr<scalar_t>();
scalar_t* bottom_grad_flat = bottom_grad.contiguous().data_ptr<scalar_t>();
// call the gpu kernel for psroi_pooling
PSROIPoolBackward<scalar_t><<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(
top_grad_flat, mapping_channel_flat,
batch_size, num_rois, spatial_scale, bottom_dim,
bottom_height, bottom_width, pooled_width,
pooled_height, output_dim,
bottom_grad_flat, rois_flat, stream);
});
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
printf("error in psroi_pooling_backward_cuda: %s\n", cudaGetErrorString(err));
exit(-1);
}
return bottom_grad;
}
}
|
4e0444dc59e6c2fab9e9e94e88f2349cbb524df2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_graph_visited, int* g_cost, bool *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
g_graph_visited[tid]=true;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_graph_mask[id]=true;
//Change the loop stop value such that loop continues
*g_over=true;
}
}
}
}
| 4e0444dc59e6c2fab9e9e94e88f2349cbb524df2.cu | __global__ void
Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_graph_visited, int* g_cost, bool *g_over, int no_of_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_mask[tid]=false;
g_graph_visited[tid]=true;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_graph_mask[id]=true;
//Change the loop stop value such that loop continues
*g_over=true;
}
}
}
}
|
b587dcb2b1a0c16edd18336e77b7dbc5d3a0a73f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "arrayReduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
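// Benchmark sweep: for each matrix size and launch configuration the kernel is
// run once and synchronized, warmed up 10 more times, then launched 1000 times
// inside the timed region. There is no synchronize around the timed loop, so
// the reported microseconds largely reflect launch/enqueue overhead.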
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *m = NULL;
hipMalloc(&m, XSIZE*YSIZE);
int *ms = NULL;
hipMalloc(&ms, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((arrayReduce), dim3(gridBlock), dim3(threadBlock), 0, 0, m, ms);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((arrayReduce), dim3(gridBlock), dim3(threadBlock), 0, 0, m, ms);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((arrayReduce), dim3(gridBlock), dim3(threadBlock), 0, 0, m, ms);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b587dcb2b1a0c16edd18336e77b7dbc5d3a0a73f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "arrayReduce.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
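// Benchmark sweep: for each matrix size and launch configuration the kernel is
// run once and synchronized, warmed up 10 more times, then launched 1000 times
// inside the timed region. There is no synchronize around the timed loop, so
// the reported microseconds largely reflect launch/enqueue overhead.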
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *m = NULL;
cudaMalloc(&m, XSIZE*YSIZE);
int *ms = NULL;
cudaMalloc(&ms, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
arrayReduce<<<gridBlock,threadBlock>>>(m,ms);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
arrayReduce<<<gridBlock,threadBlock>>>(m,ms);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
arrayReduce<<<gridBlock,threadBlock>>>(m,ms);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
debcdbca875cf973413923fecfb3ce75a0f4506a.hip | // !!! This is a file automatically generated by hipify!!!
#include <glm/qn/glm_linear.h>
#include <glm/qn/glm_logistic.h>
#include <glm/qn/glm_softmax.h>
#include <glm/qn/qn.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <cuml/linear_model/glm.hpp>
#include <vector>
#include "test_utils.h"
#include "utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
struct QuasiNewtonTest : ::testing::Test {
static constexpr int N = 10;
static constexpr int D = 2;
const static double *nobptr;
const static double tol;
const static double X[N][D];
cumlHandle cuml_handle;
const cumlHandle_impl &handle;
hipStream_t stream;
std::shared_ptr<SimpleMatOwning<double>> Xdev;
std::shared_ptr<SimpleVecOwning<double>> ydev;
std::shared_ptr<deviceAllocator> allocator;
QuasiNewtonTest() : handle(cuml_handle.getImpl()) {}
void SetUp() {
stream = cuml_handle.getStream();
Xdev.reset(new SimpleMatOwning<double>(handle.getDeviceAllocator(), N, D,
stream, ROW_MAJOR));
updateDevice(Xdev->data, &X[0][0], Xdev->len, stream);
ydev.reset(
new SimpleVecOwning<double>(handle.getDeviceAllocator(), N, stream));
CUDA_CHECK(hipStreamSynchronize(stream));
allocator = handle.getDeviceAllocator();
}
void TearDown() {}
};
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
{-0.2047076594847130, 0.4789433380575482},
{-0.5194387150567381, -0.5557303043474900},
{1.9657805725027142, 1.3934058329729904},
{0.0929078767437177, 0.2817461528302025},
{0.7690225676118387, 1.2464347363862822},
{1.0071893575830049, -1.2962211091122635},
{0.2749916334321240, 0.2289128789353159},
{1.3529168351654497, 0.8864293405915888},
{-2.0016373096603974, -0.3718425371402544},
{1.6690253095248706, -0.4385697358355719}};
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const cumlHandle_impl &handle,
const T *host_weights,
const T *host_bias, const T *w,
const GLMDims &dims, Comp &comp,
hipStream_t stream) {
int C = dims.C;
int D = dims.D;
bool fit_intercept = dims.fit_intercept;
std::vector<T> w_ref_cm(C * D);
int idx = 0;
for (int d = 0; d < D; d++)
for (int c = 0; c < C; c++) {
w_ref_cm[idx++] = host_weights[c * D + d];
}
SimpleVecOwning<T> w_ref(handle.getDeviceAllocator(), dims.n_param, stream);
updateDevice(w_ref.data, &w_ref_cm[0], C * D, stream);
if (fit_intercept) {
updateDevice(&w_ref.data[C * D], host_bias, C, stream);
}
CUDA_CHECK(hipStreamSynchronize(stream));
return devArrMatch(w_ref.data, w, w_ref.len, comp);
}
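// Test helpers: `run` drives the quasi-Newton solver (qn_fit) directly on a
// loss object, while `run_api` exercises the public qnFit entry point; both
// return the final objective value that the tests compare against sklearn.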
template <typename T, class LossFunction>
T run(const cumlHandle_impl &handle, LossFunction &loss, const SimpleMat<T> &X,
const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
hipStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-16;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
T fx;
SimpleVec<T> w0(w, loss.n_param);
qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory,
verbosity, w0.data, &fx, &num_iters, X.ord, stream);
return fx;
}
template <typename T>
T run_api(const cumlHandle &cuml_handle, int loss_type, int C,
bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
hipStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-8;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
SimpleVec<T> w0(w, X.n + fit_intercept);
w0.fill(T(0), stream);
T fx;
qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
&fx, &num_iters, false, loss_type);
return fx;
}
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
CompareApprox<double> compApprox(tol);
// Test case generated in python and solved with sklearn
double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double alpha = 0.01;
LogisticLoss<double> loss_b(handle, D, true);
LogisticLoss<double> loss_no_b(handle, D, false);
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
double l1, l2, fx;
double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
double b_l1_b = 0.8057670813749118;
double obj_l1_b = 0.44295941481024703;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
double b_l2_b = 0.806087868102401;
double obj_l2_b = 0.4378085369889721;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
double obj_l1_no_b = 0.4769896009200278;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
double obj_l2_no_b = 0.47502098062114273;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
// The data seems too small for the objective to be strongly convex,
// so exact param checks are left out
CompareApprox<double> compApprox(tol);
double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double fx, l1, l2;
int C = 4;
double alpha = 0.016;
SimpleMatOwning<double> z(allocator, C, N, stream);
SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
Softmax<double> loss_b(handle, D, C, true);
Softmax<double> loss_no_b(handle, D, C, false);
l1 = alpha;
l2 = 0.0;
double obj_l1_b = 0.5407911382311313;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_b = 0.5721784062720949;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double obj_l1_no_b = 0.6606929813245878;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_no_b = 0.6597171282106854;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
CompareApprox<double> compApprox(tol);
double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
-0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
-0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
-1.0767450516284769};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
double fx, l1, l2;
double alpha = 0.01;
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
SquaredLoss<double> loss_b(handle, D, true);
SquaredLoss<double> loss_no_b(handle, D, false);
l1 = alpha;
l2 = 0.0;
double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
double b_l1_b = -0.08140861819001188;
double obj_l1_b = 0.011136986298775138;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
double b_l2_b = -0.08062397391797513;
double obj_l2_b = 0.004268621967866347;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
double obj_l1_no_b = 0.013981355746112447;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
double obj_l2_no_b = 0.007061261366969662;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, predict) {
CompareApprox<double> compApprox(1e-8);
std::vector<double> w_host(D);
w_host[0] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, D, stream);
SimpleVecOwning<double> preds(allocator, N, stream);
updateDevice(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
: compApprox(preds_host[it], 0));
}
qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
}
}
TEST_F(QuasiNewtonTest, predict_softmax) {
CompareApprox<double> compApprox(1e-8);
int C = 4;
std::vector<double> w_host(C * D);
w_host[0] = 1;
w_host[D * C - 1] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, w_host.size(), stream);
SimpleVecOwning<double> preds(allocator, N, stream);
updateDevice(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
if (X[it][0] < 0 && X[it][1] < 0) {
ASSERT_TRUE(compApprox(1, preds_host[it]));
} else if (X[it][0] > X[it][1]) {
ASSERT_TRUE(compApprox(0, preds_host[it]));
} else {
ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
}
}
}
} // namespace GLM
} // end namespace ML
| debcdbca875cf973413923fecfb3ce75a0f4506a.cu | #include <glm/qn/glm_linear.h>
#include <glm/qn/glm_logistic.h>
#include <glm/qn/glm_softmax.h>
#include <glm/qn/qn.h>
#include <gtest/gtest.h>
#include <linalg/transpose.h>
#include <cuml/linear_model/glm.hpp>
#include <vector>
#include "test_utils.h"
#include "utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
struct QuasiNewtonTest : ::testing::Test {
static constexpr int N = 10;
static constexpr int D = 2;
const static double *nobptr;
const static double tol;
const static double X[N][D];
cumlHandle cuml_handle;
const cumlHandle_impl &handle;
cudaStream_t stream;
std::shared_ptr<SimpleMatOwning<double>> Xdev;
std::shared_ptr<SimpleVecOwning<double>> ydev;
std::shared_ptr<deviceAllocator> allocator;
QuasiNewtonTest() : handle(cuml_handle.getImpl()) {}
void SetUp() {
stream = cuml_handle.getStream();
Xdev.reset(new SimpleMatOwning<double>(handle.getDeviceAllocator(), N, D,
stream, ROW_MAJOR));
updateDevice(Xdev->data, &X[0][0], Xdev->len, stream);
ydev.reset(
new SimpleVecOwning<double>(handle.getDeviceAllocator(), N, stream));
CUDA_CHECK(cudaStreamSynchronize(stream));
allocator = handle.getDeviceAllocator();
}
void TearDown() {}
};
const double *QuasiNewtonTest::nobptr = 0;
const double QuasiNewtonTest::tol = 5e-6;
const double QuasiNewtonTest::X[QuasiNewtonTest::N][QuasiNewtonTest::D] = {
{-0.2047076594847130, 0.4789433380575482},
{-0.5194387150567381, -0.5557303043474900},
{1.9657805725027142, 1.3934058329729904},
{0.0929078767437177, 0.2817461528302025},
{0.7690225676118387, 1.2464347363862822},
{1.0071893575830049, -1.2962211091122635},
{0.2749916334321240, 0.2289128789353159},
{1.3529168351654497, 0.8864293405915888},
{-2.0016373096603974, -0.3718425371402544},
{1.6690253095248706, -0.4385697358355719}};
template <typename T, class Comp>
::testing::AssertionResult checkParamsEqual(const cumlHandle_impl &handle,
const T *host_weights,
const T *host_bias, const T *w,
const GLMDims &dims, Comp &comp,
cudaStream_t stream) {
int C = dims.C;
int D = dims.D;
bool fit_intercept = dims.fit_intercept;
std::vector<T> w_ref_cm(C * D);
int idx = 0;
for (int d = 0; d < D; d++)
for (int c = 0; c < C; c++) {
w_ref_cm[idx++] = host_weights[c * D + d];
}
SimpleVecOwning<T> w_ref(handle.getDeviceAllocator(), dims.n_param, stream);
updateDevice(w_ref.data, &w_ref_cm[0], C * D, stream);
if (fit_intercept) {
updateDevice(&w_ref.data[C * D], host_bias, C, stream);
}
CUDA_CHECK(cudaStreamSynchronize(stream));
return devArrMatch(w_ref.data, w, w_ref.len, comp);
}
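// Test helpers: `run` drives the quasi-Newton solver (qn_fit) directly on a
// loss object, while `run_api` exercises the public qnFit entry point; both
// return the final objective value that the tests compare against sklearn.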
template <typename T, class LossFunction>
T run(const cumlHandle_impl &handle, LossFunction &loss, const SimpleMat<T> &X,
const SimpleVec<T> &y, T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
cudaStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-16;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
T fx;
SimpleVec<T> w0(w, loss.n_param);
qn_fit<T, LossFunction>(handle, loss, X.data, y.data, z.data, X.m, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory,
verbosity, w0.data, &fx, &num_iters, X.ord, stream);
return fx;
}
template <typename T>
T run_api(const cumlHandle &cuml_handle, int loss_type, int C,
bool fit_intercept, const SimpleMat<T> &X, const SimpleVec<T> &y,
T l1, T l2, T *w, SimpleMat<T> &z, int verbosity,
cudaStream_t stream) {
int max_iter = 100;
T grad_tol = 1e-8;
int linesearch_max_iter = 50;
int lbfgs_memory = 5;
int num_iters = 0;
SimpleVec<T> w0(w, X.n + fit_intercept);
w0.fill(T(0), stream);
T fx;
qnFit(cuml_handle, X.data, y.data, X.m, X.n, C, fit_intercept, l1, l2,
max_iter, grad_tol, linesearch_max_iter, lbfgs_memory, verbosity, w,
&fx, &num_iters, false, loss_type);
return fx;
}
TEST_F(QuasiNewtonTest, binary_logistic_vs_sklearn) {
CompareApprox<double> compApprox(tol);
// Test case generated in python and solved with sklearn
double y[N] = {1, 1, 1, 0, 1, 0, 1, 0, 1, 0};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double alpha = 0.01;
LogisticLoss<double> loss_b(handle, D, true);
LogisticLoss<double> loss_no_b(handle, D, false);
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
double l1, l2, fx;
double w_l1_b[2] = {-1.6899370396155091, 1.9021577534928300};
double b_l1_b = 0.8057670813749118;
double obj_l1_b = 0.44295941481024703;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
double w_l2_b[2] = {-1.5339880402781370, 1.6788639581350926};
double b_l2_b = 0.806087868102401;
double obj_l2_b = 0.4378085369889721;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
double w_l1_no_b[2] = {-1.6215035298864591, 2.3650868394981086};
double obj_l1_no_b = 0.4769896009200278;
l1 = alpha;
l2 = 0.0;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
double w_l2_no_b[2] = {-1.3931049893764620, 2.0140103094119621};
double obj_l2_no_b = 0.47502098062114273;
l1 = 0;
l2 = alpha;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 0, 2, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, multiclass_logistic_vs_sklearn) {
// The data seems too small for the objective to be strongly convex,
// so exact param checks are left out
CompareApprox<double> compApprox(tol);
double y[N] = {2, 2, 0, 3, 3, 0, 0, 0, 1, 0};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double fx, l1, l2;
int C = 4;
double alpha = 0.016;
SimpleMatOwning<double> z(allocator, C, N, stream);
SimpleVecOwning<double> w0(allocator, C * (D + 1), stream);
Softmax<double> loss_b(handle, D, C, true);
Softmax<double> loss_no_b(handle, D, C, false);
l1 = alpha;
l2 = 0.0;
double obj_l1_b = 0.5407911382311313;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_b = 0.5721784062720949;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
fx = run_api(cuml_handle, 2, C, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double obj_l1_no_b = 0.6606929813245878;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double obj_l2_no_b = 0.6597171282106854;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
fx = run_api(cuml_handle, 2, C, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, linear_regression_vs_sklearn) {
CompareApprox<double> compApprox(tol);
double y[N] = {0.2675836026202781, -0.0678277759663704, -0.6334027174275105,
-0.1018336189077367, 0.0933815935886932, -1.1058853496996381,
-0.1658298189619160, -0.2954290675648911, 0.7966520536712608,
-1.0767450516284769};
updateDevice(ydev->data, &y[0], ydev->len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
double fx, l1, l2;
double alpha = 0.01;
SimpleVecOwning<double> w0(allocator, D + 1, stream);
SimpleVecOwning<double> z(allocator, N, stream);
SquaredLoss<double> loss_b(handle, D, true);
SquaredLoss<double> loss_no_b(handle, D, false);
l1 = alpha;
l2 = 0.0;
double w_l1_b[2] = {-0.4952397281519840, 0.3813315300180231};
double b_l1_b = -0.08140861819001188;
double obj_l1_b = 0.011136986298775138;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_b[0], &b_l1_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_b[2] = {-0.5022384743587150, 0.3937352417485087};
double b_l2_b = -0.08062397391797513;
double obj_l2_b = 0.004268621967866347;
fx = run(handle, loss_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_b[0], &b_l2_b, w0.data, loss_b,
compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_b, fx));
l1 = alpha;
l2 = 0.0;
double w_l1_no_b[2] = {-0.5175178128147135, 0.3720844589831813};
double obj_l1_no_b = 0.013981355746112447;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l1_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l1_no_b, fx));
l1 = 0.0;
l2 = alpha;
double w_l2_no_b[2] = {-0.5241651041233270, 0.3846317886627560};
double obj_l2_no_b = 0.007061261366969662;
fx = run(handle, loss_no_b, *Xdev, *ydev, l1, l2, w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
ASSERT_TRUE(checkParamsEqual(handle, &w_l2_no_b[0], nobptr, w0.data,
loss_no_b, compApprox, stream));
fx = run_api(cuml_handle, 1, 1, loss_no_b.fit_intercept, *Xdev, *ydev, l1, l2,
w0.data, z, 0, stream);
ASSERT_TRUE(compApprox(obj_l2_no_b, fx));
}
TEST_F(QuasiNewtonTest, predict) {
CompareApprox<double> compApprox(1e-8);
std::vector<double> w_host(D);
w_host[0] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, D, stream);
SimpleVecOwning<double> preds(allocator, N, stream);
updateDevice(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, 2, false, w.data, false, 0, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(X[it][0] > 0 ? compApprox(preds_host[it], 1)
: compApprox(preds_host[it], 0));
}
qnPredict(handle, Xdev->data, N, D, 1, false, w.data, false, 1, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
ASSERT_TRUE(compApprox(X[it][0], preds_host[it]));
}
}
TEST_F(QuasiNewtonTest, predict_softmax) {
CompareApprox<double> compApprox(1e-8);
int C = 4;
std::vector<double> w_host(C * D);
w_host[0] = 1;
w_host[D * C - 1] = 1;
std::vector<double> preds_host(N);
SimpleVecOwning<double> w(allocator, w_host.size(), stream);
SimpleVecOwning<double> preds(allocator, N, stream);
updateDevice(w.data, &w_host[0], w.len, stream);
qnPredict(handle, Xdev->data, N, D, C, false, w.data, false, 2, preds.data,
stream);
updateHost(&preds_host[0], preds.data, preds.len, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int it = 0; it < N; it++) {
if (X[it][0] < 0 && X[it][1] < 0) {
ASSERT_TRUE(compApprox(1, preds_host[it]));
} else if (X[it][0] > X[it][1]) {
ASSERT_TRUE(compApprox(0, preds_host[it]));
} else {
ASSERT_TRUE(compApprox(C - 1, preds_host[it]));
}
}
}
} // namespace GLM
} // end namespace ML
|
0b96c709f6df21bfa970f2111d26b7ba342e527e.hip | // !!! This is a file automatically generated by hipify!!!
/*
 * float_encoding.cu
 *
 *  Created on: 30 paź 2015
 *      Author: Karol Dzitkowski
 */
#include <compression/float/float_encoding.hpp>
#include <util/transform/cuda_array_transform.hpp>
#include <util/statistics/cuda_array_statistics.hpp>
#include "core/macros.h"
#include <limits>
#include <cmath>
namespace ddj
{
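// Encoding scheme: detect the decimal precision of the input, scale the values
// to integers when that cannot overflow int, and otherwise fall back to a raw
// copy flagged with MAX_PRECISION; the precision is stored as metadata so
// Decode can invert the transform.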
template<typename T>
SharedCudaPtrVector<char> FloatEncoding::Encode(SharedCudaPtr<T> data)
{
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO_FMT(_logger, "FLOAT encoding START: data size = %lu", data->size());
if(data->size() <= 0)
return SharedCudaPtrVector<char>{
CudaPtr<char>::make_shared(),
CudaPtr<char>::make_shared()
};
int precision = CudaArrayStatistics().Precision(data);
SharedCudaPtr<int> resultData;
FloatingPointToIntegerOperator<T, int> op { precision };
// Make sure we won't overflow
bool transform = false;
if(precision < MAX_PRECISION)
{
auto minMax = CudaArrayStatistics().MinMax(data);
int scaleFactor = ::pow(10, precision);
if((std::get<0>(minMax) * scaleFactor) > std::numeric_limits<int>::min() &&
(std::get<1>(minMax) * scaleFactor) < std::numeric_limits<int>::max())
transform = true;
else precision = MAX_PRECISION;
}
if(transform)
resultData = CudaArrayTransform().Transform<T, int>(data, op);
else
resultData = CastSharedCudaPtr<T, int>(data->copy());
auto resultMetadata = CudaPtr<char>::make_shared(sizeof(int));
resultMetadata->fillFromHost((char*)&precision, sizeof(int));
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO(_logger, "FLOAT enoding END");
return SharedCudaPtrVector<char> { resultMetadata, MoveSharedCudaPtr<int, char>(resultData) };
}
template<typename T>
SharedCudaPtr<T> FloatEncoding::Decode(SharedCudaPtrVector<char> input)
{
LOG4CPLUS_INFO_FMT(
_logger,
"FLOAT decoding START: input[0] size = %lu, input[1] size = %lu",
input[0]->size(), input[1]->size()
);
if(input[1]->size() <= 0)
return CudaPtr<T>::make_shared();
auto metadata = input[0];
auto data = MoveSharedCudaPtr<char, int>(input[1]);
int precision;
CUDA_CALL( hipMemcpy(&precision, metadata->get(), sizeof(int), CPY_DTH) );
SharedCudaPtr<T> result;
IntegerToFloatingPointOperator<int, T> op { precision };
if(precision < MAX_PRECISION)
result = CudaArrayTransform().Transform<int, T>(data, op);
else
result = CastSharedCudaPtr<int, T>(data->copy());
CUDA_ASSERT_RETURN( hipGetLastError() );
LOG4CPLUS_INFO(_logger, "FLOAT decoding END");
return result;
}
#define FLOAT_ENCODING_SPEC(X) \
template SharedCudaPtrVector<char> FloatEncoding::Encode<X>(SharedCudaPtr<X> data); \
template SharedCudaPtr<X> FloatEncoding::Decode<X>(SharedCudaPtrVector<char> data);
FOR_EACH(FLOAT_ENCODING_SPEC, char, short, double, float, int, long, long long, unsigned int)
} /* namespace ddj */
| 0b96c709f6df21bfa970f2111d26b7ba342e527e.cu | /*
* float_encoding.cu
*
* Created on: 30 paź 2015
* Author: Karol Dzitkowski
*/
#include <compression/float/float_encoding.hpp>
#include <util/transform/cuda_array_transform.hpp>
#include <util/statistics/cuda_array_statistics.hpp>
#include "core/macros.h"
#include <limits>
#include <cmath>
namespace ddj
{
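// Encoding scheme: detect the decimal precision of the input, scale the values
// to integers when that cannot overflow int, and otherwise fall back to a raw
// copy flagged with MAX_PRECISION; the precision is stored as metadata so
// Decode can invert the transform.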
template<typename T>
SharedCudaPtrVector<char> FloatEncoding::Encode(SharedCudaPtr<T> data)
{
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO_FMT(_logger, "FLOAT encoding START: data size = %lu", data->size());
if(data->size() <= 0)
return SharedCudaPtrVector<char>{
CudaPtr<char>::make_shared(),
CudaPtr<char>::make_shared()
};
int precision = CudaArrayStatistics().Precision(data);
SharedCudaPtr<int> resultData;
FloatingPointToIntegerOperator<T, int> op { precision };
// Make sure we won't overflow
bool transform = false;
if(precision < MAX_PRECISION)
{
auto minMax = CudaArrayStatistics().MinMax(data);
int scaleFactor = std::pow(10, precision);
if((std::get<0>(minMax) * scaleFactor) > std::numeric_limits<int>::min() &&
(std::get<1>(minMax) * scaleFactor) < std::numeric_limits<int>::max())
transform = true;
else precision = MAX_PRECISION;
}
if(transform)
resultData = CudaArrayTransform().Transform<T, int>(data, op);
else
resultData = CastSharedCudaPtr<T, int>(data->copy());
auto resultMetadata = CudaPtr<char>::make_shared(sizeof(int));
resultMetadata->fillFromHost((char*)&precision, sizeof(int));
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO(_logger, "FLOAT enoding END");
return SharedCudaPtrVector<char> { resultMetadata, MoveSharedCudaPtr<int, char>(resultData) };
}
template<typename T>
SharedCudaPtr<T> FloatEncoding::Decode(SharedCudaPtrVector<char> input)
{
LOG4CPLUS_INFO_FMT(
_logger,
"FLOAT decoding START: input[0] size = %lu, input[1] size = %lu",
input[0]->size(), input[1]->size()
);
if(input[1]->size() <= 0)
return CudaPtr<T>::make_shared();
auto metadata = input[0];
auto data = MoveSharedCudaPtr<char, int>(input[1]);
int precision;
CUDA_CALL( cudaMemcpy(&precision, metadata->get(), sizeof(int), CPY_DTH) );
SharedCudaPtr<T> result;
IntegerToFloatingPointOperator<int, T> op { precision };
if(precision < MAX_PRECISION)
result = CudaArrayTransform().Transform<int, T>(data, op);
else
result = CastSharedCudaPtr<int, T>(data->copy());
CUDA_ASSERT_RETURN( cudaGetLastError() );
LOG4CPLUS_INFO(_logger, "FLOAT decoding END");
return result;
}
#define FLOAT_ENCODING_SPEC(X) \
template SharedCudaPtrVector<char> FloatEncoding::Encode<X>(SharedCudaPtr<X> data); \
template SharedCudaPtr<X> FloatEncoding::Decode<X>(SharedCudaPtrVector<char> data);
FOR_EACH(FLOAT_ENCODING_SPEC, char, short, double, float, int, long, long long, unsigned int)
} /* namespace ddj */
|
54a41a303a90db34b58c091203c39ae237d491ec.hip | // !!! This is a file automatically generated by hipify!!!
// This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include <iostream>
#include "GPUCudaCleaner.h"
CudaCleaner::CudaCleaner() {}
CudaCleaner::~CudaCleaner() {
std::cout << "calling hipDeviceReset()..." << std::endl;
hipDeviceReset();
std::cout << "Done." << std::endl;
}
| 54a41a303a90db34b58c091203c39ae237d491ec.cu | // This is a personal academic project. Dear PVS-Studio, please check it.
// PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
#include <iostream>
#include "GPUCudaCleaner.h"
CudaCleaner::CudaCleaner() {}
CudaCleaner::~CudaCleaner() {
std::cout << "calling cudaDeviceReset()..." << std::endl;
cudaDeviceReset();
std::cout << "Done." << std::endl;
}
|
50c998d37e01d84b24c30912c399b1a5b26747fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_vector.hpp>
#include <cstddef>
#include <cstring>
#include <string>
using cudf::detail::device_span;
using cudf::detail::host_span;
template <typename T>
void expect_equivolent(host_span<T> a, host_span<T> b)
{
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.data(), b.data());
}
template <typename Iterator1, typename T>
void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input)
{
EXPECT_EQ(expected_size, input.size());
for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); }
}
template <typename T>
void expect_match(std::string expected, host_span<T> input)
{
return expect_match(expected.begin(), expected.size(), input);
}
std::string const hello_wold_message = "hello world";
std::vector<char> create_hello_world_message()
{
return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end());
}
class SpanTest : public cudf::test::BaseFixture {
};
TEST(SpanTest, CanCreateFullSubspan)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_equivolent(message_span, message_span.subspan(0, message_span.size()));
}
TEST(SpanTest, CanTakeFirst)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello", message_span.first(5));
}
TEST(SpanTest, CanTakeLast)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("world", message_span.last(5));
}
TEST(SpanTest, CanTakeSubspanFull)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello world", message_span.subspan(0, 11));
}
TEST(SpanTest, CanTakeSubspanPartial)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("lo w", message_span.subspan(3, 4));
}
TEST(SpanTest, CanGetFront)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('h', message_span.front());
}
TEST(SpanTest, CanGetBack)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('d', message_span.back());
}
TEST(SpanTest, CanGetData)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ(message.data(), message_span.data());
}
TEST(SpanTest, CanDetermineEmptiness)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_FALSE(message_span.empty());
EXPECT_TRUE(empty_span.empty());
}
TEST(SpanTest, CanGetSize)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_EQ(static_cast<size_t>(11), message_span.size());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size());
}
TEST(SpanTest, CanGetSizeBytes)
{
auto doubles = std::vector<double>({6, 3, 2});
auto const doubles_span = host_span<double>(doubles.data(), doubles.size());
auto const empty_span = host_span<double>();
EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes());
}
TEST(SpanTest, CanCopySpan)
{
auto message = create_hello_world_message();
host_span<char> message_span_copy;
{
auto const message_span = host_span<char>(message.data(), message.size());
message_span_copy = message_span;
}
EXPECT_EQ(message.data(), message_span_copy.data());
EXPECT_EQ(message.size(), message_span_copy.size());
}
TEST(SpanTest, CanSubscriptRead)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('o', message_span[4]);
}
TEST(SpanTest, CanSubscriptWrite)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
message_span[4] = 'x';
EXPECT_EQ('x', message_span[4]);
}
TEST(SpanTest, CanConstructFromHostContainers)
{
auto std_vector = std::vector<int>(1);
auto h_vector = thrust::host_vector<int>(1);
(void)host_span<int>(std_vector);
(void)host_span<int>(h_vector);
auto const std_vector_c = std_vector;
auto const h_vector_c = h_vector;
(void)host_span<int const>(std_vector_c);
(void)host_span<int const>(h_vector_c);
}
TEST(SpanTest, CanConstructFromDeviceContainers)
{
auto d_thrust_vector = thrust::device_vector<int>(1);
auto d_vector = rmm::device_vector<int>(1);
auto d_uvector = rmm::device_uvector<int>(1, 0);
(void)device_span<int>(d_thrust_vector);
(void)device_span<int>(d_vector);
(void)device_span<int>(d_uvector);
auto const& d_thrust_vector_c = d_thrust_vector;
auto const& d_vector_c = d_vector;
auto const& d_uvector_c = d_uvector;
(void)device_span<int const>(d_thrust_vector_c);
(void)device_span<int const>(d_vector_c);
(void)device_span<int const>(d_uvector_c);
}
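// Device-side smoke test: the kernel writes through a device_span and the host
// test verifies the value after copying the vector back.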
__global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; }
TEST(SpanTest, CanUseDeviceSpan)
{
rmm::device_vector<bool> d_message = std::vector<bool>({false});
auto d_span = device_span<bool>(d_message.data().get(), d_message.size());
hipLaunchKernelGGL(( simple_device_kernel), dim3(1), dim3(1), 0, 0, d_span);
hipDeviceSynchronize();
thrust::host_vector<bool> h_message = d_message;
ASSERT_TRUE(h_message[0]);
}
CUDF_TEST_PROGRAM_MAIN()
| 50c998d37e01d84b24c30912c399b1a5b26747fb.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/span.hpp>
#include <cudf_test/base_fixture.hpp>
#include <cudf_test/cudf_gtest.hpp>
#include <cudf_test/type_lists.hpp>
#include <rmm/device_buffer.hpp>
#include <rmm/device_vector.hpp>
#include <cstddef>
#include <cstring>
#include <string>
using cudf::detail::device_span;
using cudf::detail::host_span;
template <typename T>
void expect_equivolent(host_span<T> a, host_span<T> b)
{
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.data(), b.data());
}
template <typename Iterator1, typename T>
void expect_match(Iterator1 expected, size_t expected_size, host_span<T> input)
{
EXPECT_EQ(expected_size, input.size());
for (size_t i = 0; i < expected_size; i++) { EXPECT_EQ(*(expected + i), *(input.begin() + i)); }
}
template <typename T>
void expect_match(std::string expected, host_span<T> input)
{
return expect_match(expected.begin(), expected.size(), input);
}
std::string const hello_wold_message = "hello world";
std::vector<char> create_hello_world_message()
{
return std::vector<char>(hello_wold_message.begin(), hello_wold_message.end());
}
class SpanTest : public cudf::test::BaseFixture {
};
TEST(SpanTest, CanCreateFullSubspan)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_equivolent(message_span, message_span.subspan(0, message_span.size()));
}
TEST(SpanTest, CanTakeFirst)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello", message_span.first(5));
}
TEST(SpanTest, CanTakeLast)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("world", message_span.last(5));
}
TEST(SpanTest, CanTakeSubspanFull)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("hello world", message_span.subspan(0, 11));
}
TEST(SpanTest, CanTakeSubspanPartial)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
expect_match("lo w", message_span.subspan(3, 4));
}
TEST(SpanTest, CanGetFront)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('h', message_span.front());
}
TEST(SpanTest, CanGetBack)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('d', message_span.back());
}
TEST(SpanTest, CanGetData)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ(message.data(), message_span.data());
}
TEST(SpanTest, CanDetermineEmptiness)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_FALSE(message_span.empty());
EXPECT_TRUE(empty_span.empty());
}
TEST(SpanTest, CanGetSize)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
auto const empty_span = host_span<char>();
EXPECT_EQ(static_cast<size_t>(11), message_span.size());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size());
}
TEST(SpanTest, CanGetSizeBytes)
{
auto doubles = std::vector<double>({6, 3, 2});
auto const doubles_span = host_span<double>(doubles.data(), doubles.size());
auto const empty_span = host_span<double>();
EXPECT_EQ(static_cast<size_t>(24), doubles_span.size_bytes());
EXPECT_EQ(static_cast<size_t>(0), empty_span.size_bytes());
}
TEST(SpanTest, CanCopySpan)
{
auto message = create_hello_world_message();
host_span<char> message_span_copy;
{
auto const message_span = host_span<char>(message.data(), message.size());
message_span_copy = message_span;
}
EXPECT_EQ(message.data(), message_span_copy.data());
EXPECT_EQ(message.size(), message_span_copy.size());
}
TEST(SpanTest, CanSubscriptRead)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
EXPECT_EQ('o', message_span[4]);
}
TEST(SpanTest, CanSubscriptWrite)
{
auto message = create_hello_world_message();
auto const message_span = host_span<char>(message.data(), message.size());
message_span[4] = 'x';
EXPECT_EQ('x', message_span[4]);
}
TEST(SpanTest, CanConstructFromHostContainers)
{
auto std_vector = std::vector<int>(1);
auto h_vector = thrust::host_vector<int>(1);
(void)host_span<int>(std_vector);
(void)host_span<int>(h_vector);
auto const std_vector_c = std_vector;
auto const h_vector_c = h_vector;
(void)host_span<int const>(std_vector_c);
(void)host_span<int const>(h_vector_c);
}
TEST(SpanTest, CanConstructFromDeviceContainers)
{
auto d_thrust_vector = thrust::device_vector<int>(1);
auto d_vector = rmm::device_vector<int>(1);
auto d_uvector = rmm::device_uvector<int>(1, 0);
(void)device_span<int>(d_thrust_vector);
(void)device_span<int>(d_vector);
(void)device_span<int>(d_uvector);
auto const& d_thrust_vector_c = d_thrust_vector;
auto const& d_vector_c = d_vector;
auto const& d_uvector_c = d_uvector;
(void)device_span<int const>(d_thrust_vector_c);
(void)device_span<int const>(d_vector_c);
(void)device_span<int const>(d_uvector_c);
}
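// Device-side smoke test: the kernel writes through a device_span and the host
// test verifies the value after copying the vector back.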
__global__ void simple_device_kernel(device_span<bool> result) { result[0] = true; }
TEST(SpanTest, CanUseDeviceSpan)
{
rmm::device_vector<bool> d_message = std::vector<bool>({false});
auto d_span = device_span<bool>(d_message.data().get(), d_message.size());
simple_device_kernel<<<1, 1>>>(d_span);
cudaDeviceSynchronize();
thrust::host_vector<bool> h_message = d_message;
ASSERT_TRUE(h_message[0]);
}
CUDF_TEST_PROGRAM_MAIN()
|
eaf0b26754af6dcdccc7828ff5db064de482e6d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void kernel( void ) {
}
int main( void ) {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
printf( "Hello, World!\n" );
return 0;
}
| eaf0b26754af6dcdccc7828ff5db064de482e6d8.cu | #include <stdio.h>
__global__ void kernel( void ) {
}
int main( void ) {
kernel<<<1,1>>>();
printf( "Hello, World!\n" );
return 0;
}
|
5bc27973a5df9a86b116a8764418b60fbc63ab83.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "prefixSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
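// Benchmark sweep: for each matrix size and launch configuration the kernel is
// run once and synchronized, warmed up 10 more times, then launched 1000 times
// inside the timed region. There is no synchronize around the timed loop, so
// the reported microseconds largely reflect launch/enqueue overhead.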
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
hipMalloc(&arr, XSIZE*YSIZE);
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((prefixSum), dim3(gridBlock), dim3(threadBlock), 0, 0, arr, step);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((prefixSum), dim3(gridBlock), dim3(threadBlock), 0, 0, arr, step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((prefixSum), dim3(gridBlock), dim3(threadBlock), 0, 0, arr, step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5bc27973a5df9a86b116a8764418b60fbc63ab83.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "prefixSum.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
cudaMalloc(&arr, XSIZE*YSIZE*sizeof(float));
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
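// round the matrix dimensions up to the next multiples of BLOCKX/BLOCKY so the
// (iXSIZE/BLOCKX) x (iYSIZE/BLOCKY) grid covers the whole matrix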
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
prefixSum<<<gridBlock,threadBlock>>>(arr,step);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
prefixSum<<<gridBlock,threadBlock>>>(arr,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
prefixSum<<<gridBlock,threadBlock>>>(arr,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
03bec3e94a5d044815bd733d82d77750fc8319da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaSPadding_kernel( unsigned int nbOutputs, unsigned int outputWidth, unsigned int outputHeight, unsigned int nbChannels, unsigned int batchSize, unsigned int inputWidth, unsigned int inputHeight, int leftPad, int rightPad, int topPad, int botPad, const float* input, float* outputs)
{
const unsigned int inputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*inputWidth*inputHeight;
const unsigned int outputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbOutputs*outputWidth*outputHeight;
// nbCh = nbChannels for propagate
// = nbOutputs for back-propagate
const unsigned int nbCh = min(nbChannels, nbOutputs);
for (unsigned int ch = blockIdx.x; ch < nbCh; ch += gridDim.x)
{
for (unsigned int oy = threadIdx.y; oy < outputHeight; oy += blockDim.y)
{
for (unsigned int ox = threadIdx.x; ox < outputWidth; ox += blockDim.x)
{
float outputValue = 0.0;
int ix = (int) ox - leftPad;
int iy = (int) oy - topPad;
if( ix >= 0 && ix < (int) inputWidth
&& iy >= 0 && iy < (int) inputHeight )
{
outputValue = input[ix +
iy*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset];
}
outputs[ ox + oy*outputWidth
+ ch*outputWidth*outputHeight + outputOffset] = outputValue;
}
}
}
} | 03bec3e94a5d044815bd733d82d77750fc8319da.cu | #include "includes.h"
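// Zero-padding kernel: each thread walks a grid/block-strided set of output pixels,
// copies the matching input pixel when it falls inside the unpadded window, and
// writes 0 for pixels that lie in the padding border.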
__global__ void cudaSPadding_kernel( unsigned int nbOutputs, unsigned int outputWidth, unsigned int outputHeight, unsigned int nbChannels, unsigned int batchSize, unsigned int inputWidth, unsigned int inputHeight, int leftPad, int rightPad, int topPad, int botPad, const float* input, float* outputs)
{
const unsigned int inputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbChannels*inputWidth*inputHeight;
const unsigned int outputOffset
= (blockIdx.z * blockDim.z + threadIdx.z) * nbOutputs*outputWidth*outputHeight;
// nbCh = nbChannels for propagate
// = nbOutputs for back-propagate
const unsigned int nbCh = min(nbChannels, nbOutputs);
for (unsigned int ch = blockIdx.x; ch < nbCh; ch += gridDim.x)
{
for (unsigned int oy = threadIdx.y; oy < outputHeight; oy += blockDim.y)
{
for (unsigned int ox = threadIdx.x; ox < outputWidth; ox += blockDim.x)
{
float outputValue = 0.0;
int ix = (int) ox - leftPad;
int iy = (int) oy - topPad;
if( ix >= 0 && ix < (int) inputWidth
&& iy >= 0 && iy < (int) inputHeight )
{
outputValue = input[ix +
iy*inputWidth
+ ch*inputWidth*inputHeight
+ inputOffset];
}
outputs[ ox + oy*outputWidth
+ ch*outputWidth*outputHeight + outputOffset] = outputValue;
}
}
}
} |
75bfedc15dbd8ea513a15abe120d663b84a6fe1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
n = 2 * nBuses;
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
read_fdpf_data(buses, "../output/fdpf.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
string bus_name = bus_id_to_name[i];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* call the graphlu solver to solve the sparse linear system
* notice that GraphLU_Solve_Singular allows the matrix to be *numerically*
* singular */
int ret_solve = GraphLU_Solve_Singular(matrix, rhs, 0);
if (ret_solve < 0) {
printf("Error: solve_algebraic_equations_one_step: %d\n", ret_solve);
return "FAILED";
}
return "SUCCESS";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
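// invert the 2x2 stator algebraic equations
// Ed'' - Vd = Ra*Id - Xq''*Iq and Eq'' - Vq = Xd''*Id + Ra*Iq
// for the d/q-axis currents Id, Iq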
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS + time_stepping) { // fault cleared
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges; note that the conversion is done
once and for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 10 steps - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 81; // id 36 is bus 28; in the Sichuan system the fault bus is 81;
assert(fault_bus_id <= nBuses);
//apply_fault(fault_bus_id, current_time, 2., 2.2);
apply_fault(fault_bus_id, current_time, 3.0, 3.1);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start_forloop = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
thrust::copy(h2_gen_solution_set[gen_count].begin(), h2_gen_solution_set[gen_count].end(), &d2_gen_solution_set[gen_count*gen_length]);
thrust::copy(h2_gen_error_set[gen_count].begin(), h2_gen_error_set[gen_count].end(), &d2_gen_error_set[gen_count*gen_length]);
gen_count++;
}
//int gen_length = gen_solution[bus_name].size();
//d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
//d_vector_type d2_gen_solution_set(h2_gen_solution_set.begin(), h2_gen_solution_set.end());
//thrust::copy(&(h2_gen_solution_set[0][0]), &(h2_gen_solution_set[7][gen_length-1]), d2_gen_solution_set.begin());
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
//thrust::copy(&(h2_gen_error_set[0][0]), &(h2_gen_error_set[7][gen_length-1]), d2_gen_error_set.begin());
//d_vector_type d2_gen_error_set(h2_gen_error_set.begin(), h2_gen_error_set.end());
//for(int i =0; i<GEN_SIZE; i++){
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
// }
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//d_vector_type d2_gen_solution_set = h2_gen_solution_set;
//d_vector_type d2_gen_error_set = h2_gen_error_set;
std::clock_t start = std::clock();
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
printf("+++After 8 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
printf("+++After 8 Gen computing including forloop: %.4f seconds\n\n", (std::clock() - start_forloop) / (real__t)CLOCKS_PER_SEC);
int gen_ith = 0;
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
gen_solution[bus_name][mu_output_idx] = system.get_mu(gen_ith);
gen_solution[bus_name][PT_output_idx] = system.get_Pmech(gen_ith);
gen_solution[bus_name][Efd_output_idx] = system.get_Efd(gen_ith);
gen_solution[bus_name][VS_output_idx] = system.get_VS(gen_ith);
gen_ith++;
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
| 75bfedc15dbd8ea513a15abe120d663b84a6fe1a.cu | #include <transient.hpp>
//#include <trans_cuda.hpp>
#include <map>
#include <thrust/for_each.h>
#include <string>
#include <utility>
//#include <boost/functional/hash.hpp>
//#include <boost/numeric/odeint/integrate/integrate_const.hpp>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <iostream>
//#include <cmath>
//#include <boost/numeric/odeint/stepper/runge_kutta_dopri5.hpp>
//#include <thrust/device_vector.h>
//#include <thrust/iterator/permutation_iterator.h>
//#include <thrust/iterator/counting_iterator.h>
//#include <boost/numeric/odeint/external/thrust/thrust.hpp>
//#include <thrust/for_each.h>
//#include <thrust/device_vector.h>
//#include <thrust/execution_policy.h>
using namespace std;
using namespace boost::numeric::odeint;
namespace transient_analysis {
/*
typedef pair<string, string> strpair;
struct printf_functor
{
//template <string T1, GENERATOR T2>;
__host__ __device__
void operator()(const pair<string, string> mp)
{
// note that using printf in a __device__ function requires
// code compiled for a GPU with compute capability 2.0 or
// higher (nvcc --arch=sm_20)
//printf("%d\n", x);
//printf("%d\n", y);
printf("mp first=%s\n", mp.first);
printf("success in printf_functor\n");
//uint__t bus_id = bus_name_to_id[bus_name] - 1;
}
};
*/
//extern "C" void test_cuda(unordered_map<int, int> generators);
//extern "C" void test_cuda(map<string, GENERATOR> generators);
//void test_cuda(map<string, GENERATOR> &generators);
//extern __global__ void test_cuda();
Transient_Solver::
Transient_Solver(real__t start_time, real__t end_time, real__t time_stepping, string main_path)
: start_time(start_time), end_time(end_time), time_stepping(time_stepping), main_path(main_path) {}
Transient_Solver::~Transient_Solver() {
GraphLU_Destroy(Ybus_matrix);
free(Ybus_matrix);
free(eY);
free(ei);
free(ep);
free(gCurrent);
free(gVoltage);
}
void Transient_Solver::setup_system() {
load_system_data(main_path);
print_system_summary();
get_bus_name_id_mapping();
bus_types[1] = "PQ Bus";
bus_types[2] = "PV Bus";
bus_types[3] = "Slack Bus";
string output_path = "../output";
remove((output_path + "/genBusResults.csv").c_str());
remove((output_path + "/allBusResults.csv").c_str());
nBuses = buses.size();
nGen = generators.size();
nGenUnknowns = VS_output_idx + 1;
n = 2 * nBuses;
nnz = 4 * (line.size() + nBuses);
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
gen_solution [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_error [bus_name] = vector<real__t>(nGenUnknowns, 0.);
gen_dq_current[bus_name] = vector<real__t>(2, 0.);
}
bus_voltage = vector<vector<real__t>> (nBuses, vector<real__t>(2, 0.));
// dump_Voltages();
print_gen_parameters();
// print_branch_data();
gCurrent = (real__t*)calloc(n, sizeof(real__t));
gVoltage = (real__t*)calloc(n, sizeof(real__t));
/** allocate memory for arrays used in CSR form */
eY = (real__t*)malloc(sizeof(real__t) * (nnz)); // nonzero values in Ybus matrix
ei = (uint__t*)malloc(sizeof(uint__t) * (nnz)); // column idx of Ybus matrix
ep = (uint__t*)malloc(sizeof(uint__t) * (n + 1)); // initial row pointers
Ybus_matrix = (SGraphLU*)malloc(sizeof(SGraphLU));
gVertex_all = vector<vector<real__t>>(nBuses, vector<real__t>(5, 0.));
current_time = start_time;
num_of_steps = 0;
tol = 1.e-4;
max_step_size = 0.01;
is_modify_Y_bus_matrix = true;
is_fault_treated = false;
}
void Transient_Solver::load_system_data(string folder_path) {
read_bus_data(buses, folder_path + "/system/Node.csv");
read_generator_node_data(buses, generators, folder_path + "/system/Generator.csv");
// read_load_data(buses, folder_path + "/system/Load.csv");
read_compensator_P_data(buses, folder_path + "/system/Compensator_P.csv");
// read_DC_line_data(buses, line, folder_path + "/system/DC_Line.csv");
read_AC_line_data(buses, line, folder_path + "/system/AC_Line.csv");
read_two_winding_transformer_data(buses, line, folder_path + "/system/Two_winding_transformer.csv");
read_three_winding_transformer_data(buses, line, folder_path + "/system/Three_winding_transformer.csv");
read_fdpf_data(buses, "../output/fdpf.csv");
read_EPRI_GEN_data(all_gen, folder_path + "/parameter/Synchronous_Machine.csv");
read_EPRI_GOV_I_data(all_gov_1, folder_path + "/parameter/Governor_1.csv");
read_EPRI_GOV_II_data(all_gov_2, folder_path + "/parameter/Governor_2.csv");
read_EPRI_GOV_III_data(all_gov_3, folder_path + "/parameter/Governor_3.csv");
read_EPRI_GOV_IV_data(all_gov_4, folder_path + "/parameter/Governor_4.csv");
read_EPRI_GOV_V_data(all_gov_5, folder_path + "/parameter/Governor_5.csv");
read_EPRI_GOV_VII_data(all_gov_7, folder_path + "/parameter/Governor_7.csv");
read_EPRI_GOV_VIII_data(all_gov_8, folder_path + "/parameter/Governor_8.csv");
read_EPRI_GOV_IX_data(all_gov_9, folder_path + "/parameter/Governor_9.csv");
read_EPRI_EXC_I_data(all_exc_1, folder_path + "/parameter/AVR_1.csv");
read_EPRI_EXC_II_data(all_exc_2, folder_path + "/parameter/AVR_2.csv");
read_EPRI_EXC_III_TO_X_data(all_exc_3_10, folder_path + "/parameter/AVR_3_to_10.csv");
read_EPRI_EXC_XI_TO_XII_data(all_exc_11_12, folder_path + "/parameter/AVR_11_to_12.csv");
read_EPRI_PSS_I_data(all_pss_1, folder_path + "/parameter/PSS_1.csv");
read_EPRI_PSS_II_data(all_pss_2, folder_path + "/parameter/PSS_2.csv");
read_EPRI_PSS_IV_VI_data(all_pss_4_6, folder_path + "/parameter/PSS_4_6.csv");
read_EPRI_PSS_V_data(all_pss_5, folder_path + "/parameter/PSS_5.csv");
read_EPRI_PSS_VIII_data(all_pss_8, folder_path + "/parameter/PSS_8.csv");
#if DEBUG
cout << "Transient simulation loading data success!\n";
#endif
}
void Transient_Solver::get_bus_name_id_mapping() {
int idx = 1; // id begins with 1
for (auto &b : buses) {
bus_name_to_id[b.first] = idx;
bus_id_to_name[idx] = b.first;
++idx;
}
#if DEBUG
printf("Transient simulation getting bus_name to bus_id mapping success!\n");
#endif
}
void Transient_Solver::dump_Voltages() {
if (current_time == start_time) {
for (int i = 0; i < buses.size(); ++i) {
string bus_name = bus_id_to_name[i];
bus_voltage[i][0] = buses[bus_name].Vm;
bus_voltage[i][1] = buses[bus_name].Va;
}
} else {
for (int i = 0; i < buses.size(); ++i) {
real__t Vx = gVoltage[i];
real__t Vy = gVoltage[i + nBuses];
bus_voltage[i][0] = sqrt(Vx * Vx + Vy * Vy);
bus_voltage[i][1] = atan2(Vy, Vx);
}
}
#if DEBUG
printf("Transient simulation dumping voltages success!\n");
#endif
}
string Transient_Solver::
solve_algebraic_equations_one_step(SGraphLU* matrix, real__t* rhs) {
/* call the graphlu solver to solve the sparse linear system
* notice that GraphLU_Solve_Singular allows the matrix to be *numerically*
* singular */
int ret_solve = GraphLU_Solve_Singular(matrix, rhs, 0);
if (ret_solve < 0) {
printf("Error: solve_algebraic_equations_one_step: %d\n", ret_solve);
return "FAILED";
}
return "SUCCESS";
}
/* a matrix-vector multiplier (not used in this code) */
void Transient_Solver::
matrix_vector_mult(const SGraphLU* matrix, const real__t* src, real__t* dst) {
uint__t n = matrix->n;
real__t* ax = matrix->ax;
uint__t* ai = matrix->ai;
uint__t* ap = matrix->ap;
for (int i = 0; i < n; ++i) {
dst[i] = 0;
for (int k = ap[i]; k < ap[i + 1]; ++k) {
dst[i] += ax[k] * src[ai[k]];
}
}
}
void Transient_Solver::update_step_size() {
real__t err = 0;
for (auto &g : gen_error) {
for (auto &v : g.second) err += abs(v * v);
}
err = max(sqrt(err), EPS);
printf("current error = %2.12f\n", err);
real__t q = pow(tol / err, 1. / 4.) * 0.8;
if (err < tol)
step_size = min(max(q, 0.1), 4.) * step_size;
else
step_size = min(max(q, 0.1), 1.) * step_size;
step_size = min(max_step_size, step_size);
}
void Transient_Solver::get_generator_current() {
for (auto &g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
uint__t Gen_Par = gen.Gen_Par;
if (Gen_Par == 0) continue;
real__t Vx = gVoltage[bus_id];
real__t Vy = gVoltage[bus_id + nBuses];
real__t delta = gen_solution[bus_name][delta_idx];
real__t Vd = Vx * sin(delta) - Vy * cos(delta);
real__t Vq = Vx * cos(delta) + Vy * sin(delta);
real__t Ra = all_gen[Gen_Par].Ra;
real__t Xdpp = all_gen[Gen_Par].Xdpp;
real__t Xqpp = all_gen[Gen_Par].Xqpp;
real__t Edpp = gen_solution[bus_name][Edpp_idx];
real__t Eqpp = gen_solution[bus_name][Eqpp_idx];
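// invert the 2x2 stator algebraic equations
// Ed'' - Vd = Ra*Id - Xq''*Iq and Eq'' - Vq = Xd''*Id + Ra*Iq
// for the d/q-axis currents Id, Iq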
real__t denom = Ra * Ra + Xdpp * Xqpp;
assert(denom > EPS);
gen_dq_current[bus_name][0] = (+Ra * (Edpp - Vd) + Xqpp * (Eqpp - Vq)) / denom;
gen_dq_current[bus_name][1] = (-Xdpp * (Edpp - Vd) + Ra * (Eqpp - Vq)) / denom;
// cout << "dq currents in get_generator_current:" << endl;
// cout << "Id, Iq = " << gen_dq_current[bus_name][0] << ", " << gen_dq_current[bus_name][1] << endl;
#if DEBUG
printf("Transient simulation getting generator current success for bus %s!\n", bus_name.c_str());
#endif
}
}
void Transient_Solver::
apply_fault(const uint__t busID, const real__t t, const real__t fault_btime, const real__t fault_etime) {
/* busID is indexed from 1 */
if (t >= fault_btime && t < fault_etime - time_stepping) { // fault happens
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] += INF;
is_fault_treated = true;
break;
}
}
} else if (abs(t - fault_etime) <= EPS + time_stepping) { // fault cleared
is_fault_treated = false;
} else if (t > fault_etime) {
if (is_fault_treated)
return;
for (int i = 0; i < gEdge_all.size(); ++i) {
if (gEdge_all[i][0] == busID - 1 && gEdge_all[i][1] == busID - 1) {
gEdge_all[i][2] -= INF;
is_fault_treated = true;
break;
}
}
}
}
/** this function should be called after the compute_ODE_initial_values */
void Transient_Solver::assign_ode_solver_data(string bus_name, GENERATOR& gen) {
system.parameters.EXC_type = gen.AVR_Model;
switch (gen.AVR_Model) {
case 1:
system.parameters.exc_1 = all_exc_1[gen.AVR_Par]; break;
case 2:
system.parameters.exc_2 = all_exc_2[gen.AVR_Par]; break;
case 3: case 4: case 5: case 6: case 7: case 8: case 9: case 10:
system.parameters.exc_3_10 = all_exc_3_10[gen.AVR_Par]; break;
case 11: case 12:
system.parameters.exc_11_12 = all_exc_11_12[gen.AVR_Par]; break;
default:
break;
}
system.parameters.GOV_type = gen.GOV_Model;
switch (gen.GOV_Model) {
case 1: system.parameters.gov_1 = all_gov_1[gen.GOV_Par]; break;
case 2: system.parameters.gov_2 = all_gov_2[gen.GOV_Par]; break;
case 3: system.parameters.gov_3 = all_gov_3[gen.GOV_Par]; break;
case 4: system.parameters.gov_4 = all_gov_4[gen.GOV_Par]; break;
case 5: system.parameters.gov_5 = all_gov_5[gen.GOV_Par]; break;
case 7: system.parameters.gov_7 = all_gov_7[gen.GOV_Par]; break;
case 8: system.parameters.gov_8 = all_gov_8[gen.GOV_Par]; break;
case 9: system.parameters.gov_9 = all_gov_9[gen.GOV_Par]; break;
}
system.parameters.PSS_type = gen.PSS_Model;
switch (gen.PSS_Model) {
case 1: system.parameters.pss_1 = all_pss_1[gen.PSS_Par]; break;
case 2: system.parameters.pss_2 = all_pss_2[gen.PSS_Par]; break;
case 4: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 5: system.parameters.pss_5 = all_pss_5[gen.PSS_Par]; break;
case 6: system.parameters.pss_4_6 = all_pss_4_6[gen.PSS_Par]; break;
case 8: system.parameters.pss_8 = all_pss_8[gen.PSS_Par]; break;
}
system.parameters.GEN_type = gen.Gen_Model;
if (gen.Gen_Par == 0) {
all_gen[0].Xdp = gen.Xdp < EPS ? 0.0001 : gen.Xdp;
all_gen[0].Xdpp = gen.Xdpp < EPS ? 0.0001 : gen.Xdpp;
all_gen[0].TJ = gen.TJ < EPS ? 999999.875 : gen.TJ;
all_gen[0].X2 = gen.X2;
all_gen[0].Ra = 0.;
}
system.parameters.gen = all_gen[gen.Gen_Par];
system.parameters.gen.bus_id = bus_name_to_id[bus_name] - 1;
system.parameters.omega_ref = gen.omega_ref;
system.parameters.freq_ref = gen.freq_ref;
system.parameters.Pe_ref = gen.Pe_ref;
system.parameters.Vt_ref = gen.Vt_ref;
system.parameters.Efd0 = gen.Efd0;
system.parameters.mu0 = gen.mu0;
system.parameters.Rate_MW = gen.Rate_MW;
}
void Transient_Solver::run(int argc, char** argv) {
/** initialize simulation settings */
setup_system();
compute_ODE_initial_values();
runge_kutta_dopri5<d_vector_type, value_type , d_vector_type , value_type> dopri5_stepper_type;
print_bus_data();
/** convert the data on the edges; note that the conversion is done
once and for all, unless the topology of the network changes */
generate_edges_matrix();
const value_type dt = 0.1;
//d2_vector_type d2_gen_solution_set;
h2_vector_type h2_gen_solution_set;
h2_vector_type h2_gen_error_set;
//double h2_gen_solution_set[8][35];
while (current_time <= end_time) {
/* output every 10 steps - modify if necessary */
if ((num_of_steps % 5 == 0 && current_time > 0) || num_of_steps == 1) {
output_data_to_csv_one_step();
}
/* make sure that current_time + dt does not exceed end_time */
time_stepping = min(time_stepping, end_time - current_time);
if (time_stepping == 0) break;
/** add a fault to a LOAD bus */
int fault_bus_id = 81; // id 36 is bus 28; in the Sichuan system the fault bus is 81;
assert(fault_bus_id <= nBuses);
//apply_fault(fault_bus_id, current_time, 2., 2.2);
apply_fault(fault_bus_id, current_time, 3.0, 3.1);
/** update the buses info, then generate the new Y bus matrix */
convert_nodes();
convert_to_CSR_arrays();
generate_Y_Bus_matrix();
/** As graphlu returns the solution in-place, we copy gCurrent to gVoltage */
memcpy(gVoltage, gCurrent, sizeof(*gCurrent) * n);
/** solve one step of algebraic equations */
string result = solve_algebraic_equations_one_step(Ybus_matrix, gVoltage);
if (result == "FAILED") {
std::cerr << "Solving algebraic equations FAILED, please check!!!\n";
std::terminate();
}
int gen_length = 35;
int gen_count = 0;
int gen_attr_count = 0;
d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
std::clock_t start_forloop = std::clock();
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
uint__t bus_id = bus_name_to_id[bus_name] - 1;
/** update the Vx and Vy values at each gen node */
system.Vx[gen_count] = gVoltage[bus_id];
system.Vy[gen_count] = gVoltage[bus_id + nBuses];
system.Id[gen_count] = gen_dq_current[bus_name][0];
system.Iq[gen_count] = gen_dq_current[bus_name][1];
assign_ode_solver_data(bus_name, gen);
gen_length = gen_solution[bus_name].size();
d_vector_type d_gen_solution_set = gen_solution[bus_name];
d_vector_type d_gen_error_set = gen_error[bus_name];
//h2_gen_solution_set[gen_count][gen_attr_count]
h2_gen_solution_set.push_back(gen_solution[bus_name]);
h2_gen_error_set.push_back(gen_error[bus_name]);
thrust::copy(h2_gen_solution_set[gen_count].begin(), h2_gen_solution_set[gen_count].end(), &d2_gen_solution_set[gen_count*gen_length]);
thrust::copy(h2_gen_error_set[gen_count].begin(), h2_gen_error_set[gen_count].end(), &d2_gen_error_set[gen_count*gen_length]);
gen_count++;
}
//int gen_length = gen_solution[bus_name].size();
//d_vector_type d2_gen_solution_set(GEN_SIZE*gen_length);
//d_vector_type d2_gen_solution_set(h2_gen_solution_set.begin(), h2_gen_solution_set.end());
//thrust::copy(&(h2_gen_solution_set[0][0]), &(h2_gen_solution_set[7][gen_length-1]), d2_gen_solution_set.begin());
//for(int i=0; i < GEN_SIZE; i++){
// thrust::copy(h2_gen_solution_set[i].begin(), h2_gen_solution_set[i].end(), &d2_gen_solution_set[i*gen_length]);
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
//}
thrust::sequence(d2_gen_solution_set.begin(), d2_gen_solution_set.end());
//d_vector_type d2_gen_error_set(GEN_SIZE*gen_length);
//thrust::copy(&(h2_gen_error_set[0][0]), &(h2_gen_error_set[7][gen_length-1]), d2_gen_error_set.begin());
//d_vector_type d2_gen_error_set(h2_gen_error_set.begin(), h2_gen_error_set.end());
//for(int i =0; i<GEN_SIZE; i++){
// thrust::copy(h2_gen_error_set[i].begin(), h2_gen_error_set[i].end(), &d2_gen_error_set[i*gen_length]);
// }
thrust::sequence(d2_gen_error_set.begin(), d2_gen_error_set.end());
//d_vector_type d2_gen_solution_set = h2_gen_solution_set;
//d_vector_type d2_gen_error_set = h2_gen_error_set;
std::clock_t start = std::clock();
dopri5_stepper_type.do_step(system, d2_gen_solution_set, current_time,
time_stepping, d2_gen_error_set);
printf("+++After 8 Gen computing: %.4f seconds\n\n", (std::clock() - start) / (real__t)CLOCKS_PER_SEC);
printf("+++After 8 Gen computing including forloop: %.4f seconds\n\n", (std::clock() - start_forloop) / (real__t)CLOCKS_PER_SEC);
int gen_ith = 0;
for (auto& g_hldr : generators) {
auto & bus_name=g_hldr.first;
auto & gen=g_hldr.second;
//d2_gen_solution_set.push_back(gen_solution[bus_name]);
//integrate_const( dopri5_stepper_type , system , d_gen_solution_set , 0.0 , 1.0 , dt );
////dopri5_stepper_type.do_step(system, d_gen_solution_set, current_time,
//// time_stepping, d_gen_error_set);
//dopri5_stepper_type.do_step(system, gen_solution[bus_name], current_time,
// time_stepping, gen_error[bus_name]);
//integrate_adaptive( make_controlled( 1.0e-6 , 1.0e-6 , dopri5_stepper_type() ) , system , x , t , t + 1.0 , 0.1 );
//cout << "End of dp_step===========================" << endl;
/** post-process: prepare for outputs */
gen_solution[bus_name][mu_output_idx] = system.get_mu(gen_ith);
gen_solution[bus_name][PT_output_idx] = system.get_Pmech(gen_ith);
gen_solution[bus_name][Efd_output_idx] = system.get_Efd(gen_ith);
gen_solution[bus_name][VS_output_idx] = system.get_VS(gen_ith);
gen_ith++;
/* update the time and number of steps */
//current_time += time_stepping;
//num_of_steps++;
}
//cout << "\n+++Now number of steps: " << num_of_steps << endl;
#if DEBUG
if (num_of_steps % 1 == 0) {
// print_matrix<real__t>(gEdge_all, "System Matrix");
// print_array<real__t>(gCurrent, buses.size(), "RHS: ");
// print_array<real__t>(gVoltage, buses.size(), "Solution: ");
print_bus_data();
print_gen_solution();
}
#endif
/* update the time and number of steps */
current_time += time_stepping;
num_of_steps++;
}
printf("\n\nTransient Simulation Result:\n");
print_bus_data();
print_gen_solution();
/* Done with the simulation! Congratulations! */
cout << "\n+++Total number of steps: " << num_of_steps << endl;
}
} // namespace transient_analysis
|
218464c8fa1852b1623ccef92d3fdd954634817f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
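// element-wise tangent over strided views: thread gid reads x[offset_x + gid*stride_x]
// and writes its tangent to y[offset_y + gid*stride_y]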
__global__ void vector_tan (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(tan)(x[offset_x + gid * stride_x]);
}
} | 218464c8fa1852b1623ccef92d3fdd954634817f.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
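// element-wise tangent over strided views: thread gid reads x[offset_x + gid*stride_x]
// and writes its tangent to y[offset_y + gid*stride_y]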
__global__ void vector_tan (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
y[offset_y + gid * stride_y] = CAST(tan)(x[offset_x + gid * stride_x]);
}
} |
6d032fe149f6c22c8975ff3e9334311ea119c318.hip | // !!! This is a file automatically generated by hipify!!!
/*!
Count triangles using NVGraph
*/
#include <iostream>
#include <vector>
#include <roctracer/roctx.h>
#include <clara/clara.hpp>
#include <fmt/format.h>
#include "pangolin/configure.hpp"
#include "pangolin/file/tsv.hpp"
#include "pangolin/init.hpp"
#include "pangolin/algorithm/csr/tc_nvgraph.hpp"
#include "pangolin/sparse/csr_val.hpp"
struct RunOptions {
int iters;
std::vector<int> gpus;
std::string path; //!< path for graph
std::string sep; //!< separator for output
bool readMostly;
bool accessedBy;
bool prefetchAsync;
bool shrinkToFit;
bool preCountBarrier;
};
void print_header(const RunOptions &opts) {
fmt::print("bmark{0}bs{0}gpus{0}graph{0}nodes{0}edges{0}tris", opts.sep);
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_teps{}", opts.sep, i);
}
fmt::print("\n");
}
template <typename V> void print_vec(const V &vec, const std::string &sep) {
for (const auto &e : vec) {
fmt::print("{}{}", sep, e);
}
}
int run(RunOptions &opts) {
// CUSparse uses integers for indices
typedef int NodeIndex;
typedef int EdgeIndex;
typedef float Val;
typedef pangolin::WeightedDiEdge<NodeIndex, Val> GraphEdge;
typedef pangolin::CSR<NodeIndex, EdgeIndex, Val> CSR;
typedef pangolin::file::TSV TSV;
typedef TSV::edge_type FileEdge;
typedef pangolin::NVGraphTC TC;
auto gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
std::vector<double> totalTimes;
std::vector<double> gpuTimes;
std::vector<double> countTimes;
std::vector<double> kernelTimes;
uint64_t nnz;
uint64_t numRows;
uint64_t tris;
// create csr and count `opts.iters` times
for (int i = 0; i < opts.iters; ++i) {
const auto totalStart = std::chrono::system_clock::now();
// read data
TSV file(opts.path);
std::vector<FileEdge> fileEdges = file.read_edges();
double elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", fileEdges.size());
// build CSR
CSR csr;
for (auto fileEdge : fileEdges) {
GraphEdge graphEdge;
graphEdge.src = fileEdge.src;
graphEdge.dst = fileEdge.dst;
graphEdge.val = fileEdge.val;
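// keep only one direction of each edge (src > dst), i.e. a lower-triangular
// adjacency, so every undirected edge is stored exactly once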
if (graphEdge.src > graphEdge.dst) {
csr.add_next_edge(graphEdge);
}
}
csr.finish_edges();
if (opts.shrinkToFit) {
LOG(debug, "shrink CSR");
csr.shrink_to_fit();
}
elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "io/csr time {}s", elapsed);
LOG(debug, "CSR nnz = {} rows = {}", csr.nnz(), csr.num_rows());
LOG(debug, "CSR cap = {}MB size = {}MB", csr.capacity_bytes() / 1024 / 1024, csr.size_bytes() / 1024 / 1024);
const auto gpuStart = std::chrono::system_clock::now();
// read-mostly
roctxRangePush("read-mostly");
auto start = std::chrono::system_clock::now();
if (opts.readMostly) {
csr.read_mostly();
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
roctxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto &gpu = gpus[gpuIdx];
csr.prefetch_async(gpu, 0);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
if (opts.preCountBarrier) {
LOG(debug, "sync streams after hints");
CUDA_RUNTIME(hipDeviceSynchronize());
}
// count triangles
roctxRangePush("count");
const auto countStart = std::chrono::system_clock::now();
TC counter(gpus[0]);
tris = counter.count_sync(csr);
const auto stop = std::chrono::system_clock::now();
roctxRangePop(); // count
// record graph stats
// tris = total;
nnz = csr.nnz();
numRows = csr.num_rows();
const double totalElapsed = (stop - totalStart).count() / 1e9;
const double gpuElapsed = (stop - gpuStart).count() / 1e9;
const double countElapsed = (stop - countStart).count() / 1e9;
LOG(info, "total time {}s ({} teps)", totalElapsed, nnz / totalElapsed);
LOG(info, "gpu time {}s ({} teps)", gpuElapsed, nnz / gpuElapsed);
LOG(info, "count time {}s ({} teps)", countElapsed, nnz / countElapsed);
totalTimes.push_back(totalElapsed);
gpuTimes.push_back(gpuElapsed);
countTimes.push_back(countElapsed);
// for (auto &counter : counters) {
// double secs = counter.kernel_time();
// int dev = counter.device();
// LOG(info, "gpu {} kernel time {}s ({} teps)", dev, secs, nnz / secs);
// }
// if (counters.size() == 1) {
// kernelTimes.push_back(counters[0].kernel_time());
// } else {
// kernelTimes.push_back(0);
// }
}
if (opts.iters > 0) {
fmt::print("nvgraph");
std::string gpuStr;
for (auto gpu : gpus) {
gpuStr += std::to_string(gpu);
}
fmt::print("{}{}", opts.sep, gpuStr);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, numRows);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(totalTimes, opts.sep);
for (const auto &s : totalTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(gpuTimes, opts.sep);
for (const auto &s : gpuTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(countTimes, opts.sep);
for (const auto &s : countTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
fmt::print("\n");
}
return 0;
}
int main(int argc, char **argv) {
pangolin::init();
RunOptions opts;
opts.sep = ",";
opts.iters = 1;
opts.shrinkToFit = false;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.preCountBarrier = true;
bool help = false;
bool debug = false;
bool verbose = false;
bool onlyPrintHeader = false;
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.shrinkToFit)["--shrink-to-fit"]("shrink allocations to fit data");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (onlyPrintHeader) {
print_header(opts);
return 0;
}
return run(opts);
}
| 6d032fe149f6c22c8975ff3e9334311ea119c318.cu | /*!
Count triangles using NVGraph
*/
#include <iostream>
#include <vector>
#include <nvToolsExt.h>
#include <clara/clara.hpp>
#include <fmt/format.h>
#include "pangolin/configure.hpp"
#include "pangolin/file/tsv.hpp"
#include "pangolin/init.hpp"
#include "pangolin/algorithm/csr/tc_nvgraph.hpp"
#include "pangolin/sparse/csr_val.hpp"
struct RunOptions {
int iters;
std::vector<int> gpus;
std::string path; //!< path for graph
std::string sep; //!< separator for output
bool readMostly;
bool accessedBy;
bool prefetchAsync;
bool shrinkToFit;
bool preCountBarrier;
};
void print_header(const RunOptions &opts) {
fmt::print("bmark{0}bs{0}gpus{0}graph{0}nodes{0}edges{0}tris", opts.sep);
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}total_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}gpu_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}count_teps{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_time{}", opts.sep, i);
}
for (auto i = 0; i < opts.iters; ++i) {
fmt::print("{}kernel_teps{}", opts.sep, i);
}
fmt::print("\n");
}
template <typename V> void print_vec(const V &vec, const std::string &sep) {
for (const auto &e : vec) {
fmt::print("{}{}", sep, e);
}
}
int run(RunOptions &opts) {
// CUSparse uses integers for indices
typedef int NodeIndex;
typedef int EdgeIndex;
typedef float Val;
typedef pangolin::WeightedDiEdge<NodeIndex, Val> GraphEdge;
typedef pangolin::CSR<NodeIndex, EdgeIndex, Val> CSR;
typedef pangolin::file::TSV TSV;
typedef TSV::edge_type FileEdge;
typedef pangolin::NVGraphTC TC;
auto gpus = opts.gpus;
if (gpus.empty()) {
LOG(warn, "no GPUs provided on command line, using GPU 0");
gpus.push_back(0);
}
std::vector<double> totalTimes;
std::vector<double> gpuTimes;
std::vector<double> countTimes;
std::vector<double> kernelTimes;
uint64_t nnz;
uint64_t numRows;
uint64_t tris;
// create csr and count `opts.iters` times
for (int i = 0; i < opts.iters; ++i) {
const auto totalStart = std::chrono::system_clock::now();
// read data
TSV file(opts.path);
std::vector<FileEdge> fileEdges = file.read_edges();
double elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "read_data time {}s", elapsed);
LOG(debug, "read {} edges", fileEdges.size());
// build CSR
CSR csr;
for (auto fileEdge : fileEdges) {
GraphEdge graphEdge;
graphEdge.src = fileEdge.src;
graphEdge.dst = fileEdge.dst;
graphEdge.val = fileEdge.val;
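// keep only one direction of each edge (src > dst), i.e. a lower-triangular
// adjacency, so every undirected edge is stored exactly once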
if (graphEdge.src > graphEdge.dst) {
csr.add_next_edge(graphEdge);
}
}
csr.finish_edges();
if (opts.shrinkToFit) {
LOG(debug, "shrink CSR");
csr.shrink_to_fit();
}
elapsed = (std::chrono::system_clock::now() - totalStart).count() / 1e9;
LOG(info, "io/csr time {}s", elapsed);
LOG(debug, "CSR nnz = {} rows = {}", csr.nnz(), csr.num_rows());
LOG(debug, "CSR cap = {}MB size = {}MB", csr.capacity_bytes() / 1024 / 1024, csr.size_bytes() / 1024 / 1024);
const auto gpuStart = std::chrono::system_clock::now();
// read-mostly
nvtxRangePush("read-mostly");
auto start = std::chrono::system_clock::now();
if (opts.readMostly) {
csr.read_mostly();
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
nvtxRangePop();
LOG(info, "read-mostly CSR time {}s", elapsed);
// accessed-by
start = std::chrono::system_clock::now();
if (opts.accessedBy) {
for (const auto &gpu : gpus) {
csr.accessed_by(gpu);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "accessed-by CSR time {}s", elapsed);
// prefetch
start = std::chrono::system_clock::now();
if (opts.prefetchAsync) {
for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) {
auto &gpu = gpus[gpuIdx];
csr.prefetch_async(gpu, 0);
}
}
elapsed = (std::chrono::system_clock::now() - start).count() / 1e9;
LOG(info, "prefetch CSR time {}s", elapsed);
if (opts.preCountBarrier) {
LOG(debug, "sync streams after hints");
CUDA_RUNTIME(cudaDeviceSynchronize());
}
// count triangles
nvtxRangePush("count");
const auto countStart = std::chrono::system_clock::now();
TC counter(gpus[0]);
tris = counter.count_sync(csr);
const auto stop = std::chrono::system_clock::now();
nvtxRangePop(); // count
// record graph stats
// tris = total;
nnz = csr.nnz();
numRows = csr.num_rows();
const double totalElapsed = (stop - totalStart).count() / 1e9;
const double gpuElapsed = (stop - gpuStart).count() / 1e9;
const double countElapsed = (stop - countStart).count() / 1e9;
LOG(info, "total time {}s ({} teps)", totalElapsed, nnz / totalElapsed);
LOG(info, "gpu time {}s ({} teps)", gpuElapsed, nnz / gpuElapsed);
LOG(info, "count time {}s ({} teps)", countElapsed, nnz / countElapsed);
totalTimes.push_back(totalElapsed);
gpuTimes.push_back(gpuElapsed);
countTimes.push_back(countElapsed);
// for (auto &counter : counters) {
// double secs = counter.kernel_time();
// int dev = counter.device();
// LOG(info, "gpu {} kernel time {}s ({} teps)", dev, secs, nnz / secs);
// }
// if (counters.size() == 1) {
// kernelTimes.push_back(counters[0].kernel_time());
// } else {
// kernelTimes.push_back(0);
// }
}
if (opts.iters > 0) {
fmt::print("nvgraph");
std::string gpuStr;
for (auto gpu : gpus) {
gpuStr += std::to_string(gpu);
}
fmt::print("{}{}", opts.sep, gpuStr);
fmt::print("{}{}", opts.sep, opts.path);
fmt::print("{}{}", opts.sep, numRows);
fmt::print("{}{}", opts.sep, nnz);
fmt::print("{}{}", opts.sep, tris);
print_vec(totalTimes, opts.sep);
for (const auto &s : totalTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(gpuTimes, opts.sep);
for (const auto &s : gpuTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(countTimes, opts.sep);
for (const auto &s : countTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
print_vec(kernelTimes, opts.sep);
for (const auto &s : kernelTimes) {
fmt::print("{}{}", opts.sep, nnz / s);
}
fmt::print("\n");
}
return 0;
}
int main(int argc, char **argv) {
pangolin::init();
RunOptions opts;
opts.sep = ",";
opts.iters = 1;
opts.shrinkToFit = false;
opts.readMostly = false;
opts.accessedBy = false;
opts.prefetchAsync = false;
opts.preCountBarrier = true;
bool help = false;
bool debug = false;
bool verbose = false;
bool onlyPrintHeader = false;
clara::Parser cli;
cli = cli | clara::Help(help);
cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr");
cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr");
cli = cli | clara::Opt(onlyPrintHeader)["--header"]("print the header for the times output and quit");
cli = cli | clara::Opt(opts.gpus, "dev ids")["-g"]("gpus to use");
cli = cli | clara::Opt(opts.shrinkToFit)["--shrink-to-fit"]("shrink allocations to fit data");
cli = cli | clara::Opt(opts.readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel");
cli = cli | clara::Opt(opts.accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel");
cli = cli | clara::Opt(opts.prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel");
cli = cli | clara::Opt(opts.iters, "N")["-n"]("number of counts");
cli = cli | clara::Arg(opts.path, "graph file")("Path to adjacency list").required();
auto result = cli.parse(clara::Args(argc, argv));
if (!result) {
LOG(error, "Error in command line: {}", result.errorMessage());
exit(1);
}
if (help) {
std::cout << cli;
return 0;
}
// set logging level
if (verbose) {
pangolin::logger::set_level(pangolin::logger::Level::TRACE);
} else if (debug) {
pangolin::logger::set_level(pangolin::logger::Level::DEBUG);
}
// log command line before much else happens
{
std::string cmd;
for (int i = 0; i < argc; ++i) {
if (i != 0) {
cmd += " ";
}
cmd += argv[i];
}
LOG(debug, cmd);
}
LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH);
LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC);
LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH);
LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES);
#ifndef NDEBUG
LOG(warn, "Not a release build");
#endif
if (onlyPrintHeader) {
print_header(opts);
return 0;
}
return run(opts);
}
|
d78ea1d93293a00d0ed346e940eb8c04706c8de6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* sort node count and range
*
* 1 0,13
* 2 0,6 7,13
* 4 0,3 4,6 7,10 11,13
* 8 0,1 2,3 4,5 5,6 7,8 9,10 11,12 13,13
*
 * each node consumes one task and spawns two, one or zero new tasks
* level n needs max 2^n nodes
* each level has node count and range updated by atomic
*
* taskIn 1 0,13 |
* taskOut 3 0,13 | 0,6 7,13
*
* taskIn 3 0,13 0,6 7,13 |
* taskOut 7 0,13 0,6 7,13 | 0,3 4,6 7,10 11,13
*
* taskIn 7 0,13 0,6 7,13 0,3 4,6 7,10 11,13 |
* taskOut 15 0,13 0,6 7,13 0,3 4,6 7,10 11,13 | 0,1 2,3 4,5 5,6 7,8 9,10 11,12 13,13
*
 * when taskIn.tbid > taskOut.qtail, no more work is available
 * no more work being available doesn't mean the job is done:
 * more tasks could still be added, hence the need for workDoneCounter
 * once workDoneCounter > taskOut.qtail, there is nothing left to do
 * initially task out is a copy of task in
 * new tasks will be added to the end
 * each time a task is done, first enqueue() if needed,
* then check if work is done, if not then dequeue()
*
*/
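/*
 * A minimal sketch of the worker loop implied above (illustrative only;
 * Task, process() and the loop shape are placeholders -- the real
 * identifiers live in quickSort.cuh):
 *
 *   Task t = dequeue();                 // valid while taskIn.tbid <= taskOut.qtail
 *   while (true) {
 *     process(t);                       // enqueue()s two, one or zero subtasks
 *     atomicAdd(&workDoneCounter, 1);
 *     if (workDoneCounter > taskOut.qtail)
 *       break;                          // nothing left to do
 *     t = dequeue();                    // otherwise fetch (or wait for) more work
 *   }
 */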
#include "quickSort.cuh"
#include "OddEvenSort.cuh"
#include "bvh_common.h"
#include "CudaBase.h"
extern "C" {
void cu_testQuickSort(void * q,
uint * idata,
uint * nodes,
int * elements,
SimpleQueueInterface * qi,
uint numElements,
uint * workBlocks,
uint * loopbuf,
int * headtailperloop)
{
//hipDeviceSynchronize();
simpleQueue::SimpleQueue * queue = (simpleQueue::SimpleQueue *)q;
hipLaunchKernelGGL(( simpleQueue::init_kernel), dim3(1),dim3(32) , 0, 0, queue, 1, elements);
const int tpb = 256;
dim3 block(tpb, 1, 1);
const unsigned nblk = 1024;
dim3 grid(nblk, 1, 1);
oddEvenSort::OddEvenSortTask oes;
oddEvenSort::DataInterface oesd;
oesd.idata = idata;
oesd.nodes = (int2 *)nodes;
int lpb = 1 + (numElements>>10);
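// launch configuration: 1024 blocks x 256 threads with 16320 bytes of dynamic
// shared memory per block (presumably sized to stay just under the classic
// 16 KB shared-memory limit)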
hipLaunchKernelGGL(( quickSort_test_kernel<simpleQueue::SimpleQueue, oddEvenSort::OddEvenSortTask, oddEvenSort::DataInterface, 24>), dim3(grid), dim3(block), 16320, 0, queue,
oes,
oesd,
qi,
workBlocks,
loopbuf,
(int4 *)headtailperloop,
lpb,
255);
aphid::CudaBase::CheckCudaError("q sort");
}
}
| d78ea1d93293a00d0ed346e940eb8c04706c8de6.cu | /*
* sort node count and range
*
* 1 0,13
* 2 0,6 7,13
* 4 0,3 4,6 7,10 11,13
* 8 0,1 2,3 4,5 5,6 7,8 9,10 11,12 13,13
*
 * each node consumes one task and spawns two, one or zero new tasks
* level n needs max 2^n nodes
* each level has node count and range updated by atomic
*
* taskIn 1 0,13 |
* taskOut 3 0,13 | 0,6 7,13
*
* taskIn 3 0,13 0,6 7,13 |
* taskOut 7 0,13 0,6 7,13 | 0,3 4,6 7,10 11,13
*
* taskIn 7 0,13 0,6 7,13 0,3 4,6 7,10 11,13 |
* taskOut 15 0,13 0,6 7,13 0,3 4,6 7,10 11,13 | 0,1 2,3 4,5 5,6 7,8 9,10 11,12 13,13
*
 * when taskIn.tbid > taskOut.qtail, no more work is available
 * no more work being available doesn't mean the job is done:
 * more tasks could still be added, hence the need for workDoneCounter
 * once workDoneCounter > taskOut.qtail, there is nothing left to do
 * initially task out is a copy of task in
 * new tasks will be added to the end
 * each time a task is done, first enqueue() if needed,
* then check if work is done, if not then dequeue()
*
*/
#include "quickSort.cuh"
#include "OddEvenSort.cuh"
#include "bvh_common.h"
#include "CudaBase.h"
extern "C" {
void cu_testQuickSort(void * q,
uint * idata,
uint * nodes,
int * elements,
SimpleQueueInterface * qi,
uint numElements,
uint * workBlocks,
uint * loopbuf,
int * headtailperloop)
{
//cudaDeviceSynchronize();
simpleQueue::SimpleQueue * queue = (simpleQueue::SimpleQueue *)q;
simpleQueue::init_kernel<<< 1,32 >>>(queue, 1, elements);
const int tpb = 256;
dim3 block(tpb, 1, 1);
const unsigned nblk = 1024;
dim3 grid(nblk, 1, 1);
oddEvenSort::OddEvenSortTask oes;
oddEvenSort::DataInterface oesd;
oesd.idata = idata;
oesd.nodes = (int2 *)nodes;
int lpb = 1 + (numElements>>10);
quickSort_test_kernel<simpleQueue::SimpleQueue, oddEvenSort::OddEvenSortTask, oddEvenSort::DataInterface, 24><<<grid, block, 16320>>>(queue,
oes,
oesd,
qi,
workBlocks,
loopbuf,
(int4 *)headtailperloop,
lpb,
255);
aphid::CudaBase::CheckCudaError("q sort");
}
}
|
193a749e87c14cb66a367dbbd5e0f2a1fe7207da.hip | // !!! This is a file automatically generated by hipify!!!
#include "dev_ptr.cuh"
#include "device_error.cuh"
#include "hip/hip_runtime.h"
//template<class T>
//DevPtr<T>::DevPtr()
//{
//
//}
//template<class T>
//DevPtr<T>::DevPtr(const DevPtr& devPtr)
//{
// hipFree(_data);
// _data = devPtr.Get();
// _size = devPtr.Size();
//}
//
//template<class T>
//DevPtr<T>& DevPtr<T>::operator=(DevPtr<T>& devPtr)
//{
// hipFree(_data);
// _data = devPtr.Get();
// _size = devPtr.Size();
//
// return this;
//}
//
//template<class T>
//T& DevPtr<T>::operator[](int i)
//{
// return _data[i];
//}
//
//template<class T>
//const T& DevPtr<T>::operator[](int i) const
//{
// return _data[i];
//}
//
//template<class T>
//void DevPtr<T>::CopyFromHost(const T* data)
//{
// hipError_t result = hipMemcpy(static_cast<void*>(_data),
// static_cast<void*>(data),
// _size * sizeof(T),
// hipMemcpyKind::hipMemcpyHostToDevice);
// if(result != hipError_t::hipSuccess)
// throw CopyError();
//}
//
//template<class T>
//void DevPtr<T>::CopyToHost(T* data)
//{
//    hipError_t result = hipMemcpy((void*)data,
//                                    (const void*)_data,
//                                    _size * sizeof(T),
//                                    hipMemcpyKind::hipMemcpyDeviceToHost);
// if(result != hipError_t::hipSuccess)
// throw CopyError();
//}
| 193a749e87c14cb66a367dbbd5e0f2a1fe7207da.cu | #include "dev_ptr.cuh"
#include "device_error.cuh"
#include "cuda_runtime.h"
//template<class T>
//DevPtr<T>::DevPtr()
//{
//
//}
//template<class T>
//DevPtr<T>::DevPtr(const DevPtr& devPtr)
//{
// cudaFree(_data);
// _data = devPtr.Get();
// _size = devPtr.Size();
//}
//
//template<class T>
//DevPtr<T>& DevPtr<T>::operator=(DevPtr<T>& devPtr)
//{
// cudaFree(_data);
// _data = devPtr.Get();
// _size = devPtr.Size();
//
// return this;
//}
//
//template<class T>
//T& DevPtr<T>::operator[](int i)
//{
// return _data[i];
//}
//
//template<class T>
//const T& DevPtr<T>::operator[](int i) const
//{
// return _data[i];
//}
//
//template<class T>
//void DevPtr<T>::CopyFromHost(const T* data)
//{
// cudaError_t result = cudaMemcpy(static_cast<void*>(_data),
// static_cast<void*>(data),
// _size * sizeof(T),
// cudaMemcpyKind::cudaMemcpyHostToDevice);
// if(result != cudaError_t::cudaSuccess)
// throw CopyError();
//}
//
//template<class T>
//void DevPtr<T>::CopyToHost(T* data)
//{
//    cudaError_t result = cudaMemcpy((void*)data,
//                                    (const void*)_data,
//                                    _size * sizeof(T),
//                                    cudaMemcpyKind::cudaMemcpyDeviceToHost);
// if(result != cudaError_t::cudaSuccess)
// throw CopyError();
//}
|
ad605869064e9365f11e0696901b429264384ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CHUNK_SIZE 16
static __global__ void _lookup(kd_tree *tree, float *query, int dimensions, int *leaves);
void lookup(float *query, int dimensions, int nQuery, int *leaves) {
// round up nQuery to the nearest multiple of CHUNK_SIZE
int roundedNQuery = ((nQuery-1)/CHUNK_SIZE + 1)*CHUNK_SIZE;
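// e.g. nQuery = 50 with CHUNK_SIZE = 16 gives roundedNQuery = 64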
// allocate space for the query vector
float *deviceQuery;
CUDA_SAFE_CALL(hipMalloc((void **)&deviceQuery, roundedNQuery*dimensions*sizeof(float)));
CUDA_SAFE_CALL(hipMemcpy(deviceQuery, query, nQuery*dimensions*sizeof(float), hipMemcpyHostToDevice));
// allocate space for the response vector
int *deviceLeaves;
CUDA_SAFE_CALL(hipMalloc((void **)&deviceLeaves, roundedNQuery*sizeof(int)));
dim3 threadBlock(CHUNK_SIZE, 1);
dim3 blockGrid((nQuery-1)/CHUNK_SIZE+1, 1);
hipLaunchKernelGGL(( _lookup), dim3(blockGrid), dim3(threadBlock), 0, 0, tree, deviceQuery, dimensions, deviceLeaves);
// copy back response
CUDA_SAFE_CALL(hipMemcpy(leaves, deviceLeaves, nQuery*sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(deviceQuery));
CUDA_SAFE_CALL(hipFree(deviceLeaves));
fflush(stdout);
}
static __global__ void _lookup(kd_tree *tree, float *query, int dimensions, int *leaves) {
float *queryBlock = query + blockIdx.x * CHUNK_SIZE * dimensions;
// put my vector in shared memory
__shared__ float localQuery[CHUNK_SIZE*MAX_DIMENSIONS];
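// all CHUNK_SIZE threads of the block cooperatively stage this block's
// queries into shared memory, each thread striding by CHUNK_SIZE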
for (int i = threadIdx.x; i < dimensions*CHUNK_SIZE; i += CHUNK_SIZE) {
localQuery[i] = queryBlock[i];
}
float *myQuery = localQuery + threadIdx.x * dimensions;
__syncthreads();
// ok, we have my query, we're good to go
int nodeIdx = 0;
node *nodeArray = tree->nodeArray;
do {
node n = nodeArray[nodeIdx];
DEBUG(printf("Retrieved a node: %i, %i, %f, %i, %i\n", nodeIdx, n.cut_dim, n.cut_val, n.left, n.right));
DEBUG(printf("Query %i with value %f\n",
blockIdx.x * CHUNK_SIZE + threadIdx.x, myQuery[n.cut_dim]));
if (myQuery[n.cut_dim] <= n.cut_val) {
DEBUG(printf("Query %i splitting left to node %i\n", blockIdx.x * CHUNK_SIZE + threadIdx.x, n.left));
nodeIdx = n.left;
} else {
DEBUG(printf("Query %i splitting right to node %i\n", blockIdx.x * CHUNK_SIZE + threadIdx.x, n.right));
nodeIdx = n.right;
}
} while (nodeIdx > 0);
// we're at a leaf node, put its ID in the leaves array
leaves[blockIdx.x * CHUNK_SIZE + threadIdx.x] = -nodeIdx;
}
| ad605869064e9365f11e0696901b429264384ba0.cu |
#define CHUNK_SIZE 16
static __global__ void _lookup(kd_tree *tree, float *query, int dimensions, int *leaves);
void lookup(float *query, int dimensions, int nQuery, int *leaves) {
// round up nQuery to the nearest multiple of CHUNK_SIZE
int roundedNQuery = ((nQuery-1)/CHUNK_SIZE + 1)*CHUNK_SIZE;
// allocate space for the query vector
float *deviceQuery;
CUDA_SAFE_CALL(cudaMalloc((void **)&deviceQuery, roundedNQuery*dimensions*sizeof(float)));
CUDA_SAFE_CALL(cudaMemcpy(deviceQuery, query, nQuery*dimensions*sizeof(float), cudaMemcpyHostToDevice));
// allocate space for the response vector
int *deviceLeaves;
CUDA_SAFE_CALL(cudaMalloc((void **)&deviceLeaves, roundedNQuery*sizeof(int)));
dim3 threadBlock(CHUNK_SIZE, 1);
dim3 blockGrid((nQuery-1)/CHUNK_SIZE+1, 1);
_lookup<<<blockGrid, threadBlock>>>(tree, deviceQuery, dimensions, deviceLeaves);
// copy back response
CUDA_SAFE_CALL(cudaMemcpy(leaves, deviceLeaves, nQuery*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(deviceQuery));
CUDA_SAFE_CALL(cudaFree(deviceLeaves));
fflush(stdout);
}
static __global__ void _lookup(kd_tree *tree, float *query, int dimensions, int *leaves) {
float *queryBlock = query + blockIdx.x * CHUNK_SIZE * dimensions;
// put my vector in shared memory
__shared__ float localQuery[CHUNK_SIZE*MAX_DIMENSIONS];
for (int i = threadIdx.x; i < dimensions*CHUNK_SIZE; i += CHUNK_SIZE) {
localQuery[i] = queryBlock[i];
}
float *myQuery = localQuery + threadIdx.x * dimensions;
__syncthreads();
// ok, we have my query, we're good to go
int nodeIdx = 0;
node *nodeArray = tree->nodeArray;
do {
node n = nodeArray[nodeIdx];
DEBUG(printf("Retrieved a node: %i, %i, %f, %i, %i\n", nodeIdx, n.cut_dim, n.cut_val, n.left, n.right));
DEBUG(printf("Query %i with value %f\n",
blockIdx.x * CHUNK_SIZE + threadIdx.x, myQuery[n.cut_dim]));
if (myQuery[n.cut_dim] <= n.cut_val) {
DEBUG(printf("Query %i splitting left to node %i\n", blockIdx.x * CHUNK_SIZE + threadIdx.x, n.left));
nodeIdx = n.left;
} else {
DEBUG(printf("Query %i splitting right to node %i\n", blockIdx.x * CHUNK_SIZE + threadIdx.x, n.right));
nodeIdx = n.right;
}
} while (nodeIdx > 0);
// we're at a leaf node, put its ID in the leaves array
leaves[blockIdx.x * CHUNK_SIZE + threadIdx.x] = -nodeIdx;
}
|
5ce19cbed606d4d5c240ed870bbd9a98b957ac3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
#include "utilities.cuh"
// Sorts row sums and selects new row set
__global__ void sortSelectRows_kernel(const double *Sums, double *RowSet,
const uint16_t* Sizes,
uint32_t *RowChanges, uint16_t MatrixHeight,
const uint16_t NextPowerOfTwoAfterHeight,
const uint32_t TotalInvocations)
{
extern __shared__ double sums[];
const uint16_t GlobalID = blockIdx.x;
uint16_t* permutation = (uint16_t*) &sums[NextPowerOfTwoAfterHeight];
uint16_t biclusterHeight = Sizes[GlobalID * 2 + 1];
loadRowsSums(Sums, sums, permutation, MatrixHeight,
NextPowerOfTwoAfterHeight);
sortSums(sums, permutation, NextPowerOfTwoAfterHeight);
__syncthreads();
// writing new row set and updating count of changed elements in it
double flag = 1.0 * int(threadIdx.x < biclusterHeight);
uint32_t index = permutation[threadIdx.x] * TotalInvocations + GlobalID;
uint8_t changed = uint8_t(RowSet[index] != flag);
RowSet[index] = flag;
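// the kernel is launched with NextPowerOfTwoAfterHeight/2 threads per block
// (see sortSelectRows below), so each thread also handles the element at
// threadIdx.x + NextPowerOfTwoAfterHeight/2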
index = threadIdx.x + NextPowerOfTwoAfterHeight / 2;
if (index < MatrixHeight)
{
flag = 1.0 * int(index < biclusterHeight);
index = permutation[index] * TotalInvocations + GlobalID;
changed += uint8_t(RowSet[index] != flag);
RowSet[index] = flag;
}
atomicAdd(&RowChanges[GlobalID], changed);
}
void sortSelectRows(LasInstanceMemory& memory, uint32_t activeInvocations)
{
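// dynamic shared memory holds the double sums[NextPowerOfTwoAfterHeight]
// array followed by the uint16_t permutation array, matching the layout set
// up at the top of the kernel above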
hipLaunchKernelGGL(( sortSelectRows_kernel), dim3(activeInvocations), dim3(memory.NextPowerOfTwoAfterHeight / 2),
sizeof(double) * memory.NextPowerOfTwoAfterHeight +
sizeof(uint16_t) * memory.NextPowerOfTwoAfterHeight , 0,
memory.deviceSums.begin(),
memory.deviceRowSet.begin(),
memory.deviceSizes.begin(),
memory.deviceRowChanges.begin(),
memory.Height,
memory.NextPowerOfTwoAfterHeight,
memory.InvocationsPerBicluster);
}
| 5ce19cbed606d4d5c240ed870bbd9a98b957ac3b.cu | #include "kernels.h"
#include "utilities.cuh"
// Sorts row sums and selects new row set
__global__ void sortSelectRows_kernel(const double *Sums, double *RowSet,
const uint16_t* Sizes,
uint32_t *RowChanges, uint16_t MatrixHeight,
const uint16_t NextPowerOfTwoAfterHeight,
const uint32_t TotalInvocations)
{
extern __shared__ double sums[];
const uint16_t GlobalID = blockIdx.x;
uint16_t* permutation = (uint16_t*) &sums[NextPowerOfTwoAfterHeight];
uint16_t biclusterHeight = Sizes[GlobalID * 2 + 1];
loadRowsSums(Sums, sums, permutation, MatrixHeight,
NextPowerOfTwoAfterHeight);
sortSums(sums, permutation, NextPowerOfTwoAfterHeight);
__syncthreads();
// writing new row set and updating count of changed elements in it
double flag = 1.0 * int(threadIdx.x < biclusterHeight);
uint32_t index = permutation[threadIdx.x] * TotalInvocations + GlobalID;
uint8_t changed = uint8_t(RowSet[index] != flag);
RowSet[index] = flag;
index = threadIdx.x + NextPowerOfTwoAfterHeight / 2;
if (index < MatrixHeight)
{
flag = 1.0 * int(index < biclusterHeight);
index = permutation[index] * TotalInvocations + GlobalID;
changed += uint8_t(RowSet[index] != flag);
RowSet[index] = flag;
}
atomicAdd(&RowChanges[GlobalID], changed);
}
void sortSelectRows(LasInstanceMemory& memory, uint32_t activeInvocations)
{
sortSelectRows_kernel<<< activeInvocations, memory.NextPowerOfTwoAfterHeight / 2,
sizeof(double) * memory.NextPowerOfTwoAfterHeight +
sizeof(uint16_t) * memory.NextPowerOfTwoAfterHeight >>>(
memory.deviceSums.begin(),
memory.deviceRowSet.begin(),
memory.deviceSizes.begin(),
memory.deviceRowChanges.begin(),
memory.Height,
memory.NextPowerOfTwoAfterHeight,
memory.InvocationsPerBicluster);
}
|
b1bdc5d89d56e56a0317f59dda42f469b8fd9404.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef PRINT_SUFFIX
#define PRINT_SUFFIX "<find_cudadevices>"
#endif
#define MY_CUDA_VER (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__)
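// encodes the toolkit version as major*100 + minor, e.g. CUDA 9.2 -> 902 and
// CUDA 10.0 -> 1000, matching the version checks further down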
inline void check(hipError_t result)
{
if (result)
{
fprintf(stderr, PRINT_SUFFIX "%s (%s)", hipGetErrorName(result), hipGetErrorString(result));
hipDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(0);
}
}
inline void print_value(size_t value)
{
// in case we don't have '%zu'
printf("%llu", (unsigned long long)value);
}
inline void print_value(bool value)
{
printf(value ? "true" : "false");
}
inline void print_value(int value)
{
printf("%d", value);
}
template <typename T, size_t len>
inline void print_value(const T (&value)[len])
{
printf("(");
for (size_t i = 0; i < len - 1; i++)
{
print_value(value[i]);
printf(", ");
}
print_value(value[len - 1]);
printf(")");
}
inline void print_value(unsigned int value)
{
printf("%u", value);
}
inline void print_value(const void *value)
{
printf("\"%s\"", (const char *)value);
}
template <size_t len>
inline void print_value(const char (&value)[len])
{
printf("\"");
for (size_t i = 0; i < len; i++)
printf("%02hhx", value[i]);
printf("\"");
}
template <>
inline void print_value<16>(const char (&value)[16])
{
// specialized for uuid
printf("\"%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx\"",
value[0], value[1], value[2], value[3],
value[4], value[5], value[6], value[7],
value[8], value[9], value[10], value[11],
value[12], value[13], value[14], value[15]);
}
#if MY_CUDA_VER >= 1000
inline void print_value(const hipUUID &value)
{
print_value(value.bytes);
}
#endif
template <typename T>
inline void print_property(const char *name, const T &value)
{
printf(PRINT_SUFFIX " %s = ", name);
print_value(value);
printf("\n");
}
inline void print_device(int id)
{
hipDeviceProp_t deviceProp;
check(hipGetDeviceProperties(&deviceProp, id));
#define PRINT_PROPERTY(name) print_property(#name, deviceProp.name)
#define PRINT_BOOL_PROPERTY(name) print_property(#name, static_cast<bool>(deviceProp.name))
#define PRINT_STR_PROPERTY(name) print_property(#name, static_cast<const void *>(deviceProp.name))
// cuda 8.0
PRINT_STR_PROPERTY(name);
PRINT_PROPERTY(totalGlobalMem);
PRINT_PROPERTY(sharedMemPerBlock);
PRINT_PROPERTY(regsPerBlock);
PRINT_PROPERTY(warpSize);
PRINT_PROPERTY(memPitch);
PRINT_PROPERTY(maxThreadsPerBlock);
PRINT_PROPERTY(maxThreadsDim);
PRINT_PROPERTY(maxGridSize);
PRINT_PROPERTY(clockRate);
PRINT_PROPERTY(totalConstMem);
PRINT_PROPERTY(major);
PRINT_PROPERTY(minor);
PRINT_PROPERTY(textureAlignment);
PRINT_PROPERTY(texturePitchAlignment);
PRINT_BOOL_PROPERTY(deviceOverlap);
PRINT_PROPERTY(multiProcessorCount);
PRINT_BOOL_PROPERTY(kernelExecTimeoutEnabled);
PRINT_BOOL_PROPERTY(integrated);
PRINT_BOOL_PROPERTY(canMapHostMemory);
PRINT_PROPERTY(computeMode);
PRINT_PROPERTY(maxTexture1D);
PRINT_PROPERTY(maxTexture1DMipmap);
PRINT_PROPERTY(maxTexture1DLinear);
PRINT_PROPERTY(maxTexture2D);
PRINT_PROPERTY(maxTexture2DMipmap);
PRINT_PROPERTY(maxTexture2DLinear);
PRINT_PROPERTY(maxTexture2DGather);
PRINT_PROPERTY(maxTexture3D);
PRINT_PROPERTY(maxTexture3DAlt);
PRINT_PROPERTY(maxTextureCubemap);
PRINT_PROPERTY(maxTexture1DLayered);
PRINT_PROPERTY(maxTexture2DLayered);
PRINT_PROPERTY(maxTextureCubemapLayered);
PRINT_PROPERTY(maxSurface1D);
PRINT_PROPERTY(maxSurface2D);
PRINT_PROPERTY(maxSurface3D);
PRINT_PROPERTY(maxSurface1DLayered);
PRINT_PROPERTY(maxSurface2DLayered);
PRINT_PROPERTY(maxSurfaceCubemap);
PRINT_PROPERTY(maxSurfaceCubemapLayered);
PRINT_PROPERTY(surfaceAlignment);
PRINT_BOOL_PROPERTY(concurrentKernels);
PRINT_BOOL_PROPERTY(ECCEnabled);
PRINT_PROPERTY(pciBusID);
PRINT_PROPERTY(pciDeviceID);
PRINT_PROPERTY(pciDomainID);
PRINT_BOOL_PROPERTY(tccDriver);
PRINT_PROPERTY(asyncEngineCount);
PRINT_BOOL_PROPERTY(unifiedAddressing);
PRINT_PROPERTY(memoryClockRate);
PRINT_PROPERTY(memoryBusWidth);
PRINT_PROPERTY(l2CacheSize);
PRINT_PROPERTY(maxThreadsPerMultiProcessor);
PRINT_BOOL_PROPERTY(streamPrioritiesSupported);
PRINT_BOOL_PROPERTY(globalL1CacheSupported);
PRINT_BOOL_PROPERTY(localL1CacheSupported);
PRINT_PROPERTY(sharedMemPerMultiprocessor);
PRINT_PROPERTY(regsPerMultiprocessor);
PRINT_BOOL_PROPERTY(isMultiGpuBoard);
PRINT_PROPERTY(multiGpuBoardGroupID);
PRINT_PROPERTY(singleToDoublePrecisionPerfRatio);
PRINT_BOOL_PROPERTY(pageableMemoryAccess);
PRINT_BOOL_PROPERTY(concurrentManagedAccess);
PRINT_BOOL_PROPERTY(managedMemory);
#if MY_CUDA_VER >= 900
// Added in cuda 9.0
PRINT_BOOL_PROPERTY(computePreemptionSupported);
PRINT_BOOL_PROPERTY(canUseHostPointerForRegisteredMem);
PRINT_BOOL_PROPERTY(cooperativeLaunch);
PRINT_BOOL_PROPERTY(cooperativeMultiDeviceLaunch);
PRINT_PROPERTY(sharedMemPerBlockOptin);
#endif
#if MY_CUDA_VER >= 902
// Added in cuda 9.2
PRINT_BOOL_PROPERTY(pageableMemoryAccessUsesHostPageTables);
PRINT_BOOL_PROPERTY(directManagedMemAccessFromHost);
#endif
#if MY_CUDA_VER >= 1000
// Added in cuda 10.0
PRINT_PROPERTY(uuid);
PRINT_PROPERTY(luid);
PRINT_PROPERTY(luidDeviceNodeMask);
#endif
}
int main(int argc, char *argv[])
{
printf("\n");
fprintf(stderr, "\n");
int count = 0;
check(hipGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
printf(PRINT_SUFFIX "DEVICE #%d\n", i);
print_device(i);
}
return 0;
}
| b1bdc5d89d56e56a0317f59dda42f469b8fd9404.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef PRINT_SUFFIX
#define PRINT_SUFFIX "<find_cudadevices>"
#endif
#define MY_CUDA_VER (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__)
inline void check(cudaError_t result)
{
if (result)
{
fprintf(stderr, PRINT_SUFFIX "%s (%s)", cudaGetErrorName(result), cudaGetErrorString(result));
cudaDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(0);
}
}
inline void print_value(size_t value)
{
// in case we don't have '%zu'
printf("%llu", (unsigned long long)value);
}
inline void print_value(bool value)
{
printf(value ? "true" : "false");
}
inline void print_value(int value)
{
printf("%d", value);
}
template <typename T, size_t len>
inline void print_value(const T (&value)[len])
{
printf("(");
for (size_t i = 0; i < len - 1; i++)
{
print_value(value[i]);
printf(", ");
}
print_value(value[len - 1]);
printf(")");
}
inline void print_value(unsigned int value)
{
printf("%u", value);
}
inline void print_value(const void *value)
{
printf("\"%s\"", (const char *)value);
}
template <size_t len>
inline void print_value(const char (&value)[len])
{
printf("\"");
for (size_t i = 0; i < len; i++)
printf("%02hhx", value[i]);
printf("\"");
}
template <>
inline void print_value<16>(const char (&value)[16])
{
// specialized for uuid
printf("\"%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx\"",
value[0], value[1], value[2], value[3],
value[4], value[5], value[6], value[7],
value[8], value[9], value[10], value[11],
value[12], value[13], value[14], value[15]);
}
#if MY_CUDA_VER >= 1000
inline void print_value(const cudaUUID_t &value)
{
print_value(value.bytes);
}
#endif
template <typename T>
inline void print_property(const char *name, const T &value)
{
printf(PRINT_SUFFIX " %s = ", name);
print_value(value);
printf("\n");
}
inline void print_device(int id)
{
cudaDeviceProp deviceProp;
check(cudaGetDeviceProperties(&deviceProp, id));
#define PRINT_PROPERTY(name) print_property(#name, deviceProp.name)
#define PRINT_BOOL_PROPERTY(name) print_property(#name, static_cast<bool>(deviceProp.name))
#define PRINT_STR_PROPERTY(name) print_property(#name, static_cast<const void *>(deviceProp.name))
// cuda 8.0
PRINT_STR_PROPERTY(name);
PRINT_PROPERTY(totalGlobalMem);
PRINT_PROPERTY(sharedMemPerBlock);
PRINT_PROPERTY(regsPerBlock);
PRINT_PROPERTY(warpSize);
PRINT_PROPERTY(memPitch);
PRINT_PROPERTY(maxThreadsPerBlock);
PRINT_PROPERTY(maxThreadsDim);
PRINT_PROPERTY(maxGridSize);
PRINT_PROPERTY(clockRate);
PRINT_PROPERTY(totalConstMem);
PRINT_PROPERTY(major);
PRINT_PROPERTY(minor);
PRINT_PROPERTY(textureAlignment);
PRINT_PROPERTY(texturePitchAlignment);
PRINT_BOOL_PROPERTY(deviceOverlap);
PRINT_PROPERTY(multiProcessorCount);
PRINT_BOOL_PROPERTY(kernelExecTimeoutEnabled);
PRINT_BOOL_PROPERTY(integrated);
PRINT_BOOL_PROPERTY(canMapHostMemory);
PRINT_PROPERTY(computeMode);
PRINT_PROPERTY(maxTexture1D);
PRINT_PROPERTY(maxTexture1DMipmap);
PRINT_PROPERTY(maxTexture1DLinear);
PRINT_PROPERTY(maxTexture2D);
PRINT_PROPERTY(maxTexture2DMipmap);
PRINT_PROPERTY(maxTexture2DLinear);
PRINT_PROPERTY(maxTexture2DGather);
PRINT_PROPERTY(maxTexture3D);
PRINT_PROPERTY(maxTexture3DAlt);
PRINT_PROPERTY(maxTextureCubemap);
PRINT_PROPERTY(maxTexture1DLayered);
PRINT_PROPERTY(maxTexture2DLayered);
PRINT_PROPERTY(maxTextureCubemapLayered);
PRINT_PROPERTY(maxSurface1D);
PRINT_PROPERTY(maxSurface2D);
PRINT_PROPERTY(maxSurface3D);
PRINT_PROPERTY(maxSurface1DLayered);
PRINT_PROPERTY(maxSurface2DLayered);
PRINT_PROPERTY(maxSurfaceCubemap);
PRINT_PROPERTY(maxSurfaceCubemapLayered);
PRINT_PROPERTY(surfaceAlignment);
PRINT_BOOL_PROPERTY(concurrentKernels);
PRINT_BOOL_PROPERTY(ECCEnabled);
PRINT_PROPERTY(pciBusID);
PRINT_PROPERTY(pciDeviceID);
PRINT_PROPERTY(pciDomainID);
PRINT_BOOL_PROPERTY(tccDriver);
PRINT_PROPERTY(asyncEngineCount);
PRINT_BOOL_PROPERTY(unifiedAddressing);
PRINT_PROPERTY(memoryClockRate);
PRINT_PROPERTY(memoryBusWidth);
PRINT_PROPERTY(l2CacheSize);
PRINT_PROPERTY(maxThreadsPerMultiProcessor);
PRINT_BOOL_PROPERTY(streamPrioritiesSupported);
PRINT_BOOL_PROPERTY(globalL1CacheSupported);
PRINT_BOOL_PROPERTY(localL1CacheSupported);
PRINT_PROPERTY(sharedMemPerMultiprocessor);
PRINT_PROPERTY(regsPerMultiprocessor);
PRINT_BOOL_PROPERTY(isMultiGpuBoard);
PRINT_PROPERTY(multiGpuBoardGroupID);
PRINT_PROPERTY(singleToDoublePrecisionPerfRatio);
PRINT_BOOL_PROPERTY(pageableMemoryAccess);
PRINT_BOOL_PROPERTY(concurrentManagedAccess);
PRINT_BOOL_PROPERTY(managedMemory);
#if MY_CUDA_VER >= 900
// Added in cuda 9.0
PRINT_BOOL_PROPERTY(computePreemptionSupported);
PRINT_BOOL_PROPERTY(canUseHostPointerForRegisteredMem);
PRINT_BOOL_PROPERTY(cooperativeLaunch);
PRINT_BOOL_PROPERTY(cooperativeMultiDeviceLaunch);
PRINT_PROPERTY(sharedMemPerBlockOptin);
#endif
#if MY_CUDA_VER >= 902
// Added in cuda 9.2
PRINT_BOOL_PROPERTY(pageableMemoryAccessUsesHostPageTables);
PRINT_BOOL_PROPERTY(directManagedMemAccessFromHost);
#endif
#if MY_CUDA_VER >= 1000
// Added in cuda 10.0
PRINT_PROPERTY(uuid);
PRINT_PROPERTY(luid);
PRINT_PROPERTY(luidDeviceNodeMask);
#endif
}
int main(int argc, char *argv[])
{
printf("\n");
fprintf(stderr, "\n");
int count = 0;
check(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
printf(PRINT_SUFFIX "DEVICE #%d\n", i);
print_device(i);
}
return 0;
}
|
f40f6a626b499401411b2b0fd6685fb2322273eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* cudasort.cu */
/* S. Engblom and A. Goude 2011-10-21 */
#ifndef M_PI
#define M_PI 3.1415926535897932384626433
#endif
#ifndef SWAP
#define SWAP(x,y,tmp) (tmp)=(x);(x)=(y);(y)=(tmp);
#endif
#ifndef C_CODE /*if in C file mode, redefine all mex functions to c functions*/
#include "mex.h"
#include "matrix.h"
#endif
#include "cudaeval.h"
#include "cudasort.h"
__host__ __device__ int imin(int x, int y);
__host__ __device__ int imax(int x, int y);
// include all CUDA kernels
#include "cudasortkernels.h"
#if defined(CUDASUPPORT) && defined(CUDASORT)
// prototypes
int checkpartitioning(const SORT_REAL* originalpositions,
const SORT_REAL *originalpositions2,
SORT_REAL *cudapositions,
SORT_REAL *cudapositions2,
SORT_REAL *splitpoints,
int *cudaindices,int *oldcudaindices,
int count,int *cudallimits,int *cudarlimits,
int splitcount,
int *cudaysplit,
int printsuccess,int printfailure,
SORT_DCMPLX* z,SORT_DCMPLX *d);
void multiblockpartition(SORT_REAL *positions,SORT_REAL *positions2,
int *indices,int *newindices,
SORT_REAL *splitpoints,
int *llimits,int *rlimits,int *newllimits,
int *newrlimits,int *tmpllimits,int *tmprlimits,
int splitcount,int count,
SORT_REAL *newpositions,SORT_REAL *newpositions2,
int *ysplit,int *lrcount,
int *threesplitvector,int *outputvector,int N
SORTLIMITSTRING DEBUGVECTORSTRING);
#ifdef SORTLIMIT
void calculatesortlimits(float *distlimit,float *disttarget,float *sortlimit,float* input,float currentlevel,float maxlevel);
#endif
double calculateetalimits(double *eta,double currentlevel,double maxlevel);
/*------------------------------------------------------------------------*/
//this function copies data for CUDA_perform_partitioning
//put in its own function for timing issues only
void CUDA_copy_vectors(cudavariables *GPUvars,
int N,int NE,
const double* zr,const double *zi,
const double *er,const double *ei)
{
if(NE) {
cudasafeMalloc((void**)&GPUvars->er,NE*sizeof(double));
cudasafeMalloc((void**)&GPUvars->ei,NE*sizeof(double));
cudasafeMalloc((void**)&GPUvars->jx,NE*sizeof(int));
cudasafe( hipMemcpy(GPUvars->er, er, NE*sizeof(double), hipMemcpyHostToDevice), "hipMemcpy ertmp" );
cudasafe( hipMemcpy(GPUvars->ei, ei, NE*sizeof(double), hipMemcpyHostToDevice), "hipMemcpy eitmp" );
}
else {
GPUvars->er=NULL;
GPUvars->ei=NULL;
}
cudasafeMalloc((void**)&GPUvars->zr,N*sizeof(double));
cudasafeMalloc((void**)&GPUvars->zi,N*sizeof(double));
cudasafeMalloc((void**)&GPUvars->ix,N*sizeof(int));
cudasafe( hipMemcpy(GPUvars->zr, zr, N*sizeof(double), hipMemcpyHostToDevice), "hipMemcpy zrtmp" );
cudasafe( hipMemcpy(GPUvars->zi, zi, N*sizeof(double), hipMemcpyHostToDevice), "hipMemcpy zitmp" );
}
/*------------------------------------------------------------------------*/
// this is the main function that performs the partitioning. It will
// allocate the GPU variables, which will be left on the GPU afterwards
// GPUvars - variables for the GPU; N,NE - number of potential/evaluation
// points; nlevels - the number of times to split the boxes into four
// smaller ones; zr,zi,er,ei - coordinates of potential and evaluation
// points; eta - for special split with eta-criterion
void CUDA_perform_partitioning(cudavariables *GPUvars,
int N,int NE,int nlevels VALIDATEPARTITIONINGSTRING1)
{
SORT_REAL *splitpoints;
int* rlimits;
int* newllimits;
int* newrlimits;
int* tmpllimits;
int* tmprlimits;
int* newindices;
int* ysplit;
int* lrcount;
int* outputvector;
int* splitside;
SORT_REAL* xpositionstmp;
SORT_REAL* ypositionstmp;
SORT_REAL* zdmaxpositions;
SORT_REAL* zdminpositions;
SORT_REAL* zdmaxpositions2;
SORT_REAL* zdminpositions2;
#ifdef SORTLIMIT
SORT_REAL* leftlimitvalues;
SORT_REAL* rightlimitvalues;
float distlimit;
float disttarget;
float sortlimit;
#endif
double eta;
int* zdllimits;
int* zdrlimits;
int* threesplit;
SORT_DCMPLX *ztmp;
SORT_DCMPLX *dtmp;
SORT_DCMPLX *z,*d;
SORT_REAL *dabs;
int* rlimitsNE;
int* newllimitsNE;
int* newrlimitsNE;
int* lrcountNE;
int* newindicesNE;
SORT_REAL* xpositionstmpNE;
SORT_REAL* ypositionstmpNE;
int ptrinitvector[2];
ptrinitvector[0]=0;
int threadcount=ZDMAXTHREADS;
#ifdef FLOATSORT
float *fer=NULL,*fei=NULL,*fzr=NULL,*fzi=NULL;
#else
double *fer,*fei,*fzr,*fzi;
#endif
checkcudaerror("Partitioning start\n");
//init
#ifdef CUDADEBUGVECTOR
cudasafe(cudaMallocDebug((void**)&GPUvars->debugvector,imax((NE<N?N:NE),2000)*sizeof(double)),"hipMalloc");
cudasafe(hipMemset(GPUvars->debugvector,0,imax((NE<N?N:NE),2000)*sizeof(double)),"hipMemset");
#endif
//allocate positions and move them to gpu (moved to function above)
#ifdef FLOATSORT
if(NE) {
cudasafeMalloc((void**)&fer,NE*sizeof(float));
cudasafeMalloc((void**)&fei,NE*sizeof(float));
int blockcountcf=imin((NE+4*CONVERTTOFLOATMAXTHREADS-1)/(4*CONVERTTOFLOATMAXTHREADS),CONVERTTOFLOATMAXBLOCKS);
checkcudaerror("before converttofloatNE\n");
hipLaunchKernelGGL(( converttofloat), dim3(blockcountcf),dim3(CONVERTTOFLOATMAXTHREADS), 0, 0, fer,fei,GPUvars->er,GPUvars->ei,NE);
CHECKCUDAERROR
checkcudaerror("converttofloatNE\n");
}
cudasafeMalloc((void**)&fzr,N*sizeof(float));
cudasafeMalloc((void**)&fzi,N*sizeof(float));
int blockcountcf=imin((N+4*CONVERTTOFLOATMAXTHREADS-1)/(4*CONVERTTOFLOATMAXTHREADS),CONVERTTOFLOATMAXBLOCKS);
hipLaunchKernelGGL(( converttofloat), dim3(blockcountcf),dim3(CONVERTTOFLOATMAXTHREADS), 0, 0, fzr,fzi,GPUvars->zr,GPUvars->zi,N);
CHECKCUDAERROR
checkcudaerror("converttofloat\n");
#else
fer=GPUvars->er;
fei=GPUvars->ei;
fzr=GPUvars->zr;
fzi=GPUvars->zi;
#endif
//calculate the number of boxes etc.
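//Nf = 4^nlevels boxes on the finest level; Nt = (4^(nlevels+1)-1)/3 boxes
//in the whole quad-tree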
int Nf=1,Nt=1;
for(int i=1;i<=nlevels;i++) {
Nf<<=2;
Nt+=Nf;
}
if(NE)
cudasafeMalloc((void**)&GPUvars->jxptr, (Nf+1)*sizeof(int));
cudasafeMalloc((void**)&GPUvars->ixptr,(Nf+1)*sizeof(int));
cudasafeMalloc((void**)&GPUvars->z0,Nt*sizeof(SORT_DCMPLX));
cudasafeMalloc((void**)&GPUvars->d0,Nt*sizeof(SORT_DCMPLX));
cudasafeMalloc((void **)&GPUvars->dabs,Nt*sizeof(SORT_REAL));
z=(SORT_DCMPLX*)GPUvars->z0;
d=(SORT_DCMPLX*)GPUvars->d0;
dabs=GPUvars->dabs;
#ifdef CUDATIMESORT
hipEvent_t start;
hipEvent_t stop;
hipEvent_t start2;
hipEvent_t stop2;
cudasafe(hipEventCreate(&start),"hipEventCreate sort1start");
cudasafe(hipEventCreate(&stop),"hipEventCreate sort1stop");
cudasafe(hipEventCreate(&start2),"hipEventCreate sort1start");
cudasafe(hipEventCreate(&stop2),"hipEventCreate sort1stop");
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
float elapsedtime,elapsedtime2;
#endif
//allocate temporary variables. These should be cleaned up afterwards
cudasafe(cudaMallocDebug((void**)&xpositionstmp, N*sizeof(SORT_REAL)), "hipMalloc xpositionstmp");
cudasafe(cudaMallocDebug((void**)&ypositionstmp, N*sizeof(SORT_REAL)), "hipMalloc ypositionstmp");
cudasafe(cudaMallocDebug((void**)&newindices, N*sizeof(int)), "hipMalloc newindices");
cudasafe(cudaMallocDebug((void**)&lrcount, Nf*sizeof(int)), "hipMalloc lrcount");
cudasafe(cudaMallocDebug((void**)&rlimits, Nf*sizeof(int)), "hipMalloc rlimits");
cudasafe(cudaMallocDebug((void**)&newllimits, (Nf/2+1)*sizeof(int)), "hipMalloc newllimits");
cudasafe(cudaMallocDebug((void**)&newrlimits, Nf/2*sizeof(int)), "hipMalloc newrlimits");
cudasafe(cudaMallocDebug((void**)&tmpllimits, Nf/2*sizeof(int)), "hipMalloc tmpllimits");
cudasafe(cudaMallocDebug((void**)&tmprlimits, Nf/2*sizeof(int)), "hipMalloc tmprlimits");
cudasafe(cudaMallocDebug((void**)&threesplit, Nf/2*sizeof(int)), "hipMalloc threesplit");
#ifdef SORTLIMIT
cudasafe(cudaMallocDebug((void**)&leftlimitvalues, Nf/2*sizeof(double)), "hipMalloc leftlimitvalues");
cudasafe(cudaMallocDebug((void**)&rightlimitvalues, Nf/2*sizeof(double)), "hipMalloc rightlimitvalues");
#endif
cudasafe(cudaMallocDebug((void**)&outputvector, 2*sizeof(int)), "hipMalloc outputvector");
cudasafe(cudaMallocDebug((void**)&ysplit, Nf/2*sizeof(int)), "hipMalloc ysplit");
cudasafe(cudaMallocDebug((void**)&splitside, Nf/2*sizeof(int)), "hipMalloc splitside");
cudasafe(cudaMallocDebug((void**)&splitpoints, Nf/2*sizeof(SORT_REAL)), "hipMalloc cxpositions");
cudasafe(cudaMallocDebug((void**)&zdllimits, MAXBLOCKSZDMAXMULTI*sizeof(int)), "hipMalloc zdllimits");
cudasafe(cudaMallocDebug((void**)&zdrlimits, MAXBLOCKSZDMAXMULTI*sizeof(int)), "hipMalloc zdrlimits");
cudasafe(cudaMallocDebug((void**)&zdmaxpositions, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "hipMalloc zdmaxpositions");
cudasafe(cudaMallocDebug((void**)&zdminpositions, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "hipMalloc zdminpositions");
cudasafe(cudaMallocDebug((void**)&zdmaxpositions2, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "hipMalloc zdmaxpositions2");
cudasafe(cudaMallocDebug((void**)&zdminpositions2, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "hipMalloc zdminpositions2");
cudasafe(cudaMallocDebug((void**)&ztmp, Nf/2*sizeof(SORT_DCMPLX)), "hipMalloc cxpositions");
cudasafe(cudaMallocDebug((void**)&dtmp, Nf/2*sizeof(SORT_DCMPLX)), "hipMalloc cxpositions");
if(NE) {
cudasafe(cudaMallocDebug((void**)&xpositionstmpNE, NE*sizeof(SORT_REAL)), "hipMalloc xpositionstmpNE");
cudasafe(cudaMallocDebug((void**)&ypositionstmpNE, NE*sizeof(SORT_REAL)), "hipMalloc ypositionstmpNE");
cudasafe(cudaMallocDebug((void**)&newindicesNE, NE*sizeof(int)), "hipMalloc newindicesNE");
cudasafe(cudaMallocDebug((void**)&newllimitsNE, (Nf/2+1)*sizeof(int)), "hipMalloc newllimitsNE");
cudasafe(cudaMallocDebug((void**)&newrlimitsNE, Nf/2*sizeof(int)), "hipMalloc newrlimitsNE");
cudasafe(cudaMallocDebug((void**)&rlimitsNE, Nf*sizeof(int)), "hipMalloc rlimitsNE");
cudasafe(cudaMallocDebug((void**)&lrcountNE, Nf*sizeof(int)), "hipMalloc lrcountNE");
ptrinitvector[1]=NE; //make sure the last element is there in case of only 1 level
cudasafe(hipMemcpy(GPUvars->jxptr,ptrinitvector, 2*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy GPUvars->jxptr");
cudasafe( hipMemcpy(rlimitsNE, &NE, sizeof(int), hipMemcpyHostToDevice), "hipMemcpy rlimitsNE" );
}
checkcudaerror("allocation\n");
//initiation
ptrinitvector[1]=N; //this is necessary in case only one level is used. Otherwise, it has no point
cudasafe(hipMemcpy(GPUvars->ixptr,ptrinitvector, 2*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy GPUvars->ixptr");
cudasafe( hipMemcpy(rlimits, &N, sizeof(int), hipMemcpyHostToDevice), "hipMemcpy rlimits" );
//useful number of blocks for calls. Could probably be optimized
int blockcount=imin((N+4*threadsperblock-1)/(4*threadsperblock),maxblockcount);
int blockcountNE=imin((NE+4*threadsperblock-1)/(4*threadsperblock),maxblockcount);
int zdblockcount=imin((imax(N,NE)+4*ZDMAXTHREADS-1)/(4*ZDMAXTHREADS),MAXBLOCKSZDMAXMULTI);
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(start2,0),"hipEventRecord stop2");
#endif
//initiate all indices to [1,2,3,....]
checkcudaerror("before initiateindices\n");
hipLaunchKernelGGL(( initiateindices), dim3(blockcount),dim3(threadsperblock), 0, 0, GPUvars->ix,N);
CHECKCUDAERROR
checkcudaerror("initiateindices\n");
if(NE) {
hipLaunchKernelGGL(( initiateindices), dim3(blockcountNE),dim3(threadsperblock), 0, 0, GPUvars->jx,NE);
CHECKCUDAERROR
checkcudaerror("initiateindicesNE\n");
}
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop2),"hipEventRecord fulltimestop");
cudasafe(hipEventSynchronize(stop2),"hipEventSynchronize fulltimestop");
cudasafe(hipEventElapsedTime(&elapsedtime,start2,stop2),"hipEventElapsedTime");
mexPrintf("Initiateindices, init time: %f\n",elapsedtime/1000);
cudasafe(hipEventRecord(start2,0),"hipEventRecord sort1start");
#endif
//determine base block size
cudasafe(hipMemset(outputvector,0,sizeof(int)),"Memset outputvector");
hipLaunchKernelGGL(( findzdmulti), dim3(zdblockcount),dim3(ZDMAXTHREADS), 0, 0, GPUvars->ixptr,rlimits,fzr,fzi,GPUvars->jxptr,rlimitsNE,fer,fei,zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,1,1,outputvector DEBUGVECTORSTRING2);
CHECKCUDAERROR
checkcudaerror("findzdmulti\n");
int hasnan;
cudasafe(hipMemcpy(&hasnan,outputvector, sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy coutputvector");
if(hasnan) {
hipDeviceReset();
resetalloccount();
mexErrMsgTxt("NaN detected in input vectors, aborting");
}
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop2),"hipEventRecord fulltimestop");
cudasafe(hipEventSynchronize(stop2),"hipEventSynchronize fulltimestop");
cudasafe(hipEventElapsedTime(&elapsedtime,start2,stop2),"hipEventElapsedTime");
mexPrintf("findzdmulti, init time: %f\n",elapsedtime/1000);
cudasafe(hipEventRecord(start2,0),"hipEventRecord sort1start");
#endif
hipLaunchKernelGGL(( findzdmultistep2), dim3(1),dim3(ZDMAXTHREADS), 0, 0, zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,z,d,dabs,1);
CHECKCUDAERROR
checkcudaerror("findzdmultistep2\n");
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop2),"hipEventRecord fulltimestop");
cudasafe(hipEventSynchronize(stop2),"hipEventSynchronize fulltimestop");
cudasafe(hipEventElapsedTime(&elapsedtime,start2,stop2),"hipEventElapsedTime");
mexPrintf("findzdmultistep2, init time: %f\n",elapsedtime/1000);
#endif
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop),"hipEventRecord fulltimestop");
cudasafe(hipEventSynchronize(stop),"hipEventSynchronize fulltimestop");
cudasafe(hipEventElapsedTime(&elapsedtime,start,stop),"hipEventElapsedTime");
mexPrintf("Partition, init time: %f\n",elapsedtime/1000);
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
#endif
//the main partitioning loop
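//each iteration performs two successive binary splits (along x or y as
//chosen per box by setuppartition), so Nb is doubled twice and every pass
//adds one quad-tree level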
for(int i=0,Nb=1;i<nlevels;i++) {
// mexPrintf("partitioning step %d of %d\n",i,nlevels);
#ifdef SORTLIMIT
calculatesortlimits(&distlimit,&disttarget,&sortlimit,GPUvars->sortlimits,i,nlevels-0.5);
#endif
eta=calculateetalimits(GPUvars->eta,i,nlevels-0.5);
while(threadcount>imax(N,NE)/Nb&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
//setup
int singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
checkcudaerror("before setuppartition\n");
hipLaunchKernelGGL(( setuppartition), dim3(imin(singleblockwork,SINGLETHREADMAXTHREADS)),dim3(SINGLETHREADTHREADCOUNT), 0, 0, z,d,splitpoints,ysplit,1,Nb SORTLIMITCALLINGSTRING2);
CHECKCUDAERROR
cudasafe(hipMemset(lrcount,0, Nb*2*sizeof(int)), "hipMemset lrcount");
cudasafe(hipMemset(threesplit,0, Nb*sizeof(int)), "hipMemset threesplit");
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(start2,0),"hipEventRecord sort1start");
#endif
//make first partitioning
checkcudaerror("before multiblockpartition\n");
multiblockpartition(fzr,fzi,GPUvars->ix,newindices,splitpoints,GPUvars->ixptr,rlimits,newllimits,newrlimits,tmpllimits,tmprlimits,Nb,N,xpositionstmp,ypositionstmp,ysplit,lrcount,threesplit,outputvector,N SORTLIMITCALLINGSTRING DEBUGVECTORSTRING2/*,zr,zi*/);
if(eta<1) { //if eta<1, make one additional split
hipLaunchKernelGGL(( setupetasplit), dim3(imin(singleblockwork,SINGLETHREADMAXTHREADS)),dim3(SINGLETHREADTHREADCOUNT), 0, 0, z,splitpoints,ysplit,eta,Nb,splitside,newllimits,newrlimits,tmpllimits,tmprlimits);
CHECKCUDAERROR
cudasafe(hipMemset(lrcount,0, Nb*2*sizeof(int)), "hipMemset lrcount");
//make partitioning, and copy data back to original array
if(blockcount>Nb) { //single or multiblock mode?
hipLaunchKernelGGL(( partitionsplit), dim3(blockcount),dim3(threadsperblock), 0, 0, xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,tmpllimits,tmprlimits,lrcount,Nb,fzr,fzi,ysplit,NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
hipLaunchKernelGGL(( splitetacopymultithread), dim3(blockcount),dim3(threadsperblock), 0, 0, tmpllimits,tmprlimits,fzr,fzi,GPUvars->ix,xpositionstmp,ypositionstmp,newindices,Nb);
CHECKCUDAERROR
}
else {
hipLaunchKernelGGL(( partitionsplitsinglethread), dim3(blockcount),dim3(threadsperblock), 0, 0, xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,tmpllimits,tmprlimits,lrcount,Nb,fzr,fzi,ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
hipLaunchKernelGGL(( splitetacopysinglethread), dim3(blockcount),dim3(threadsperblock), 0, 0, tmpllimits,tmprlimits,fzr,fzi,GPUvars->ix,xpositionstmp,ypositionstmp,newindices,Nb);
CHECKCUDAERROR
}
hipLaunchKernelGGL(( correctetalimits), dim3(imin(singleblockwork,SINGLETHREADMAXTHREADS)),dim3(SINGLETHREADTHREADCOUNT), 0, 0, newllimits,newrlimits,splitside,lrcount,Nb);
CHECKCUDAERROR
}
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop2),"hipEventRecord stop2");
#endif
if(NE) { //if evaluation points, split them
if(blockcountNE>Nb) { //single or multi block mode
cudasafe(hipMemset(lrcountNE, 0, Nb*2*sizeof(int)), "hipMemset lrcount");
hipLaunchKernelGGL(( partitionsplit), dim3(blockcountNE), dim3(threadsperblock), 0, 0, fer, fei, GPUvars->jx, newindicesNE, splitpoints, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb, xpositionstmpNE, ypositionstmpNE, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
else {
hipLaunchKernelGGL(( partitionsplitsinglethread), dim3(blockcountNE), dim3(threadsperblock), 0, 0, fer, fei, GPUvars->jx, newindicesNE, splitpoints, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb, xpositionstmpNE, ypositionstmpNE, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
hipLaunchKernelGGL(( setNElimits), dim3(imin(singleblockwork, SINGLETHREADMAXTHREADS)), dim3(SINGLETHREADTHREADCOUNT), 0, 0, GPUvars->jxptr, rlimitsNE, newllimitsNE, newrlimitsNE, lrcountNE, Nb);
CHECKCUDAERROR
}
//in the middle step, only approximate z and d, since these values will not be used anymore (except for the eta split, and as starting values in the next partitioning)
hipLaunchKernelGGL(( approximatezd), dim3(imin(singleblockwork,SINGLETHREADMAXTHREADS)),dim3(SINGLETHREADTHREADCOUNT), 0, 0, z,d,splitpoints,ztmp,dtmp,ysplit,Nb);
CHECKCUDAERROR
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop),"hipEventRecord stop 287");
cudasafe(hipEventSynchronize(stop),"hipEventSynchronize stop");
cudasafe(hipEventSynchronize(stop2),"hipEventSynchronize stop2");
cudasafe(hipEventElapsedTime(&elapsedtime,start,stop),"hipEventElapsedTime");
cudasafe(hipEventElapsedTime(&elapsedtime2,start2,stop2),"hipEventElapsedTime");
mexPrintf("Partition, loop %d: %f partitiontime: %f\n",i,elapsedtime/1000,elapsedtime2/1000);
#endif
#ifdef VALIDATEPARTITIONING
checkpartitioning(zr,zi,xpositionstmp,ypositionstmp,splitpoints,newindices,GPUvars->ix,N,newllimits,newrlimits,Nb,ysplit,0,1,ztmp,dtmp);
#endif
z+=Nb;
d+=Nb;
dabs+=Nb;
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
#endif
Nb<<=1;
#ifdef SORTLIMIT
calculatesortlimits(&distlimit,&disttarget,&sortlimit,GPUvars->sortlimits,i+0.5,nlevels-0.5);
#endif
eta=calculateetalimits(GPUvars->eta,i+0.5,nlevels-0.5);
singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
cudasafe(hipMemset(lrcount,0, Nb*2*sizeof(int)), "hipMemset lrcount");
cudasafe(hipMemset(threesplit,0, Nb*sizeof(int)), "hipMemset threesplit");
hipLaunchKernelGGL(( setuppartition), dim3(imin(singleblockwork,SINGLETHREADMAXTHREADS)),dim3(SINGLETHREADTHREADCOUNT), 0, 0, ztmp,dtmp,splitpoints,ysplit,0,Nb SORTLIMITCALLINGSTRING2);
CHECKCUDAERROR
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(start2,0),"hipEventRecord sort1start");
#endif
//second partitioning on this level
checkcudaerror("before second multiblockpartition\n");
multiblockpartition(xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,newllimits,newrlimits,GPUvars->ixptr,rlimits,tmpllimits,tmprlimits,Nb,N,fzr,fzi,ysplit,lrcount,threesplit,outputvector,N SORTLIMITCALLINGSTRING DEBUGVECTORSTRING2/*,zr,zi*/);
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop2),"hipEventRecord fulltimestop");
#endif
if(eta<1) { //eta split again
hipLaunchKernelGGL(( setupetasplit), dim3(imin(singleblockwork, SINGLETHREADMAXTHREADS)), dim3(SINGLETHREADTHREADCOUNT), 0, 0, ztmp, splitpoints, ysplit, eta, Nb, splitside, GPUvars->ixptr, rlimits, tmpllimits, tmprlimits);
CHECKCUDAERROR
cudasafe(hipMemset(lrcount, 0, Nb*2*sizeof(int)), "hipMemset lrcount");
if(blockcount>Nb) {
hipLaunchKernelGGL(( partitionsplit), dim3(blockcount), dim3(threadsperblock), 0, 0, fzr, fzi, GPUvars->ix, newindices, splitpoints, tmpllimits, tmprlimits, lrcount, Nb, xpositionstmp, ypositionstmp, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
hipLaunchKernelGGL(( splitetacopymultithread), dim3(blockcount), dim3(threadsperblock), 0, 0, tmpllimits, tmprlimits, xpositionstmp, ypositionstmp, newindices, fzr, fzi, GPUvars->ix, Nb);
CHECKCUDAERROR
}
else {
hipLaunchKernelGGL(( partitionsplitsinglethread), dim3(blockcount), dim3(threadsperblock), 0, 0, fzr, fzi, GPUvars->ix, newindices, splitpoints, tmpllimits, tmprlimits, lrcount, Nb, xpositionstmp, ypositionstmp, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
hipLaunchKernelGGL(( splitetacopysinglethread), dim3(blockcount), dim3(threadsperblock), 0, 0, tmpllimits, tmprlimits, xpositionstmp, ypositionstmp, newindices, fzr, fzi, GPUvars->ix, Nb);
CHECKCUDAERROR
}
hipLaunchKernelGGL(( correctetalimits), dim3(imin(singleblockwork, SINGLETHREADMAXTHREADS)), dim3(SINGLETHREADTHREADCOUNT), 0, 0, GPUvars->ixptr, rlimits, splitside, lrcount, Nb);
CHECKCUDAERROR
}
if(NE) { //evaluation point split
if(blockcountNE>Nb) {
cudasafe(hipMemset(lrcountNE, 0, Nb*2*sizeof(int)), "hipMemset lrcount");
hipLaunchKernelGGL(( partitionsplit), dim3(blockcountNE), dim3(threadsperblock), 0, 0, xpositionstmpNE, ypositionstmpNE, newindicesNE, GPUvars->jx, splitpoints, newllimitsNE, newrlimitsNE, lrcountNE, Nb, fer, fei, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
else {
hipLaunchKernelGGL(( partitionsplitsinglethread), dim3(blockcountNE), dim3(threadsperblock), 0, 0, xpositionstmpNE, ypositionstmpNE, newindicesNE, GPUvars->jx, splitpoints, newllimitsNE, newrlimitsNE, lrcountNE, Nb, fer, fei, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
hipLaunchKernelGGL(( setNElimits), dim3(imin(singleblockwork, SINGLETHREADMAXTHREADS)), dim3(SINGLETHREADTHREADCOUNT), 0, 0, newllimitsNE, newrlimitsNE, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb);
CHECKCUDAERROR
}
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop),"hipEventRecord stop 350");
cudasafe(hipEventSynchronize(stop),"hipEventSynchronize stop");
cudasafe(hipEventSynchronize(stop2),"hipEventSynchronize stop2");
cudasafe(hipEventElapsedTime(&elapsedtime,start,stop),"hipEventElapsedTime");
cudasafe(hipEventElapsedTime(&elapsedtime2,start2,stop2),"hipEventElapsedTime");
mexPrintf("Partition, loop %d: %f partitiontime: %f\n",i,elapsedtime/1000,elapsedtime2/1000);
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
#endif
Nb<<=1;
singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
//now, this will be the actual fmm level. Here, calculate the real values of z and d to be used by the theta-criterion etc.
if(Nb>zdblockcount) { //single or multi block mode
hipLaunchKernelGGL(( findzd), dim3(imin(MAXBLOCKSZDMAXMULTI, Nb)), dim3(threadcount),threadcount*4*sizeof(SORT_REAL), 0, GPUvars->ixptr, rlimits, fzr, fzi, GPUvars->jxptr, rlimitsNE, fer, fei, z, d, dabs, Nb);
CHECKCUDAERROR
}
else {
// mexPrintf("findzdmulti\n");
// SORT_REAL *hzdmaxpositions=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdminpositions=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdmaxpositions2=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdminpositions2=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// int *hzdllimits=(int*)mxMalloc(Nb*sizeof(int));
// int *hzdrlimits=(int*)mxMalloc(Nb*sizeof(int));
// SORT_DCMPLX *hd0=(SORT_DCMPLX*)mxMalloc(Nb*sizeof(SORT_DCMPLX));
// SORT_DCMPLX *hz0=(SORT_DCMPLX*)mxMalloc(Nb*sizeof(SORT_DCMPLX));
// double* hdebugvector=(double*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(double));
// cudasafe(hipMemset(GPUvars->debugvector,0,MAXBLOCKSZDMAXMULTI*sizeof(double)),"cudaMeMset");
// cudasafe(hipMemset(zdmaxpositions,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"hipMemset");
// cudasafe(hipMemset(zdminpositions,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"hipMemset");
// cudasafe(hipMemset(zdmaxpositions2,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"hipMemset");
// cudasafe(hipMemset(zdminpositions2,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"hipMemset");
hipLaunchKernelGGL(( findzdmulti), dim3(zdblockcount),dim3(ZDMAXTHREADS), 0, 0, GPUvars->ixptr,rlimits,fzr,fzi,GPUvars->jxptr,rlimitsNE,fer,fei,zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,Nb,0,NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
// cudasafe(hipMemcpy(hzdmaxpositions,zdmaxpositions,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hzdminpositions,zdminpositions,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hzdmaxpositions2,zdmaxpositions2,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hzdminpositions2,zdminpositions2,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hdebugvector,GPUvars->debugvector,MAXBLOCKSZDMAXMULTI*sizeof(double),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hzdllimits,zdllimits,Nb*sizeof(int),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hzdrlimits,zdrlimits,Nb*sizeof(int),hipMemcpyDeviceToHost),"Memcpy");
hipLaunchKernelGGL(( findzdmultistep2), dim3(Nb),dim3(ZDMAXTHREADS), 0, 0, zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,z,d,dabs,Nb);
CHECKCUDAERROR
// cudasafe(hipMemcpy(hd0,d,Nb*sizeof(SORT_DCMPLX),hipMemcpyDeviceToHost),"Memcpy");
// cudasafe(hipMemcpy(hz0,z,Nb*sizeof(SORT_DCMPLX),hipMemcpyDeviceToHost),"Memcpy");
// if(Nb==1024) {
// for(int k=0;k<zdblockcount;k++)
// mexPrintf("limits[%d]: %14e %14e %14e %14e, dv=%e\n",k,hzdminpositions[k],hzdmaxpositions[k],hzdminpositions2[k],hzdmaxpositions2[k],hdebugvector[k]);
// for(int k=0;k<Nb;k++)
// mexPrintf("inner size[%d]: [%d %d) z0=%e+%ei d0=%e+%ei\n",k,hzdllimits[k],hzdrlimits[k],creal(hz0[k]),cimag(hz0[k]),creal(hd0[k]),cimag(hd0[k]));
// }
// mexPrintf("zdblockcount=%d\n",zdblockcount);
// mxFree(hzdmaxpositions);
// mxFree(hzdminpositions);
// mxFree(hzdmaxpositions2);
// mxFree(hzdminpositions2);
// mxFree(hzdllimits);
// mxFree(hzdrlimits);
// mxFree(hd0);
// mxFree(hz0);
// mxFree(hdebugvector);
}
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(stop),"hipEventRecord stop 370");
cudasafe(hipEventSynchronize(stop),"hipEventSynchronize stop");
cudasafe(hipEventElapsedTime(&elapsedtime,start,stop),"hipEventElapsedTime");
mexPrintf("find zd, loop %d: %f\n",i,elapsedtime/1000);
#endif
#ifdef VALIDATEPARTITIONING
checkpartitioning(zr,zi,fzr,fzi,splitpoints,GPUvars->ix,newindices,N,GPUvars->ixptr,rlimits,Nb/2,ysplit,0,1,z,d);
#endif
#ifdef CUDATIMESORT
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
#endif
}
//cleanup
#ifdef CUDATIMESORT
cudasafe(hipEventDestroy(stop),"hipEventDestroy");
cudasafe(hipEventDestroy(start),"hipEventDestroy");
cudasafe(hipEventDestroy(stop2),"hipEventDestroy");
cudasafe(hipEventDestroy(start2),"hipEventDestroy");
#endif
cudaFreeDebug(xpositionstmp);
cudaFreeDebug(ypositionstmp);
cudaFreeDebug(newindices);
cudaFreeDebug(rlimits);
cudaFreeDebug(newllimits);
cudaFreeDebug(newrlimits);
cudaFreeDebug(tmpllimits);
cudaFreeDebug(tmprlimits);
cudaFreeDebug(threesplit);
cudaFreeDebug(outputvector);
cudaFreeDebug(ysplit);
cudaFreeDebug(splitside);
cudaFreeDebug(lrcount);
cudaFreeDebug(splitpoints);
cudaFreeDebug(zdllimits);
cudaFreeDebug(zdrlimits);
cudaFreeDebug(zdmaxpositions);
cudaFreeDebug(zdminpositions);
cudaFreeDebug(zdmaxpositions2);
cudaFreeDebug(zdminpositions2);
cudaFreeDebug(ztmp);
cudaFreeDebug(dtmp);
#ifdef FLOATSORT
cudaFreeDebug(fzr);
cudaFreeDebug(fzi);
#endif
#ifdef SORTLIMIT
cudaFreeDebug(leftlimitvalues);
cudaFreeDebug(rightlimitvalues);
#endif
if(NE) {
cudaFreeDebug(xpositionstmpNE);
cudaFreeDebug(ypositionstmpNE);
cudaFreeDebug(newindicesNE);
cudaFreeDebug(newllimitsNE);
cudaFreeDebug(newrlimitsNE);
cudaFreeDebug(rlimitsNE);
cudaFreeDebug(lrcountNE);
#ifdef FLOATSORT
cudaFreeDebug(fer);
cudaFreeDebug(fei);
#endif
}
}
/*------------------------------------------------------------------------*/
#ifdef CHECKPARTITIONING
#include "cudasortdebugdefs.h"
#endif
/*------------------------------------------------------------------------*/
//multiblockpartition is the equivalent of singleblockpartition, but uses multiple blocks per partitioning. Good in the beginning, when there are few boxes
//positions,positions2 are the positions in the x and y directions
//indices,newindices are input and output values for the permutation array
//splitpoints is where the split has been performed
//llimits,rlimits indicate where the arrays to be split are
//newllimits,newrlimits output for llimits/rlimits
//tmpllimits,tmprlimits temporary storage for limits during the algorithm
//splitcount number of partitions to be performed
//count number of elements
//newpositions,newpositions2 output values for positions,positions2
//ysplit split with respect to y or x coordinates
//lrcount for a split, the number of elements on each side
//threesplitvector whether threesplit mode should be used for the box
//outputvector vector on GPU to communicate results from the split
//debugvector debug purposes only
void multiblockpartition(SORT_REAL* positions,SORT_REAL *positions2,int* indices,int* newindices,SORT_REAL *splitpoints,int *llimits,int* rlimits,int *newllimits,int* newrlimits,int* tmpllimits,int* tmprlimits,int splitcount,int count,SORT_REAL* newpositions,SORT_REAL *newpositions2,int* ysplit,int* lrcount,int* threesplitvector,int* outputvector,int N SORTLIMITSTRING DEBUGVECTORSTRING)
{
int *itmp,i;
checkcudaerror("start multiblockpartition\n");
#ifdef CHECKPARTITIONING
static int printcount=0;
SORT_REAL outsplitpoints[4];
#endif
SORT_REAL *ctmp;
int outputvectorlocal[2];
int threadcount;
int blockcount=imin((count+4*threadsperblock-1)/(4*threadsperblock),maxblockcount); //determine a reasonable number of blocks to use
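//i.e. roughly one block per 4*threadsperblock elements, capped at maxblockcount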
if(splitcount>blockcount) { //if more splits than blocks, use singleblockpartition instead
#ifdef CUDATIMESORT
hipEvent_t start;
hipEvent_t stop;
cudasafe(hipEventCreate(&start), "hipEventCreate start");
cudasafe(hipEventCreate(&stop), "hipEventCreate stop");
cudasafe(hipEventRecord(start, 0), "hipEventRecord start");
#endif
threadcount=threadsperblock;
while(threadcount>N/splitcount&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
// int* hllimits=(int*)mxMalloc((splitcount+1)*sizeof(int));
// int* hrlimits=(int*)mxMalloc(splitcount*sizeof(int));
// int* hllimitsnew=(int*)mxMalloc((splitcount+1)*sizeof(int));
// int* hrlimitsnew=(int*)mxMalloc(splitcount*sizeof(int));
// cudasafe( hipMemcpy(hllimits, llimits, splitcount*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(hrlimits, rlimits, splitcount*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// // for(int k=0;k<splitcount;k++)
// // mexPrintf("limits[%d]=[%d %d)\n",k,hllimits[k],hrlimits[k]);
// for(int k=0;k<splitcount;k++) {
// if(hrlimits[k]<hllimits[k]||hllimits[k]<0||hrlimits[k]>N)
// mexPrintf("ERROR: limits[%d]=[%d %d)\n",k,hllimits[k],hrlimits[k]);
// if(k<splitcount-1&&hrlimits[k]!=hllimits[k+1])
// mexPrintf("ERROR: not connected %d\n",k);
// }
// double *positionsbak,*positions2bak,*splitpointsbak;
// int* indicesbak;
// cudasafeMalloc((void **)&positionsbak,N*sizeof(double));
// cudasafeMalloc((void **)&positions2bak,N*sizeof(double));
// cudasafeMalloc((void **)&indicesbak,N*sizeof(int));
// cudasafeMalloc((void **)&splitpointsbak,splitcount*sizeof(int));
// cudasafe( hipMemcpy(positionsbak, positions, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(positions2bak, positions2, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(indicesbak, indices, N*sizeof(int), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(splitpointsbak, splitpoints, splitcount*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// double *hdebugvector=(double*)mxMalloc(imax(2000,splitcount)*sizeof(double));
// mexPrintf("distlimit=%f disttarget=%f\n",distlimit,disttarget);
// for(int k=0;k<splitcount;k++) {
// for(int m=0;m<splitcount;m++) {
// if(m==k) {
// hllimitsnew[m]=hllimits[k];
// hrlimitsnew[m]=hrlimits[k];
// }
// else {
// hllimitsnew[m]=0;
// hrlimitsnew[m]=0;
// }
// }
// cudasafe( hipMemset(debugvector,0,imax(2000,splitcount)*sizeof(double)),"dd");
// cudasafe( hipMemcpy(llimits, hllimitsnew, splitcount*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(rlimits, hrlimitsnew, splitcount*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy outputvector" );
// mexPrintf("testing box %d limits = [%d,%d) (%d elements) maxblockcount=%d splitcount=%d\n",k,hllimits[k],hrlimits[k],hrlimits[k]-hllimits[k],maxblockcount,splitcount);
// mexEvalString("drawnow");
// // hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount, maxblockcount)), dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
// singleblockpartitiondebug<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( hipMemcpy(positions, positionsbak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(positions2, positions2bak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(indices, indicesbak, N*sizeof(int), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// CHECKCUDAERROR
// cudasafe( hipMemcpy(hdebugvector, debugvector, imax(2000,splitcount)*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// for(int m=0;m<imax(2000,splitcount);m++)
// if(hdebugvector[m]!=0.0)
// mexPrintf("debugvector[%d]=%e\n",m,hdebugvector[m]);
// mexPrintf("test2\n");
// mexEvalString("drawnow");
//
// // if(k==0)
//
// hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount, maxblockcount)), dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvalues,rightlimitvalues,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( hipMemcpy(positions, positionsbak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(positions2, positions2bak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(indices, indicesbak, N*sizeof(int), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// CHECKCUDAERROR
// mexPrintf("test3\n");
// mexEvalString("drawnow");
// hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount, maxblockcount)), dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( hipMemcpy(positions, positionsbak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(positions2, positions2bak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(indices, indicesbak, N*sizeof(int), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// CHECKCUDAERROR
//
// }
// cudasafe( hipMemcpy(llimits, hllimits, splitcount*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(rlimits, hrlimits, splitcount*sizeof(int), hipMemcpyHostToDevice), "hipMemcpy outputvector" );
//
// mxFree(hllimits);
// mxFree(hrlimits);
// mxFree(hllimitsnew);
// mxFree(hrlimitsnew);
// mxFree(hdebugvector);
// double* leftlimits=(double*)mxMalloc(splitcount*sizeof(double));
// double* rightlimits=(double*)mxMalloc(splitcount*sizeof(double));
// double* hsplitpoints=(double*)mxMalloc(splitcount*sizeof(double));
// cudasafe( hipMemcpy(leftlimits, leftlimitvaluesdebug, splitcount*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(rightlimits, rightlimitvaluesdebug, splitcount*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(hsplitpoints, splitpoints, splitcount*sizeof(double), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// // for(int k=0;k<splitcount;k++)
// // mexPrintf("sortlimits[%d]=[%e %e %e)\n",k,leftlimits[k],hsplitpoints[k],rightlimits[k]);
// for(int k=0;k<splitcount;k++) {
// if(hsplitpoints[k]<leftlimits[k]||hsplitpoints[k]>rightlimits[k])
// mexPrintf("ERROR sortlimits[%d]=[%e %e %e)\n",k,leftlimits[k],hsplitpoints[k],rightlimits[k]);
// }
// mxFree(leftlimits);
// mxFree(rightlimits);
// mxFree(hsplitpoints);
// // int* hindices=(int*)mxMalloc(N*sizeof(int));
// // cudasafe( hipMemcpy(hindices, indices, N*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
// // for(int k=0;k<N;k++)
// // if(hindices[k]<0||hindices[k]>=N)
// // mexPrintf("Error index[%d]=%d\n",k,hindices[k]);
// //
// mexPrintf(".");
// mexEvalString("drawnow");
// hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount, maxblockcount)), dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( hipMemcpy(positions, positionsbak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(positions2, positions2bak, N*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(indices, indicesbak, N*sizeof(int), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// cudasafe( hipMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), hipMemcpyDeviceToDevice), "hipMemcpy outputvector" );
// CHECKCUDAERROR
// mexPrintf(",");
// mexEvalString("drawnow");
hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount, maxblockcount)), dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
// mexPrintf(":");
// mexEvalString("drawnow");
// cudaFreeDebug(positionsbak);
// cudaFreeDebug(positions2bak);
// cudaFreeDebug(indicesbak);
// cudaFreeDebug(splitpointsbak);
#ifdef CUDATIMESORT
float elapsedtime;
cudasafe(hipEventRecord(stop), "hipEventRecord stop 722");
cudasafe(hipEventSynchronize(stop), "hipEventSynchronize stop");
cudasafe(hipEventElapsedTime(&elapsedtime, start, stop), "hipEventElapsedTime");
mexPrintf("singleblockpartition (%d blocks): %f\n", imin(splitcount, maxblockcount), elapsedtime/1000);
cudasafe(hipEventDestroy(stop), "hipEventDestroy");
cudasafe(hipEventDestroy(start), "hipEventDestroy");
#endif
return;
}
//now start the real multiblock partition
blockcount=imax(blockcount,splitcount); //at least one block per split (if there were more splits than blocks, singleblockpartition was already used above)
cudasafe(hipMemset(lrcount,0, 2*splitcount*sizeof(int)), "hipMemset lrcount");
cudasafe(hipMemset(threesplitvector,0, splitcount*sizeof(int)), "hipMemset threesplitvector");
//initial split using the given value in splitpoints (center of box)
hipLaunchKernelGGL(( partitionsplit), dim3(blockcount),dim3(threadsperblock), 0, 0, positions,positions2,indices,newindices,splitpoints,llimits,rlimits,lrcount,splitcount,newpositions,newpositions2,ysplit,threesplitvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
checkcudaerror("partitionsplit\n");
SWAP(indices,newindices,itmp);
SWAP(positions,newpositions,ctmp);
SWAP(positions2,newpositions2,ctmp);
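//ping-pong the buffers: the freshly partitioned data becomes the input of the next pass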
cudasafe(hipMemset(outputvector,0, 2*sizeof(int)), "hipMemset outputvector");
//complete the split and set up the next one
#ifdef MEDIAN_OF_32
hipLaunchKernelGGL(( preparesplit32), dim3(splitcount),dim3(PREPARESPLIT32THREADCOUNT), 0, 0, positions, positions2,indices,splitpoints,llimits,rlimits,tmpllimits,tmprlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#else
hipLaunchKernelGGL(( preparesplit), dim3((splitcount+PREPARESPLITTHREADCOUNT-1)/PREPARESPLITTHREADCOUNT),dim3(PREPARESPLITTHREADCOUNT), 0, 0, positions, positions2,indices,splitpoints,llimits,rlimits,tmpllimits,tmprlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#endif
cudasafe( hipMemcpy(outputvectorlocal, outputvector, 2*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
i=0;
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING1
CHECKPARTITIONINGSTRINGSORT1
#endif
//outputvectorlocal[0] contains the maximum number of elements in the coming split.
//If this is small enough, move to singleblockpartition.
//If outputvectorlocal[1] is nonzero, one split did not move any elements at all; move to
//singleblockpartition in this case as well, since singleblockpartition handles this more properly
while((outputvectorlocal[0]>SPLITSHIFT&&outputvectorlocal[1]==0)||i%2==1) { //always make an even number of splits from this point. This is to keep the results in the correct vector when moving to singleblockpartition
cudasafe(hipMemset(lrcount,0, 2*splitcount*sizeof(int)), "hipMemset lrcount");
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING2
#endif
//partitioning
hipLaunchKernelGGL(( partitionsplit), dim3(blockcount),dim3(threadsperblock), 0, 0, positions,positions2,indices,newindices,splitpoints,tmpllimits,tmprlimits,lrcount,splitcount,newpositions,newpositions2,ysplit,threesplitvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING3
#endif
SWAP(indices,newindices,itmp);
SWAP(positions,newpositions,ctmp);
SWAP(positions2,newpositions2,ctmp);
cudasafe(hipMemset(outputvector,0, 2*sizeof(int)), "hipMemset outputvector");
//set up next one
#ifdef MEDIAN_OF_32
hipLaunchKernelGGL(( preparesplit32), dim3(splitcount),dim3(PREPARESPLIT32THREADCOUNT), 0, 0, positions, positions2,indices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#else
hipLaunchKernelGGL(( preparesplit), dim3((splitcount+PREPARESPLITTHREADCOUNT-1)/PREPARESPLITTHREADCOUNT),dim3(PREPARESPLITTHREADCOUNT), 0, 0, positions, positions2,indices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#endif
SWAP(newllimits,tmpllimits,itmp);
SWAP(newrlimits,tmprlimits,itmp);
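//swap so that the limits just written by preparesplit become the working limits of the next pass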
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING4
CHECKPARTITIONINGSTRINGSORT1
#endif
if(i%2==0) {
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING5
#endif
//in this direction, copy the elements outside the split region to the output vectors
hipLaunchKernelGGL(( splitoutsidecopy), dim3(blockcount),dim3(threadsperblock), 0, 0, newllimits,newrlimits,tmpllimits,tmprlimits,positions,positions2,indices,newpositions,newpositions2,newindices,splitcount DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING6
#endif
}
cudasafe( hipMemcpy(outputvectorlocal, outputvector, 2*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy outputvector" );
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING7
#endif
i++;
}
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING8
#endif
#ifdef CUDATIMESORT
hipEvent_t start;
hipEvent_t stop;
cudasafe(hipEventCreate(&start),"hipEventCreate sort1start");
cudasafe(hipEventCreate(&stop),"hipEventCreate sort1stop");
cudasafe(hipEventRecord(start,0),"hipEventRecord sort1start");
#endif
//finish by using singleblockpartition
threadcount=threadsperblock;
while(threadcount>N/splitcount&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
hipLaunchKernelGGL(( singleblockpartition), dim3(imin(splitcount,maxblockcount)),dim3(threadcount),INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL)), 0, positions,positions2,indices,newindices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,splitcount,newpositions,newpositions2,ysplit,1 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CUDATIMESORT
float elapsedtime;
cudasafe(hipEventRecord(stop),"hipEventRecord fulltimestop");
cudasafe(hipEventSynchronize(stop),"hipEventSynchronize fulltimestop");
cudasafe(hipEventElapsedTime(&elapsedtime,start,stop),"hipEventElapsedTime");
mexPrintf("singleblockpartition (%d blocks): (%d previous loops) %f\n",imin(splitcount,maxblockcount),i,elapsedtime/1000);
cudasafe(hipEventDestroy(stop),"hipEventDestroy");
cudasafe(hipEventDestroy(start),"hipEventDestroy");
#endif
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING9
#endif
}
/*------------------------------------------------------------------------*/
//debug function, checks if partitioning is valid or not
int checkpartitioning(const SORT_REAL* originalpositions,const SORT_REAL *originalpositions2,SORT_REAL *cudapositions,SORT_REAL *cudapositions2,SORT_REAL *splitpoints,int* cudaindices,int* oldcudaindices,int count,int* cudallimits,int* cudarlimits,int splitcount,int* cudaysplit,int printsuccess,int printfailure,SORT_DCMPLX* z,SORT_DCMPLX *d)
{
SORT_REAL *positions=(SORT_REAL*)mxMalloc(count*sizeof(SORT_REAL));
SORT_REAL *positions2=(SORT_REAL*)mxMalloc(count*sizeof(SORT_REAL));
int *indices=(int*)mxMalloc(count*sizeof(int));
int *oldindices=(int*)mxMalloc(count*sizeof(int));
int *llimits=(int*)mxMalloc(2*splitcount*sizeof(int));
int *rlimits=(int*)mxMalloc(2*splitcount*sizeof(int));
int *ysplit=(int*)mxMalloc(splitcount*sizeof(int));
int* buckets=(int*)mxCalloc(count, sizeof(int));
SORT_DCMPLX *hz=(SORT_DCMPLX*)mxMalloc(2*splitcount*sizeof(SORT_DCMPLX));
SORT_DCMPLX *hd=(SORT_DCMPLX*)mxMalloc(2*splitcount*sizeof(SORT_DCMPLX));
SORT_REAL* hsplitpoints=(SORT_REAL*)mxMalloc(splitcount*sizeof(SORT_REAL));
cudasafe( hipMemcpy(positions, cudapositions, count*sizeof(SORT_REAL), hipMemcpyDeviceToHost), "hipMemcpy cudapositions" );
cudasafe( hipMemcpy(positions2, cudapositions2, count*sizeof(SORT_REAL), hipMemcpyDeviceToHost), "hipMemcpy cudapositions2" );
cudasafe( hipMemcpy(indices, cudaindices, count*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy cudaindices" );
cudasafe( hipMemcpy(oldindices, oldcudaindices, count*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy oldcudaindices" );
cudasafe( hipMemcpy(llimits, cudallimits, 2*splitcount*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy cudallimits" );
cudasafe( hipMemcpy(rlimits, cudarlimits, 2*splitcount*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy cudarlimits" );
cudasafe( hipMemcpy(ysplit, cudaysplit, splitcount*sizeof(int), hipMemcpyDeviceToHost), "hipMemcpy cudaysplit" );
cudasafe( hipMemcpy(hz, z, 2*splitcount*sizeof(SORT_DCMPLX), hipMemcpyDeviceToHost), "hipMemcpy z" );
cudasafe( hipMemcpy(hd, d, 2*splitcount*sizeof(SORT_DCMPLX), hipMemcpyDeviceToHost), "hipMemcpy d" );
cudasafe( hipMemcpy(hsplitpoints, splitpoints, splitcount*sizeof(SORT_REAL), hipMemcpyDeviceToHost), "hipMemcpy splitpoints" );
// mexPrintf("running checkpartitioning\n");
int returnvalue=0;
int outside=0, failures=0, failurecount;
SORT_REAL max, min, max2, min2;
//start by checking that all elements exist in the vector exactly once
for(int j=0;j<count;j++) {
if(indices[j]>=count||indices[j]<0)
outside++;
else
buckets[indices[j]]++;
}
for(int j=0;j<count;j++) {
if(buckets[j]!=1)
failures++;
}
if(failures==0) {
if(printsuccess)
mexPrintf("All elements accounted for\n");
}
else {
returnvalue|=1;
if(printfailure)
mexPrintf("Invalid split, %d elements do not occur 1 time\n", failures);
failurecount=0;
//find where the elements were lost
for(int i=0;i<splitcount;i++) {
failures=0;
memset(buckets, 0, count*sizeof(int));
for(int j=llimits[2*i];j<rlimits[2*i+1];j++) {
buckets[indices[j]]++;
buckets[oldindices[j]]--;
}
for(int j=0;j<count;j++) {
if(buckets[j]!=0) { //if element is missing, determine which split it belongs to
failures++;
if(printfailure&&failures<10) {
if(buckets[j]<0) {
mexPrintf("E%d missing. ", j);
for(int k=llimits[2*i];k<rlimits[2*i+1];k++) {
if(oldindices[k]==j)
mexPrintf("pos %d. ", k);
}
}
if(buckets[j]>0) {
mexPrintf("E%d extra. ", j);
for(int k=llimits[2*i];k<rlimits[2*i+1];k++) {
if(oldindices[k]==j)
mexPrintf("from %d. ", k);
if(indices[k]==j)
mexPrintf("to %d. ", k);
}
}
}
}
}
if(failures!=0) {
failurecount++;
if(failurecount<=20&&printfailure)
mexPrintf("split %d invalid, %d elements do not occur 1 time\n", i, failures);
}
}
if(printfailure)
mexPrintf("Invalid split, %d splits incorrect\n", failurecount);
}
if(outside!=0) {
returnvalue|=2;
if(printfailure)
mexPrintf("Invalid split, %d indices outside interval\n", outside);
mxFree(positions);
mxFree(positions2);
mxFree(indices);
mxFree(oldindices);
mxFree(llimits);
mxFree(rlimits);
mxFree(ysplit);
mxFree(buckets);
mxFree(hsplitpoints);
mxFree(hz);
mxFree(hd);
return returnvalue; //abort here, since the subsequent tests could otherwise cause a segmentation fault
}
//check permutation of positions
failures=0;
for(int j=0;j<count;j++) {
if(originalpositions[indices[j]]!=positions[j]) {
if(failures<10&&printfailure) {
int k=0;
for(;j>llimits[k];k++);
k--;
mexPrintf("element %d (indices=%d) in split %d (%d to %d) (ysplit=%d) not premuted correctly for positions, permuted value: %e, original value: %e\n", j, indices[j], k, llimits[k], rlimits[k], ysplit[k>>1], positions[j], originalpositions[indices[j]]);
}
failures++;
}
}
if(failures==0) {
if(printsuccess)
mexPrintf("Positions permuted correctly\n");
}
else {
returnvalue|=4;
if(printfailure)
mexPrintf("Invalid permutation of positions, %d elements incorrect\n", failures);
}
//check permutation of positions2
failures=0;
for(int j=0;j<count;j++) {
if(originalpositions2[indices[j]]!=positions2[j]) {
if(failures<10&&printfailure) {
int k=0;
for(;j>llimits[k];k++);
k--;
mexPrintf("element %d (indices=%d) in split %d (%d to %d) (ysplit=%d) not premuted correctly for positions 2, permuted value: %e, original value: %e\n", j, indices[j], k, llimits[k], rlimits[k], ysplit[k>>1], positions2[j], originalpositions2[indices[j]]);
}
failures++;
}
}
if(failures==0) {
if(printsuccess)
mexPrintf("Positions2 permuted correctly\n");
}
else {
returnvalue|=8;
if(printfailure)
mexPrintf("Invalid permutation of positions2, %d elements incorrect\n", failures);
}
//check that all elements on left side are smaller than all on right side
failures=0;
int splitpointfailures=0;
for(int k=0;k<splitcount;k++) {
if(rlimits[2*k]>llimits[2*k]) {
if(ysplit[k]) {
max=positions2[llimits[2*k]];
for(int j=llimits[2*k];j<rlimits[2*k];j++) {
if(positions2[j]>max)
max=positions2[j];
}
}
else {
max=positions[llimits[2*k]];
for(int j=llimits[2*k];j<rlimits[2*k];j++) {
if(positions[j]>max)
max=positions[j];
}
}
}
if(rlimits[2*k+1]>llimits[2*k+1]) {
if(ysplit[k]) {
min=positions2[llimits[2*k+1]];
for(int j=llimits[2*k+1];j<rlimits[2*k+1];j++) {
if(positions2[j]<min)
min=positions2[j];
}
}
else {
min=positions[llimits[2*k+1]];
for(int j=llimits[2*k+1];j<rlimits[2*k+1];j++) {
if(positions[j]<min)
min=positions[j];
}
}
}
if(max>min&&rlimits[2*k]>llimits[2*k]&&rlimits[2*k+1]>llimits[2*k+1]) {
if(printfailure&&failures<10)
mexPrintf("split %d: min[%d,%d)=%.16e max[%d,%d)=%.16e\n", k, llimits[2*k+1], rlimits[2*k+1], min, llimits[2*k], rlimits[2*k], max);
failures++;
}
//check that the splitpoint value lies in between the two boxes
if((max>hsplitpoints[k]&&rlimits[2*k]>llimits[2*k])||(min<hsplitpoints[k]&&rlimits[2*k+1]>llimits[2*k+1])) {
if(printfailure&&splitpointfailures<10)
mexPrintf("split %d: min[%d,%d)=%.16e max[%d,%d)=%.16e splitpoint=%16e\n", k, llimits[2*k+1], rlimits[2*k+1], min, llimits[2*k], rlimits[2*k], max, hsplitpoints[k]);
splitpointfailures++;
}
}
//split properly. All elements on the correct side of the split
if(failures==0) {
if(printsuccess)
mexPrintf("Split values ok\n");
}
else {
returnvalue|=8;
if(printfailure)
mexPrintf("Invalid values in vector, split not correct, %d elements incorrect\n", failures);
}
if(splitpointfailures==0) {
if(printsuccess)
mexPrintf("Splitpoints ok\n");
}
else {
returnvalue|=16;
if(printfailure)
mexPrintf("Splitpoints incorrect, %d elements incorrect\n", splitpointfailures);
}
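//finally check that each box (center hz, half-size hd) actually encloses all of its points,
//allowing a small relative tolerance for rounding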
for(int k=0;k<2*splitcount;k++) {
if(rlimits[k]>llimits[k]) {
max=positions[llimits[k]];
max2=positions2[llimits[k]];
min=positions[llimits[k]];
min2=positions2[llimits[k]];
for(int j=llimits[k];j<rlimits[k];j++) {
if(positions[j]>max)
max=positions[j];
if(positions2[j]>max2)
max2=positions2[j];
if(positions[j]<min)
min=positions[j];
if(positions2[j]<min2)
min2=positions2[j];
}
if(creal(hz[k])-creal(hd[k])*1.0000001>min) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, real value %e smaller than box %e\n",k,2*splitcount,min, creal(hz[k])-creal(hd[k]));
}
if(creal(hz[k])+creal(hd[k])*1.0000001<max) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, real value %e larger than box %e\n",k,2*splitcount,max, creal(hz[k])+creal(hd[k]));
}
if(cimag(hz[k])-cimag(hd[k])*1.0000001>min2) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, imag value %e smaller than box %e\n",k,2*splitcount,min2, cimag(hz[k])-cimag(hd[k]));
}
if(cimag(hz[k])+cimag(hd[k])*1.0000001<max2) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, imag value %e larger than box %e\n",k,2*splitcount,max2, cimag(hz[k])+cimag(hd[k]));
}
// mexPrintf("B %d limits: [%.16e %.16e %.16e %.16e ] z0=%.16e + %.16e d0=%.16e + %.16e\n",k,min,max,min2,max2,creal(hz[k]),cimag(hz[k]),creal(hd[k]),cimag(hd[k]));
}
}
mxFree(positions);
mxFree(positions2);
mxFree(indices);
mxFree(oldindices);
mxFree(llimits);
mxFree(rlimits);
mxFree(ysplit);
mxFree(buckets);
mxFree(hsplitpoints);
mxFree(hz);
mxFree(hd);
return returnvalue;
}
/*------------------------------------------------------------------------*/
#endif /* defined(CUDASUPPORT) && defined(CUDASORT) */
//the last two functions are used by fmmsort in the asymmetric case even without CUDASORT,
//since the connectivity is always built on the GPU
#ifdef CUDASUPPORT
//void* pointers are used instead of dcmplx for Visual Studio compatibility: the built-in
//dcmplx class does not work in CUDA, so CUDA files are compiled with a different dcmplx
//implementation than the CPU files
void cudaCreateConnectivity(int *jcptr,int *kcptr,int *ir,
int *oldjcptr,int *oldkcptr,int *oldir,
int count,int maxm2p,
void *z,SORT_REAL *dabs,
SORT_REAL cutoff,int lastlevel,
int *outputvector DEBUGVECTORSTRING)
//wrapper since sort.cpp is not a cuda-file
{
hipLaunchKernelGGL(( cudacreateconnectivity), dim3(imin(MAXCONNECTIVITYBLOCKS,(count+MAXCONNECTIVITYTHREADS-1)/MAXCONNECTIVITYTHREADS)),dim3(MAXCONNECTIVITYTHREADS), 0, 0, jcptr,kcptr,ir,oldjcptr,oldkcptr,oldir,count,maxm2p,(SORT_DCMPLX*)z,dabs,cutoff,lastlevel,outputvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
}
/*------------------------------------------------------------------------*/
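//computes the absolute value of each box half-size d into dabs (used together with cutoff when building the connectivity)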
void calcdabs(const void *d,SORT_REAL *dabs,int count)
{
hipLaunchKernelGGL(( calculatedabs), dim3(imin(MAXCONNECTIVITYBLOCKS,(count+MAXCONNECTIVITYTHREADS-1)/MAXCONNECTIVITYTHREADS)),dim3(MAXCONNECTIVITYTHREADS), 0, 0, (SORT_DCMPLX*)d,dabs,count);
CHECKCUDAERROR
}
/*------------------------------------------------------------------------*/
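//cumsumlist computes a cumulative sum over jcptr (the CSR-style offsets built from the
//per-box counts in oldjcptr/oldkcptr). It is a blocked scan: cumsuminit produces per-block
//partial sums, repeated cumsumpass1 calls combine them upwards in steps of CUMSUMSHIFTSTEP
//bits, and the cumsumpass2 calls propagate the resulting offsets back down.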
void cumsumlist(int* oldjcptr,int* oldkcptr,int* jcptr,size_t count,cudavariables* GPUvars,int evalshift)
{
size_t shift=CUMSUMSHIFTSTEP;
size_t blockcount=(count+CUMSUMTHREADS-1)/CUMSUMTHREADS;
size_t blockcounts[64/CUMSUMSHIFTSTEP+1]; //size=the limit where a 64 bit int would overflow anyway
size_t shiftfactor=1<<CUMSUMSHIFTSTEP;
size_t fullshiftfactor=1<<shift;
size_t i=1;
blockcounts[0]=blockcount;
if(GPUvars->evalonly) {
hipLaunchKernelGGL(( cumsuminitevalonly), dim3(imin(blockcount,CUMSUMMAXBLOCKS)),dim3(CUMSUMTHREADS), 0, 0, oldjcptr,oldkcptr,jcptr,count,blockcount,GPUvars->jxptr,evalshift);
CHECKCUDAERROR
}
else {
hipLaunchKernelGGL(( cumsuminit), dim3(imin(blockcount,CUMSUMMAXBLOCKS)),dim3(CUMSUMTHREADS), 0, 0, oldjcptr,oldkcptr,jcptr,count,blockcount,GPUvars->ixptr,GPUvars->jxptr,evalshift);
CHECKCUDAERROR
}
blockcount=(blockcount+shiftfactor-2)/shiftfactor;
while(blockcount>=1) {
blockcounts[i]=blockcount;
// mexPrintf("blockcount=%d shift=%d shiftfactor=%d fullshiftfactor=%d\n",blockcount,shift,1<<shiftfactor,fullshiftfactor);
hipLaunchKernelGGL(( cumsumpass1), dim3(imin(blockcount,CUMSUMMAXBLOCKS)),dim3(CUMSUMTHREADS), 0, 0, jcptr+fullshiftfactor-1,count-fullshiftfactor+1,blockcount,shift);
CHECKCUDAERROR
// cudasafe(hipDeviceSynchronize(),"hipDeviceSynchronize");
shift+=CUMSUMSHIFTSTEP;
fullshiftfactor=1<<shift;
blockcount=(blockcount+shiftfactor-2)/shiftfactor;
i++;
}
i--;
shift-=CUMSUMSHIFTSTEP*2;
while(i>0) {
fullshiftfactor=1<<shift;
hipLaunchKernelGGL(( cumsumpass2), dim3(imin(blockcounts[i-1],CUMSUMMAXBLOCKS)),dim3(CUMSUMTHREADS), 0, 0, jcptr+fullshiftfactor-1,count-fullshiftfactor+1,blockcounts[i-1],shift/*,debugvector*/);
CHECKCUDAERROR
// cudasafe(hipDeviceSynchronize(),"hipDeviceSynchronize");
shift-=CUMSUMSHIFTSTEP;
i--;
}
}
/*------------------------------------------------------------------------*/
#ifdef SORTLIMIT
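//calculatesortlimits interpolates the sortlimit/distlimit/disttarget parameters linearly
//between their values at the coarsest and finest level, and clamps them to the ranges
//where they have an effect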
void calculatesortlimits(float *distlimit,float *disttarget,float *sortlimit,float* input,float currentlevel,float maxlevel)
{
float tmpdistlimit,tmpdisttarget,tmpsortlimit;
float interppos=currentlevel/(float)maxlevel;
tmpsortlimit=input[0]*(1-interppos)+input[1]*interppos;
tmpdistlimit=input[2]*(1-interppos)+input[3]*interppos;
tmpdisttarget=input[4]*(1-interppos)+input[5]*interppos;
//tmpsortlimit needs to be at least 0; values larger than 1 have no effect but are harmless.
//clamp all variables to [0,1], the range where they actually have an effect.
if(tmpsortlimit<0)
tmpsortlimit=0;
if(tmpdisttarget<0)
tmpdisttarget=0;
if(tmpdisttarget>1)
tmpdisttarget=1;
if(tmpdistlimit<=0) { //split in position centre
tmpdistlimit=0;
tmpsortlimit=2; //disable this for speed issues
}
if(tmpdistlimit>=1)
tmpdistlimit=1;
if(tmpdisttarget>tmpdistlimit) //not reasonable, and not implemented in the code, as sorting could fail otherwise
tmpdisttarget=tmpdistlimit;
*distlimit=tmpdistlimit;
*disttarget=tmpdisttarget;
*sortlimit=tmpsortlimit;
// mexPrintf("interppos=%f distlimit=%f disttarget=%f sortlimit=%f\n",interppos,tmpdistlimit,tmpdisttarget,tmpsortlimit);
}
#endif
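//calculateetalimits interpolates eta linearly between its first- and last-level values; eta is clamped below at 0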
double calculateetalimits(double *eta,double currentlevel,double maxlevel)
{
double ret;
double interppos=currentlevel/maxlevel;
ret=eta[0]*(1-interppos)+eta[1]*interppos;
if(ret<0) //eta should be larger than 0.
ret=0;
return ret;
}
#endif
f40f6a626b499401411b2b0fd6685fb2322273eb.cu | /* cudasort.cu */
/* S. Engblom and A. Goude 2011-10-21 */
#ifndef M_PI
#define M_PI 3.1415926535897932384626433
#endif
#ifndef SWAP
#define SWAP(x,y,tmp) (tmp)=(x);(x)=(y);(y)=(tmp);
#endif
#ifndef C_CODE /*if in C file mode, redefine all mex functions to c functions*/
#include "mex.h"
#include "matrix.h"
#endif
#include "cudaeval.h"
#include "cudasort.h"
__host__ __device__ int imin(int x, int y);
__host__ __device__ int imax(int x, int y);
// include all CUDA kernels
#include "cudasortkernels.h"
#if defined(CUDASUPPORT) && defined(CUDASORT)
// prototypes
int checkpartitioning(const SORT_REAL* originalpositions,
const SORT_REAL *originalpositions2,
SORT_REAL *cudapositions,
SORT_REAL *cudapositions2,
SORT_REAL *splitpoints,
int *cudaindices,int *oldcudaindices,
int count,int *cudallimits,int *cudarlimits,
int splitcount,
int *cudaysplit,
int printsuccess,int printfailure,
SORT_DCMPLX* z,SORT_DCMPLX *d);
void multiblockpartition(SORT_REAL *positions,SORT_REAL *positions2,
int *indices,int *newindices,
SORT_REAL *splitpoints,
int *llimits,int *rlimits,int *newllimits,
int *newrlimits,int *tmpllimits,int *tmprlimits,
int splitcount,int count,
SORT_REAL *newpositions,SORT_REAL *newpositions2,
int *ysplit,int *lrcount,
int *threesplitvector,int *outputvector,int N
SORTLIMITSTRING DEBUGVECTORSTRING);
#ifdef SORTLIMIT
void calculatesortlimits(float *distlimit,float *disttarget,float *sortlimit,float* input,float currentlevel,float maxlevel);
#endif
double calculateetalimits(double *eta,double currentlevel,double maxlevel);
/*------------------------------------------------------------------------*/
//this function copies data for CUDA_perform_partitioning
//put in its own function for timing issues only
void CUDA_copy_vectors(cudavariables *GPUvars,
int N,int NE,
const double* zr,const double *zi,
const double *er,const double *ei)
{
if(NE) {
cudasafeMalloc((void**)&GPUvars->er,NE*sizeof(double));
cudasafeMalloc((void**)&GPUvars->ei,NE*sizeof(double));
cudasafeMalloc((void**)&GPUvars->jx,NE*sizeof(int));
cudasafe( cudaMemcpy(GPUvars->er, er, NE*sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy ertmp" );
cudasafe( cudaMemcpy(GPUvars->ei, ei, NE*sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy eitmp" );
}
else {
GPUvars->er=NULL;
GPUvars->ei=NULL;
}
cudasafeMalloc((void**)&GPUvars->zr,N*sizeof(double));
cudasafeMalloc((void**)&GPUvars->zi,N*sizeof(double));
cudasafeMalloc((void**)&GPUvars->ix,N*sizeof(int));
cudasafe( cudaMemcpy(GPUvars->zr, zr, N*sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy zrtmp" );
cudasafe( cudaMemcpy(GPUvars->zi, zi, N*sizeof(double), cudaMemcpyHostToDevice), "cudaMemcpy zitmp" );
}
/*------------------------------------------------------------------------*/
// this is the main function that performs the partitioning. It allocates
// the GPU variables, which are left on the GPU afterwards.
// GPUvars     variables for the GPU
// N,NE        number of potential/evaluation points
// nlevels     the number of times to split the boxes into four smaller ones
// zr,zi,er,ei coordinates of potential and evaluation points
// eta         for the special split with the eta-criterion
void CUDA_perform_partitioning(cudavariables *GPUvars,
int N,int NE,int nlevels VALIDATEPARTITIONINGSTRING1)
{
SORT_REAL *splitpoints;
int* rlimits;
int* newllimits;
int* newrlimits;
int* tmpllimits;
int* tmprlimits;
int* newindices;
int* ysplit;
int* lrcount;
int* outputvector;
int* splitside;
SORT_REAL* xpositionstmp;
SORT_REAL* ypositionstmp;
SORT_REAL* zdmaxpositions;
SORT_REAL* zdminpositions;
SORT_REAL* zdmaxpositions2;
SORT_REAL* zdminpositions2;
#ifdef SORTLIMIT
SORT_REAL* leftlimitvalues;
SORT_REAL* rightlimitvalues;
float distlimit;
float disttarget;
float sortlimit;
#endif
double eta;
int* zdllimits;
int* zdrlimits;
int* threesplit;
SORT_DCMPLX *ztmp;
SORT_DCMPLX *dtmp;
SORT_DCMPLX *z,*d;
SORT_REAL *dabs;
int* rlimitsNE;
int* newllimitsNE;
int* newrlimitsNE;
int* lrcountNE;
int* newindicesNE;
SORT_REAL* xpositionstmpNE;
SORT_REAL* ypositionstmpNE;
int ptrinitvector[2];
ptrinitvector[0]=0;
int threadcount=ZDMAXTHREADS;
#ifdef FLOATSORT
float *fer=NULL,*fei=NULL,*fzr=NULL,*fzi=NULL;
#else
double *fer,*fei,*fzr,*fzi;
#endif
checkcudaerror("Partitioning start\n");
//init
#ifdef CUDADEBUGVECTOR
cudasafe(cudaMallocDebug((void**)&GPUvars->debugvector,imax((NE<N?N:NE),2000)*sizeof(double)),"cudaMalloc");
cudasafe(cudaMemset(GPUvars->debugvector,0,imax((NE<N?N:NE),2000)*sizeof(double)),"cudaMemset");
#endif
//allocate positions and move them to gpu (moved to function above)
#ifdef FLOATSORT
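//with FLOATSORT the coordinates are copied to single precision (fzr/fzi and fer/fei);
//the partitioning then works on the float copies while the original double arrays are kept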
if(NE) {
cudasafeMalloc((void**)&fer,NE*sizeof(float));
cudasafeMalloc((void**)&fei,NE*sizeof(float));
int blockcountcf=imin((NE+4*CONVERTTOFLOATMAXTHREADS-1)/(4*CONVERTTOFLOATMAXTHREADS),CONVERTTOFLOATMAXBLOCKS);
checkcudaerror("before converttofloatNE\n");
converttofloat<<<blockcountcf,CONVERTTOFLOATMAXTHREADS>>>(fer,fei,GPUvars->er,GPUvars->ei,NE);
CHECKCUDAERROR
checkcudaerror("converttofloatNE\n");
}
cudasafeMalloc((void**)&fzr,N*sizeof(float));
cudasafeMalloc((void**)&fzi,N*sizeof(float));
int blockcountcf=imin((N+4*CONVERTTOFLOATMAXTHREADS-1)/(4*CONVERTTOFLOATMAXTHREADS),CONVERTTOFLOATMAXBLOCKS);
converttofloat<<<blockcountcf,CONVERTTOFLOATMAXTHREADS>>>(fzr,fzi,GPUvars->zr,GPUvars->zi,N);
CHECKCUDAERROR
checkcudaerror("converttofloat\n");
#else
fer=GPUvars->er;
fei=GPUvars->ei;
fzr=GPUvars->zr;
fzi=GPUvars->zi;
#endif
//calculate the number of boxes: Nf = boxes at the finest level (4^nlevels), Nt = total number of boxes in the tree
int Nf=1,Nt=1;
for(int i=1;i<=nlevels;i++) {
Nf<<=2;
Nt+=Nf;
}
if(NE)
cudasafeMalloc((void**)&GPUvars->jxptr, (Nf+1)*sizeof(int));
cudasafeMalloc((void**)&GPUvars->ixptr,(Nf+1)*sizeof(int));
cudasafeMalloc((void**)&GPUvars->z0,Nt*sizeof(SORT_DCMPLX));
cudasafeMalloc((void**)&GPUvars->d0,Nt*sizeof(SORT_DCMPLX));
cudasafeMalloc((void **)&GPUvars->dabs,Nt*sizeof(SORT_REAL));
z=(SORT_DCMPLX*)GPUvars->z0;
d=(SORT_DCMPLX*)GPUvars->d0;
dabs=GPUvars->dabs;
#ifdef CUDATIMESORT
cudaEvent_t start;
cudaEvent_t stop;
cudaEvent_t start2;
cudaEvent_t stop2;
cudasafe(cudaEventCreate(&start),"cudaEventCreate sort1start");
cudasafe(cudaEventCreate(&stop),"cudaEventCreate sort1stop");
cudasafe(cudaEventCreate(&start2),"cudaEventCreate sort1start");
cudasafe(cudaEventCreate(&stop2),"cudaEventCreate sort1stop");
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
float elapsedtime,elapsedtime2;
#endif
//allocate temporary variables. These should be cleaned up afterwards
cudasafe(cudaMallocDebug((void**)&xpositionstmp, N*sizeof(SORT_REAL)), "cudaMalloc xpositionstmp");
cudasafe(cudaMallocDebug((void**)&ypositionstmp, N*sizeof(SORT_REAL)), "cudaMalloc ypositionstmp");
cudasafe(cudaMallocDebug((void**)&newindices, N*sizeof(int)), "cudaMalloc newindices");
cudasafe(cudaMallocDebug((void**)&lrcount, Nf*sizeof(int)), "cudaMalloc lrcount");
cudasafe(cudaMallocDebug((void**)&rlimits, Nf*sizeof(int)), "cudaMalloc rlimits");
cudasafe(cudaMallocDebug((void**)&newllimits, (Nf/2+1)*sizeof(int)), "cudaMalloc newllimits");
cudasafe(cudaMallocDebug((void**)&newrlimits, Nf/2*sizeof(int)), "cudaMalloc newrlimits");
cudasafe(cudaMallocDebug((void**)&tmpllimits, Nf/2*sizeof(int)), "cudaMalloc tmpllimits");
cudasafe(cudaMallocDebug((void**)&tmprlimits, Nf/2*sizeof(int)), "cudaMalloc tmprlimits");
cudasafe(cudaMallocDebug((void**)&threesplit, Nf/2*sizeof(int)), "cudaMalloc threesplit");
#ifdef SORTLIMIT
cudasafe(cudaMallocDebug((void**)&leftlimitvalues, Nf/2*sizeof(double)), "cudaMalloc leftlimitvalues");
cudasafe(cudaMallocDebug((void**)&rightlimitvalues, Nf/2*sizeof(double)), "cudaMalloc rightlimitvalues");
#endif
cudasafe(cudaMallocDebug((void**)&outputvector, 2*sizeof(int)), "cudaMalloc outputvector");
cudasafe(cudaMallocDebug((void**)&ysplit, Nf/2*sizeof(int)), "cudaMalloc ysplit");
cudasafe(cudaMallocDebug((void**)&splitside, Nf/2*sizeof(int)), "cudaMalloc splitside");
cudasafe(cudaMallocDebug((void**)&splitpoints, Nf/2*sizeof(SORT_REAL)), "cudaMalloc cxpositions");
cudasafe(cudaMallocDebug((void**)&zdllimits, MAXBLOCKSZDMAXMULTI*sizeof(int)), "cudaMalloc zdllimits");
cudasafe(cudaMallocDebug((void**)&zdrlimits, MAXBLOCKSZDMAXMULTI*sizeof(int)), "cudaMalloc zdrlimits");
cudasafe(cudaMallocDebug((void**)&zdmaxpositions, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "cudaMalloc zdmaxpositions");
cudasafe(cudaMallocDebug((void**)&zdminpositions, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "cudaMalloc zdminpositions");
cudasafe(cudaMallocDebug((void**)&zdmaxpositions2, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "cudaMalloc zdmaxpositions2");
cudasafe(cudaMallocDebug((void**)&zdminpositions2, MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)), "cudaMalloc zdminpositions2");
cudasafe(cudaMallocDebug((void**)&ztmp, Nf/2*sizeof(SORT_DCMPLX)), "cudaMalloc cxpositions");
cudasafe(cudaMallocDebug((void**)&dtmp, Nf/2*sizeof(SORT_DCMPLX)), "cudaMalloc cxpositions");
if(NE) {
cudasafe(cudaMallocDebug((void**)&xpositionstmpNE, NE*sizeof(SORT_REAL)), "cudaMalloc xpositionstmpNE");
cudasafe(cudaMallocDebug((void**)&ypositionstmpNE, NE*sizeof(SORT_REAL)), "cudaMalloc ypositionstmpNE");
cudasafe(cudaMallocDebug((void**)&newindicesNE, NE*sizeof(int)), "cudaMalloc newindicesNE");
cudasafe(cudaMallocDebug((void**)&newllimitsNE, (Nf/2+1)*sizeof(int)), "cudaMalloc newllimitsNE");
cudasafe(cudaMallocDebug((void**)&newrlimitsNE, Nf/2*sizeof(int)), "cudaMalloc newrlimitsNE");
cudasafe(cudaMallocDebug((void**)&rlimitsNE, Nf*sizeof(int)), "cudaMalloc rlimitsNE");
cudasafe(cudaMallocDebug((void**)&lrcountNE, Nf*sizeof(int)), "cudaMalloc lrcountNE");
ptrinitvector[1]=NE; //make sure the last element is there in case of only 1 level
cudasafe(cudaMemcpy(GPUvars->jxptr,ptrinitvector, 2*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy GPUvars->jxptr");
cudasafe( cudaMemcpy(rlimitsNE, &NE, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy rlimitsNE" );
}
checkcudaerror("allocation\n");
//initialization
ptrinitvector[1]=N; //this is necessary in case only one level is used. Otherwise, it has no point
cudasafe(cudaMemcpy(GPUvars->ixptr,ptrinitvector, 2*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy GPUvars->ixptr");
cudasafe( cudaMemcpy(rlimits, &N, sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy rlimits" );
//useful number of blocks for calls. Could probably be optimized
int blockcount=imin((N+4*threadsperblock-1)/(4*threadsperblock),maxblockcount);
int blockcountNE=imin((NE+4*threadsperblock-1)/(4*threadsperblock),maxblockcount);
int zdblockcount=imin((imax(N,NE)+4*ZDMAXTHREADS-1)/(4*ZDMAXTHREADS),MAXBLOCKSZDMAXMULTI);
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(start2,0),"cudaEventRecord stop2");
#endif
//initialize the index vectors to the identity permutation
checkcudaerror("before initiateindices\n");
initiateindices<<<blockcount,threadsperblock>>>(GPUvars->ix,N);
CHECKCUDAERROR
checkcudaerror("initiateindices\n");
if(NE) {
initiateindices<<<blockcountNE,threadsperblock>>>(GPUvars->jx,NE);
CHECKCUDAERROR
checkcudaerror("initiateindicesNE\n");
}
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop2),"cudaEventRecord fulltimestop");
cudasafe(cudaEventSynchronize(stop2),"cudaEventSynchronize fulltimestop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start2,stop2),"cudaEventElapsedTime");
mexPrintf("Initiateindices, init time: %f\n",elapsedtime/1000);
cudasafe(cudaEventRecord(start2,0),"cudaEventRecord sort1start");
#endif
//determine the size of the base (root) box and check the input for NaN
cudasafe(cudaMemset(outputvector,0,sizeof(int)),"Memset outputvector");
findzdmulti<<<zdblockcount,ZDMAXTHREADS>>>(GPUvars->ixptr,rlimits,fzr,fzi,GPUvars->jxptr,rlimitsNE,fer,fei,zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,1,1,outputvector DEBUGVECTORSTRING2);
CHECKCUDAERROR
checkcudaerror("findzdmulti\n");
int hasnan;
cudasafe(cudaMemcpy(&hasnan,outputvector, sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy coutputvector");
if(hasnan) {
cudaThreadExit();
resetalloccount();
mexErrMsgTxt("NaN detected in input vectors, aborting");
}
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop2),"cudaEventRecord fulltimestop");
cudasafe(cudaEventSynchronize(stop2),"cudaEventSynchronize fulltimestop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start2,stop2),"cudaEventElapsedTime");
mexPrintf("findzdmulti, init time: %f\n",elapsedtime/1000);
cudasafe(cudaEventRecord(start2,0),"cudaEventRecord sort1start");
#endif
findzdmultistep2<<<1,ZDMAXTHREADS>>>(zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,z,d,dabs,1);
CHECKCUDAERROR
checkcudaerror("findzdmultistep2\n");
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop2),"cudaEventRecord fulltimestop");
cudasafe(cudaEventSynchronize(stop2),"cudaEventSynchronize fulltimestop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start2,stop2),"cudaEventElapsedTime");
mexPrintf("findzdmultistep2, init time: %f\n",elapsedtime/1000);
#endif
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop),"cudaEventRecord fulltimestop");
cudasafe(cudaEventSynchronize(stop),"cudaEventSynchronize fulltimestop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start,stop),"cudaEventElapsedTime");
mexPrintf("Partition, init time: %f\n",elapsedtime/1000);
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
#endif
//the main partitioning loop: each iteration splits every box twice, so the number of boxes grows by a factor of four per level
for(int i=0,Nb=1;i<nlevels;i++) {
// mexPrintf("partitioning step %d of %d\n",i,nlevels);
#ifdef SORTLIMIT
calculatesortlimits(&distlimit,&disttarget,&sortlimit,GPUvars->sortlimits,i,nlevels-0.5);
#endif
eta=calculateetalimits(GPUvars->eta,i,nlevels-0.5);
while(threadcount>imax(N,NE)/Nb&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
//setup
int singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
checkcudaerror("before setuppartition\n");
setuppartition<<<imin(singleblockwork,SINGLETHREADMAXTHREADS),SINGLETHREADTHREADCOUNT>>>(z,d,splitpoints,ysplit,1,Nb SORTLIMITCALLINGSTRING2);
CHECKCUDAERROR
cudasafe(cudaMemset(lrcount,0, Nb*2*sizeof(int)), "cudaMemset lrcount");
cudasafe(cudaMemset(threesplit,0, Nb*sizeof(int)), "cudaMemset threesplit");
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(start2,0),"cudaEventRecord sort1start");
#endif
//make first partitioning
checkcudaerror("before multiblockpartition\n");
multiblockpartition(fzr,fzi,GPUvars->ix,newindices,splitpoints,GPUvars->ixptr,rlimits,newllimits,newrlimits,tmpllimits,tmprlimits,Nb,N,xpositionstmp,ypositionstmp,ysplit,lrcount,threesplit,outputvector,N SORTLIMITCALLINGSTRING DEBUGVECTORSTRING2/*,zr,zi*/);
if(eta<1) { //if eta<1, make one additional split
setupetasplit<<<imin(singleblockwork,SINGLETHREADMAXTHREADS),SINGLETHREADTHREADCOUNT>>>(z,splitpoints,ysplit,eta,Nb,splitside,newllimits,newrlimits,tmpllimits,tmprlimits);
CHECKCUDAERROR
cudasafe(cudaMemset(lrcount,0, Nb*2*sizeof(int)), "cudaMemset lrcount");
//make partitioning, and copy data back to original array
if(blockcount>Nb) { //single or multiblock mode?
partitionsplit<<<blockcount,threadsperblock>>>(xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,tmpllimits,tmprlimits,lrcount,Nb,fzr,fzi,ysplit,NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
splitetacopymultithread<<<blockcount,threadsperblock>>>(tmpllimits,tmprlimits,fzr,fzi,GPUvars->ix,xpositionstmp,ypositionstmp,newindices,Nb);
CHECKCUDAERROR
}
else {
partitionsplitsinglethread<<<blockcount,threadsperblock>>>(xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,tmpllimits,tmprlimits,lrcount,Nb,fzr,fzi,ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
splitetacopysinglethread<<<blockcount,threadsperblock>>>(tmpllimits,tmprlimits,fzr,fzi,GPUvars->ix,xpositionstmp,ypositionstmp,newindices,Nb);
CHECKCUDAERROR
}
correctetalimits<<<imin(singleblockwork,SINGLETHREADMAXTHREADS),SINGLETHREADTHREADCOUNT>>>(newllimits,newrlimits,splitside,lrcount,Nb);
CHECKCUDAERROR
}
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop2),"cudaEventRecord stop2");
#endif
if(NE) { //if evaluation points, split them
if(blockcountNE>Nb) { //single or multi block mode
cudasafe(cudaMemset(lrcountNE, 0, Nb*2*sizeof(int)), "cudaMemset lrcount");
partitionsplit<<<blockcountNE, threadsperblock>>>(fer, fei, GPUvars->jx, newindicesNE, splitpoints, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb, xpositionstmpNE, ypositionstmpNE, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
else {
partitionsplitsinglethread<<<blockcountNE, threadsperblock>>>(fer, fei, GPUvars->jx, newindicesNE, splitpoints, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb, xpositionstmpNE, ypositionstmpNE, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
setNElimits<<<imin(singleblockwork, SINGLETHREADMAXTHREADS), SINGLETHREADTHREADCOUNT>>>(GPUvars->jxptr, rlimitsNE, newllimitsNE, newrlimitsNE, lrcountNE, Nb);
CHECKCUDAERROR
}
//in the middle step, only approximate z and d, since these values will not be used anymore (except for the eta split, and as starting values in the next partitioning)
approximatezd<<<imin(singleblockwork,SINGLETHREADMAXTHREADS),SINGLETHREADTHREADCOUNT>>>(z,d,splitpoints,ztmp,dtmp,ysplit,Nb);
CHECKCUDAERROR
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop),"cudaEventRecord stop 287");
cudasafe(cudaEventSynchronize(stop),"cudaEventSynchronize stop");
cudasafe(cudaEventSynchronize(stop2),"cudaEventSynchronize stop2");
cudasafe(cudaEventElapsedTime(&elapsedtime,start,stop),"cudaEventElapsedTime");
cudasafe(cudaEventElapsedTime(&elapsedtime2,start2,stop2),"cudaEventElapsedTime");
mexPrintf("Partition, loop %d: %f partitiontime: %f\n",i,elapsedtime/1000,elapsedtime2/1000);
#endif
#ifdef VALIDATEPARTITIONING
checkpartitioning(zr,zi,xpositionstmp,ypositionstmp,splitpoints,newindices,GPUvars->ix,N,newllimits,newrlimits,Nb,ysplit,0,1,ztmp,dtmp);
#endif
z+=Nb;
d+=Nb;
dabs+=Nb;
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
#endif
Nb<<=1;
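//second split of this level: each half-box from the first split is cut once more, completing the four children per parent box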
#ifdef SORTLIMIT
calculatesortlimits(&distlimit,&disttarget,&sortlimit,GPUvars->sortlimits,i+0.5,nlevels-0.5);
#endif
eta=calculateetalimits(GPUvars->eta,i+0.5,nlevels-0.5);
singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
cudasafe(cudaMemset(lrcount,0, Nb*2*sizeof(int)), "cudaMemset lrcount");
cudasafe(cudaMemset(threesplit,0, Nb*sizeof(int)), "cudaMemset threesplit");
setuppartition<<<imin(singleblockwork,SINGLETHREADMAXTHREADS),SINGLETHREADTHREADCOUNT>>>(ztmp,dtmp,splitpoints,ysplit,0,Nb SORTLIMITCALLINGSTRING2);
CHECKCUDAERROR
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(start2,0),"cudaEventRecord sort1start");
#endif
//second partitioning on this level
checkcudaerror("before second multiblockpartition\n");
multiblockpartition(xpositionstmp,ypositionstmp,newindices,GPUvars->ix,splitpoints,newllimits,newrlimits,GPUvars->ixptr,rlimits,tmpllimits,tmprlimits,Nb,N,fzr,fzi,ysplit,lrcount,threesplit,outputvector,N SORTLIMITCALLINGSTRING DEBUGVECTORSTRING2/*,zr,zi*/);
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop2),"cudaEventRecord fulltimestop");
#endif
if(eta<1) { //eta split again
setupetasplit<<<imin(singleblockwork, SINGLETHREADMAXTHREADS), SINGLETHREADTHREADCOUNT>>>(ztmp, splitpoints, ysplit, eta, Nb, splitside, GPUvars->ixptr, rlimits, tmpllimits, tmprlimits);
CHECKCUDAERROR
cudasafe(cudaMemset(lrcount, 0, Nb*2*sizeof(int)), "cudaMemset lrcount");
if(blockcount>Nb) {
partitionsplit<<<blockcount, threadsperblock>>>(fzr, fzi, GPUvars->ix, newindices, splitpoints, tmpllimits, tmprlimits, lrcount, Nb, xpositionstmp, ypositionstmp, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
splitetacopymultithread<<<blockcount, threadsperblock>>>(tmpllimits, tmprlimits, xpositionstmp, ypositionstmp, newindices, fzr, fzi, GPUvars->ix, Nb);
CHECKCUDAERROR
}
else {
partitionsplitsinglethread<<<blockcount, threadsperblock>>>(fzr, fzi, GPUvars->ix, newindices, splitpoints, tmpllimits, tmprlimits, lrcount, Nb, xpositionstmp, ypositionstmp, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
splitetacopysinglethread<<<blockcount, threadsperblock>>>(tmpllimits, tmprlimits, xpositionstmp, ypositionstmp, newindices, fzr, fzi, GPUvars->ix, Nb);
CHECKCUDAERROR
}
correctetalimits<<<imin(singleblockwork, SINGLETHREADMAXTHREADS), SINGLETHREADTHREADCOUNT>>>(GPUvars->ixptr, rlimits, splitside, lrcount, Nb);
CHECKCUDAERROR
}
if(NE) { //evaluation point split
if(blockcountNE>Nb) {
cudasafe(cudaMemset(lrcountNE, 0, Nb*2*sizeof(int)), "cudaMemset lrcount");
partitionsplit<<<blockcountNE, threadsperblock>>>(xpositionstmpNE, ypositionstmpNE, newindicesNE, GPUvars->jx, splitpoints, newllimitsNE, newrlimitsNE, lrcountNE, Nb, fer, fei, ysplit, NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
else {
partitionsplitsinglethread<<<blockcountNE, threadsperblock>>>(xpositionstmpNE, ypositionstmpNE, newindicesNE, GPUvars->jx, splitpoints, newllimitsNE, newrlimitsNE, lrcountNE, Nb, fer, fei, ysplit DEBUGVECTORSTRING2);
CHECKCUDAERROR
}
setNElimits<<<imin(singleblockwork, SINGLETHREADMAXTHREADS), SINGLETHREADTHREADCOUNT>>>(newllimitsNE, newrlimitsNE, GPUvars->jxptr, rlimitsNE, lrcountNE, Nb);
CHECKCUDAERROR
}
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop),"cudaEventRecord stop 350");
cudasafe(cudaEventSynchronize(stop),"cudaEventSynchronize stop");
cudasafe(cudaEventSynchronize(stop2),"cudaEventSynchronize stop2");
cudasafe(cudaEventElapsedTime(&elapsedtime,start,stop),"cudaEventElapsedTime");
cudasafe(cudaEventElapsedTime(&elapsedtime2,start2,stop2),"cudaEventElapsedTime");
mexPrintf("Partition, loop %d: %f partitiontime: %f\n",i,elapsedtime/1000,elapsedtime2/1000);
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
#endif
Nb<<=1;
singleblockwork=(Nb+SINGLETHREADTHREADCOUNT-1)/SINGLETHREADTHREADCOUNT;
//now, this will be an actual fmm level. Here, calculate the real values of z and d to be used by the theta-criterion etc.
if(Nb>zdblockcount) { //single or multi block mode
findzd<<<imin(MAXBLOCKSZDMAXMULTI, Nb), threadcount,threadcount*4*sizeof(SORT_REAL)>>>(GPUvars->ixptr, rlimits, fzr, fzi, GPUvars->jxptr, rlimitsNE, fer, fei, z, d, dabs, Nb);
CHECKCUDAERROR
}
else {
// mexPrintf("findzdmulti\n");
// SORT_REAL *hzdmaxpositions=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdminpositions=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdmaxpositions2=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// SORT_REAL *hzdminpositions2=(SORT_REAL*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL));
// int *hzdllimits=(int*)mxMalloc(Nb*sizeof(int));
// int *hzdrlimits=(int*)mxMalloc(Nb*sizeof(int));
// SORT_DCMPLX *hd0=(SORT_DCMPLX*)mxMalloc(Nb*sizeof(SORT_DCMPLX));
// SORT_DCMPLX *hz0=(SORT_DCMPLX*)mxMalloc(Nb*sizeof(SORT_DCMPLX));
// double* hdebugvector=(double*)mxMalloc(MAXBLOCKSZDMAXMULTI*sizeof(double));
// cudasafe(cudaMemset(GPUvars->debugvector,0,MAXBLOCKSZDMAXMULTI*sizeof(double)),"cudaMeMset");
// cudasafe(cudaMemset(zdmaxpositions,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"cudaMemset");
// cudasafe(cudaMemset(zdminpositions,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"cudaMemset");
// cudasafe(cudaMemset(zdmaxpositions2,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"cudaMemset");
// cudasafe(cudaMemset(zdminpositions2,0,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL)),"cudaMemset");
findzdmulti<<<zdblockcount,ZDMAXTHREADS>>>(GPUvars->ixptr,rlimits,fzr,fzi,GPUvars->jxptr,rlimitsNE,fer,fei,zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,Nb,0,NULL DEBUGVECTORSTRING2);
CHECKCUDAERROR
// cudasafe(cudaMemcpy(hzdmaxpositions,zdmaxpositions,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hzdminpositions,zdminpositions,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hzdmaxpositions2,zdmaxpositions2,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hzdminpositions2,zdminpositions2,MAXBLOCKSZDMAXMULTI*sizeof(SORT_REAL),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hdebugvector,GPUvars->debugvector,MAXBLOCKSZDMAXMULTI*sizeof(double),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hzdllimits,zdllimits,Nb*sizeof(int),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hzdrlimits,zdrlimits,Nb*sizeof(int),cudaMemcpyDeviceToHost),"Memcpy");
findzdmultistep2<<<Nb,ZDMAXTHREADS>>>(zdllimits,zdrlimits,zdmaxpositions,zdmaxpositions2,zdminpositions,zdminpositions2,z,d,dabs,Nb);
CHECKCUDAERROR
// cudasafe(cudaMemcpy(hd0,d,Nb*sizeof(SORT_DCMPLX),cudaMemcpyDeviceToHost),"Memcpy");
// cudasafe(cudaMemcpy(hz0,z,Nb*sizeof(SORT_DCMPLX),cudaMemcpyDeviceToHost),"Memcpy");
// if(Nb==1024) {
// for(int k=0;k<zdblockcount;k++)
// mexPrintf("limits[%d]: %14e %14e %14e %14e, dv=%e\n",k,hzdminpositions[k],hzdmaxpositions[k],hzdminpositions2[k],hzdmaxpositions2[k],hdebugvector[k]);
// for(int k=0;k<Nb;k++)
// mexPrintf("inner size[%d]: [%d %d) z0=%e+%ei d0=%e+%ei\n",k,hzdllimits[k],hzdrlimits[k],creal(hz0[k]),cimag(hz0[k]),creal(hd0[k]),cimag(hd0[k]));
// }
// mexPrintf("zdblockcount=%d\n",zdblockcount);
// mxFree(hzdmaxpositions);
// mxFree(hzdminpositions);
// mxFree(hzdmaxpositions2);
// mxFree(hzdminpositions2);
// mxFree(hzdllimits);
// mxFree(hzdrlimits);
// mxFree(hd0);
// mxFree(hz0);
// mxFree(hdebugvector);
}
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(stop),"cudaEventRecord stop 370");
cudasafe(cudaEventSynchronize(stop),"cudaEventSynchronize stop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start,stop),"cudaEventElapsedTime");
mexPrintf("find zd, loop %d: %f\n",i,elapsedtime/1000);
#endif
#ifdef VALIDATEPARTITIONING
checkpartitioning(zr,zi,fzr,fzi,splitpoints,GPUvars->ix,newindices,N,GPUvars->ixptr,rlimits,Nb/2,ysplit,0,1,z,d);
#endif
#ifdef CUDATIMESORT
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
#endif
}
//cleanup
#ifdef CUDATIMESORT
cudasafe(cudaEventDestroy(stop),"cudaEventDestroy");
cudasafe(cudaEventDestroy(start),"cudaEventDestroy");
cudasafe(cudaEventDestroy(stop2),"cudaEventDestroy");
cudasafe(cudaEventDestroy(start2),"cudaEventDestroy");
#endif
cudaFreeDebug(xpositionstmp);
cudaFreeDebug(ypositionstmp);
cudaFreeDebug(newindices);
cudaFreeDebug(rlimits);
cudaFreeDebug(newllimits);
cudaFreeDebug(newrlimits);
cudaFreeDebug(tmpllimits);
cudaFreeDebug(tmprlimits);
cudaFreeDebug(threesplit);
cudaFreeDebug(outputvector);
cudaFreeDebug(ysplit);
cudaFreeDebug(splitside);
cudaFreeDebug(lrcount);
cudaFreeDebug(splitpoints);
cudaFreeDebug(zdllimits);
cudaFreeDebug(zdrlimits);
cudaFreeDebug(zdmaxpositions);
cudaFreeDebug(zdminpositions);
cudaFreeDebug(zdmaxpositions2);
cudaFreeDebug(zdminpositions2);
cudaFreeDebug(ztmp);
cudaFreeDebug(dtmp);
#ifdef FLOATSORT
cudaFreeDebug(fzr);
cudaFreeDebug(fzi);
#endif
#ifdef SORTLIMIT
cudaFreeDebug(leftlimitvalues);
cudaFreeDebug(rightlimitvalues);
#endif
if(NE) {
cudaFreeDebug(xpositionstmpNE);
cudaFreeDebug(ypositionstmpNE);
cudaFreeDebug(newindicesNE);
cudaFreeDebug(newllimitsNE);
cudaFreeDebug(newrlimitsNE);
cudaFreeDebug(rlimitsNE);
cudaFreeDebug(lrcountNE);
#ifdef FLOATSORT
cudaFreeDebug(fer);
cudaFreeDebug(fei);
#endif
}
}
/*------------------------------------------------------------------------*/
#ifdef CHECKPARTITIONING
#include "cudasortdebugdefs.h"
#endif
/*------------------------------------------------------------------------*/
//multiblockpartition is the equivalent of singleblockpartition, but uses multiple blocks per partitioning. Useful early on, when there are only a few boxes
//positions,positions2 is positions in x and y direction
//indices,newindices is input and output values for the permutation array
//splitpoints is where the split has been performed
//llimits,rlimits indicates where the array to be split are
//newllimits,newrlimits output for llimits/rlimits
//tmpllimits,tmprlimits temporary storage for limits during the algorithm
//splitcount number of partitions to be performed
//count number of elements
//newpositions,newpositions2 output values for positions,positions2
//ysplit split with respect to y or x coordinates
//lrcount for a split, number of elements on each side
//threesplitvector indicates if threesplit mode should be used for the box
//outputvector vector on GPU to communicate results from split
//debugvector debug purposes only
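//Example (illustrative values only): with splitcount=2, llimits={0,500} and rlimits={500,1000},
//boxes [0,500) and [500,1000) are each partitioned around their splitpoint; on output,
//newllimits/newrlimits describe 2*splitcount intervals, children 2*k and 2*k+1 of input box k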
void multiblockpartition(SORT_REAL* positions,SORT_REAL *positions2,int* indices,int* newindices,SORT_REAL *splitpoints,int *llimits,int* rlimits,int *newllimits,int* newrlimits,int* tmpllimits,int* tmprlimits,int splitcount,int count,SORT_REAL* newpositions,SORT_REAL *newpositions2,int* ysplit,int* lrcount,int* threesplitvector,int* outputvector,int N SORTLIMITSTRING DEBUGVECTORSTRING)
{
int *itmp,i;
checkcudaerror("start multiblockpartition\n");
#ifdef CHECKPARTITIONING
static int printcount=0;
SORT_REAL outsplitpoints[4];
#endif
SORT_REAL *ctmp;
int outputvectorlocal[2];
int threadcount;
int blockcount=imin((count+4*threadsperblock-1)/(4*threadsperblock),maxblockcount); //determine a reasonable number of blocks to use
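//e.g. (illustrative): count=100000 and threadsperblock=256 give (100000+1023)/1024 = 98 blocks,
//which is then capped at maxblockcount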
if(splitcount>blockcount) { //if more splits than blocks, use singleblockpartition instead
#ifdef CUDATIMESORT
cudaEvent_t start;
cudaEvent_t stop;
cudasafe(cudaEventCreate(&start), "cudaEventCreate start");
cudasafe(cudaEventCreate(&stop), "cudaEventCreate stop");
cudasafe(cudaEventRecord(start, 0), "cudaEventRecord start");
#endif
threadcount=threadsperblock;
while(threadcount>N/splitcount&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
// int* hllimits=(int*)mxMalloc((splitcount+1)*sizeof(int));
// int* hrlimits=(int*)mxMalloc(splitcount*sizeof(int));
// int* hllimitsnew=(int*)mxMalloc((splitcount+1)*sizeof(int));
// int* hrlimitsnew=(int*)mxMalloc(splitcount*sizeof(int));
// cudasafe( cudaMemcpy(hllimits, llimits, splitcount*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(hrlimits, rlimits, splitcount*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// // for(int k=0;k<splitcount;k++)
// // mexPrintf("limits[%d]=[%d %d)\n",k,hllimits[k],hrlimits[k]);
// for(int k=0;k<splitcount;k++) {
// if(hrlimits[k]<hllimits[k]||hllimits[k]<0||hrlimits[k]>N)
// mexPrintf("ERROR: limits[%d]=[%d %d)\n",k,hllimits[k],hrlimits[k]);
// if(k<splitcount-1&&hrlimits[k]!=hllimits[k+1])
// mexPrintf("ERROR: not connected %d\n",k);
// }
// double *positionsbak,*positions2bak,*splitpointsbak;
// int* indicesbak;
// cudasafeMalloc((void **)&positionsbak,N*sizeof(double));
// cudasafeMalloc((void **)&positions2bak,N*sizeof(double));
// cudasafeMalloc((void **)&indicesbak,N*sizeof(int));
// cudasafeMalloc((void **)&splitpointsbak,splitcount*sizeof(int));
// cudasafe( cudaMemcpy(positionsbak, positions, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(positions2bak, positions2, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(indicesbak, indices, N*sizeof(int), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(splitpointsbak, splitpoints, splitcount*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// double *hdebugvector=(double*)mxMalloc(imax(2000,splitcount)*sizeof(double));
// mexPrintf("distlimit=%f disttarget=%f\n",distlimit,disttarget);
// for(int k=0;k<splitcount;k++) {
// for(int m=0;m<splitcount;m++) {
// if(m==k) {
// hllimitsnew[m]=hllimits[k];
// hrlimitsnew[m]=hrlimits[k];
// }
// else {
// hllimitsnew[m]=0;
// hrlimitsnew[m]=0;
// }
// }
// cudasafe( cudaMemset(debugvector,0,imax(2000,splitcount)*sizeof(double)),"dd");
// cudasafe( cudaMemcpy(llimits, hllimitsnew, splitcount*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(rlimits, hrlimitsnew, splitcount*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy outputvector" );
// mexPrintf("testing box %d limits = [%d,%d) (%d elements) maxblockcount=%d splitcount=%d\n",k,hllimits[k],hrlimits[k],hrlimits[k]-hllimits[k],maxblockcount,splitcount);
// mexEvalString("drawnow");
// // singleblockpartition<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
// singleblockpartitiondebug<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( cudaMemcpy(positions, positionsbak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(positions2, positions2bak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(indices, indicesbak, N*sizeof(int), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// CHECKCUDAERROR
// cudasafe( cudaMemcpy(hdebugvector, debugvector, imax(2000,splitcount)*sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// for(int m=0;m<imax(2000,splitcount);m++)
// if(hdebugvector[m]!=0.0)
// mexPrintf("debugvector[%d]=%e\n",m,hdebugvector[m]);
// mexPrintf("test2\n");
// mexEvalString("drawnow");
//
// // if(k==0)
//
// singleblockpartition<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvalues,rightlimitvalues,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( cudaMemcpy(positions, positionsbak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(positions2, positions2bak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(indices, indicesbak, N*sizeof(int), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// CHECKCUDAERROR
// mexPrintf("test3\n");
// mexEvalString("drawnow");
// singleblockpartition<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( cudaMemcpy(positions, positionsbak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(positions2, positions2bak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(indices, indicesbak, N*sizeof(int), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// CHECKCUDAERROR
//
// }
// cudasafe( cudaMemcpy(llimits, hllimits, splitcount*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(rlimits, hrlimits, splitcount*sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy outputvector" );
//
// mxFree(hllimits);
// mxFree(hrlimits);
// mxFree(hllimitsnew);
// mxFree(hrlimitsnew);
// mxFree(hdebugvector);
// double* leftlimits=(double*)mxMalloc(splitcount*sizeof(double));
// double* rightlimits=(double*)mxMalloc(splitcount*sizeof(double));
// double* hsplitpoints=(double*)mxMalloc(splitcount*sizeof(double));
// cudasafe( cudaMemcpy(leftlimits, leftlimitvaluesdebug, splitcount*sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(rightlimits, rightlimitvaluesdebug, splitcount*sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(hsplitpoints, splitpoints, splitcount*sizeof(double), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// // for(int k=0;k<splitcount;k++)
// // mexPrintf("sortlimits[%d]=[%e %e %e)\n",k,leftlimits[k],hsplitpoints[k],rightlimits[k]);
// for(int k=0;k<splitcount;k++) {
// if(hsplitpoints[k]<leftlimits[k]||hsplitpoints[k]>rightlimits[k])
// mexPrintf("ERROR sortlimits[%d]=[%e %e %e)\n",k,leftlimits[k],hsplitpoints[k],rightlimits[k]);
// }
// mxFree(leftlimits);
// mxFree(rightlimits);
// mxFree(hsplitpoints);
// // int* hindices=(int*)mxMalloc(N*sizeof(int));
// // cudasafe( cudaMemcpy(hindices, indices, N*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
// // for(int k=0;k<N;k++)
// // if(hindices[k]<0||hindices[k]>=N)
// // mexPrintf("Error index[%d]=%d\n",k,hindices[k]);
// //
// mexPrintf(".");
// mexEvalString("drawnow");
// singleblockpartition<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 ,leftlimitvaluesdebug,rightlimitvaluesdebug,distlimit,disttarget DEBUGVECTORSTRING3);
// cudasafe( cudaMemcpy(positions, positionsbak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(positions2, positions2bak, N*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(indices, indicesbak, N*sizeof(int), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// cudasafe( cudaMemcpy(splitpoints, splitpointsbak, splitcount*sizeof(double), cudaMemcpyDeviceToDevice), "cudaMemcpy outputvector" );
// CHECKCUDAERROR
// mexPrintf(",");
// mexEvalString("drawnow");
singleblockpartition<<<imin(splitcount, maxblockcount), threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions, positions2, indices, newindices, splitpoints, llimits, rlimits, newllimits, newrlimits, NULL, NULL, splitcount, newpositions, newpositions2, ysplit, 0 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
// mexPrintf(":");
// mexEvalString("drawnow");
// cudaFreeDebug(positionsbak);
// cudaFreeDebug(positions2bak);
// cudaFreeDebug(indicesbak);
// cudaFreeDebug(splitpointsbak);
#ifdef CUDATIMESORT
float elapsedtime;
cudasafe(cudaEventRecord(stop), "cudaEventRecord stop 722");
cudasafe(cudaEventSynchronize(stop), "cudaEventSynchronize stop");
cudasafe(cudaEventElapsedTime(&elapsedtime, start, stop), "cudaEventElapsedTime");
mexPrintf("singleblockpartition (%d blocks): %f\n", imin(splitcount, maxblockcount), elapsedtime/1000);
cudasafe(cudaEventDestroy(stop), "cudaEventDestroy");
cudasafe(cudaEventDestroy(start), "cudaEventDestroy");
#endif
return;
}
//now start the real multiblock partition
blockcount=imax(blockcount,splitcount); //at least as many blocks as splits (if splitcount exceeded blockcount, singleblockpartition was already used above)
cudasafe(cudaMemset(lrcount,0, 2*splitcount*sizeof(int)), "cudaMemset lrcount");
cudasafe(cudaMemset(threesplitvector,0, splitcount*sizeof(int)), "cudaMemset threesplitvector");
//initial split using the given value in splitpoints (center of box)
partitionsplit<<<blockcount,threadsperblock>>>(positions,positions2,indices,newindices,splitpoints,llimits,rlimits,lrcount,splitcount,newpositions,newpositions2,ysplit,threesplitvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
checkcudaerror("partitionsplit\n");
SWAP(indices,newindices,itmp);
SWAP(positions,newpositions,ctmp);
SWAP(positions2,newpositions2,ctmp);
cudasafe(cudaMemset(outputvector,0, 2*sizeof(int)), "cudaMemset outputvector");
//complete the split and set up the next one
#ifdef MEDIAN_OF_32
preparesplit32<<<splitcount,PREPARESPLIT32THREADCOUNT>>>(positions, positions2,indices,splitpoints,llimits,rlimits,tmpllimits,tmprlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#else
preparesplit<<<(splitcount+PREPARESPLITTHREADCOUNT-1)/PREPARESPLITTHREADCOUNT,PREPARESPLITTHREADCOUNT>>>(positions, positions2,indices,splitpoints,llimits,rlimits,tmpllimits,tmprlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#endif
cudasafe( cudaMemcpy(outputvectorlocal, outputvector, 2*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
i=0;
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING1
CHECKPARTITIONINGSTRINGSORT1
#endif
//outputvectorlocal[0] contains the maximum number of elements in the coming split.
//If this is small enough, move to singleblockpartition.
//If outputvectorlocal[1]!=0, some split did not move any elements at all; move to
//singleblockpartition in that case as well, since it handles this situation more properly
while(outputvectorlocal[0]>SPLITSHIFT&&outputvectorlocal[1]==0||i%2==1) { //always make an even number of splits from this point. This is to keep the results in the correct vector when moving to singleblockpartition
cudasafe(cudaMemset(lrcount,0, 2*splitcount*sizeof(int)), "cudaMemset lrcount");
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING2
#endif
//partitioning
partitionsplit<<<blockcount,threadsperblock>>>(positions,positions2,indices,newindices,splitpoints,tmpllimits,tmprlimits,lrcount,splitcount,newpositions,newpositions2,ysplit,threesplitvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING3
#endif
SWAP(indices,newindices,itmp);
SWAP(positions,newpositions,ctmp);
SWAP(positions2,newpositions2,ctmp);
cudasafe(cudaMemset(outputvector,0, 2*sizeof(int)), "cudaMemset outputvector");
//set up next one
#ifdef MEDIAN_OF_32
preparesplit32<<<splitcount,PREPARESPLIT32THREADCOUNT>>>(positions, positions2,indices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#else
preparesplit<<<(splitcount+PREPARESPLITTHREADCOUNT-1)/PREPARESPLITTHREADCOUNT,PREPARESPLITTHREADCOUNT>>>(positions, positions2,indices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,lrcount,splitcount,ysplit,threesplitvector,outputvector SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#endif
SWAP(newllimits,tmpllimits,itmp);
SWAP(newrlimits,tmprlimits,itmp);
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING4
CHECKPARTITIONINGSTRINGSORT1
#endif
if(i%2==0) {
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING5
#endif
//in this direction, copy the elements outside the split region to the output vectors
splitoutsidecopy<<<blockcount,threadsperblock>>>(newllimits,newrlimits,tmpllimits,tmprlimits,positions,positions2,indices,newpositions,newpositions2,newindices,splitcount DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING6
#endif
}
cudasafe( cudaMemcpy(outputvectorlocal, outputvector, 2*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy outputvector" );
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING7
#endif
i++;
}
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING8
#endif
#ifdef CUDATIMESORT
cudaEvent_t start;
cudaEvent_t stop;
cudasafe(cudaEventCreate(&start),"cudaEventCreate sort1start");
cudasafe(cudaEventCreate(&stop),"cudaEventCreate sort1stop");
cudasafe(cudaEventRecord(start,0),"cudaEventRecord sort1start");
#endif
//finish by using singleblockpartition
threadcount=threadsperblock;
while(threadcount>N/splitcount&&threadcount>32)
threadcount>>=1;
if(threadcount<32) threadcount=32; //should not happen
singleblockpartition<<<imin(splitcount,maxblockcount),threadcount,INDEXCACHELENGTH*threadcount*(2*sizeof(int)+sizeof(SORT_REAL))>>>(positions,positions2,indices,newindices,splitpoints,tmpllimits,tmprlimits,newllimits,newrlimits,llimits,rlimits,splitcount,newpositions,newpositions2,ysplit,1 SORTLIMITCALLINGSTRING DEBUGVECTORSTRING3);
CHECKCUDAERROR
#ifdef CUDATIMESORT
float elapsedtime;
cudasafe(cudaEventRecord(stop),"cudaEventRecord fulltimestop");
cudasafe(cudaEventSynchronize(stop),"cudaEventSynchronize fulltimestop");
cudasafe(cudaEventElapsedTime(&elapsedtime,start,stop),"cudaEventElapsedTime");
mexPrintf("singleblockpartition (%d blocks): (%d previous loops) %f\n",imin(splitcount,maxblockcount),i,elapsedtime/1000);
cudasafe(cudaEventDestroy(stop),"cudaEventDestroy");
cudasafe(cudaEventDestroy(start),"cudaEventDestroy");
#endif
#ifdef CHECKPARTITIONING
CHECKPARTITIONINGSTRING9
#endif
}
/*------------------------------------------------------------------------*/
//debug function, checks if partitioning is valid or not
int checkpartitioning(const SORT_REAL* originalpositions,const SORT_REAL *originalpositions2,SORT_REAL *cudapositions,SORT_REAL *cudapositions2,SORT_REAL *splitpoints,int* cudaindices,int* oldcudaindices,int count,int* cudallimits,int* cudarlimits,int splitcount,int* cudaysplit,int printsuccess,int printfailure,SORT_DCMPLX* z,SORT_DCMPLX *d)
{
SORT_REAL *positions=(SORT_REAL*)mxMalloc(count*sizeof(SORT_REAL));
SORT_REAL *positions2=(SORT_REAL*)mxMalloc(count*sizeof(SORT_REAL));
int *indices=(int*)mxMalloc(count*sizeof(int));
int *oldindices=(int*)mxMalloc(count*sizeof(int));
int *llimits=(int*)mxMalloc(2*splitcount*sizeof(int));
int *rlimits=(int*)mxMalloc(2*splitcount*sizeof(int));
int *ysplit=(int*)mxMalloc(splitcount*sizeof(int));
int* buckets=(int*)mxCalloc(count, sizeof(int));
SORT_DCMPLX *hz=(SORT_DCMPLX*)mxMalloc(2*splitcount*sizeof(SORT_DCMPLX));
SORT_DCMPLX *hd=(SORT_DCMPLX*)mxMalloc(2*splitcount*sizeof(SORT_DCMPLX));
SORT_REAL* hsplitpoints=(SORT_REAL*)mxMalloc(splitcount*sizeof(SORT_REAL));
cudasafe( cudaMemcpy(positions, cudapositions, count*sizeof(SORT_REAL), cudaMemcpyDeviceToHost), "cudaMemcpy cudapositions" );
cudasafe( cudaMemcpy(positions2, cudapositions2, count*sizeof(SORT_REAL), cudaMemcpyDeviceToHost), "cudaMemcpy cudapositions2" );
cudasafe( cudaMemcpy(indices, cudaindices, count*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy cudaindices" );
cudasafe( cudaMemcpy(oldindices, oldcudaindices, count*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy oldcudaindices" );
cudasafe( cudaMemcpy(llimits, cudallimits, 2*splitcount*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy cudallimits" );
cudasafe( cudaMemcpy(rlimits, cudarlimits, 2*splitcount*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy cudarlimits" );
cudasafe( cudaMemcpy(ysplit, cudaysplit, splitcount*sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy cudaysplit" );
cudasafe( cudaMemcpy(hz, z, 2*splitcount*sizeof(SORT_DCMPLX), cudaMemcpyDeviceToHost), "cudaMemcpy z" );
cudasafe( cudaMemcpy(hd, d, 2*splitcount*sizeof(SORT_DCMPLX), cudaMemcpyDeviceToHost), "cudaMemcpy d" );
cudasafe( cudaMemcpy(hsplitpoints, splitpoints, splitcount*sizeof(SORT_REAL), cudaMemcpyDeviceToHost), "cudaMemcpy splitpoints" );
// mexPrintf("running checkpartitioning\n");
int returnvalue=0;
int outside=0, failures=0, failurecount;
SORT_REAL max, min, max2, min2;
//start by checking that all elements exists in the vector
for(int j=0;j<count;j++) {
if(indices[j]>=count||indices[j]<0)
outside++;
else
buckets[indices[j]]++;
}
for(int j=0;j<count;j++) {
if(buckets[j]!=1)
failures++;
}
if(failures==0) {
if(printsuccess)
mexPrintf("All elements accounted for\n");
}
else {
returnvalue|=1;
if(printfailure)
mexPrintf("Invalid split, %d elements do not occur 1 time\n", failures);
failurecount=0;
//find where the elements were lost
for(int i=0;i<splitcount;i++) {
failures=0;
memset(buckets, 0, count*sizeof(int));
for(int j=llimits[2*i];j<rlimits[2*i+1];j++) {
buckets[indices[j]]++;
buckets[oldindices[j]]--;
}
for(int j=0;j<count;j++) {
if(buckets[j]!=0) { //if element is missing, determine which split it belongs to
failures++;
if(printfailure&&failures<10) {
if(buckets[j]<0) {
mexPrintf("E%d missing. ", j);
for(int k=llimits[2*i];k<rlimits[2*i+1];k++) {
if(oldindices[k]==j)
mexPrintf("pos %d. ", k);
}
}
if(buckets[j]>0) {
mexPrintf("E%d extra. ", j);
for(int k=llimits[2*i];k<rlimits[2*i+1];k++) {
if(oldindices[k]==j)
mexPrintf("from %d. ", k);
if(indices[k]==j)
mexPrintf("to %d. ", k);
}
}
}
}
}
if(failures!=0) {
if(failurecount<20&&printfailure)
mexPrintf("split %d invalid, %d elements do not occur 1 time\n", i, failures);
failurecount++;
}
}
if(printfailure)
mexPrintf("Invalid split, %d splits incorrect\n", failurecount);
}
if(outside!=0) {
returnvalue|=2;
if(printfailure)
mexPrintf("Invalid split, %d indices outside interval\n", outside);
mxFree(positions);
mxFree(positions2);
mxFree(indices);
mxFree(llimits);
mxFree(rlimits);
mxFree(ysplit);
mxFree(buckets);
mxFree(hz);
mxFree(hd);
mxFree(oldindices);
mxFree(hsplitpoints);
return returnvalue; //abort in this case, considering that successive tests could cause segmentation fault
}
//check permutation of positions
failures=0;
for(int j=0;j<count;j++) {
if(originalpositions[indices[j]]!=positions[j]) {
if(failures<10&&printfailure) {
int k=0;
for(;j>llimits[k];k++);
k--;
mexPrintf("element %d (indices=%d) in split %d (%d to %d) (ysplit=%d) not premuted correctly for positions, permuted value: %e, original value: %e\n", j, indices[j], k, llimits[k], rlimits[k], ysplit[k>>1], positions[j], originalpositions[indices[j]]);
}
failures++;
}
}
if(failures==0) {
if(printsuccess)
mexPrintf("Positions permuted correctly\n");
}
else {
returnvalue|=4;
if(printfailure)
mexPrintf("Invalid permutation of positions, %d elements incorrect\n", failures);
}
//check permutation of positions2
failures=0;
for(int j=0;j<count;j++) {
if(originalpositions2[indices[j]]!=positions2[j]) {
if(failures<10&&printfailure) {
int k=0;
for(;j>llimits[k];k++);
k--;
mexPrintf("element %d (indices=%d) in split %d (%d to %d) (ysplit=%d) not premuted correctly for positions 2, permuted value: %e, original value: %e\n", j, indices[j], k, llimits[k], rlimits[k], ysplit[k>>1], positions2[j], originalpositions2[indices[j]]);
}
failures++;
}
}
if(failures==0) {
if(printsuccess)
mexPrintf("Positions2 permuted correctly\n");
}
else {
returnvalue|=8;
if(printfailure)
mexPrintf("Invalid permutation of positions2, %d elements incorrect\n", failures);
}
//check that all elements on left side are smaller than all on right side
failures=0;
int splitpointfailures=0;
for(int k=0;k<splitcount;k++) {
if(rlimits[2*k]>llimits[2*k]) {
if(ysplit[k]) {
max=positions2[llimits[2*k]];
for(int j=llimits[2*k];j<rlimits[2*k];j++) {
if(positions2[j]>max)
max=positions2[j];
}
}
else {
max=positions[llimits[2*k]];
for(int j=llimits[2*k];j<rlimits[2*k];j++) {
if(positions[j]>max)
max=positions[j];
}
}
}
if(rlimits[2*k+1]>llimits[2*k+1]) {
if(ysplit[k]) {
min=positions2[llimits[2*k+1]];
for(int j=llimits[2*k+1];j<rlimits[2*k+1];j++) {
if(positions2[j]<min)
min=positions2[j];
}
}
else {
min=positions[llimits[2*k+1]];
for(int j=llimits[2*k+1];j<rlimits[2*k+1];j++) {
if(positions[j]<min)
min=positions[j];
}
}
}
if(max>min&&rlimits[2*k]>llimits[2*k]&&rlimits[2*k+1]>llimits[2*k+1]) {
if(printfailure&&failures<10)
mexPrintf("split %d: min[%d,%d)=%.16e max[%d,%d)=%.16e\n", k, llimits[2*k+1], rlimits[2*k+1], min, llimits[2*k], rlimits[2*k], max);
failures++;
}
//check that the value of the splitpoints are in between the two boxes
if(max>hsplitpoints[k]&&rlimits[2*k]>llimits[2*k]||min<hsplitpoints[k]&&rlimits[2*k+1]>llimits[2*k+1]) {
if(printfailure&&splitpointfailures<10)
mexPrintf("split %d: min[%d,%d)=%.16e max[%d,%d)=%.16e splitpoint=%16e\n", k, llimits[2*k+1], rlimits[2*k+1], min, llimits[2*k], rlimits[2*k], max, hsplitpoints[k]);
splitpointfailures++;
}
}
//split properly. All elements on the correct side of the split
if(failures==0) {
if(printsuccess)
mexPrintf("Split values ok\n");
}
else {
returnvalue|=8;
if(printfailure)
mexPrintf("Invalid values in vector, split not correct, %d elements incorrect\n", failures);
}
if(splitpointfailures==0) {
if(printsuccess)
mexPrintf("Splitpoints ok\n");
}
else {
returnvalue|=16;
if(printfailure)
mexPrintf("Splitpoints incorrect, %d elements incorrect\n", splitpointfailures);
}
for(int k=0;k<2*splitcount;k++) {
if(rlimits[k]>llimits[k]) {
max=positions[llimits[k]];
max2=positions2[llimits[k]];
min=positions[llimits[k]];
min2=positions2[llimits[k]];
for(int j=llimits[k];j<rlimits[k];j++) {
if(positions[j]>max)
max=positions[j];
if(positions2[j]>max2)
max2=positions2[j];
if(positions[j]<min)
min=positions[j];
if(positions2[j]<min2)
min2=positions2[j];
}
if(creal(hz[k])-creal(hd[k])*1.0000001>min) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, real value %e smaller than box %e\n",k,2*splitcount,min, creal(hz[k])-creal(hd[k]));
}
if(creal(hz[k])+creal(hd[k])*1.0000001<max) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, real value %e larger than box %e\n",k,2*splitcount,max, creal(hz[k])+creal(hd[k]));
}
if(cimag(hz[k])-cimag(hd[k])*1.0000001>min2) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, imag value %e smaller than box %e\n",k,2*splitcount,min2, cimag(hz[k])-cimag(hd[k]));
}
if(cimag(hz[k])+cimag(hd[k])*1.0000001<max2) {
failures++;
if(printfailure)
mexPrintf("Box %d of %d size incorrect, imag value %e larger than box %e\n",k,2*splitcount,max2, cimag(hz[k])+cimag(hd[k]));
}
// mexPrintf("B %d limits: [%.16e %.16e %.16e %.16e ] z0=%.16e + %.16e d0=%.16e + %.16e\n",k,min,max,min2,max2,creal(hz[k]),cimag(hz[k]),creal(hd[k]),cimag(hd[k]));
}
}
mxFree(positions);
mxFree(positions2);
mxFree(indices);
mxFree(oldindices);
mxFree(llimits);
mxFree(rlimits);
mxFree(ysplit);
mxFree(buckets);
mxFree(hsplitpoints);
mxFree(hz);
mxFree(hd);
return returnvalue;
}
/*------------------------------------------------------------------------*/
#endif /* defined(CUDASUPPORT) && defined(CUDASORT) */
//the last two functions are used by fmmsort in the asymmetric case even without CUDASORT,
//since connectivity is always built on the GPU
#ifdef CUDASUPPORT
//void* pointers are used instead of dcmplx for Visual Studio compatibility: the built-in dcmplx class does not work in CUDA, and CUDA files are compiled with a different implementation of dcmplx than the CPU files
void cudaCreateConnectivity(int *jcptr,int *kcptr,int *ir,
int *oldjcptr,int *oldkcptr,int *oldir,
int count,int maxm2p,
void *z,SORT_REAL *dabs,
SORT_REAL cutoff,int lastlevel,
int *outputvector DEBUGVECTORSTRING)
//wrapper since sort.cpp is not a cuda-file
{
cudacreateconnectivity<<<imin(MAXCONNECTIVITYBLOCKS,(count+MAXCONNECTIVITYTHREADS-1)/MAXCONNECTIVITYTHREADS),MAXCONNECTIVITYTHREADS>>>(jcptr,kcptr,ir,oldjcptr,oldkcptr,oldir,count,maxm2p,(SORT_DCMPLX*)z,dabs,cutoff,lastlevel,outputvector DEBUGVECTORSTRING3);
CHECKCUDAERROR
}
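/* Illustrative call sketch only (hypothetical caller, not part of this file): a CPU-side caller
   keeping its box centres z0 in the CPU dcmplx type would pass them through the void* parameter
   roughly as
     cudaCreateConnectivity(jcptr,kcptr,ir,oldjcptr,oldkcptr,oldir,count,maxm2p,
                            (void*)z0,dabs,cutoff,lastlevel,outputvector);
   (plus the trailing debug-vector argument when that macro is enabled). The cast is only valid
   under the assumption behind the void* interface above: the CPU and CUDA complex types share
   the same memory layout even though they are different classes. */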
/*------------------------------------------------------------------------*/
void calcdabs(const void *d,SORT_REAL *dabs,int count)
{
calculatedabs<<<imin(MAXCONNECTIVITYBLOCKS,(count+MAXCONNECTIVITYTHREADS-1)/MAXCONNECTIVITYTHREADS),MAXCONNECTIVITYTHREADS>>>((SORT_DCMPLX*)d,dabs,count);
CHECKCUDAERROR
}
/*------------------------------------------------------------------------*/
void cumsumlist(int* oldjcptr,int* oldkcptr,int* jcptr,size_t count,cudavariables* GPUvars,int evalshift)
{
size_t shift=CUMSUMSHIFTSTEP;
size_t blockcount=(count+CUMSUMTHREADS-1)/CUMSUMTHREADS;
size_t blockcounts[64/CUMSUMSHIFTSTEP+1]; //size=the limit where a 64 bit int would overflow anyway
size_t shiftfactor=1<<CUMSUMSHIFTSTEP;
size_t fullshiftfactor=1<<shift;
size_t i=1;
blockcounts[0]=blockcount;
if(GPUvars->evalonly) {
cumsuminitevalonly<<<imin(blockcount,CUMSUMMAXBLOCKS),CUMSUMTHREADS>>>(oldjcptr,oldkcptr,jcptr,count,blockcount,GPUvars->jxptr,evalshift);
CHECKCUDAERROR
}
else {
cumsuminit<<<imin(blockcount,CUMSUMMAXBLOCKS),CUMSUMTHREADS>>>(oldjcptr,oldkcptr,jcptr,count,blockcount,GPUvars->ixptr,GPUvars->jxptr,evalshift);
CHECKCUDAERROR
}
blockcount=(blockcount+shiftfactor-2)/shiftfactor;
while(blockcount>=1) {
blockcounts[i]=blockcount;
// mexPrintf("blockcount=%d shift=%d shiftfactor=%d fullshiftfactor=%d\n",blockcount,shift,1<<shiftfactor,fullshiftfactor);
cumsumpass1<<<imin(blockcount,CUMSUMMAXBLOCKS),CUMSUMTHREADS>>>(jcptr+fullshiftfactor-1,count-fullshiftfactor+1,blockcount,shift);
CHECKCUDAERROR
// cudasafe(cudaThreadSynchronize(),"cudaThreadSynchronize");
shift+=CUMSUMSHIFTSTEP;
fullshiftfactor=1<<shift;
blockcount=(blockcount+shiftfactor-2)/shiftfactor;
i++;
}
i--;
shift-=CUMSUMSHIFTSTEP*2;
while(i>0) {
fullshiftfactor=1<<shift;
cumsumpass2<<<imin(blockcounts[i-1],CUMSUMMAXBLOCKS),CUMSUMTHREADS>>>(jcptr+fullshiftfactor-1,count-fullshiftfactor+1,blockcounts[i-1],shift/*,debugvector*/);
CHECKCUDAERROR
// cudasafe(cudaThreadSynchronize(),"cudaThreadSynchronize");
shift-=CUMSUMSHIFTSTEP;
i--;
}
}
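/* Illustrative reference only (never called): the cumsuminit/cumsumpass1/cumsumpass2 kernels
   above implement a hierarchical, blocked prefix sum over jcptr. Whether the result is an
   inclusive or exclusive scan depends on how cumsuminit seeds jcptr, which is defined elsewhere;
   assuming the combined per-box counts are already stored in jcptr[0..count-1], a serial scan
   of the same flavour is simply: */
static inline void cumsumserialsketch(int* jcptr, size_t count)
{
  for(size_t k=1; k<count; k++)
    jcptr[k]+=jcptr[k-1]; /* in-place inclusive prefix sum */
}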
/*------------------------------------------------------------------------*/
#ifdef SORTLIMIT
void calculatesortlimits(float *distlimit,float *disttarget,float *sortlimit,float* input,float currentlevel,float maxlevel)
{
float tmpdistlimit,tmpdisttarget,tmpsortlimit;
float interppos=currentlevel/(float)maxlevel;
tmpsortlimit=input[0]*(1-interppos)+input[1]*interppos;
tmpdistlimit=input[2]*(1-interppos)+input[3]*interppos;
tmpdisttarget=input[4]*(1-interppos)+input[5]*interppos;
//tmpsortlimit needs to be at least 0; values larger than 1 have no effect, but should work.
//clamp all variables to the region [0,1], since that is where they have an effect.
if(tmpsortlimit<0)
tmpsortlimit=0;
if(tmpdisttarget<0)
tmpdisttarget=0;
if(tmpdisttarget>1)
tmpdisttarget=1;
if(tmpdistlimit<=0) { //split in position centre
tmpdistlimit=0;
tmpsortlimit=2; //disable this for speed issues
}
if(tmpdistlimit>=1)
tmpdistlimit=1;
if(tmpdisttarget>tmpdistlimit) //not reasonable, and not implemented in the code, as sorting could fail otherwise
tmpdisttarget=tmpdistlimit;
*distlimit=tmpdistlimit;
*disttarget=tmpdisttarget;
*sortlimit=tmpsortlimit;
// mexPrintf("interppos=%f distlimit=%f disttarget=%f sortlimit=%f\n",interppos,tmpdistlimit,tmpdisttarget,tmpsortlimit);
}
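// Worked example for calculatesortlimits above (illustrative values only): input={0.5,1.0,0.2,0.0,0.1,0.0},
// currentlevel=2 and maxlevel=4 give interppos=0.5, so before clamping sortlimit=0.75, distlimit=0.1
// and disttarget=0.05; all three already lie in their valid ranges and are returned unchanged.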
#endif
double calculateetalimits(double *eta,double currentlevel,double maxlevel)
{
double ret;
double interppos=currentlevel/maxlevel;
ret=eta[0]*(1-interppos)+eta[1]*interppos;
if(ret<0) //eta should be larger than 0.
ret=0;
return ret;
}
#endif
|
695c292b5626f40e33236d43ba5b5203ee5aa0cf.hip | // !!! This is a file automatically generated by hipify!!!
// *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0b
#include "../backend_manager.hpp"
#include "backend_gpu.hpp"
#include "../../utils/log.hpp"
#include "gpu_utils.hpp"
#include "../base_vector.hpp"
#include "../base_matrix.hpp"
#include "gpu_vector.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_dense.hpp"
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hipblas.h>
namespace paralution {
bool paralution_init_gpu(void) {
LOG_DEBUG(0, "paralution_init_gpu()",
"* begin");
assert(_Backend_Descriptor.GPU_cublas_handle == NULL);
assert(_Backend_Descriptor.GPU_cusparse_handle == NULL);
// assert(_Backend_Descriptor.GPU_dev == -1);
// create a handle
_Backend_Descriptor.GPU_cublas_handle = new hipblasHandle_t;
_Backend_Descriptor.GPU_cusparse_handle = new hipsparseHandle_t;
// get last cuda error (if any)
hipGetLastError();
hipError_t cuda_status_t;
int num_dev;
hipGetDeviceCount(&num_dev);
cuda_status_t = hipGetLastError();
// if querying for device count fails, fall back to host backend
if (cuda_status_t != hipSuccess) {
LOG_INFO("Querying for GPU devices failed - falling back to host backend");
return false;
}
LOG_INFO("Number of GPU devices in the system: " << num_dev);
if (num_dev < 1) {
LOG_INFO("No GPU device found");
} else {
if (_Backend_Descriptor.GPU_dev != -1)
num_dev = 1;
for (int idev=0; idev<num_dev; idev++) {
int dev = idev;
if (_Backend_Descriptor.GPU_dev != -1) {
dev = _Backend_Descriptor.GPU_dev;
}
hipSetDevice(dev);
cuda_status_t = hipGetLastError();
if (cuda_status_t == hipSuccess) {
if ((hipblasCreate(static_cast<hipblasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle)) == HIPBLAS_STATUS_SUCCESS) &&
(hipsparseCreate(static_cast<hipsparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle)) == HIPSPARSE_STATUS_SUCCESS)) {
_Backend_Descriptor.GPU_dev = dev;
break;
} else
LOG_INFO("GPU device " << dev << " cannot create CUBLAS/CUSPARSE context");
}
if (cuda_status_t == hipErrorDeviceAlreadyInUse)
LOG_INFO("GPU device " << dev << " is already in use");
if (cuda_status_t == hipErrorInvalidDevice)
LOG_INFO("GPU device " << dev << " is invalid NVIDIA GPU device");
}
}
if (_Backend_Descriptor.GPU_dev == -1) {
LOG_INFO("CUDA and CUBLAS/CUSPARSE have NOT been initialized!");
return false;
}
struct hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop, _Backend_Descriptor.GPU_dev);
if (dev_prop.major < 2) {
LOG_INFO("GPU device " << _Backend_Descriptor.GPU_dev << " has low compute capability (min 2.0 is needed)");
return false;
}
LOG_DEBUG(0, "paralution_init_gpu()",
"* end");
return true;
}
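// Illustrative sketch only (hypothetical usage, not part of this file): once paralution_init_gpu()
// has filled the backend descriptor, GPU code elsewhere retrieves the handles the same way
// paralution_stop_gpu() does below, e.g. for a float axpy (n, d_x and d_y are placeholders):
//
//   hipblasHandle_t handle =
//     *static_cast<hipblasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle);
//   float alpha = 1.0f;
//   hipblasSaxpy(handle, n, &alpha, d_x, 1, d_y, 1);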
void paralution_stop_gpu(void) {
LOG_DEBUG(0, "paralution_stop_gpu()",
"* begin");
if (_Backend_Descriptor.accelerator) {
if (hipblasDestroy(*(static_cast<hipblasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle))) != HIPBLAS_STATUS_SUCCESS) {
LOG_INFO("Error in hipblasDestroy");
}
if (hipsparseDestroy(*(static_cast<hipsparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle))) != HIPSPARSE_STATUS_SUCCESS) {
LOG_INFO("Error in hipsparseDestroy");
}
}
delete (static_cast<hipblasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle));
delete (static_cast<hipsparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle));
_Backend_Descriptor.GPU_cublas_handle = NULL;
_Backend_Descriptor.GPU_cusparse_handle = NULL;
_Backend_Descriptor.GPU_dev = -1;
LOG_DEBUG(0, "paralution_stop_gpu()",
"* end");
}
void paralution_info_gpu(const struct Paralution_Backend_Descriptor backend_descriptor) {
int num_dev;
hipGetDeviceCount(&num_dev);
hipGetLastError();
// CHECK_CUDA_ERROR(__FILE__, __LINE__);
// LOG_INFO("Number of GPU devices in the sytem: " << num_dev);
if (_Backend_Descriptor.GPU_dev >= 0) {
LOG_INFO("Selected GPU device: " << backend_descriptor.GPU_dev);
} else {
LOG_INFO("No GPU device is selected!");
}
for (int dev = 0; dev < num_dev; dev++) {
struct hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop, dev);
LOG_INFO("------------------------------------------------");
LOG_INFO("Device number: " << dev);
LOG_INFO("Device name: " << dev_prop.name); // char name[256];
LOG_INFO("totalGlobalMem: " << (dev_prop.totalGlobalMem >> 20) <<" MByte");// size_t totalGlobalMem;
/*
LOG_INFO("sharedMemPerBlock: " << dev_prop.sharedMemPerBlock); // size_t sharedMemPerBlock;
LOG_INFO("regsPerBlock: " << dev_prop.regsPerBlock); // int regsPerBlock;
LOG_INFO("warpSize: " << dev_prop.warpSize); // int warpSize;
LOG_INFO("memPitch: " << dev_prop.memPitch); // size_t memPitch;
LOG_INFO("maxThreadsPerBlock: " << dev_prop.maxThreadsPerBlock); // int maxThreadsPerBlock;
LOG_INFO("maxThreadsDim[0]: " << dev_prop.maxThreadsDim[0]); // int maxThreadsDim[0];
LOG_INFO("maxThreadsDim[1]: " << dev_prop.maxThreadsDim[1]); // int maxThreadsDim[1];
LOG_INFO("maxThreadsDim[2]: " << dev_prop.maxThreadsDim[2]); // int maxThreadsDim[2];
LOG_INFO("maxGridSize[0]: " << dev_prop.maxGridSize[0]); // int maxGridSize[0];
LOG_INFO("maxGridSize[1]: " << dev_prop.maxGridSize[1]); // int maxGridSize[1];
LOG_INFO("maxGridSize[2]: " << dev_prop.maxGridSize[2]); // int maxGridSize[2];
*/
LOG_INFO("clockRate: " << dev_prop.clockRate); // int clockRate;
/*
LOG_INFO("totalConstMem: " << dev_prop.totalConstMem); // size_t totalConstMem;
*/
/*
LOG_INFO("major: " << dev_prop.major); // int major;
LOG_INFO("minor: " << dev_prop.minor); // int minor;
*/
LOG_INFO("compute capability: " << dev_prop.major << "." << dev_prop.minor);
/*
LOG_INFO("textureAlignment: " << dev_prop.textureAlignment); // size_t textureAlignment;
LOG_INFO("deviceOverlap: " << dev_prop.deviceOverlap); // int deviceOverlap;
LOG_INFO("multiProcessorCount: " << dev_prop.multiProcessorCount); // int multiProcessorCount;
LOG_INFO("kernelExecTimeoutEnabled: " << dev_prop.kernelExecTimeoutEnabled); // int kernelExecTimeoutEnabled;
LOG_INFO("integrated: " << dev_prop.integrated); // int integrated;
LOG_INFO("canMapHostMemory: " << dev_prop.canMapHostMemory); // int canMapHostMemory;
LOG_INFO("computeMode: " << dev_prop.computeMode); // int computeMode;
LOG_INFO("maxTexture1D: " << dev_prop.maxTexture1D); // int maxTexture1D;
LOG_INFO("maxTexture2D[0]: " << dev_prop.maxTexture2D[0]); // int maxTexture2D[0];
LOG_INFO("maxTexture2D[1]: " << dev_prop.maxTexture2D[1]); // int maxTexture2D[1];
LOG_INFO("maxTexture3D[0]: " << dev_prop.maxTexture3D[0]); // int maxTexture3D[0];
LOG_INFO("maxTexture3D[1]: " << dev_prop.maxTexture3D[1]); // int maxTexture3D[1];
LOG_INFO("maxTexture3D[2]: " << dev_prop.maxTexture3D[2]); // int maxTexture3D[2];
LOG_INFO("maxTexture1DLayered[0]: " << dev_prop.maxTexture1DLayered[0]); // int maxTexture1DLayered[0];
LOG_INFO("maxTexture1DLayered[1]: " << dev_prop.maxTexture1DLayered[1]); // int maxTexture1DLayered[1];
LOG_INFO("maxTexture2DLayered[0]: " << dev_prop.maxTexture2DLayered[0]); // int maxTexture2DLayered[0];
LOG_INFO("maxTexture2DLayered[1]: " << dev_prop.maxTexture2DLayered[1]); // int maxTexture2DLayered[1];
LOG_INFO("maxTexture2DLayered[2]: " << dev_prop.maxTexture2DLayered[2]); // int maxTexture2DLayered[2];
LOG_INFO("surfaceAlignment: " << dev_prop.surfaceAlignment); // size_t surfaceAlignment;
LOG_INFO("concurrentKernels: " << dev_prop.concurrentKernels); // int concurrentKernels;
*/
LOG_INFO("ECCEnabled: " << dev_prop.ECCEnabled); // int ECCEnabled;
/*
LOG_INFO("pciBusID: " << dev_prop.pciBusID); // int pciBusID;
LOG_INFO("pciDeviceID: " << dev_prop.pciDeviceID); // int pciDeviceID;
LOG_INFO("pciDomainID: " << dev_prop.pciDomainID); // int pciDomainID;
LOG_INFO("tccDriver: " << dev_prop.tccDriver); // int tccDriver;
LOG_INFO("asyncEngineCount: " << dev_prop.asyncEngineCount); // int asyncEngineCount;
LOG_INFO("unifiedAddressing: " << dev_prop.unifiedAddressing); // int unifiedAddressing;
LOG_INFO("memoryClockRate: " << dev_prop.memoryClockRate); // int memoryClockRate;
LOG_INFO("memoryBusWidth: " << dev_prop.memoryBusWidth); // int memoryBusWidth;
LOG_INFO("l2CacheSize: " << dev_prop.l2CacheSize); // int l2CacheSize;
LOG_INFO("maxThreadsPerMultiProcessor: " << dev_prop.maxThreadsPerMultiProcessor); // int maxThreadsPerMultiProcessor;
*/
LOG_INFO("------------------------------------------------");
}
}
template <typename ValueType>
AcceleratorMatrix<ValueType>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format) {
assert(backend_descriptor.backend == GPU);
switch (matrix_format) {
case CSR:
return new GPUAcceleratorMatrixCSR<ValueType>(backend_descriptor);
case COO:
return new GPUAcceleratorMatrixCOO<ValueType>(backend_descriptor);
case MCSR:
return new GPUAcceleratorMatrixMCSR<ValueType>(backend_descriptor);
case DIA:
return new GPUAcceleratorMatrixDIA<ValueType>(backend_descriptor);
case ELL:
return new GPUAcceleratorMatrixELL<ValueType>(backend_descriptor);
case DENSE:
return new GPUAcceleratorMatrixDENSE<ValueType>(backend_descriptor);
case HYB:
return new GPUAcceleratorMatrixHYB<ValueType>(backend_descriptor);
case BCSR:
return new GPUAcceleratorMatrixBCSR<ValueType>(backend_descriptor);
default:
LOG_INFO("This backed is not supported for Matrix types");
FATAL_ERROR(__FILE__, __LINE__);
return NULL;
}
}
template <typename ValueType>
AcceleratorVector<ValueType>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor) {
assert(backend_descriptor.backend == GPU);
return new GPUAcceleratorVector<ValueType>(backend_descriptor);
}
void paralution_gpu_sync(void) {
hipDeviceSynchronize();
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template AcceleratorVector<float>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorVector<double>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorVector<int>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorMatrix<float>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format);
template AcceleratorMatrix<double>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format);
}
| 695c292b5626f40e33236d43ba5b5203ee5aa0cf.cu | // *************************************************************************
//
// PARALUTION www.paralution.com
//
// Copyright (C) 2012-2014 Dimitar Lukarski
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
// *************************************************************************
// PARALUTION version 0.7.0b
#include "../backend_manager.hpp"
#include "backend_gpu.hpp"
#include "../../utils/log.hpp"
#include "gpu_utils.hpp"
#include "../base_vector.hpp"
#include "../base_matrix.hpp"
#include "gpu_vector.hpp"
#include "gpu_matrix_csr.hpp"
#include "gpu_matrix_coo.hpp"
#include "gpu_matrix_mcsr.hpp"
#include "gpu_matrix_bcsr.hpp"
#include "gpu_matrix_hyb.hpp"
#include "gpu_matrix_dia.hpp"
#include "gpu_matrix_ell.hpp"
#include "gpu_matrix_dense.hpp"
#include <assert.h>
#include <cuda.h>
#include <cublas_v2.h>
namespace paralution {
bool paralution_init_gpu(void) {
LOG_DEBUG(0, "paralution_init_gpu()",
"* begin");
assert(_Backend_Descriptor.GPU_cublas_handle == NULL);
assert(_Backend_Descriptor.GPU_cusparse_handle == NULL);
// assert(_Backend_Descriptor.GPU_dev == -1);
// create a handle
_Backend_Descriptor.GPU_cublas_handle = new cublasHandle_t;
_Backend_Descriptor.GPU_cusparse_handle = new cusparseHandle_t;
// get last cuda error (if any)
cudaGetLastError();
cudaError_t cuda_status_t;
int num_dev;
cudaGetDeviceCount(&num_dev);
cuda_status_t = cudaGetLastError();
// if querying for device count fails, fall back to host backend
if (cuda_status_t != cudaSuccess) {
LOG_INFO("Querying for GPU devices failed - falling back to host backend");
return false;
}
LOG_INFO("Number of GPU devices in the system: " << num_dev);
if (num_dev < 1) {
LOG_INFO("No GPU device found");
} else {
if (_Backend_Descriptor.GPU_dev != -1)
num_dev = 1;
for (int idev=0; idev<num_dev; idev++) {
int dev = idev;
if (_Backend_Descriptor.GPU_dev != -1) {
dev = _Backend_Descriptor.GPU_dev;
}
cudaSetDevice(dev);
cuda_status_t = cudaGetLastError();
if (cuda_status_t == cudaSuccess) {
if ((cublasCreate(static_cast<cublasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle)) == CUBLAS_STATUS_SUCCESS) &&
(cusparseCreate(static_cast<cusparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle)) == CUSPARSE_STATUS_SUCCESS)) {
_Backend_Descriptor.GPU_dev = dev;
break;
} else
LOG_INFO("GPU device " << dev << " cannot create CUBLAS/CUSPARSE context");
}
if (cuda_status_t == cudaErrorDeviceAlreadyInUse)
LOG_INFO("GPU device " << dev << " is already in use");
if (cuda_status_t == cudaErrorInvalidDevice)
LOG_INFO("GPU device " << dev << " is invalid NVIDIA GPU device");
}
}
if (_Backend_Descriptor.GPU_dev == -1) {
LOG_INFO("CUDA and CUBLAS/CUSPARSE have NOT been initialized!");
return false;
}
struct cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, _Backend_Descriptor.GPU_dev);
if (dev_prop.major < 2) {
LOG_INFO("GPU device " << _Backend_Descriptor.GPU_dev << " has low compute capability (min 2.0 is needed)");
return false;
}
LOG_DEBUG(0, "paralution_init_gpu()",
"* end");
return true;
}
void paralution_stop_gpu(void) {
LOG_DEBUG(0, "paralution_stop_gpu()",
"* begin");
if (_Backend_Descriptor.accelerator) {
if (cublasDestroy(*(static_cast<cublasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle))) != CUBLAS_STATUS_SUCCESS) {
LOG_INFO("Error in cublasDestroy");
}
if (cusparseDestroy(*(static_cast<cusparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle))) != CUSPARSE_STATUS_SUCCESS) {
LOG_INFO("Error in cusparseDestroy");
}
}
delete (static_cast<cublasHandle_t*>(_Backend_Descriptor.GPU_cublas_handle));
delete (static_cast<cusparseHandle_t*>(_Backend_Descriptor.GPU_cusparse_handle));
_Backend_Descriptor.GPU_cublas_handle = NULL;
_Backend_Descriptor.GPU_cusparse_handle = NULL;
_Backend_Descriptor.GPU_dev = -1;
LOG_DEBUG(0, "paralution_stop_gpu()",
"* end");
}
void paralution_info_gpu(const struct Paralution_Backend_Descriptor backend_descriptor) {
int num_dev;
cudaGetDeviceCount(&num_dev);
cudaGetLastError();
// CHECK_CUDA_ERROR(__FILE__, __LINE__);
// LOG_INFO("Number of GPU devices in the sytem: " << num_dev);
if (_Backend_Descriptor.GPU_dev >= 0) {
LOG_INFO("Selected GPU device: " << backend_descriptor.GPU_dev);
} else {
LOG_INFO("No GPU device is selected!");
}
for (int dev = 0; dev < num_dev; dev++) {
struct cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, dev);
LOG_INFO("------------------------------------------------");
LOG_INFO("Device number: " << dev);
LOG_INFO("Device name: " << dev_prop.name); // char name[256];
LOG_INFO("totalGlobalMem: " << (dev_prop.totalGlobalMem >> 20) <<" MByte");// size_t totalGlobalMem;
/*
LOG_INFO("sharedMemPerBlock: " << dev_prop.sharedMemPerBlock); // size_t sharedMemPerBlock;
LOG_INFO("regsPerBlock: " << dev_prop.regsPerBlock); // int regsPerBlock;
LOG_INFO("warpSize: " << dev_prop.warpSize); // int warpSize;
LOG_INFO("memPitch: " << dev_prop.memPitch); // size_t memPitch;
LOG_INFO("maxThreadsPerBlock: " << dev_prop.maxThreadsPerBlock); // int maxThreadsPerBlock;
LOG_INFO("maxThreadsDim[0]: " << dev_prop.maxThreadsDim[0]); // int maxThreadsDim[0];
LOG_INFO("maxThreadsDim[1]: " << dev_prop.maxThreadsDim[1]); // int maxThreadsDim[1];
LOG_INFO("maxThreadsDim[2]: " << dev_prop.maxThreadsDim[2]); // int maxThreadsDim[2];
LOG_INFO("maxGridSize[0]: " << dev_prop.maxGridSize[0]); // int maxGridSize[0];
LOG_INFO("maxGridSize[1]: " << dev_prop.maxGridSize[1]); // int maxGridSize[1];
LOG_INFO("maxGridSize[2]: " << dev_prop.maxGridSize[2]); // int maxGridSize[2];
*/
LOG_INFO("clockRate: " << dev_prop.clockRate); // int clockRate;
/*
LOG_INFO("totalConstMem: " << dev_prop.totalConstMem); // size_t totalConstMem;
*/
/*
LOG_INFO("major: " << dev_prop.major); // int major;
LOG_INFO("minor: " << dev_prop.minor); // int minor;
*/
LOG_INFO("compute capability: " << dev_prop.major << "." << dev_prop.minor);
/*
LOG_INFO("textureAlignment: " << dev_prop.textureAlignment); // size_t textureAlignment;
LOG_INFO("deviceOverlap: " << dev_prop.deviceOverlap); // int deviceOverlap;
LOG_INFO("multiProcessorCount: " << dev_prop.multiProcessorCount); // int multiProcessorCount;
LOG_INFO("kernelExecTimeoutEnabled: " << dev_prop.kernelExecTimeoutEnabled); // int kernelExecTimeoutEnabled;
LOG_INFO("integrated: " << dev_prop.integrated); // int integrated;
LOG_INFO("canMapHostMemory: " << dev_prop.canMapHostMemory); // int canMapHostMemory;
LOG_INFO("computeMode: " << dev_prop.computeMode); // int computeMode;
LOG_INFO("maxTexture1D: " << dev_prop.maxTexture1D); // int maxTexture1D;
LOG_INFO("maxTexture2D[0]: " << dev_prop.maxTexture2D[0]); // int maxTexture2D[0];
LOG_INFO("maxTexture2D[1]: " << dev_prop.maxTexture2D[1]); // int maxTexture2D[1];
LOG_INFO("maxTexture3D[0]: " << dev_prop.maxTexture3D[0]); // int maxTexture3D[0];
LOG_INFO("maxTexture3D[1]: " << dev_prop.maxTexture3D[1]); // int maxTexture3D[1];
LOG_INFO("maxTexture3D[2]: " << dev_prop.maxTexture3D[2]); // int maxTexture3D[2];
LOG_INFO("maxTexture1DLayered[0]: " << dev_prop.maxTexture1DLayered[0]); // int maxTexture1DLayered[0];
LOG_INFO("maxTexture1DLayered[1]: " << dev_prop.maxTexture1DLayered[1]); // int maxTexture1DLayered[1];
LOG_INFO("maxTexture2DLayered[0]: " << dev_prop.maxTexture2DLayered[0]); // int maxTexture2DLayered[0];
LOG_INFO("maxTexture2DLayered[1]: " << dev_prop.maxTexture2DLayered[1]); // int maxTexture2DLayered[1];
LOG_INFO("maxTexture2DLayered[2]: " << dev_prop.maxTexture2DLayered[2]); // int maxTexture2DLayered[2];
LOG_INFO("surfaceAlignment: " << dev_prop.surfaceAlignment); // size_t surfaceAlignment;
LOG_INFO("concurrentKernels: " << dev_prop.concurrentKernels); // int concurrentKernels;
*/
LOG_INFO("ECCEnabled: " << dev_prop.ECCEnabled); // int ECCEnabled;
/*
LOG_INFO("pciBusID: " << dev_prop.pciBusID); // int pciBusID;
LOG_INFO("pciDeviceID: " << dev_prop.pciDeviceID); // int pciDeviceID;
LOG_INFO("pciDomainID: " << dev_prop.pciDomainID); // int pciDomainID;
LOG_INFO("tccDriver: " << dev_prop.tccDriver); // int tccDriver;
LOG_INFO("asyncEngineCount: " << dev_prop.asyncEngineCount); // int asyncEngineCount;
LOG_INFO("unifiedAddressing: " << dev_prop.unifiedAddressing); // int unifiedAddressing;
LOG_INFO("memoryClockRate: " << dev_prop.memoryClockRate); // int memoryClockRate;
LOG_INFO("memoryBusWidth: " << dev_prop.memoryBusWidth); // int memoryBusWidth;
LOG_INFO("l2CacheSize: " << dev_prop.l2CacheSize); // int l2CacheSize;
LOG_INFO("maxThreadsPerMultiProcessor: " << dev_prop.maxThreadsPerMultiProcessor); // int maxThreadsPerMultiProcessor;
*/
LOG_INFO("------------------------------------------------");
}
}
template <typename ValueType>
AcceleratorMatrix<ValueType>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format) {
assert(backend_descriptor.backend == GPU);
switch (matrix_format) {
case CSR:
return new GPUAcceleratorMatrixCSR<ValueType>(backend_descriptor);
case COO:
return new GPUAcceleratorMatrixCOO<ValueType>(backend_descriptor);
case MCSR:
return new GPUAcceleratorMatrixMCSR<ValueType>(backend_descriptor);
case DIA:
return new GPUAcceleratorMatrixDIA<ValueType>(backend_descriptor);
case ELL:
return new GPUAcceleratorMatrixELL<ValueType>(backend_descriptor);
case DENSE:
return new GPUAcceleratorMatrixDENSE<ValueType>(backend_descriptor);
case HYB:
return new GPUAcceleratorMatrixHYB<ValueType>(backend_descriptor);
case BCSR:
return new GPUAcceleratorMatrixBCSR<ValueType>(backend_descriptor);
default:
LOG_INFO("This backed is not supported for Matrix types");
FATAL_ERROR(__FILE__, __LINE__);
return NULL;
}
}
template <typename ValueType>
AcceleratorVector<ValueType>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor) {
assert(backend_descriptor.backend == GPU);
return new GPUAcceleratorVector<ValueType>(backend_descriptor);
}
void paralution_gpu_sync(void) {
cudaDeviceSynchronize();
CHECK_CUDA_ERROR(__FILE__, __LINE__);
}
template AcceleratorVector<float>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorVector<double>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorVector<int>* _paralution_init_base_gpu_vector(const struct Paralution_Backend_Descriptor backend_descriptor);
template AcceleratorMatrix<float>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format);
template AcceleratorMatrix<double>* _paralution_init_base_gpu_matrix(const struct Paralution_Backend_Descriptor backend_descriptor,
const unsigned int matrix_format);
}
|
01e7d9876e28efee7541738ca240fe74d3fef8d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
///////////////////////////////////////////////////////////////////////////////
#include <prof.cu>
#include <hipfft.h>
#include <math_constants.h>
//Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b)
{
return (a + (b - 1)) / b;
}
// complex math functions
__device__
float2 conjugate(float2 arg)
{
return make_float2(arg.x, -arg.y);
}
__device__
float2 complex_exp(float arg)
{
return make_float2(cosf(arg), sinf(arg));
}
__device__
float2 complex_add(float2 a, float2 b)
{
return make_float2(a.x + b.x, a.y + b.y);
}
__device__
float2 complex_mult(float2 ab, float2 cd)
{
return make_float2(ab.x * cd.x - ab.y * cd.y, ab.x * cd.y + ab.y * cd.x);
}
// generate wave heightfield at time t based on initial heightfield and dispersion relationship
__global__ void generateSpectrumKernel(float2* h0, float2 *ht, unsigned int width, unsigned int height, float t, float patchSize)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
// calculate coordinates
float2 k;
k.x = CUDART_PI_F * x / (float) patchSize;
k.y = 2.0f * CUDART_PI_F * y / (float) patchSize;
// calculate dispersion w(k)
float k_len = sqrtf(k.x*k.x + k.y*k.y);
float w = sqrtf(9.81f * k_len);
float2 h_tilda;
if ((x < width) && (y < height)) {
float2 h0_k = h0[i];
float2 h0_mk = h0[(((height-1)-y)*width)+x];
h_tilda = complex_add( complex_mult(h0_k, complex_exp(w * t)),
complex_mult(conjugate(h0_mk), complex_exp(-w * t)) );
}
// output frequency-space complex values
if ((x < width) && (y < height)) {
ht[i] = h_tilda;
}
}
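// In equation form (summary of the kernel above): with the deep-water dispersion relation
// w(k) = sqrt(9.81*|k|), each thread evaluates
//   h~(k,t) = h0(k)*exp(+i*w*t) + conj(h0(-k))*exp(-i*w*t),
// where h0(-k) is read from the mirrored row index (((height-1)-y)*width)+x.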
// generate slope by partial differences in spatial domain
__global__ void calculateSlopeKernel(float* h, float2 *slopeOut, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
float2 slope;
if ((x > 0) && (y > 0) && (x < width-1) && (y < height-1)) {
slope.x = h[i+1] - h[i-1];
slope.y = h[i+width] - h[i-width];
} else {
slope = make_float2(0.0f, 0.0f);
}
slopeOut[i] = slope;
}
extern "C"
void cudaGenerateSpectrumKernel(float2* d_h0, float2 *d_ht,
unsigned int width, unsigned int height,
float animTime, float patchSize)
{
dim3 block(8, 8, 1);
dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
GpuProfiling::prepareProfiling( grid, block );
hipLaunchKernelGGL(( generateSpectrumKernel), dim3(grid), dim3(block), 0, 0, d_h0, d_ht, width, height, animTime, patchSize);
GpuProfiling::addResults("generateSpectrumKernel");
}
extern "C"
void cudaCalculateSlopeKernel( float* hptr, float2 *slopeOut,
unsigned int width, unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid2(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
GpuProfiling::prepareProfiling( grid2, block );
hipLaunchKernelGGL(( calculateSlopeKernel), dim3(grid2), dim3(block), 0, 0, hptr, slopeOut, width, height);
GpuProfiling::addResults("calculateSlopeKernel");
}
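/* Illustrative sketch (not part of the original sample): a typical per-frame driver for
   the wrappers above, assuming d_h0, d_ht, d_height and d_slope were allocated elsewhere,
   `plan` is a hipFFT C2C plan of size height x width, and a separate kernel turns the
   complex FFT output into the real heightfield d_height. All names are illustrative only.

     cudaGenerateSpectrumKernel(d_h0, d_ht, width, height, animTime, patchSize);
     hipfftExecC2C(plan, (hipfftComplex *)d_ht, (hipfftComplex *)d_ht, HIPFFT_BACKWARD);
     cudaCalculateSlopeKernel(d_height, d_slope, width, height);
*/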
| 01e7d9876e28efee7541738ca240fe74d3fef8d8.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
///////////////////////////////////////////////////////////////////////////////
#include <prof.cu>
#include <cufft.h>
#include <math_constants.h>
//Round a / b to nearest higher integer value
int cuda_iDivUp(int a, int b)
{
return (a + (b - 1)) / b;
}
// complex math functions
__device__
float2 conjugate(float2 arg)
{
return make_float2(arg.x, -arg.y);
}
__device__
float2 complex_exp(float arg)
{
return make_float2(cosf(arg), sinf(arg));
}
__device__
float2 complex_add(float2 a, float2 b)
{
return make_float2(a.x + b.x, a.y + b.y);
}
__device__
float2 complex_mult(float2 ab, float2 cd)
{
return make_float2(ab.x * cd.x - ab.y * cd.y, ab.x * cd.y + ab.y * cd.x);
}
// generate wave heightfield at time t based on initial heightfield and dispersion relationship
__global__ void generateSpectrumKernel(float2* h0, float2 *ht, unsigned int width, unsigned int height, float t, float patchSize)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
// calculate coordinates
float2 k;
k.x = CUDART_PI_F * x / (float) patchSize;
k.y = 2.0f * CUDART_PI_F * y / (float) patchSize;
// calculate dispersion w(k)
float k_len = sqrtf(k.x*k.x + k.y*k.y);
float w = sqrtf(9.81f * k_len);
float2 h_tilda;
if ((x < width) && (y < height)) {
float2 h0_k = h0[i];
float2 h0_mk = h0[(((height-1)-y)*width)+x];
h_tilda = complex_add( complex_mult(h0_k, complex_exp(w * t)),
complex_mult(conjugate(h0_mk), complex_exp(-w * t)) );
}
// output frequency-space complex values
if ((x < width) && (y < height)) {
ht[i] = h_tilda;
}
}
// generate slope by partial differences in spatial domain
__global__ void calculateSlopeKernel(float* h, float2 *slopeOut, unsigned int width, unsigned int height)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int i = y*width+x;
float2 slope;
if ((x > 0) && (y > 0) && (x < width-1) && (y < height-1)) {
slope.x = h[i+1] - h[i-1];
slope.y = h[i+width] - h[i-width];
} else {
slope = make_float2(0.0f, 0.0f);
}
slopeOut[i] = slope;
}
extern "C"
void cudaGenerateSpectrumKernel(float2* d_h0, float2 *d_ht,
unsigned int width, unsigned int height,
float animTime, float patchSize)
{
dim3 block(8, 8, 1);
dim3 grid(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
GpuProfiling::prepareProfiling( grid, block );
generateSpectrumKernel<<<grid, block>>>(d_h0, d_ht, width, height, animTime, patchSize);
GpuProfiling::addResults("generateSpectrumKernel");
}
extern "C"
void cudaCalculateSlopeKernel( float* hptr, float2 *slopeOut,
unsigned int width, unsigned int height)
{
dim3 block(8, 8, 1);
dim3 grid2(cuda_iDivUp(width, block.x), cuda_iDivUp(height, block.y), 1);
GpuProfiling::prepareProfiling( grid2, block );
calculateSlopeKernel<<<grid2, block>>>(hptr, slopeOut, width, height);
GpuProfiling::addResults("calculateSlopeKernel");
}
|
95657bcc108b281a9d816d991a673a17f88dc3d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereolite.h"
// ***********************
// MULTIPLY
// ***********************
__global__
void LiteScalarMultiplyKernel(float* src, float scalar,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
src[pos] = src[pos] * scalar;
}
void StereoLite::ScalarMultiply(float *src, float scalar, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s);
}
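// Note: every wrapper in this file uses the same launch pattern: a BlockWidth x BlockHeight
// thread block (BlockWidth, BlockHeight and iDivUp are assumed to be StereoLite members
// declared in stereolite.h) and a grid rounded up to cover w x h. Images are assumed to be
// row-major with rows padded to `stride` elements, and out-of-range threads simply return.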
__global__
void LiteScalarMultiplyKernel(float* src, float scalar,
int width, int height, int stride, float* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src[pos] * scalar;
}
void StereoLite::ScalarMultiply(float *src, float scalar, int w, int h, int s, float* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s, dst);
}
__global__
void LiteScalarMultiplyKernel(float2* src, float scalar,
int width, int height, int stride, float2* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 srcs = src[pos];
dst[pos].x = srcs.x * scalar;
dst[pos].y = srcs.y * scalar;
}
void StereoLite::ScalarMultiply(float2 *src, float scalar, int w, int h, int s, float2* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s, dst);
}
//************************
// ADD
//************************
__global__
void LiteAddKernel(float* src1, float * src2,
int width, int height, int stride, float* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src1[pos] + src2[pos];
}
void StereoLite::Add(float *src1, float* src2, int w, int h, int s, float* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteAddKernel << <blocks, threads >> > (src1, src2, w, h, s, dst);
}
//************************
// ADD FLOAT2
//************************
__global__
void LiteAddFloat2Kernel(float2* src1, float2 * src2,
int width, int height, int stride, float2* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos].x = src1[pos].x + src2[pos].x;
dst[pos].y = src1[pos].y + src2[pos].y;
}
void StereoLite::Add(float2 *src1, float2* src2, int w, int h, int s, float2* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteAddFloat2Kernel << <blocks, threads >> > (src1, src2, w, h, s, dst);
}
//************************
// SUBTRACT
//************************
__global__
void LiteSubtractKernel(float* minuend, float * subtrahend,
int width, int height, int stride, float* difference)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
difference[pos] = minuend[pos] - subtrahend[pos];
}
void StereoLite::Subtract(float *minuend, float* subtrahend, int w, int h, int s, float* difference)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteSubtractKernel << <blocks, threads >> > (minuend, subtrahend, w, h, s, difference);
} | 95657bcc108b281a9d816d991a673a17f88dc3d3.cu | #include "stereolite.h"
// ***********************
// MULTIPLY
// ***********************
__global__
void LiteScalarMultiplyKernel(float* src, float scalar,
int width, int height, int stride)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
src[pos] = src[pos] * scalar;
}
void StereoLite::ScalarMultiply(float *src, float scalar, int w, int h, int s)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s);
}
__global__
void LiteScalarMultiplyKernel(float* src, float scalar,
int width, int height, int stride, float* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src[pos] * scalar;
}
void StereoLite::ScalarMultiply(float *src, float scalar, int w, int h, int s, float* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s, dst);
}
__global__
void LiteScalarMultiplyKernel(float2* src, float scalar,
int width, int height, int stride, float2* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float2 srcs = src[pos];
dst[pos].x = srcs.x * scalar;
dst[pos].y = srcs.y * scalar;
}
void StereoLite::ScalarMultiply(float2 *src, float scalar, int w, int h, int s, float2* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteScalarMultiplyKernel << <blocks, threads >> > (src, scalar, w, h, s, dst);
}
//************************
// ADD
//************************
__global__
void LiteAddKernel(float* src1, float * src2,
int width, int height, int stride, float* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos] = src1[pos] + src2[pos];
}
void StereoLite::Add(float *src1, float* src2, int w, int h, int s, float* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteAddKernel << <blocks, threads >> > (src1, src2, w, h, s, dst);
}
//************************
// ADD FLOAT2
//************************
__global__
void LiteAddFloat2Kernel(float2* src1, float2 * src2,
int width, int height, int stride, float2* dst)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
dst[pos].x = src1[pos].x + src2[pos].x;
dst[pos].y = src1[pos].y + src2[pos].y;
}
void StereoLite::Add(float2 *src1, float2* src2, int w, int h, int s, float2* dst)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteAddFloat2Kernel << <blocks, threads >> > (src1, src2, w, h, s, dst);
}
//************************
// SUBTRACT
//************************
__global__
void LiteSubtractKernel(float* minuend, float * subtrahend,
int width, int height, int stride, float* difference)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
difference[pos] = minuend[pos] - subtrahend[pos];
}
void StereoLite::Subtract(float *minuend, float* subtrahend, int w, int h, int s, float* difference)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
LiteSubtractKernel << <blocks, threads >> > (minuend, subtrahend, w, h, s, difference);
} |
9ff27c0fa5cfb154a4e1868bf024f05752c1907d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define MASK_WIDTH 11
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define DIM_BLOCO 32
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
/* CUDA kernel */
__global__ void smoothing_gpu(PPMPixel *data, PPMPixel *data_copy, int dim_x, int dim_y){
unsigned int index, index_in_block;
int pos0_x, pos0_y; // Position in the image of the block's first pixel
int img_x, img_y; // Position of this thread's pixel in the image
//
int x, y, lx, ly;
int total_red, total_blue, total_green;
//
pos0_x = blockIdx.x*(blockDim.x-2*(MASK_WIDTH/2));
pos0_y = blockIdx.y*(blockDim.y-2*(MASK_WIDTH/2));
img_x = pos0_x+(threadIdx.x-MASK_WIDTH/2);
img_y = pos0_y+(threadIdx.y-MASK_WIDTH/2);
index_in_block = blockDim.x*threadIdx.y+threadIdx.x;
//if(img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
PPMPixel s_data;
__shared__ PPMPixel s_data_copy[(DIM_BLOCO+2*(MASK_WIDTH/2))*(DIM_BLOCO+2*(MASK_WIDTH/2))];
if(img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
s_data_copy[index_in_block] = data_copy[img_y*dim_x+img_x];
} else {
s_data_copy[index_in_block].red = 0;
s_data_copy[index_in_block].blue = 0;
s_data_copy[index_in_block].green = 0;
}
__syncthreads();
if(img_x-pos0_x < DIM_BLOCO
&& img_x-pos0_x >= 0
&& img_y-pos0_y < DIM_BLOCO
&& img_y-pos0_y >= 0
&& img_x < dim_x
&& img_x >= 0
&& img_y < dim_y
&& img_y >= 0){
total_red = total_blue = total_green = 0;
for (y = img_y - ((MASK_WIDTH-1)/2); y <= (img_y + ((MASK_WIDTH-1)/2)); y++) {
for (x = img_x - ((MASK_WIDTH-1)/2); x <= (img_x + ((MASK_WIDTH-1)/2)); x++) {
//if (x >= 0 && y >= 0 && y < dim_y && x < dim_x) {
lx = x - img_x + threadIdx.x;
ly = y - img_y + threadIdx.y;
total_red += s_data_copy[ly*blockDim.x+lx].red;
total_blue += s_data_copy[ly*blockDim.x+lx].blue;
total_green += s_data_copy[ly*blockDim.x+lx].green;
//}
}
}
s_data.red = total_red / (MASK_WIDTH*MASK_WIDTH);
s_data.blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
s_data.green = total_green / (MASK_WIDTH*MASK_WIDTH);
data[img_y*dim_x+img_x] = s_data;
}
}
/* End of CUDA kernel */
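/* Note on the kernel above: each block is (DIM_BLOCO + 2*(MASK_WIDTH/2))^2 threads. The
   outer halo threads only stage pixels into shared memory (out-of-image positions are
   zero-filled, matching the commented-out CPU reference below, where out-of-range pixels
   contribute nothing to the sum); only the inner DIM_BLOCO x DIM_BLOCO threads write an
   output pixel. Caution: with DIM_BLOCO = 32 and MASK_WIDTH = 11 the block is
   42 x 42 = 1764 threads, which exceeds the usual 1024-threads-per-block limit, so the
   launch in main() would fail on most devices unless DIM_BLOCO is reduced (e.g. to 16). */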
/* void Smoothing_CPU_Serial(PPMImage *image, PPMImage *image_copy) {
int i, j, y, x;
int total_red, total_blue, total_green;
for (i = 0; i < image->y; i++) {
for (j = 0; j < image->x; j++) {
total_red = total_blue = total_green = 0;
for (y = i - ((MASK_WIDTH-1)/2); y <= (i + ((MASK_WIDTH-1)/2)); y++) {
for (x = j - ((MASK_WIDTH-1)/2); x <= (j + ((MASK_WIDTH-1)/2)); x++) {
if (x >= 0 && y >= 0 && y < image->y && x < image->x) {
total_red += image_copy->data[(y * image->x) + x].red;
total_blue += image_copy->data[(y * image->x) + x].blue;
total_green += image_copy->data[(y * image->x) + x].green;
} //if
} //for z
} //for y
image->data[(i * image->x) + j].red = total_red / (MASK_WIDTH*MASK_WIDTH);
image->data[(i * image->x) + j].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
image->data[(i * image->x) + j].green = total_green / (MASK_WIDTH*MASK_WIDTH);
}
}
} */
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Too many or no one arguments supplied.\n");
}
double t_start, t_end;
int i;
char *filename = argv[1]; // Receiving the input file
//int MASK_WIDTH;
//scanf("%d",&MASK_WIDTH);
PPMImage *image = readPPM(filename);
PPMImage *image_output = readPPM(filename);
t_start = rtclock();
/* CUDA stuff */
unsigned int n = image->x*image->y;
unsigned int dim_grid_x = (image->x+DIM_BLOCO)/DIM_BLOCO;
unsigned int dim_grid_y = (image->y+DIM_BLOCO)/DIM_BLOCO;
unsigned int data_size = 3*(sizeof(unsigned char))*n;
PPMPixel *d_data, *d_data_copy;
hipMalloc((void **)&d_data, data_size);
hipMalloc((void **)&d_data_copy, data_size);
//
hipMemcpy(d_data, image_output->data, data_size, hipMemcpyHostToDevice);
hipMemcpy(d_data_copy, image->data, data_size, hipMemcpyHostToDevice);
//
dim3 dimBlock(DIM_BLOCO+2*(MASK_WIDTH/2),DIM_BLOCO+2*(MASK_WIDTH/2));
dim3 dimGrid(dim_grid_x,dim_grid_y);
//
hipLaunchKernelGGL(( smoothing_gpu), dim3(dimGrid),dim3(dimBlock), 0, 0, d_data, d_data_copy, image->x, image->y);
//
hipMemcpy(image_output->data, d_data, data_size, hipMemcpyDeviceToHost);
//
hipFree(d_data); hipFree(d_data_copy);
/* End of CUDA stuff */
t_end = rtclock();
writePPM(image_output);
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(image);
free(image_output);
}
| 9ff27c0fa5cfb154a4e1868bf024f05752c1907d.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define MASK_WIDTH 11
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
#define DIM_BLOCO 32
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
static PPMImage *readPPM(const char *filename) {
char buff[16];
PPMImage *img;
FILE *fp;
int c, rgb_comp_color;
fp = fopen(filename, "rb");
if (!fp) {
fprintf(stderr, "Unable to open file '%s'\n", filename);
exit(1);
}
if (!fgets(buff, sizeof(buff), fp)) {
perror(filename);
exit(1);
}
if (buff[0] != 'P' || buff[1] != '6') {
fprintf(stderr, "Invalid image format (must be 'P6')\n");
exit(1);
}
img = (PPMImage *) malloc(sizeof(PPMImage));
if (!img) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
c = getc(fp);
while (c == '#') {
while (getc(fp) != '\n')
;
c = getc(fp);
}
ungetc(c, fp);
if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
exit(1);
}
if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
filename);
exit(1);
}
if (rgb_comp_color != RGB_COMPONENT_COLOR) {
fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
exit(1);
}
while (fgetc(fp) != '\n')
;
img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
if (!img->data) {
fprintf(stderr, "Unable to allocate memory\n");
exit(1);
}
if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
fprintf(stderr, "Error loading image '%s'\n", filename);
exit(1);
}
fclose(fp);
return img;
}
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
/* CUDA kernel */
__global__ void smoothing_gpu(PPMPixel *data, PPMPixel *data_copy, int dim_x, int dim_y){
unsigned int index, index_in_block;
int pos0_x, pos0_y; // Position in the image of the block's first pixel
int img_x, img_y; // Position of this thread's pixel in the image
//
int x, y, lx, ly;
int total_red, total_blue, total_green;
//
pos0_x = blockIdx.x*(blockDim.x-2*(MASK_WIDTH/2));
pos0_y = blockIdx.y*(blockDim.y-2*(MASK_WIDTH/2));
img_x = pos0_x+(threadIdx.x-MASK_WIDTH/2);
img_y = pos0_y+(threadIdx.y-MASK_WIDTH/2);
index_in_block = blockDim.x*threadIdx.y+threadIdx.x;
//if(img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
PPMPixel s_data;
__shared__ PPMPixel s_data_copy[(DIM_BLOCO+2*(MASK_WIDTH/2))*(DIM_BLOCO+2*(MASK_WIDTH/2))];
if(img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
s_data_copy[index_in_block] = data_copy[img_y*dim_x+img_x];
} else {
s_data_copy[index_in_block].red = 0;
s_data_copy[index_in_block].blue = 0;
s_data_copy[index_in_block].green = 0;
}
__syncthreads();
if(img_x-pos0_x < DIM_BLOCO
&& img_x-pos0_x >= 0
&& img_y-pos0_y < DIM_BLOCO
&& img_y-pos0_y >= 0
&& img_x < dim_x
&& img_x >= 0
&& img_y < dim_y
&& img_y >= 0){
total_red = total_blue = total_green = 0;
for (y = img_y - ((MASK_WIDTH-1)/2); y <= (img_y + ((MASK_WIDTH-1)/2)); y++) {
for (x = img_x - ((MASK_WIDTH-1)/2); x <= (img_x + ((MASK_WIDTH-1)/2)); x++) {
//if (x >= 0 && y >= 0 && y < dim_y && x < dim_x) {
lx = x - img_x + threadIdx.x;
ly = y - img_y + threadIdx.y;
total_red += s_data_copy[ly*blockDim.x+lx].red;
total_blue += s_data_copy[ly*blockDim.x+lx].blue;
total_green += s_data_copy[ly*blockDim.x+lx].green;
//}
}
}
s_data.red = total_red / (MASK_WIDTH*MASK_WIDTH);
s_data.blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
s_data.green = total_green / (MASK_WIDTH*MASK_WIDTH);
data[img_y*dim_x+img_x] = s_data;
}
}
/* End of CUDA kernel */
/* void Smoothing_CPU_Serial(PPMImage *image, PPMImage *image_copy) {
int i, j, y, x;
int total_red, total_blue, total_green;
for (i = 0; i < image->y; i++) {
for (j = 0; j < image->x; j++) {
total_red = total_blue = total_green = 0;
for (y = i - ((MASK_WIDTH-1)/2); y <= (i + ((MASK_WIDTH-1)/2)); y++) {
for (x = j - ((MASK_WIDTH-1)/2); x <= (j + ((MASK_WIDTH-1)/2)); x++) {
if (x >= 0 && y >= 0 && y < image->y && x < image->x) {
total_red += image_copy->data[(y * image->x) + x].red;
total_blue += image_copy->data[(y * image->x) + x].blue;
total_green += image_copy->data[(y * image->x) + x].green;
} //if
} //for z
} //for y
image->data[(i * image->x) + j].red = total_red / (MASK_WIDTH*MASK_WIDTH);
image->data[(i * image->x) + j].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
image->data[(i * image->x) + j].green = total_green / (MASK_WIDTH*MASK_WIDTH);
}
}
} */
int main(int argc, char *argv[]) {
if( argc != 2 ) {
printf("Too many or no one arguments supplied.\n");
}
double t_start, t_end;
int i;
char *filename = argv[1]; // Receiving the input file
//int MASK_WIDTH;
//scanf("%d",&MASK_WIDTH);
PPMImage *image = readPPM(filename);
PPMImage *image_output = readPPM(filename);
t_start = rtclock();
/* CUDA stuff */
unsigned int n = image->x*image->y;
unsigned int dim_grid_x = (image->x+DIM_BLOCO)/DIM_BLOCO;
unsigned int dim_grid_y = (image->y+DIM_BLOCO)/DIM_BLOCO;
unsigned int data_size = 3*(sizeof(unsigned char))*n;
PPMPixel *d_data, *d_data_copy;
cudaMalloc((void **)&d_data, data_size);
cudaMalloc((void **)&d_data_copy, data_size);
//
cudaMemcpy(d_data, image_output->data, data_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_data_copy, image->data, data_size, cudaMemcpyHostToDevice);
//
dim3 dimBlock(DIM_BLOCO+2*(MASK_WIDTH/2),DIM_BLOCO+2*(MASK_WIDTH/2));
dim3 dimGrid(dim_grid_x,dim_grid_y);
//
smoothing_gpu<<<dimGrid,dimBlock>>>(d_data, d_data_copy, image->x, image->y);
//
cudaMemcpy(image_output->data, d_data, data_size, cudaMemcpyDeviceToHost);
//
cudaFree(d_data); cudaFree(d_data_copy);
/* End of CUDA stuff */
t_end = rtclock();
writePPM(image_output);
//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);
free(image);
free(image_output);
}
|
c79cb45ef5e506f6e046899932324257ef478257.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void PSROIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
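// Note: this is what makes the pooling "position-sensitive": output bin (ph, pw) of output
// channel c_out reads from its own dedicated input channel
// c_in = (c_out * pooled_height + ph) * pooled_width + pw and box-averages the input values
// inside the bin; channel_mapping records that c_in per output element so the backward
// kernel can route gradients to the correct input channel.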
template <typename T>
__global__ void PSROIPoolBackward(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
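// Note: atomicAdd is required above because ROIs may overlap, and adjacent bins within one
// ROI can share boundary rows/columns (floor/ceil binning), so different threads may
// accumulate into the same grad_input element.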
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIPool_forward", [&] {
hipLaunchKernelGGL(( PSROIPoolForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
channels_out,
output.data<scalar_t>(),
channel_mapping.data<int>());
});
AT_CUDA_CHECK(hipGetLastError());
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.type().is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIPool_backward", [&] {
hipLaunchKernelGGL(( PSROIPoolBackward<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
channel_mapping.data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
| c79cb45ef5e506f6e046899932324257ef478257.cu | #include <ATen/ATen.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "cuda_helpers.h"
template <typename T>
__global__ void PSROIPoolForward(
const int nthreads,
const T* input,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* rois,
const int channels_out,
T* output,
int* channel_mapping) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c_out, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c_out = (index / pooled_width / pooled_height) % channels_out;
int n = index / pooled_width / pooled_height / channels_out;
// (n, c_in, ph, pw) is the associated element in the input
int c_in = (c_out * pooled_height + ph) * pooled_width + pw;
// [start, end) interval for spatial sampling
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height - 1);
hend = min(max(hend + roi_start_h, 0), height - 1);
wstart = min(max(wstart + roi_start_w, 0), width - 1);
wend = min(max(wend + roi_start_w, 0), width - 1);
bool is_empty = (hend <= hstart) || (wend <= wstart);
const T* offset_input =
input + (roi_batch_ind * channels + c_in) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int input_index = h * width + w;
out_sum += offset_input[input_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area;
channel_mapping[index] = c_in;
}
}
template <typename T>
__global__ void PSROIPoolBackward(
const int nthreads,
const T* grad_output,
const int* channel_mapping,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int channels_out,
T* grad_input,
const T* rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, *, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / channels_out;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
int roi_start_w = roundf(offset_rois[1] * spatial_scale);
int roi_start_h = roundf(offset_rois[2] * spatial_scale);
int roi_end_w = roundf(offset_rois[3] * spatial_scale);
int roi_end_h = roundf(offset_rois[4] * spatial_scale);
// Force too small ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w, 1);
int roi_height = max(roi_end_h - roi_start_h, 1);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int c_in = channel_mapping[index];
T* grad_input_offset =
grad_input + (roi_batch_ind * channels + c_in) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? static_cast<T>(0) : grad_output[index] / bin_area;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int grad_input_index = h * width + w;
atomicAdd(grad_input_offset + grad_input_index, diff_val);
}
}
}
}
std::tuple<at::Tensor, at::Tensor> PSROIPool_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "PSROIPool_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
AT_ASSERTM(
channels % (pooled_height * pooled_width) == 0,
"input channels must be a multiple of pooling height * pooling width");
int channels_out = channels / (pooled_height * pooled_width);
auto output = at::zeros(
{num_rois, channels_out, pooled_height, pooled_width}, input.options());
auto channel_mapping =
at::zeros(output.sizes(), input.options().dtype(at::kInt));
auto output_size = output.numel();
if (output_size == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "PSROIPool_forward", [&] {
PSROIPoolForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
rois.contiguous().data<scalar_t>(),
channels_out,
output.data<scalar_t>(),
channel_mapping.data<int>());
});
AT_CUDA_CHECK(cudaGetLastError());
return std::make_tuple(output, channel_mapping);
}
at::Tensor PSROIPool_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const at::Tensor& channel_mapping,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width) {
// Check if input tensors are CUDA tensors
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
AT_ASSERTM(
channel_mapping.type().is_cuda(),
"channel_mapping must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2},
channel_mapping_t{channel_mapping, "channel_mapping", 3};
at::CheckedFrom c = "PSROIPool_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int channels_out = channels / (pooled_height * pooled_width);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "PSROIPool_backward", [&] {
PSROIPoolBackward<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
channel_mapping.data<int>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
channels_out,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
|
be07838444da1aea779b7a07b40832e883e0af72.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* compose
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <limits>
#include <thread>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/hash.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
namespace intersect_pruned_internal {
/* Information associated with a state active on a particular frame.. */
struct StateInfo {
/* abs_state_id is the state-index in a_fsas_. Note: the ind0 in here
won't necessarily match the ind0 within FrameInfo::state if
a_fsas_stride_ == 0. */
int32_t a_fsas_state_idx01;
/* Caution: this is ACTUALLY A FLOAT that has been bit-twiddled using
FloatToOrderedInt/OrderedIntToFloat so we can use atomic max. It
represents a Viterbi-style 'forward probability'. (Viterbi, meaning: we
use max not log-sum). You can take the pruned lattice and rescore it if
you want log-sum. */
int32_t forward_loglike;
/* Note: this `backward_loglike` is the best score of any path from here to
the end, minus the best path in the overall FSA, i.e. it's the backward
score you get if, at the final-state, you set backward_loglike ==
forward_loglike. So backward_loglike + OrderedIntToFloat(forward_loglike)
<= 0, and you can treat it somewhat like a posterior (except they don't sum
to one as we're using max, not log-add).
*/
float backward_loglike;
};
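/* Note: FloatToOrderedInt / OrderedIntToFloat are defined elsewhere in k2; the usual trick
   behind such a pair (shown here only as an illustrative sketch, not necessarily k2's
   exact code) is to reinterpret the float's bits as an int32 and flip the non-sign bits of
   negative values, e.g.
       int32_t i = bit_cast_to_int32(f);
       return i >= 0 ? i : i ^ 0x7FFFFFFF;
   which preserves ordering, so an integer atomic max on the encoded value behaves like a
   float max. */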
struct ArcInfo { // for an arc that wasn't pruned away...
int32_t a_fsas_arc_idx012; // the arc-index in a_fsas_.
float arc_loglike; // loglike on this arc: equals loglike from data
// (nnet output, == b_fsas), plus loglike from
// the arc in a_fsas.
union {
// these 3 different ways of storing the index of the destination state
// are used at different stages of the algorithm; we give them different
// names for clarity.
int32_t dest_a_fsas_state_idx01; // The destination-state as an index
// into a_fsas_.
int32_t dest_info_state_idx1; // The destination-state as an idx1 into the
// next FrameInfo's `arcs` or `states`,
// omitting the FSA-index which can be worked
// out from the structure of this frame's
// ArcInfo.
} u;
float end_loglike; // loglike at the end of the arc just before
// (conceptually) it joins the destination state.
};
/*
static std::ostream &operator<<(std::ostream &os, const StateInfo &s) {
os << "StateInfo{" << s.a_fsas_state_idx01 << ","
<< OrderedIntToFloat(s.forward_loglike) << "," << s.backward_loglike
<< "}";
return os;
}
static std::ostream &operator<<(std::ostream &os, const ArcInfo &a) {
os << "ArcInfo{" << a.a_fsas_arc_idx012 << "," << a.arc_loglike << ","
<< a.u.dest_a_fsas_state_idx01 << "," << a.end_loglike
<< "[i=" << FloatToOrderedInt(a.end_loglike) << "]"
<< "}";
return os;
}
*/
} // namespace intersect_pruned_internal
using namespace intersect_pruned_internal; // NOLINT
// Caution: this is really a .cu file. It contains mixed host and device code.
/*
Pruned intersection (a.k.a. composition) that corresponds to decoding for
speech recognition-type tasks. Can use either different decoding graphs (one
per acoustic sequence) or a shared graph
*/
class MultiGraphDenseIntersectPruned {
public:
/**
Pruned intersection (a.k.a. composition) that corresponds to decoding for
speech recognition-type tasks
@param [in] a_fsas The decoding graphs, one per sequence. E.g. might
just be a linear sequence of phones, or might be
something more complicated. Must have either the
same Dim0() as b_fsas, or Size0()==1 in which
case the graph is shared.
@param [in] b_fsas The neural-net output, with each frame containing the
log-likes of each phone. A series of sequences of
(in general) different length.
@param [in] search_beam "Default" search/decoding beam. The actual
beam is dynamic and also depends on max_active and
min_active.
@param [in] output_beam Beam for pruning the output FSA, will
typically be smaller than search_beam.
@param [in] min_active Minimum number of FSA states that are allowed to
be active on any given frame for any given
intersection/composition task. This is advisory,
in that it will try not to have fewer than this
number active.
@param [in] max_active Maximum number of FSA states that are allowed to
be active on any given frame for any given
intersection/composition task. This is advisory,
in that it will try not to exceed that but may not
always succeed. This determines the hash size.
*/
MultiGraphDenseIntersectPruned(FsaVec &a_fsas, DenseFsaVec &b_fsas,
float search_beam, float output_beam,
int32_t min_active, int32_t max_active)
: a_fsas_(a_fsas),
b_fsas_(b_fsas),
search_beam_(search_beam),
output_beam_(output_beam),
min_active_(min_active),
max_active_(max_active),
dynamic_beams_(a_fsas.Context(), b_fsas.shape.Dim0(), search_beam),
forward_semaphore_(1) {
NVTX_RANGE(K2_FUNC);
c_ = GetContext(a_fsas.shape, b_fsas.shape);
T_ = b_fsas_.shape.MaxSize(1);
K2_CHECK(b_fsas.scores.IsContiguous());
K2_CHECK_GT(search_beam, 0);
K2_CHECK_GT(output_beam, 0);
K2_CHECK_GE(min_active, 0);
K2_CHECK_GT(max_active, min_active);
K2_CHECK(a_fsas.shape.Dim0() == b_fsas.shape.Dim0() ||
a_fsas.shape.Dim0() == 1);
K2_CHECK_GE(b_fsas.shape.Dim0(), 1);
int32_t num_seqs = b_fsas.shape.Dim0();
int32_t num_buckets = RoundUpToNearestPowerOfTwo(num_seqs * 4 *
max_active);
if (num_buckets < 128)
num_buckets = 128;
state_map_ = Hash32(c_, num_buckets);
int32_t num_a_copies;
if (a_fsas.shape.Dim0() == 1) {
a_fsas_stride_ = 0;
state_map_fsa_stride_ = a_fsas.TotSize(1);
num_a_copies = b_fsas.shape.Dim0();
} else {
K2_CHECK_EQ(a_fsas.shape.Dim0(), b_fsas.shape.Dim0());
a_fsas_stride_ = 1;
state_map_fsa_stride_ = 0;
num_a_copies = 1;
}
int64_t num_keys = num_a_copies * (int64_t)a_fsas.TotSize(1);
K2_CHECK(num_keys == (uint32_t)num_keys);
{ // set up do_pruning_after_ and prune_t_begin_end_.
do_pruning_after_.resize(T_ + 1, (char)0);
// each time we prune, prune 30 frames; but shift by 20 frames each
// time so there are 10 frames of overlap.
int32_t prune_num_frames = 30,
prune_shift = 20,
T = T_;
K2_CHECK_GT(prune_num_frames, prune_shift);
// The first begin_t is negative but will be rounded up to zero to get the
// start of the range. The motivation is: we don't want to wait until we
// have processed `prune_num_frames` frames to prune for the first time,
// because that first interval of not-pruning, being larger than normal,
// would dominate the maximum memory used by intersection.
for (int32_t begin_t = prune_shift - prune_num_frames; ;
begin_t += prune_shift) {
int32_t prune_begin = std::max<int32_t>(0, begin_t),
prune_end = begin_t + prune_num_frames;
bool last = false;
if (prune_end >= T) {
prune_end = T;
last = true;
}
K2_CHECK_LT(prune_begin, prune_end);
do_pruning_after_[prune_end - 1] = (char)1;
prune_t_begin_end_.push_back({prune_begin, prune_end});
if (last)
break;
}
}
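// Worked example: with T = 100 the loop above produces the (begin, end) ranges
// (0,20), (10,40), (30,60), (50,80), (70,100), i.e. 30-frame windows shifted by 20 with
// 10 frames of overlap (the first one clipped at 0), and do_pruning_after_ is set at
// t = 19, 39, 59, 79 and 99.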
}
// The information we have for each frame of the pruned-intersection (really:
// decoding) algorithm. We keep an array of these, one for each frame, up to
// the length of the longest sequence we're decoding plus one.
struct FrameInfo {
// States that are active at the beginning of this frame. Indexed
// [fsa_idx][state_idx], where fsa_idx indexes b_fsas_ (and a_fsas_, if
// a_fsas_stride_ != 0); and state_idx just enumerates the active states
// on this frame (as state_idx01's in a_fsas_).
Ragged<StateInfo> states; // 2 axes: fsa, state
// Indexed [fsa_idx][state_idx][arc_idx].. the first 2 indexes are
// the same as those into 'states' (the first 2 levels of the structure
// are shared), and the last one enumerates the arcs leaving each of those
// states.
//
// Note: there may be indexes [fsa_idx] that have no states (because that
// FSA had fewer frames than the max), and indexes [fsa_idx][state_idx] that
// have no arcs due to pruning.
Ragged<ArcInfo> arcs; // 3 axes: fsa, state, arc
};
/* Does the main work of intersection/composition, but doesn't produce any
output; the output is provided when you call FormatOutput(). */
void Intersect() {
/*
T is the largest number of (frames+1) of neural net output, i.e. the largest
number of frames of log-likelihoods; we count the extra final frame of (0,
-inf, -inf..) that is used for the final-arc. The largest number of
states in the fsas represented by b_fsas equals T+1 (e.g. 1 frame would
require 2 states, because that 1 frame is the arc from state 0 to state
1). So the #states is 2 greater than the actual number of frames in the
neural-net output.
*/
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = b_fsas_.shape.Dim0(), T = T_;
std::ostringstream os;
os << "Intersect:T=" << T << ",num_fsas=" << num_fsas
<< ",TotSize(1)=" << b_fsas_.shape.TotSize(1);
NVTX_RANGE(os.str().c_str());
std::thread backward_thread(BackwardPassStatic, this);
// we'll initially populate frames_[0.. T+1], but discard the one at T+1,
// which has no arcs or states, the ones we use are from 0 to T.
frames_.reserve(T + 2);
frames_.push_back(InitialFrameInfo());
for (int32_t t = 0; t <= T; t++) {
frames_.push_back(PropagateForward(t, frames_.back().get()));
if (do_pruning_after_[t]) {
// let a phase of backward-pass pruning commence.
backward_semaphore_.Signal(c_);
// note: normally we should acquire forward_semaphore_ without having to
// wait. It avoids the backward pass getting too far behind the forward
// pass, which could mean too much memory is used.
forward_semaphore_.acquire();
}
}
// The FrameInfo for time T+1 will have no states. We did that
// last PropagateForward so that the 'arcs' member of frames_[T]
// is set up (it has no arcs but we need the shape).
frames_.pop_back();
backward_thread.join();
}
void BackwardPass() {
int32_t num_fsas = b_fsas_.shape.Dim0(),
num_work_items = max_active_ * num_fsas * T_;
ParallelRunner pr(c_);
// if num_work_items is big enough, it will actually create a new stream.
hipStream_t stream = pr.NewStream(num_work_items);
With w(stream); // This overrides whatever stream c_ contains with `stream`, if it's not
NVTX_RANGE(K2_FUNC);
for (size_t i = 0; i < prune_t_begin_end_.size(); i++) {
backward_semaphore_.Wait(c_);
int32_t prune_t_begin = prune_t_begin_end_[i].first,
prune_t_end = prune_t_begin_end_[i].second;
PruneTimeRange(prune_t_begin, prune_t_end);
forward_semaphore_.release();
}
}
static void BackwardPassStatic(MultiGraphDenseIntersectPruned *c) {
c->BackwardPass();
}
// Return FrameInfo for 1st frame, with `states` set but `arcs` not set.
std::unique_ptr<FrameInfo> InitialFrameInfo() {
NVTX_RANGE("InitialFrameInfo");
int32_t num_fsas = b_fsas_.shape.Dim0();
std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>();
if (a_fsas_.Dim0() == 1) {
int32_t start_states_per_seq = (a_fsas_.shape.TotSize(1) > 0), // 0 or 1
num_start_states = num_fsas * start_states_per_seq;
ans->states = Ragged<StateInfo>(
RegularRaggedShape(c_, num_fsas, start_states_per_seq),
Array1<StateInfo>(c_, num_start_states));
StateInfo *states_data = ans->states.values.Data();
K2_EVAL(
c_, num_start_states, lambda_set_states, (int32_t i)->void {
StateInfo info;
info.a_fsas_state_idx01 = 0; // start state of a_fsas_
info.forward_loglike = FloatToOrderedInt(0.0);
states_data[i] = info;
});
} else {
Ragged<int32_t> start_states = GetStartStates(a_fsas_);
ans->states =
Ragged<StateInfo>(start_states.shape,
Array1<StateInfo>(c_, start_states.NumElements()));
StateInfo *ans_states_values_data = ans->states.values.Data();
const int32_t *start_states_values_data = start_states.values.Data(),
*start_states_row_ids1_data =
start_states.shape.RowIds(1).Data();
K2_EVAL(
c_, start_states.NumElements(), lambda_set_state_info,
(int32_t states_idx01)->void {
StateInfo info;
info.a_fsas_state_idx01 = start_states_values_data[states_idx01];
info.forward_loglike = FloatToOrderedInt(0.0);
ans_states_values_data[states_idx01] = info;
});
}
return ans;
}
void FormatOutput(FsaVec *ofsa, Array1<int32_t> *arc_map_a,
Array1<int32_t> *arc_map_b) {
NVTX_RANGE("FormatOutput");
int32_t T = T_;
ContextPtr c_cpu = GetCpuContext();
Array1<ArcInfo *> arcs_data_ptrs(c_cpu, T + 1);
Array1<int32_t *> arcs_row_splits1_ptrs(c_cpu, T + 1);
for (int32_t t = 0; t <= T; t++) {
arcs_data_ptrs.Data()[t] = frames_[t]->arcs.values.Data();
arcs_row_splits1_ptrs.Data()[t] = frames_[t]->arcs.RowSplits(1).Data();
}
// transfer to GPU if we're using a GPU
arcs_data_ptrs = arcs_data_ptrs.To(c_);
ArcInfo **arcs_data_ptrs_data = arcs_data_ptrs.Data();
arcs_row_splits1_ptrs = arcs_row_splits1_ptrs.To(c_);
int32_t **arcs_row_splits1_ptrs_data = arcs_row_splits1_ptrs.Data();
const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data();
const int32_t *a_fsas_row_splits1 = a_fsas_.RowSplits(1).Data();
int32_t a_fsas_stride = a_fsas_stride_; // 0 or 1 depending if the decoding
// graph is shared.
int32_t num_fsas = b_fsas_.shape.Dim0();
RaggedShape final_arcs_shape;
{ /* This block populates `final_arcs_shape`. It is the shape of a ragged
tensor of arcs that conceptually would live at frames_[T+1]->arcs. It
contains no actual arcs, but may contain some states, that represent
"missing" final-states. The problem we are trying to solve is that
there was a start-state for an FSA but no final-state because it did
not survive pruning, and this could lead to an output FSA that is
invalid or is misinterpreted (because we are interpreting a non-final
state as a final state).
*/
Array1<int32_t> num_extra_states(c_, num_fsas + 1);
int32_t *num_extra_states_data = num_extra_states.Data();
K2_EVAL(c_, num_fsas, lambda_set_num_extra_states, (int32_t i) -> void {
int32_t final_t = b_fsas_row_splits1[i+1] - b_fsas_row_splits1[i];
int32_t *arcs_row_splits1_data = arcs_row_splits1_ptrs_data[final_t];
int32_t num_states_final_t = arcs_row_splits1_data[i + 1] -
arcs_row_splits1_data[i];
K2_CHECK_LE(num_states_final_t, 1);
// has_start_state is 1 if there is a start-state; note, we don't prune
// the start-states, so they'll be present if they were present in a_fsas_.
int32_t has_start_state = (a_fsas_row_splits1[i * a_fsas_stride] <
a_fsas_row_splits1[i * a_fsas_stride + 1]);
// num_extra_states_data[i] will be 1 if there was a start state but no final-state;
// else, 0.
num_extra_states_data[i] = has_start_state * (1 - num_states_final_t);
});
ExclusiveSum(num_extra_states, &num_extra_states);
RaggedShape top_shape = RaggedShape2(&num_extra_states, nullptr, -1),
bottom_shape = RegularRaggedShape(c_, top_shape.NumElements(), 0);
final_arcs_shape = ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape oshape;
// see documentation of Stack() in ragged_ops.h for explanation.
Array1<uint32_t> oshape_merge_map;
{
NVTX_RANGE("InitOshape");
// each of these have 3 axes.
std::vector<RaggedShape *> arcs_shapes(T + 2);
for (int32_t t = 0; t <= T; t++)
arcs_shapes[t] = &(frames_[t]->arcs.shape);
arcs_shapes[T + 1] = &final_arcs_shape;
// oshape is a 4-axis ragged tensor which is indexed:
// oshape[fsa_index][t][state_idx][arc_idx]
int32_t axis = 1;
oshape = Stack(axis, T + 2, arcs_shapes.data(), &oshape_merge_map);
}
int32_t *oshape_row_ids3 = oshape.RowIds(3).Data(),
*oshape_row_ids2 = oshape.RowIds(2).Data(),
*oshape_row_ids1 = oshape.RowIds(1).Data(),
*oshape_row_splits3 = oshape.RowSplits(3).Data(),
*oshape_row_splits2 = oshape.RowSplits(2).Data(),
*oshape_row_splits1 = oshape.RowSplits(1).Data();
int32_t num_arcs = oshape.NumElements();
*arc_map_a = Array1<int32_t>(c_, num_arcs);
*arc_map_b = Array1<int32_t>(c_, num_arcs);
int32_t *arc_map_a_data = arc_map_a->Data(),
*arc_map_b_data = arc_map_b->Data();
Array1<Arc> arcs_out(c_, num_arcs);
Arc *arcs_out_data = arcs_out.Data();
const Arc *a_fsas_arcs = a_fsas_.values.Data();
int32_t b_fsas_num_cols = b_fsas_.scores.Dim1();
const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data();
const uint32_t *oshape_merge_map_data = oshape_merge_map.Data();
K2_EVAL(
c_, num_arcs, lambda_format_arc_data,
(int32_t oarc_idx0123)->void { // by 'oarc' we mean arc with shape `oshape`.
int32_t oarc_idx012 = oshape_row_ids3[oarc_idx0123],
oarc_idx01 = oshape_row_ids2[oarc_idx012],
oarc_idx0 = oshape_row_ids1[oarc_idx01],
oarc_idx0x = oshape_row_splits1[oarc_idx0],
oarc_idx0xx = oshape_row_splits2[oarc_idx0x],
oarc_idx1 = oarc_idx01 - oarc_idx0x,
oarc_idx01x_next = oshape_row_splits2[oarc_idx01 + 1];
int32_t m = oshape_merge_map_data[oarc_idx0123],
t = m % (T + 2), // actually we won't get t == T or t == T + 1
// here since those frames have no arcs.
arcs_idx012 = m / (T + 2); // arc_idx012 into FrameInfo::arcs on time t,
// index of the arc on that frame.
K2_CHECK_EQ(t, oarc_idx1);
const ArcInfo *arcs_data = arcs_data_ptrs_data[t];
ArcInfo arc_info = arcs_data[arcs_idx012];
Arc arc;
arc.src_state = oarc_idx012 - oarc_idx0xx;
// Note: the idx1 w.r.t. the frame's `arcs` is an idx2 w.r.t. `oshape`.
int32_t dest_state_idx012 = oarc_idx01x_next +
arc_info.u.dest_info_state_idx1;
arc.dest_state = dest_state_idx012 - oarc_idx0xx;
arc.label = a_fsas_arcs[arc_info.a_fsas_arc_idx012].label;
int32_t fsa_id = oarc_idx0,
b_fsas_idx0x = b_fsas_row_splits1[fsa_id],
b_fsas_idx01 = b_fsas_idx0x + t,
b_fsas_idx2 = (arc.label + 1),
b_fsas_arc_idx012 = b_fsas_idx01 * b_fsas_num_cols + b_fsas_idx2;
arc.score = arc_info.arc_loglike;
arc_map_a_data[oarc_idx0123] = arc_info.a_fsas_arc_idx012;
arc_map_b_data[oarc_idx0123] = b_fsas_arc_idx012;
arcs_out_data[oarc_idx0123] = arc;
});
// Remove axis 1, which corresponds to time.
*ofsa = FsaVec(RemoveAxis(oshape, 1), arcs_out);
}
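/* Small sketch of how the merge map is decoded in lambda_format_arc_data
   above (the numbers are only illustrative): with T == 3 there are
   T + 2 == 5 stacked shapes, so a merge-map value m == 7 means the arc came
   from source t == 7 % 5 == 2 and was element 7 / 5 == 1 (an arc_idx012)
   within frames_[2]->arcs. */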
/*
Computes pruning cutoffs for this frame: these are the cutoffs for the arc
"forward score", one per FSA. This is a dynamic process involving
dynamic_beams_ which are updated on each frame (they start off at
search_beam_).
@param [in] arc_end_scores The "forward log-probs" (scores) at the
end of each arc, i.e. its contribution to the following
state. Is a tensor indexed [fsa_id][state][arc]; we
will get rid of the [state] dim, combining it with the
[arc] dim, so it's just [fsa_id][arc].
It is conceptually unchanged by this operation but non-const
because row-ids of its shape may need to be generated.
@return Returns a vector of log-likelihood cutoffs, one per FSA (the
cutoff will be -infinity for FSAs that don't have any active
states). The cutoffs will be of the form: the best score
for any arc, minus the dynamic beam. See the code for how
the dynamic beam is adjusted; it will approach
'search_beam_' as long as the number of active states in
each FSA is between min_active and max_active.
*/
Array1<float> GetPruningCutoffs(Ragged<float> &arc_end_scores) {
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = arc_end_scores.shape.Dim0();
// get the maximum score from each sub-list (i.e. each FSA, on this frame).
// Note: can probably do this with a cub Reduce operation using an operator
// that has side effects (that notices when it's operating across a
// boundary).
// the max will be -infinity for any FSA-id that doesn't have any active
// states (e.g. because that stream has finished).
// Casting to ragged2 just considers the top 2 indexes, ignoring the 3rd.
// i.e. it's indexed by [fsa_id][state].
Ragged<float> end_scores_per_fsa = arc_end_scores.RemoveAxis(1);
Array1<float> max_per_fsa(c_, end_scores_per_fsa.Dim0());
MaxPerSublist(end_scores_per_fsa, -std::numeric_limits<float>::infinity(),
&max_per_fsa);
const int32_t *arc_end_scores_row_splits1_data =
arc_end_scores.RowSplits(1).Data();
const float *max_per_fsa_data = max_per_fsa.Data();
float *dynamic_beams_data = dynamic_beams_.Data();
float default_beam = search_beam_, max_active = max_active_,
min_active = min_active_;
K2_CHECK_LT(min_active, max_active);
Array1<float> cutoffs(c_, num_fsas);
float *cutoffs_data = cutoffs.Data();
K2_EVAL(
c_, num_fsas, lambda_set_beam_and_cutoffs, (int32_t i)->void {
float best_loglike = max_per_fsa_data[i],
dynamic_beam = dynamic_beams_data[i];
int32_t active_states = arc_end_scores_row_splits1_data[i + 1] -
arc_end_scores_row_splits1_data[i];
if (active_states <= max_active) {
// Not constrained by max_active...
if (active_states >= min_active || active_states == 0) {
// Neither the max_active nor min_active constraints
// apply. Gradually approach 'beam'
// (Also approach 'beam' if active_states == 0; we might as
// well, since there is nothing to prune here).
dynamic_beam = 0.8 * dynamic_beam + 0.2 * default_beam;
} else {
// We violated the min_active constraint -> increase beam
if (dynamic_beam < default_beam) dynamic_beam = default_beam;
// gradually make the beam larger as long
// as we are below min_active
dynamic_beam *= 1.25;
}
} else {
// We violated the max_active constraint -> decrease beam
if (dynamic_beam > default_beam) dynamic_beam = default_beam;
// Decrease the beam as long as we have more than
// max_active active states.
dynamic_beam *= 0.8;
}
dynamic_beams_data[i] = dynamic_beam;
cutoffs_data[i] = best_loglike - dynamic_beam;
});
return cutoffs;
}
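/* Worked example of the beam update above (illustrative numbers): with
   search_beam_ == 20 and a current dynamic beam of 16,
     - active_states within [min_active, max_active]:
         beam becomes 0.8 * 16 + 0.2 * 20 == 16.8 (drifts back towards 20);
     - active_states < min_active:
         the beam is first raised to 20, then scaled to 20 * 1.25 == 25;
     - active_states > max_active:
         a beam of 25 is first lowered to 20, then scaled to 20 * 0.8 == 16.
   The cutoff written for that FSA is always best_loglike - dynamic_beam. */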
/*
Returns list of arcs on this frame, consisting of all arcs leaving
the states active on 'cur_frame'.
@param [in] t The time-index (on which to look up log-likes),
t >= 0
@param [in] cur_frame The FrameInfo for the current frame; only its
'states' member is expected to be set up on entry.
*/
Ragged<ArcInfo> GetArcs(int32_t t, FrameInfo *cur_frame) {
NVTX_RANGE(K2_FUNC);
Ragged<StateInfo> &states = cur_frame->states;
const StateInfo *state_values = states.values.Data();
// in a_fsas_ (the decoding graphs), maps from state_idx01 to arc_idx01x.
const int32_t *fsa_arc_splits = a_fsas_.shape.RowSplits(2).Data();
int32_t num_states = states.values.Dim();
Array1<int32_t> num_arcs(c_, num_states + 1);
int32_t *num_arcs_data = num_arcs.Data();
// `num_arcs` gives the num-arcs for each state in `states`.
K2_EVAL(
c_, num_states, num_arcs_lambda, (int32_t state_idx01)->void {
int32_t a_fsas_state_idx01 =
state_values[state_idx01].a_fsas_state_idx01,
a_fsas_arc_idx01x = fsa_arc_splits[a_fsas_state_idx01],
a_fsas_arc_idx01x_next =
fsa_arc_splits[a_fsas_state_idx01 + 1],
a_fsas_num_arcs = a_fsas_arc_idx01x_next - a_fsas_arc_idx01x;
num_arcs_data[state_idx01] = a_fsas_num_arcs;
});
ExclusiveSum(num_arcs, &num_arcs);
// initialize shape of array that will hold arcs leaving the active states.
// Its shape is [fsa_index][state][arc]; the top two levels are shared with
// `states`. 'ai' means ArcInfo.
RaggedShape ai_shape =
ComposeRaggedShapes(states.shape, RaggedShape2(&num_arcs, nullptr, -1));
// from state_idx01 (into `states` or `ai_shape`) -> fsa_idx0
const int32_t *ai_row_ids1 = ai_shape.RowIds(1).Data();
// from arc_idx012 (into `ai_shape`) to state_idx01
const int32_t *ai_row_ids2 = ai_shape.RowIds(2).Data();
// from state_idx01 to arc_idx01x
const int32_t *ai_row_splits2 = ai_shape.RowSplits(2).Data();
// from state_idx01 (into a_fsas_) to arc_idx01x (into a_fsas_)
const int32_t *a_fsas_row_splits2 = a_fsas_.shape.RowSplits(2).Data();
const Arc *arcs = a_fsas_.values.Data();
// fsa_idx0 to ind0x (into b_fsas_), which gives the 1st row for this
// sequence.
const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data();
const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data();
const float *score_data = b_fsas_.scores.Data();
int32_t scores_num_cols = b_fsas_.scores.Dim1();
auto scores_acc = b_fsas_.scores.Accessor();
Ragged<ArcInfo> ai(ai_shape);
ArcInfo *ai_data = ai.values.Data(); // uninitialized
K2_EVAL(
c_, ai.values.Dim(), ai_lambda, (int32_t ai_arc_idx012)->void {
int32_t ai_state_idx01 = ai_row_ids2[ai_arc_idx012],
ai_fsa_idx0 = ai_row_ids1[ai_state_idx01],
ai_arc_idx01x = ai_row_splits2[ai_state_idx01],
ai_arc_idx2 = ai_arc_idx012 - ai_arc_idx01x;
StateInfo sinfo = state_values[ai_state_idx01];
int32_t a_fsas_arc_idx01x =
a_fsas_row_splits2[sinfo.a_fsas_state_idx01],
a_fsas_arc_idx012 = a_fsas_arc_idx01x + ai_arc_idx2;
Arc arc = arcs[a_fsas_arc_idx012];
int32_t scores_idx0x = b_fsas_row_splits1[ai_fsa_idx0],
scores_idx01 = scores_idx0x + t, // t == ind1 into 'scores'
scores_idx2 =
arc.label + 1; // the +1 is so that -1 can be handled
K2_DCHECK_LT(static_cast<uint32_t>(scores_idx2),
static_cast<uint32_t>(scores_num_cols));
float acoustic_score = scores_acc(scores_idx01, scores_idx2);
ArcInfo ai;
ai.a_fsas_arc_idx012 = a_fsas_arc_idx012;
ai.arc_loglike = acoustic_score + arc.score;
ai.end_loglike =
OrderedIntToFloat(sinfo.forward_loglike) + ai.arc_loglike;
// at least currently, the ArcInfo object's src_state and dest_state
// are idx1's not idx01's, i.e. they don't contain the FSA-index,
// whereas the ai element is an idx01, so we need to do this to
// convert to an idx01; this relies on the fact that
// sinfo.abs_state_id == arc.src_state
// + a_fsas_fsa_idx0x.
ai.u.dest_a_fsas_state_idx01 =
sinfo.a_fsas_state_idx01 + arc.dest_state - arc.src_state;
ai_data[ai_arc_idx012] = ai;
});
return ai;
}
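/* Sketch of the idx/row_splits/row_ids naming convention used throughout this
   file (the numbers are only illustrative): if states.shape has
   row_splits1 == [0, 2, 3] then row_ids1 == [0, 0, 1]; a state_idx01 of 2
   belongs to fsa_idx0 == row_ids1[2] == 1 and its within-FSA index is
   state_idx1 == 2 - row_splits1[1] == 0. Names like arc_idx01x denote a
   row_splits value, i.e. the first arc_idx012 belonging to a given
   state_idx01. */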
// Later we may choose to support b_fsas_.Dim0() == 1 and a_fsas_.Dim0() > 1,
// and we'll have to change various bits of code for that to work.
inline int32_t NumFsas() { return b_fsas_.shape.Dim0(); }
/*
Does the forward-propagation (basically: the decoding step) and
returns a newly allocated FrameInfo* object for the next frame.
@param [in] t Time-step that we are processing arcs leaving from;
will be called with t=0, t=1, ...
@param [in] cur_frame FrameInfo object for the states corresponding to
time t; will have its 'states' member set up but not its
'arcs' member (this function will create that).
@return Returns FrameInfo object corresponding to time t+1; will have its
'states' member set up but not its 'arcs' member.
*/
std::unique_ptr<FrameInfo> PropagateForward(int32_t t, FrameInfo *cur_frame) {
NVTX_RANGE("PropagateForward");
int32_t num_fsas = NumFsas();
// Ragged<StateInfo> &states = cur_frame->states;
// arc_info has 3 axes: fsa_id, state, arc.
cur_frame->arcs = GetArcs(t, cur_frame);
Ragged<ArcInfo> &arc_info = cur_frame->arcs;
ArcInfo *ai_data = arc_info.values.Data();
Array1<float> ai_data_array1(c_, cur_frame->arcs.values.Dim());
float *ai_data_array1_data = ai_data_array1.Data();
K2_EVAL(
c_, ai_data_array1.Dim(), lambda_set_ai_data,
(int32_t i)->void { ai_data_array1_data[i] = ai_data[i].end_loglike; });
Ragged<float> ai_loglikes(arc_info.shape, ai_data_array1);
// `cutoffs` is of dimension num_fsas.
Array1<float> cutoffs = GetPruningCutoffs(ai_loglikes);
float *cutoffs_data = cutoffs.Data();
// write certain indexes ( into ai.values) to state_map_.Data(). Keeps
// track of the active states and will allow us to assign a numbering to
// them.
int32_t *ai_row_ids1 = arc_info.shape.RowIds(1).Data(),
*ai_row_ids2 = arc_info.shape.RowIds(2).Data();
auto state_map_acc = state_map_.GetAccessor();
int32_t state_map_fsa_stride = state_map_fsa_stride_;
// renumber_states will be a renumbering that dictates which of the arcs in
// 'ai' correspond to unique states. Only one arc for each dest-state is
// kept (it doesn't matter which one).
Renumbering renumber_states(c_, arc_info.NumElements());
char *keep_this_state_data = renumber_states.Keep().Data();
{
NVTX_RANGE("LambdaSetStateMap");
K2_EVAL(
c_, arc_info.NumElements(), lambda_set_state_map,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
int32_t dest_state_idx01 =
ai_data[arc_idx012].u.dest_a_fsas_state_idx01;
float end_loglike = ai_data[arc_idx012].end_loglike,
cutoff = cutoffs_data[fsa_id];
char keep_this_state = 0; // only one arc entering any state will
// have its 'keep_this_state_data' entry
// set to 1.
if (end_loglike > cutoff) {
int32_t state_map_idx = dest_state_idx01 +
fsa_id * state_map_fsa_stride;
if (state_map_acc.Insert(state_map_idx, arc_idx012))
keep_this_state = 1;
}
keep_this_state_data[arc_idx012] = keep_this_state;
});
}
int32_t num_states = renumber_states.NumNewElems();
// state_reorder_data maps from (state_idx01 on next frame) to (the
// arc_idx012 on this frame which is the source arc which we arbitrarily
// choose as being "responsible" for the creation of that state).
int32_t *state_reorder_data = renumber_states.Old2New().Data();
// state_to_fsa_id maps from an index into the next frame's
// FrameInfo::states.values() vector to the sequence-id (fsa_id) associated
// with it. It should be non-decreasing.
Array1<int32_t> state_to_fsa_id(c_, num_states);
{ // This block sets 'state_to_fsa_id'.
NVTX_RANGE("LambdaSetStateToFsaId");
int32_t *state_to_fsa_id_data = state_to_fsa_id.Data();
K2_EVAL(
c_, arc_info.NumElements(), lambda_state_to_fsa_id,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]],
this_state_j = state_reorder_data[arc_idx012],
next_state_j = state_reorder_data[arc_idx012 + 1];
if (next_state_j > this_state_j) {
state_to_fsa_id_data[this_state_j] = fsa_id;
}
});
K2_DCHECK(IsMonotonic(state_to_fsa_id));
}
std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>();
Array1<int32_t> states_row_splits1(c_, num_fsas + 1);
RowIdsToRowSplits(state_to_fsa_id, &states_row_splits1);
ans->states = Ragged<StateInfo>(
RaggedShape2(&states_row_splits1, &state_to_fsa_id, num_states),
Array1<StateInfo>(c_, num_states));
StateInfo *ans_states_data = ans->states.values.Data();
const int32_t minus_inf_int =
FloatToOrderedInt(-std::numeric_limits<float>::infinity());
K2_EVAL(
c_, num_states, lambda_init_loglike, (int32_t i)->void {
ans_states_data[i].forward_loglike = minus_inf_int;
});
{
NVTX_RANGE("LambdaModifyStateMap");
// Modify the elements of `state_map` to refer to the indexes into
// `ans->states` / `kept_states_data`, rather than the indexes into
// ai_data. This will decrease some of the values in `state_map`, in
// general.
K2_EVAL(
c_, arc_info.NumElements(), lambda_modify_state_map,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
int32_t dest_state_idx01 =
ai_data[arc_idx012].u.dest_a_fsas_state_idx01;
int32_t this_j = state_reorder_data[arc_idx012],
next_j = state_reorder_data[arc_idx012 + 1];
if (next_j > this_j) {
int32_t state_map_idx = dest_state_idx01 +
fsa_id * state_map_fsa_stride;
int32_t value, *value_addr;
bool ans = state_map_acc.Find(state_map_idx,
&value, &value_addr);
K2_CHECK(ans);
K2_CHECK_EQ(value, arc_idx012);
// Note: this_j is an idx01 into ans->states. previously it
// contained an arc_idx012 (of the entering arc that won the
// race).
*value_addr = this_j;
}
});
}
// We'll set up the data of the kept states below...
StateInfo *kept_states_data = ans->states.values.Data();
{
int32_t *ans_states_row_splits1_data = ans->states.RowSplits(1).Data();
NVTX_RANGE("LambdaSetStates");
K2_EVAL(
c_, arc_info.NumElements(), lambda_set_arcs_and_states,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
ArcInfo &info = ai_data[arc_idx012];
int32_t dest_a_fsas_state_idx01 = info.u.dest_a_fsas_state_idx01;
int32_t state_map_idx = dest_a_fsas_state_idx01 +
fsa_id * state_map_fsa_stride;
int32_t state_idx01;
if (!state_map_acc.Find(state_map_idx, &state_idx01))
state_idx01 = -1; // The destination state did not survive
// pruning.
int32_t state_idx1;
if (state_idx01 >= 0) {
int32_t state_idx0x = ans_states_row_splits1_data[fsa_id];
state_idx1 = state_idx01 - state_idx0x;
} else {
state_idx1 = -1; // Meaning: invalid.
}
// state_idx1 is the idx1 into ans->states, of the destination
// state.
info.u.dest_info_state_idx1 = state_idx1;
if (state_idx1 < 0)
return;
// multiple threads may write the same value to the address written
// to in the next line.
kept_states_data[state_idx01].a_fsas_state_idx01 =
dest_a_fsas_state_idx01;
int32_t end_loglike_int = FloatToOrderedInt(info.end_loglike);
// Set the forward log-like of the dest state to the largest of any
// of those of the incoming arcs. Note: we initialized this in
// lambda_init_loglike above.
AtomicMax(&(kept_states_data[state_idx01].forward_loglike),
end_loglike_int);
});
}
{
NVTX_RANGE("LambdaResetStateMap");
const int32_t *next_states_row_ids1 = ans->states.shape.RowIds(1).Data();
K2_EVAL(
c_, ans->states.NumElements(), lambda_reset_state_map,
(int32_t state_idx01)->void {
int32_t a_fsas_state_idx01 =
kept_states_data[state_idx01].a_fsas_state_idx01,
fsa_idx0 = next_states_row_ids1[state_idx01];
int32_t state_map_idx = a_fsas_state_idx01 +
fsa_idx0 * state_map_fsa_stride;
state_map_acc.Delete(state_map_idx);
});
}
return ans;
}
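/* Why `forward_loglike` can be maximized with AtomicMax on an int32_t: a
   monotone float->int bit mapping lets integer max stand in for float max.
   The following is only a sketch of one such mapping and is not necessarily
   k2's exact FloatToOrderedInt() implementation:

     int32_t FloatToOrderedIntSketch(float f) {
       int32_t i;
       memcpy(&i, &f, sizeof(i));
       return i >= 0 ? i : i ^ 0x7fffffff;  // fix ordering of negative floats
     }

   With any such mapping, a < b implies the mapped ints compare the same way,
   so the AtomicMax in lambda_set_arcs_and_states takes the max of the
   incoming arcs' end_loglike values without needing a floating-point
   atomic. */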
/*
Sets backward_loglike fields of StateInfo to the negative of the forward
prob if (this is the final-state or !only_final_probs), else -infinity.
This is used in computing the backward loglikes/scores for purposes of
pruning. This may be done after we're finished decoding/intersecting,
or while we are still decoding.
Note: something similar to this (setting backward-prob == forward-prob) is
also done in PropagateBackward() when we detect final-states. That's needed
because not all sequences have the same length, so some may have reached
their final state earlier. (Note: we only get to the final-state of a_fsas_
if we've reached the final frame of the input, because for non-final frames
we always have -infinity as the log-prob corresponding to the symbol -1.)
While we are still decoding, a background process will do pruning
concurrently with the forward computation, for purposes of reducing memory
usage (and so that most of the pruning can be made concurrent with the
forward computation). In this case we want to avoid pruning away anything
that wouldn't have been pruned away if we were to have waited to the end;
and it turns out that setting the backward probs to the negative of the
forward probs (i.e. for all states, not just final states) accomplishes
this. The issue was mentioned in the "Exact Lattice Generation.." paper and
also in the code for Kaldi's lattice-faster-decoder; search for "As in [3],
to save memory..."
@param [in] cur_frame Frame on which to set the backward probs
*/
void SetBackwardProbsFinal(FrameInfo *cur_frame) {
NVTX_RANGE("SetBackwardProbsFinal");
Ragged<StateInfo> &cur_states = cur_frame->states; // 2 axes: fsa,state
int32_t num_states = cur_states.values.Dim();
if (num_states == 0)
return;
StateInfo *cur_states_data = cur_states.values.Data();
const int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(),
*a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data(),
*cur_states_row_ids1_data = cur_states.RowIds(1).Data();
double minus_inf = -std::numeric_limits<double>::infinity();
K2_EVAL(c_, num_states, lambda_set_backward_prob, (int32_t state_idx01) -> void {
StateInfo *info = cur_states_data + state_idx01;
double backward_loglike,
forward_loglike = OrderedIntToFloat(info->forward_loglike);
if (forward_loglike - forward_loglike == 0) { // not -infinity...
// canonically we'd set this to zero, but setting it to the negative of the
// forward loglike when this is the final-state (in a_fsas_) has the effect of
// making the (forward+backward) probs equivalent to the logprob minus
// the best-path log-prob, which is convenient for pruning. If this
// is not actually the last frame of this sequence, which can happen
// if this was called before the forward decoding process was
// finished, what we are doing is a form of pruning that is guaranteed
// not to prune anything out that would not have been pruned out if we
// had waited until the real end of the file to do the pruning.
backward_loglike = -forward_loglike;
} else {
backward_loglike = minus_inf;
}
info->backward_loglike = backward_loglike;
});
}
/*
Does backward propagation of log-likes, which means setting the
backward_loglike field of the StateInfo variable (for cur_frame);
and works out which arcs and which states are to be pruned
on cur_frame; this information is output to Array1<char>'s which
are supplied by the caller.
These backward log-likes are normalized in such a way that you can add them
with the forward log-likes to produce the log-likelihood ratio vs the best
path (this will be non-positive). (To do this, for the final state we have
to set the backward log-like to the negative of the forward log-like; see
SetBackwardProbsFinal()).
This function also prunes arc-indexes on `cur_frame` and state-indexes
on `next_frame`.
@param [in] t The time-index (on which to look up log-likes);
equals time index of `cur_frame`; t >= 0
@param [in] cur_frame The FrameInfo for the frame on which we want to
set the forward log-like, and output pruning info
for arcs and states
@param [in] next_frame The next frame's FrameInfo, on which to look
up log-likes for the next frame; the
`backward_loglike` values of states on `next_frame`
are assumed to already be set, either by
SetBackwardProbsFinal() or a previous call to
PropagateBackward().
@param [out] cur_frame_states_keep An array, created by the caller,
to which we'll write 1s for elements of cur_frame->states
which we need to keep, and 0s for others.
@param [out] cur_frame_arcs_keep An array, created by the caller,
to which we'll write 1s for elements of cur_frame->arcs
which we need to keep (because they survived pruning),
and 0s for others.
*/
void PropagateBackward(int32_t t,
FrameInfo *cur_frame,
FrameInfo *next_frame,
Array1<char> *cur_frame_states_keep,
Array1<char> *cur_frame_arcs_keep) {
NVTX_RANGE("PropagateBackward");
int32_t num_states = cur_frame->states.NumElements(),
num_arcs = cur_frame->arcs.NumElements();
K2_CHECK_EQ(num_states, cur_frame_states_keep->Dim());
K2_CHECK_EQ(num_arcs, cur_frame_arcs_keep->Dim());
int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(),
*a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data();
float minus_inf = -std::numeric_limits<float>::infinity();
Ragged<float> arc_backward_prob(cur_frame->arcs.shape,
Array1<float>(c_, cur_frame->arcs.NumElements()));
float *arc_backward_prob_data = arc_backward_prob.values.Data();
ArcInfo *ai_data = cur_frame->arcs.values.Data();
int32_t *arcs_rowids1 = cur_frame->arcs.shape.RowIds(1).Data(),
*arcs_rowids2 = cur_frame->arcs.shape.RowIds(2).Data(),
*arcs_row_splits1 = cur_frame->arcs.shape.RowSplits(1).Data(),
*arcs_row_splits2 = cur_frame->arcs.shape.RowSplits(2).Data();
float output_beam = output_beam_;
// compute arc backward probs, and set elements of 'keep_cur_arcs_data'
int32_t next_num_states = next_frame->states.TotSize(1);
char *keep_cur_arcs_data = cur_frame_arcs_keep->Data(),
*keep_cur_states_data = cur_frame_states_keep->Data();
const int32_t *next_states_row_splits1_data =
next_frame->states.RowSplits(1).Data();
StateInfo *next_states_data = next_frame->states.values.Data();
StateInfo *cur_states_data = cur_frame->states.values.Data();
K2_EVAL(c_, num_arcs, lambda_set_arc_backward_prob_and_keep,
(int32_t arcs_idx012) -> void {
ArcInfo *arc = ai_data + arcs_idx012;
int32_t state_idx01 = arcs_rowids2[arcs_idx012],
seq_idx0 = arcs_rowids1[state_idx01], // 'seq' == fsa-idx in b
next_states_idx0x = next_states_row_splits1_data[seq_idx0];
// Note: if dest_state_idx1 == -1, dest_state_idx01 has a meaningless
// value below, but it's never referenced.
int32_t dest_state_idx1 = arc->u.dest_info_state_idx1,
dest_state_idx01 = next_states_idx0x + dest_state_idx1;
float backward_loglike = minus_inf;
char keep_this_arc = 0;
if (dest_state_idx1 == -1) {
// dest_state_idx1 == -1 means this arc was already pruned in
// the forward pass.. do nothing.
} else {
float arc_loglike = arc->arc_loglike;
float dest_state_backward_loglike =
next_states_data[dest_state_idx01].backward_loglike;
// 'backward_loglike' is the loglike at the beginning of the arc
backward_loglike = arc_loglike + dest_state_backward_loglike;
float src_state_forward_loglike = OrderedIntToFloat(
cur_states_data[arcs_rowids2[arcs_idx012]].forward_loglike);
// should be <= 0.0, mathematically.
K2_CHECK_LT(backward_loglike, -src_state_forward_loglike + 2.0);
if (backward_loglike + src_state_forward_loglike >= -output_beam) {
keep_this_arc = 1;
} else {
backward_loglike = minus_inf; // Don't let arcs outside beam
// contribute to their start-states'
// backward prob (we'll use that to
// prune the start-states away.)
}
}
keep_cur_arcs_data[arcs_idx012] = keep_this_arc;
arc_backward_prob_data[arcs_idx012] = backward_loglike;
});
/* note, the elements of state_backward_prob that don't have arcs leaving
them will be set to the supplied default. */
Array1<float> state_backward_prob(c_, num_states);
MaxPerSublist(arc_backward_prob, minus_inf, &state_backward_prob);
const float *state_backward_prob_data = state_backward_prob.Data();
const int32_t *cur_states_row_ids1 =
cur_frame->states.shape.RowIds(1).Data();
int32_t num_fsas = NumFsas();
K2_DCHECK_EQ(cur_frame->states.shape.Dim0(), num_fsas);
K2_EVAL(
c_, cur_frame->states.NumElements(), lambda_set_state_backward_prob,
(int32_t state_idx01)->void {
StateInfo *info = cur_states_data + state_idx01;
int32_t fsas_state_idx01 = info->a_fsas_state_idx01,
a_fsas_idx0 = a_fsas_row_ids1_data[fsas_state_idx01],
fsas_state_idx0x_next = a_fsas_row_splits1_data[a_fsas_idx0 + 1];
float forward_loglike = OrderedIntToFloat(info->forward_loglike),
backward_loglike;
// `is_final_state` means this is the final-state in a_fsas. this
// implies it's final in b_fsas too, since they both would have seen
// symbols -1.
int32_t is_final_state =
(fsas_state_idx01 + 1 >= fsas_state_idx0x_next);
if (is_final_state) {
// Note: there is only one final-state.
backward_loglike = -forward_loglike;
} else {
backward_loglike = state_backward_prob_data[state_idx01];
}
info->backward_loglike = backward_loglike;
keep_cur_states_data[state_idx01] = (backward_loglike != minus_inf);
});
}
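/* Worked example of the pruning criterion above (illustrative numbers): the
   quantity backward_loglike + src_state_forward_loglike equals, up to the
   normalization described in SetBackwardProbsFinal(), the score of the best
   complete path through this arc minus the score of the overall best path.
   With output_beam_ == 8, an arc whose best containing path is 5 worse than
   the overall best gives -5 >= -8 and is kept; one that is 10 worse gives
   -10 < -8, is dropped, and its contribution to the src-state's backward
   prob is set to -infinity so that lambda_set_state_backward_prob can prune
   the state as well. */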
/*
This function does backward propagation and pruning of arcs and states for a
specific time range.
@param [in] begin_t Lowest `t` value to call PropagateBackward() for
and to prune its arcs and states. Require t >= 0.
@param [in] end_t One-past-the-highest `t` value to call PropagateBackward()
and to prune its arcs and states. Require that
`frames_[t+1]` already be set up; this requires at least
end_t <= T.
Arcs on frames t >= end_t and states on frame t > end_t are ignored; the backward
probs on time end_t are set by SetBackwardProbsFinal(), see its documentation
to understand what this does if we haven't yet reached the end of one of the
sequences.
After this function is done, the arcs for `frames_[t]` with begin_t <= t < end_t and
the states for `frames_[t]` with begin_t < t < end_t will have their numbering changed.
(We don't renumber the states on begin_t because that would require the dest-states
of the arcs on time `begin_t - 1` to be modified). TODO: check this...
*/
void PruneTimeRange(int32_t begin_t,
int32_t end_t) {
SetBackwardProbsFinal(frames_[end_t].get());
ContextPtr cpu = GetCpuContext();
int32_t num_fsas = b_fsas_.shape.Dim0(),
num_t = end_t - begin_t;
Array1<int32_t> old_states_offsets(cpu, num_t + 1),
old_arcs_offsets(cpu, num_t + 1);
int32_t tot_states = 0, tot_arcs = 0;
{
int32_t *old_states_offsets_data = old_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data();
for (int32_t i = 0; i <= num_t; i++) {
int32_t t = begin_t + i;
old_states_offsets_data[i] = tot_states;
old_arcs_offsets_data[i] = tot_arcs;
if (i < num_t) {
tot_states += frames_[t]->arcs.TotSize(1);
tot_arcs += frames_[t]->arcs.TotSize(2);
}
}
}
// contains respectively: row_splits1_ptrs, row_ids1_ptrs,
// row_splits2_ptrs, row_ids2_ptrs,
// old_states_ptrs (really type StateInfo*),
// old_arcs_ptrs (really type ArcInfo*).
Array1<void*> old_all_ptrs(cpu, num_t * 6);
Renumbering renumber_states(c_, tot_states),
renumber_arcs(c_, tot_arcs);
{
void **all_p = old_all_ptrs.Data();
int32_t **old_row_splits1_ptrs_data = (int32_t**)all_p,
**old_row_ids1_ptrs_data = (int32_t**)all_p + num_t,
**old_row_splits2_ptrs_data = (int32_t**)all_p + 2 * num_t,
**old_row_ids2_ptrs_data = (int32_t**)all_p + 3 * num_t;
StateInfo **old_states_ptrs_data = (StateInfo**)all_p + 4 * num_t;
ArcInfo **old_arcs_ptrs_data = (ArcInfo**)all_p + 5 * num_t;
int32_t *old_states_offsets_data = old_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data();
for (int32_t t = end_t - 1; t >= begin_t; --t) {
int32_t i = t - begin_t;
Array1<char> this_states_keep =
renumber_states.Keep().Arange(old_states_offsets_data[i],
old_states_offsets_data[i + 1]),
this_arcs_keep =
renumber_arcs.Keep().Arange(old_arcs_offsets_data[i],
old_arcs_offsets_data[i + 1]);
FrameInfo *cur_frame = frames_[t].get();
PropagateBackward(t, cur_frame, frames_[t+1].get(),
&this_states_keep, &this_arcs_keep);
old_row_splits1_ptrs_data[i] = cur_frame->arcs.RowSplits(1).Data();
old_row_ids1_ptrs_data[i] = cur_frame->arcs.RowIds(1).Data();
old_row_splits2_ptrs_data[i] = cur_frame->arcs.RowSplits(2).Data();
old_row_ids2_ptrs_data[i] = cur_frame->arcs.RowIds(2).Data();
old_arcs_ptrs_data[i] = cur_frame->arcs.values.Data();
old_states_ptrs_data[i] = cur_frame->states.values.Data();
// We can't discard any states on t == begin_t because: if it is not t ==
// 0, it would be inconvenient to map the dest-states of arcs on t - 1;
// and if it is t == 0, this may remove the start-state, which would make
// it more complex to avoid invalid FSAs (e.g. with an end-state but no
// start-state, or in which we incorrectly interpret a non-start state as
// the start state).
if (i == 0) // t == begin_t
this_states_keep = (char)1; // set all elements of the array
// `states_keep` to 1.
}
}
old_states_offsets = old_states_offsets.To(c_);
old_arcs_offsets = old_arcs_offsets.To(c_);
Array1<int32_t> new_states_offsets = renumber_states.Old2New(true)[old_states_offsets],
new_arcs_offsets = renumber_arcs.Old2New(true)[old_arcs_offsets];
int32_t new_num_states = renumber_states.NumNewElems(),
new_num_arcs = renumber_arcs.NumNewElems();
// These arrays map to the (t - begin_t) corresponding to this state or arc
// in the new numbering, i.e. the frame index minus begin_t.
Array1<int32_t> new_state_to_frame(c_, new_num_states),
new_arc_to_frame(c_, new_num_arcs);
RowSplitsToRowIds(new_states_offsets, &new_state_to_frame);
RowSplitsToRowIds(new_arcs_offsets, &new_arc_to_frame);
const int32_t *old_states_offsets_data = old_states_offsets.Data(),
*new_states_offsets_data = new_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data(),
*new_arcs_offsets_data = new_arcs_offsets.Data(),
*new_state_to_frame_data = new_state_to_frame.Data(),
*new_arc_to_frame_data = new_arc_to_frame.Data(),
*states_old2new_data = renumber_states.Old2New().Data(),
*states_new2old_data = renumber_states.New2Old().Data(),
*arcs_old2new_data = renumber_arcs.Old2New().Data(),
*arcs_new2old_data = renumber_arcs.New2Old().Data();
// Allocate the new row_splits and row_ids vectors for the shapes on the
// individual frames, and the new arc-info and state-info.
Array2<int32_t> all_row_splits1(c_, num_t, num_fsas + 1);
auto all_row_splits1_acc = all_row_splits1.Accessor();
Array1<int32_t> all_row_ids1(c_, new_num_states);
// the "+ num_t" below is for the extra element of each row_splits array.
Array1<int32_t> all_row_splits2(c_, new_num_states + num_t);
Array1<int32_t> all_row_ids2(c_, new_num_arcs);
Array1<StateInfo> all_states(c_, new_num_states);
Array1<ArcInfo> all_arcs(c_, new_num_arcs);
int32_t *all_row_ids1_data = all_row_ids1.Data(),
*all_row_ids2_data = all_row_ids2.Data(),
*all_row_splits2_data = all_row_splits2.Data();
StateInfo *all_states_data = all_states.Data();
ArcInfo *all_arcs_data = all_arcs.Data();
old_all_ptrs = old_all_ptrs.To(c_);
void **all_p = old_all_ptrs.Data();
K2_EVAL2(c_, num_t, num_fsas + 1,
lambda_set_new_row_splits1, (int32_t t_offset,
int32_t seq_idx) -> void {
// note, t_offset is t - begin_t.
int32_t *old_row_splits1 = (int32_t*) all_p[t_offset];
int32_t old_idx0x = old_row_splits1[seq_idx];
// "pos" means position in appended states vector
// old_start_pos means start for this `t`.
int32_t old_start_pos = old_states_offsets_data[t_offset],
old_pos = old_start_pos + old_idx0x,
new_start_pos = states_old2new_data[old_start_pos],
new_pos = states_old2new_data[old_pos],
new_idx0x = new_pos - new_start_pos;
all_row_splits1_acc(t_offset, seq_idx) = new_idx0x;
// TODO: set elem zero of row-splits?
if (seq_idx == 0) {
// We assign the `seq_idx == 0` version of the kernel to set the initial
// zero in each row_splits vector.
all_row_splits2_data[new_pos + t_offset] = 0;
}
});
K2_EVAL(c_, new_num_states, lambda_per_state, (int32_t new_i) -> void {
// new_i is position in appended vector of all states.
int32_t t_offset = new_state_to_frame_data[new_i],
old_state_start_pos = old_states_offsets_data[t_offset],
new_arc_start_pos = new_arcs_offsets_data[t_offset],
old_arc_start_pos = old_arcs_offsets_data[t_offset],
old_i = states_new2old_data[new_i],
old_state_idx01 = old_i - old_state_start_pos;
// this old_states_data is from its FrameInfo::states.
const StateInfo *old_states_data = (StateInfo*)all_p[4 * num_t + t_offset];
const int32_t *old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset],
*old_row_splits2_data = (int32_t*)all_p[2 * num_t + t_offset];
// set the row-ids1 (these contain FSA-ids).
all_row_ids1_data[new_i] = old_row_ids1_data[old_state_idx01];
{ // set the row-splits2.
// We make each kernel responsible for the *next* row_splits entry,
// i.e. for its new_state_idx01 plus one. This solves the problem of no
// kernel being responsible for the last row-splits entry. We
// separately wrote the zeros for the 1st row-splits entry, in a
// previous kernel.
//
// It's safe to use old_state_idx01+1 instead of doing the same mapping
// from new_i+1 that we do from new_i to old_state_idx01, because
// we know this state was kept (because it has a new_i index.)
int32_t old_arc_idx01x_next = old_row_splits2_data[old_state_idx01+1],
old_arc_pos_next = old_arc_idx01x_next + old_arc_start_pos,
new_arc_pos_next = arcs_old2new_data[old_arc_pos_next],
new_arc_idx01x_next = new_arc_pos_next - new_arc_start_pos;
// "+ t_offset" is to compensate for the extra element of each row_splits
// vector. The "+ 1" is about the "next", i.e. each kernel is responsible
// for the next row_splits element, and none is responsible for the initial zero;
// that is set in a previous kernel.
all_row_splits2_data[new_i + t_offset + 1] = new_arc_idx01x_next;
}
all_states_data[new_i] = old_states_data[old_state_idx01];
});
K2_EVAL(c_, new_num_arcs, lambda_set_arcs, (int32_t new_i) -> void {
// new_i is position in appended vector of all arcs
int32_t t_offset = new_arc_to_frame_data[new_i],
new_state_start_pos = new_states_offsets_data[t_offset],
old_state_start_pos = old_states_offsets_data[t_offset],
next_old_state_start_pos = old_states_offsets_data[t_offset + 1],
old_arc_start_pos = old_arcs_offsets_data[t_offset],
old_i = arcs_new2old_data[new_i],
old_arc_idx012 = old_i - old_arc_start_pos;
ArcInfo *old_info_data = (ArcInfo*)all_p[5 * num_t + t_offset];
int32_t *old_row_ids2_data = (int32_t*)all_p[3 * num_t + t_offset],
*old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset],
*next_old_row_splits1_data = (int32_t*)all_p[t_offset + 1];
int32_t old_src_state_idx01 = old_row_ids2_data[old_arc_idx012],
fsa_idx0 = old_row_ids1_data[old_src_state_idx01],
old_src_state_pos = old_src_state_idx01 + old_state_start_pos,
new_src_state_pos = states_old2new_data[old_src_state_pos],
new_src_state_idx01 = new_src_state_pos - new_state_start_pos;
all_row_ids2_data[new_i] = new_src_state_idx01;
ArcInfo info = old_info_data[old_arc_idx012];
if (t_offset + 1 == num_t) {
// Do nothing; this is the last frame of the batch of frames that we are
// pruning, so we don't need to renumber the destination-states of the
// arcs leaving it because the next frame's states have not been pruned
// (so the numbering stays the same).
} else {
// idx1 of the state in the next frame's `states` object.
int32_t dest_info_state_idx1 = info.u.dest_info_state_idx1;
// the naming below is unusual; by "pos" we mean position in the old or
// new "all_states" or "all_arcs" vectors, which have all frames appended.
// (the new ones physically exist; the old ones don't, but they are the
// numberings used in renumber_states.Keep() and renumber_arcs.Keep().)
int32_t old_dest_state_idx0x = next_old_row_splits1_data[fsa_idx0],
old_dest_state_idx01 = old_dest_state_idx0x + dest_info_state_idx1,
old_dest_state_idx0x_pos = next_old_state_start_pos + old_dest_state_idx0x,
old_dest_state_idx01_pos = next_old_state_start_pos + old_dest_state_idx01,
new_dest_state_idx0x_pos = states_old2new_data[old_dest_state_idx0x_pos],
new_dest_state_idx01_pos = states_old2new_data[old_dest_state_idx01_pos],
new_dest_state_idx1 = new_dest_state_idx01_pos - new_dest_state_idx0x_pos;
info.u.dest_info_state_idx1 = new_dest_state_idx1;
}
all_arcs_data[new_i] = info;
});
// Now reconstruct the states and arcs for all the frames we pruned, from
// sub-parts of the arrays we just created.
new_states_offsets = new_states_offsets.To(cpu);
new_arcs_offsets = new_arcs_offsets.To(cpu);
new_states_offsets_data = new_states_offsets.Data();
new_arcs_offsets_data = new_arcs_offsets.Data();
for (int32_t i = 0; i < num_t; i++) { // i corresponds to "t_offset".
int32_t state_offset = new_states_offsets_data[i],
next_state_offset = new_states_offsets_data[i + 1],
arc_offset = new_arcs_offsets_data[i],
next_arc_offset = new_arcs_offsets_data[i + 1];
// next line: operator[] into Array2 gives Array1, one row.
Array1<int32_t> row_splits1 = all_row_splits1.Row(i),
row_ids1 = all_row_ids1.Arange(state_offset, next_state_offset),
row_splits2 = all_row_splits2.Arange(state_offset + i, next_state_offset + (i+1)),
row_ids2 = all_row_ids2.Arange(arc_offset, next_arc_offset);
Array1<ArcInfo> arcs = all_arcs.Arange(arc_offset, next_arc_offset);
RaggedShape arcs_shape = RaggedShape3(&row_splits1, &row_ids1, -1,
&row_splits2, &row_ids2, -1);
int32_t t = begin_t + i;
frames_[t]->arcs = Ragged<ArcInfo>(arcs_shape, arcs);
Array1<StateInfo> states = all_states.Arange(state_offset, next_state_offset);
RaggedShape states_shape = GetLayer(arcs_shape, 0);
frames_[t]->states = Ragged<StateInfo>(states_shape, states);
}
}
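/* Sketch of the Renumbering semantics assumed by the code above (illustrative
   numbers): if renumber_arcs.Keep() == [1, 0, 1, 1] then
   Old2New(true) == [0, 1, 1, 2, 3] (an exclusive sum with one extra trailing
   element), New2Old() == [0, 2, 3] and NumNewElems() == 3. Indexing
   Old2New(true) with old_arcs_offsets is what converts the per-frame offsets
   into offsets into the new, pruned numbering. */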
ContextPtr c_;
FsaVec &a_fsas_; // Note: a_fsas_ has 3 axes.
int32_t a_fsas_stride_; // 1 if we use a different FSA per sequence
// (a_fsas_.Dim0() > 1), 0 if the decoding graph is
// shared (a_fsas_.Dim0() == 1).
DenseFsaVec &b_fsas_;
int32_t T_; // == b_fsas_.MaxSize(1).
float search_beam_;
float output_beam_;
int32_t min_active_;
int32_t max_active_;
Array1<float> dynamic_beams_; // dynamic beams (initially just search_beam_
// but change due to max_active/min_active
// constraints).
int32_t state_map_fsa_stride_; // state_map_fsa_stride_ is a_fsas_.TotSize(1)
// if a_fsas_.Dim0() == 1, else 0.
Hash32 state_map_; // state_map_ maps from:
// key == (state_map_fsa_stride_*n) + a_fsas_state_idx01,
// where n is the fsa_idx, i.e. the index into b_fsas_
// to
// value, where at different stages of PropagateForward(),
// value is an arc_idx012 (into cur_frame->arcs), and
// then later a state_idx01 into the next frame's `state`
// member.
// (The fsa_idx term in the key is needed because,
// if all the streams share the same FSA in
// a_fsas_, we need a logically separate map for
// each stream.) This map is used on
// each frame to compute and store the mapping
// from active states to the position in the
// `states` array. Between frames, all values
// have -1 in them.
std::vector<std::unique_ptr<FrameInfo>> frames_;
// logically an array of bool, of size T_ + 1; for each 0 <= t <= T, after the
// forward pass finishes propagating frame t, if
// do_pruning_after_[t] is false it will continue as normal; otherwise (if
// true), it will signal `backward_semaphore_`.
std::vector<char> do_pruning_after_;
// For each t for which do_pruning_after_[t] is true, there will be a
// pair (begin_t, end_t) in prune_t_begin_end giving the
// arguments for which we will invoke PruneTimeRange() after the forward-pass
// for time t has completed. The size of this array equals the sum
// of nonzero elements of do_pruning_after_.
std::vector<std::pair<int32_t, int32_t> > prune_t_begin_end_;
// Each time the forward-pass finishes forward processing for a t value for
// which do_pruning_after_[t] is true, it will signal this semaphore; the
// backward-pass thread (which does pruning) will wait on it as many times as
// do_pruning_after_[t] is set to true.
Semaphore backward_semaphore_;
// The function of forward_semaphore_ is to ensure that the backward (pruning)
// pass doesn't "get too far behind" relative to the forward pass, which might
// cause us to use more memory than expected. (Note: the backward pass is
// normally a bit faster than the forward pass, so typically this won't be a
// problem). Each time the backward pass has finished one round of pruning it
// signals this semaphore. Each time the forward pass signals the
// backward pass that it's ready to prune, it waits on this semaphore
// immediately afterward. But because forward_semaphore_ is initialized to 1
// rather than zero, the effect is that the forward pass is waiting for the
// *previous* phase of backward pruning to complete, rather than the current
// one.
k2std::counting_semaphore forward_semaphore_;
};
void IntersectDensePruned(FsaVec &a_fsas, DenseFsaVec &b_fsas,
float search_beam, float output_beam,
int32_t min_active_states, int32_t max_active_states,
FsaVec *out, Array1<int32_t> *arc_map_a,
Array1<int32_t> *arc_map_b) {
NVTX_RANGE("IntersectDensePruned");
FsaVec a_vec = FsaToFsaVec(a_fsas);
MultiGraphDenseIntersectPruned intersector(a_vec, b_fsas, search_beam,
output_beam, min_active_states,
max_active_states);
intersector.Intersect();
intersector.FormatOutput(out, arc_map_a, arc_map_b);
}
} // namespace k2
| be07838444da1aea779b7a07b40832e883e0af72.cu | /**
* @brief
* compose
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <limits>
#include <thread>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/hash.h"
#include "k2/csrc/ragged_ops.h"
namespace k2 {
namespace intersect_pruned_internal {
/* Information associated with a state active on a particular frame.. */
struct StateInfo {
/* abs_state_id is the state-index in a_fsas_. Note: the ind0 in here
won't necessarily match the ind0 within FrameInfo::state if
a_fsas_stride_ == 0. */
int32_t a_fsas_state_idx01;
/* Caution: this is ACTUALLY A FLOAT that has been bit-twiddled using
FloatToOrderedInt/OrderedIntToFloat so we can use atomic max. It
represents a Viterbi-style 'forward probability'. (Viterbi, meaning: we
use max not log-sum). You can take the pruned lattice and rescore it if
you want log-sum. */
int32_t forward_loglike;
/* Note: this `backward_loglike` is the best score of any path from here to
the end, minus the best path in the overall FSA, i.e. it's the backward
score you get if, at the final-state, you set backward_loglike ==
forward_loglike. So backward_loglike + OrderedIntToFloat(forward_loglike)
<= 0, and you can treat it somewhat like a posterior (except they don't sum
to one as we're using max, not log-add).
*/
float backward_loglike;
};
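/* Worked example of the normalization described above (illustrative numbers):
   if the best path through the whole FSA scores -100.0 and the best complete
   path passing through some state scores -103.0, then
   OrderedIntToFloat(forward_loglike) + backward_loglike == -3.0 for that
   state; states on the globally best path get exactly 0.0. This is the
   quantity that is later compared against -output_beam_ when pruning. */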
struct ArcInfo { // for an arc that wasn't pruned away...
int32_t a_fsas_arc_idx012; // the arc-index in a_fsas_.
float arc_loglike; // loglike on this arc: equals loglike from data
// (nnet output, == b_fsas), plus loglike from
// the arc in a_fsas.
union {
// these 3 different ways of storing the index of the destination state
// are used at different stages of the algorithm; we give them different
// names for clarity.
int32_t dest_a_fsas_state_idx01; // The destination-state as an index
// into a_fsas_.
int32_t dest_info_state_idx1; // The destination-state as an idx1 into the
// next FrameInfo's `arcs` or `states`,
// omitting the FSA-index which can be worked
// out from the structure of this frame's
// ArcInfo.
} u;
float end_loglike; // loglike at the end of the arc just before
// (conceptually) it joins the destination state.
};
/*
static std::ostream &operator<<(std::ostream &os, const StateInfo &s) {
os << "StateInfo{" << s.a_fsas_state_idx01 << ","
<< OrderedIntToFloat(s.forward_loglike) << "," << s.backward_loglike
<< "}";
return os;
}
static std::ostream &operator<<(std::ostream &os, const ArcInfo &a) {
os << "ArcInfo{" << a.a_fsas_arc_idx012 << "," << a.arc_loglike << ","
<< a.u.dest_a_fsas_state_idx01 << "," << a.end_loglike
<< "[i=" << FloatToOrderedInt(a.end_loglike) << "]"
<< "}";
return os;
}
*/
} // namespace intersect_pruned_internal
using namespace intersect_pruned_internal; // NOLINT
// Caution: this is really a .cu file. It contains mixed host and device code.
/*
Pruned intersection (a.k.a. composition) that corresponds to decoding for
speech recognition-type tasks. Can use either different decoding graphs (one
per acoustic sequence) or a shared graph
*/
class MultiGraphDenseIntersectPruned {
public:
/**
Pruned intersection (a.k.a. composition) that corresponds to decoding for
speech recognition-type tasks
@param [in] a_fsas The decoding graphs, one per sequence. E.g. might
just be a linear sequence of phones, or might be
something more complicated. Must have either the
same Dim0() as b_fsas, or Size0()==1 in which
case the graph is shared.
@param [in] b_fsas The neural-net output, with each frame containing the
log-likes of each phone. A series of sequences of
(in general) different length.
@param [in] search_beam "Default" search/decoding beam. The actual
beam is dynamic and also depends on max_active and
min_active.
@param [in] output_beam Beam for pruning the output FSA, will
typically be smaller than search_beam.
@param [in] min_active Minimum number of FSA states that are allowed to
be active on any given frame for any given
intersection/composition task. This is advisory,
in that it will try not to have fewer than this
number active.
@param [in] max_active Maximum number of FSA states that are allowed to
be active on any given frame for any given
intersection/composition task. This is advisory,
in that it will try not to exceed that but may not
always succeed. This determines the hash size.
*/
MultiGraphDenseIntersectPruned(FsaVec &a_fsas, DenseFsaVec &b_fsas,
float search_beam, float output_beam,
int32_t min_active, int32_t max_active)
: a_fsas_(a_fsas),
b_fsas_(b_fsas),
search_beam_(search_beam),
output_beam_(output_beam),
min_active_(min_active),
max_active_(max_active),
dynamic_beams_(a_fsas.Context(), b_fsas.shape.Dim0(), search_beam),
forward_semaphore_(1) {
NVTX_RANGE(K2_FUNC);
c_ = GetContext(a_fsas.shape, b_fsas.shape);
T_ = b_fsas_.shape.MaxSize(1);
K2_CHECK(b_fsas.scores.IsContiguous());
K2_CHECK_GT(search_beam, 0);
K2_CHECK_GT(output_beam, 0);
K2_CHECK_GE(min_active, 0);
K2_CHECK_GT(max_active, min_active);
K2_CHECK(a_fsas.shape.Dim0() == b_fsas.shape.Dim0() ||
a_fsas.shape.Dim0() == 1);
K2_CHECK_GE(b_fsas.shape.Dim0(), 1);
int32_t num_seqs = b_fsas.shape.Dim0();
int32_t num_buckets = RoundUpToNearestPowerOfTwo(num_seqs * 4 *
max_active);
if (num_buckets < 128)
num_buckets = 128;
state_map_ = Hash32(c_, num_buckets);
int32_t num_a_copies;
if (a_fsas.shape.Dim0() == 1) {
a_fsas_stride_ = 0;
state_map_fsa_stride_ = a_fsas.TotSize(1);
num_a_copies = b_fsas.shape.Dim0();
} else {
K2_CHECK_EQ(a_fsas.shape.Dim0(), b_fsas.shape.Dim0());
a_fsas_stride_ = 1;
state_map_fsa_stride_ = 0;
num_a_copies = 1;
}
int64_t num_keys = num_a_copies * (int64_t)a_fsas.TotSize(1);
K2_CHECK(num_keys == (uint32_t)num_keys);
{ // set up do_pruning_after_ and prune_t_begin_end_.
do_pruning_after_.resize(T_ + 1, (char)0);
// each time we prune, prune 30 frames; but shift by 20 frames each
// time so there are 10 frames of overlap.
int32_t prune_num_frames = 30,
prune_shift = 20,
T = T_;
K2_CHECK_GT(prune_num_frames, prune_shift);
// The first begin_t is negative but will be rounded up to zero to get the
// start of the range. The motivation is: we don't want to wait until we
// have processed `prune_num_frames` frames to prune for the first time,
// because that first interval of not-pruning, being larger than normal,
// would dominate the maximum memory used by intersection.
for (int32_t begin_t = prune_shift - prune_num_frames; ;
begin_t += prune_shift) {
int32_t prune_begin = std::max<int32_t>(0, begin_t),
prune_end = begin_t + prune_num_frames;
bool last = false;
if (prune_end >= T) {
prune_end = T;
last = true;
}
K2_CHECK_LT(prune_begin, prune_end);
do_pruning_after_[prune_end - 1] = (char)1;
prune_t_begin_end_.push_back({prune_begin, prune_end});
if (last)
break;
}
}
}
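/* Example of the schedule constructed above (illustrative T): with T == 65,
   prune_num_frames == 30 and prune_shift == 20 the (begin, end) pairs pushed
   onto prune_t_begin_end_ are (0, 20), (10, 40), (30, 60) and (50, 65), and
   do_pruning_after_[t] is set for t == 19, 39, 59 and 64. Consecutive ranges
   overlap by 10 frames, so states near a range boundary are still pruned with
   some backward context available. */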
// The information we have for each frame of the pruned-intersection (really:
// decoding) algorithm. We keep an array of these, one for each frame, up to
// the length of the longest sequence we're decoding plus one.
struct FrameInfo {
// States that are active at the beginning of this frame. Indexed
// [fsa_idx][state_idx], where fsa_idx indexes b_fsas_ (and a_fsas_, if
// a_fsas_stride_ != 0); and state_idx just enumerates the active states
// on this frame (as state_idx01's in a_fsas_).
Ragged<StateInfo> states; // 2 axes: fsa, state
// Indexed [fsa_idx][state_idx][arc_idx].. the first 2 indexes are
// the same as those into 'states' (the first 2 levels of the structure
// are shared), and the last one enumerates the arcs leaving each of those
// states.
//
// Note: there may be indexes [fsa_idx] that have no states (because that
// FSA had fewer frames than the max), and indexes [fsa_idx][state_idx] that
// have no arcs due to pruning.
Ragged<ArcInfo> arcs; // 3 axes: fsa, state, arc
};
/* Does the main work of intersection/composition, but doesn't produce any
output; the output is provided when you call FormatOutput(). */
void Intersect() {
/*
T is the largest number of (frames+1) of neural net output, i.e. the largest
number of frames of log-likelihoods; we count the final frame with (0,
-inf, -inf..) that is used for the final-arc. The largest number of
states in the fsas represented by b_fsas equals T+1 (e.g. 1 frame would
require 2 states, because that 1 frame is the arc from state 0 to state
1). So the #states is 2 greater than the actual number of frames in the
neural-net output.
*/
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = b_fsas_.shape.Dim0(), T = T_;
std::ostringstream os;
os << "Intersect:T=" << T << ",num_fsas=" << num_fsas
<< ",TotSize(1)=" << b_fsas_.shape.TotSize(1);
NVTX_RANGE(os.str().c_str());
std::thread backward_thread(BackwardPassStatic, this);
// we'll initially populate frames_[0.. T+1], but discard the one at T+1,
// which has no arcs or states, the ones we use are from 0 to T.
frames_.reserve(T + 2);
frames_.push_back(InitialFrameInfo());
for (int32_t t = 0; t <= T; t++) {
frames_.push_back(PropagateForward(t, frames_.back().get()));
if (do_pruning_after_[t]) {
// let a phase of backward-pass pruning commence.
backward_semaphore_.Signal(c_);
// note: normally we should acquire forward_semaphore_ without having to
// wait. It avoids the backward pass getting too far behind the forward
// pass, which could mean too much memory is used.
forward_semaphore_.acquire();
}
}
// The FrameInfo for time T+1 will have no states. We did that
// last PropagateForward so that the 'arcs' member of frames_[T]
// is set up (it has no arcs but we need the shape).
frames_.pop_back();
backward_thread.join();
}
void BackwardPass() {
int32_t num_fsas = b_fsas_.shape.Dim0(),
num_work_items = max_active_ * num_fsas * T_;
ParallelRunner pr(c_);
// if num_work_items is big enough, it will actually create a new stream.
cudaStream_t stream = pr.NewStream(num_work_items);
With w(stream); // This overrides whatever stream c_ contains with `stream`, if it's not a null stream.
NVTX_RANGE(K2_FUNC);
for (size_t i = 0; i < prune_t_begin_end_.size(); i++) {
backward_semaphore_.Wait(c_);
int32_t prune_t_begin = prune_t_begin_end_[i].first,
prune_t_end = prune_t_begin_end_[i].second;
PruneTimeRange(prune_t_begin, prune_t_end);
forward_semaphore_.release();
}
}
static void BackwardPassStatic(MultiGraphDenseIntersectPruned *c) {
c->BackwardPass();
}
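/* Rough timeline of the forward/backward hand-off above: whenever
   do_pruning_after_[t] is true the forward pass signals backward_semaphore_
   (waking BackwardPass(), which runs one PruneTimeRange()) and then acquires
   forward_semaphore_. Because forward_semaphore_ is initialized to 1, that
   acquire only blocks until the *previous* pruning round has released it, so
   one round of pruning overlaps with the next chunk of forward propagation. */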
// Return FrameInfo for 1st frame, with `states` set but `arcs` not set.
std::unique_ptr<FrameInfo> InitialFrameInfo() {
NVTX_RANGE("InitialFrameInfo");
int32_t num_fsas = b_fsas_.shape.Dim0();
std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>();
if (a_fsas_.Dim0() == 1) {
int32_t start_states_per_seq = (a_fsas_.shape.TotSize(1) > 0), // 0 or 1
num_start_states = num_fsas * start_states_per_seq;
ans->states = Ragged<StateInfo>(
RegularRaggedShape(c_, num_fsas, start_states_per_seq),
Array1<StateInfo>(c_, num_start_states));
StateInfo *states_data = ans->states.values.Data();
K2_EVAL(
c_, num_start_states, lambda_set_states, (int32_t i)->void {
StateInfo info;
info.a_fsas_state_idx01 = 0; // start state of a_fsas_
info.forward_loglike = FloatToOrderedInt(0.0);
states_data[i] = info;
});
} else {
Ragged<int32_t> start_states = GetStartStates(a_fsas_);
ans->states =
Ragged<StateInfo>(start_states.shape,
Array1<StateInfo>(c_, start_states.NumElements()));
StateInfo *ans_states_values_data = ans->states.values.Data();
const int32_t *start_states_values_data = start_states.values.Data(),
*start_states_row_ids1_data =
start_states.shape.RowIds(1).Data();
K2_EVAL(
c_, start_states.NumElements(), lambda_set_state_info,
(int32_t states_idx01)->void {
StateInfo info;
info.a_fsas_state_idx01 = start_states_values_data[states_idx01];
info.forward_loglike = FloatToOrderedInt(0.0);
ans_states_values_data[states_idx01] = info;
});
}
return ans;
}
void FormatOutput(FsaVec *ofsa, Array1<int32_t> *arc_map_a,
Array1<int32_t> *arc_map_b) {
NVTX_RANGE("FormatOutput");
int32_t T = T_;
ContextPtr c_cpu = GetCpuContext();
Array1<ArcInfo *> arcs_data_ptrs(c_cpu, T + 1);
Array1<int32_t *> arcs_row_splits1_ptrs(c_cpu, T + 1);
for (int32_t t = 0; t <= T; t++) {
arcs_data_ptrs.Data()[t] = frames_[t]->arcs.values.Data();
arcs_row_splits1_ptrs.Data()[t] = frames_[t]->arcs.RowSplits(1).Data();
}
// transfer to GPU if we're using a GPU
arcs_data_ptrs = arcs_data_ptrs.To(c_);
ArcInfo **arcs_data_ptrs_data = arcs_data_ptrs.Data();
arcs_row_splits1_ptrs = arcs_row_splits1_ptrs.To(c_);
int32_t **arcs_row_splits1_ptrs_data = arcs_row_splits1_ptrs.Data();
const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data();
const int32_t *a_fsas_row_splits1 = a_fsas_.RowSplits(1).Data();
int32_t a_fsas_stride = a_fsas_stride_; // 0 or 1 depending on whether the
// decoding graph is shared.
int32_t num_fsas = b_fsas_.shape.Dim0();
RaggedShape final_arcs_shape;
{ /* This block populates `final_arcs_shape`. It is the shape of a ragged
tensor of arcs that conceptually would live at frames_[T+1]->arcs. It
contains no actual arcs, but may contain some states, that represent
"missing" final-states. The problem we are trying to solve is that
there was a start-state for an FSA but no final-state because it did
not survive pruning, and this could lead to an output FSA that is
invalid or is misinterpreted (because we are interpreting a non-final
state as a final state).
*/
Array1<int32_t> num_extra_states(c_, num_fsas + 1);
int32_t *num_extra_states_data = num_extra_states.Data();
K2_EVAL(c_, num_fsas, lambda_set_num_extra_states, (int32_t i) -> void {
int32_t final_t = b_fsas_row_splits1[i+1] - b_fsas_row_splits1[i];
int32_t *arcs_row_splits1_data = arcs_row_splits1_ptrs_data[final_t];
int32_t num_states_final_t = arcs_row_splits1_data[i + 1] -
arcs_row_splits1_data[i];
K2_CHECK_LE(num_states_final_t, 1);
// has_start_state is 1 if there is a start-state; note, we don't prune
// the start-states, so they'll be present if they were present in a_fsas_.
int32_t has_start_state = (a_fsas_row_splits1[i * a_fsas_stride] <
a_fsas_row_splits1[i * a_fsas_stride + 1]);
// num_extra_states_data[i] will be 1 if there was a start state but no final-state;
// else, 0.
num_extra_states_data[i] = has_start_state * (1 - num_states_final_t);
});
ExclusiveSum(num_extra_states, &num_extra_states);
RaggedShape top_shape = RaggedShape2(&num_extra_states, nullptr, -1),
bottom_shape = RegularRaggedShape(c_, top_shape.NumElements(), 0);
final_arcs_shape = ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape oshape;
// see documentation of Stack() in ragged_ops.h for explanation.
Array1<uint32_t> oshape_merge_map;
{
NVTX_RANGE("InitOshape");
// each of these have 3 axes.
std::vector<RaggedShape *> arcs_shapes(T + 2);
for (int32_t t = 0; t <= T; t++)
arcs_shapes[t] = &(frames_[t]->arcs.shape);
arcs_shapes[T + 1] = &final_arcs_shape;
// oshape is a 4-axis ragged tensor which is indexed:
// oshape[fsa_index][t][state_idx][arc_idx]
int32_t axis = 1;
oshape = Stack(axis, T + 2, arcs_shapes.data(), &oshape_merge_map);
}
int32_t *oshape_row_ids3 = oshape.RowIds(3).Data(),
*oshape_row_ids2 = oshape.RowIds(2).Data(),
*oshape_row_ids1 = oshape.RowIds(1).Data(),
*oshape_row_splits3 = oshape.RowSplits(3).Data(),
*oshape_row_splits2 = oshape.RowSplits(2).Data(),
*oshape_row_splits1 = oshape.RowSplits(1).Data();
int32_t num_arcs = oshape.NumElements();
*arc_map_a = Array1<int32_t>(c_, num_arcs);
*arc_map_b = Array1<int32_t>(c_, num_arcs);
int32_t *arc_map_a_data = arc_map_a->Data(),
*arc_map_b_data = arc_map_b->Data();
Array1<Arc> arcs_out(c_, num_arcs);
Arc *arcs_out_data = arcs_out.Data();
const Arc *a_fsas_arcs = a_fsas_.values.Data();
int32_t b_fsas_num_cols = b_fsas_.scores.Dim1();
const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data();
const uint32_t *oshape_merge_map_data = oshape_merge_map.Data();
K2_EVAL(
c_, num_arcs, lambda_format_arc_data,
(int32_t oarc_idx0123)->void { // by 'oarc' we mean arc with shape `oshape`.
int32_t oarc_idx012 = oshape_row_ids3[oarc_idx0123],
oarc_idx01 = oshape_row_ids2[oarc_idx012],
oarc_idx0 = oshape_row_ids1[oarc_idx01],
oarc_idx0x = oshape_row_splits1[oarc_idx0],
oarc_idx0xx = oshape_row_splits2[oarc_idx0x],
oarc_idx1 = oarc_idx01 - oarc_idx0x,
oarc_idx01x_next = oshape_row_splits2[oarc_idx01 + 1];
int32_t m = oshape_merge_map_data[oarc_idx0123],
t = m % (T + 2), // actually we won't get t == T or t == T + 1
// here since those frames have no arcs.
arcs_idx012 = m / (T + 2); // arc_idx012 into FrameInfo::arcs on time t,
// index of the arc on that frame.
K2_CHECK_EQ(t, oarc_idx1);
const ArcInfo *arcs_data = arcs_data_ptrs_data[t];
ArcInfo arc_info = arcs_data[arcs_idx012];
Arc arc;
arc.src_state = oarc_idx012 - oarc_idx0xx;
// Note: the idx1 w.r.t. the frame's `arcs` is an idx2 w.r.t. `oshape`.
int32_t dest_state_idx012 = oarc_idx01x_next +
arc_info.u.dest_info_state_idx1;
arc.dest_state = dest_state_idx012 - oarc_idx0xx;
arc.label = a_fsas_arcs[arc_info.a_fsas_arc_idx012].label;
int32_t fsa_id = oarc_idx0,
b_fsas_idx0x = b_fsas_row_splits1[fsa_id],
b_fsas_idx01 = b_fsas_idx0x + t,
b_fsas_idx2 = (arc.label + 1),
b_fsas_arc_idx012 = b_fsas_idx01 * b_fsas_num_cols + b_fsas_idx2;
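          // b_fsas_.scores has one column per symbol plus an initial column
          // for the final-symbol -1, which is why the label is offset by 1
          // when forming b_fsas_idx2 above (see also GetArcs()).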
arc.score = arc_info.arc_loglike;
arc_map_a_data[oarc_idx0123] = arc_info.a_fsas_arc_idx012;
arc_map_b_data[oarc_idx0123] = b_fsas_arc_idx012;
arcs_out_data[oarc_idx0123] = arc;
});
// Remove axis 1, which corresponds to time.
*ofsa = FsaVec(RemoveAxis(oshape, 1), arcs_out);
}
/*
Computes pruning cutoffs for this frame: these are the cutoffs for the arc
"forward score", one per FSA. This is a dynamic process involving
dynamic_beams_ which are updated on each frame (they start off at
search_beam_).
@param [in] arc_end_scores The "forward log-probs" (scores) at the
end of each arc, i.e. its contribution to the following
state. Is a tensor indexed [fsa_id][state][arc]; we
will get rid of the [state] dim, combining it with the
[arc] dim, so it's just [fsa_id][arc]
It is conceptually unchanged by this operation but non-const
because row-ids of its shape may need to be generated.
@return Returns a vector of log-likelihood cutoffs, one per FSA (the
cutoff will be -infinity for FSAs that don't have any active
states). The cutoffs will be of the form: the best score
for any arc, minus the dynamic beam. See the code for how
the dynamic beam is adjusted; it will approach
'search_beam_' as long as the number of active states in
each FSA is between min_active and max_active.
*/
Array1<float> GetPruningCutoffs(Ragged<float> &arc_end_scores) {
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = arc_end_scores.shape.Dim0();
// get the maximum score from each sub-list (i.e. each FSA, on this frame).
// Note: can probably do this with a cub Reduce operation using an operator
// that has side effects (that notices when it's operating across a
// boundary).
// the max will be -infinity for any FSA-id that doesn't have any active
// states (e.g. because that stream has finished).
    // Removing axis 1 combines the [state] and [arc] axes, so
    // end_scores_per_fsa is indexed [fsa_id][arc].
Ragged<float> end_scores_per_fsa = arc_end_scores.RemoveAxis(1);
Array1<float> max_per_fsa(c_, end_scores_per_fsa.Dim0());
MaxPerSublist(end_scores_per_fsa, -std::numeric_limits<float>::infinity(),
&max_per_fsa);
const int32_t *arc_end_scores_row_splits1_data =
arc_end_scores.RowSplits(1).Data();
const float *max_per_fsa_data = max_per_fsa.Data();
float *dynamic_beams_data = dynamic_beams_.Data();
float default_beam = search_beam_, max_active = max_active_,
min_active = min_active_;
K2_CHECK_LT(min_active, max_active);
Array1<float> cutoffs(c_, num_fsas);
float *cutoffs_data = cutoffs.Data();
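    // For each FSA, adapt its beam based on how many states were active on
    // this frame, then set cutoff = best_loglike - beam.  E.g. an FSA with
    // more than max_active_ states has its beam shrunk by a factor of 0.8
    // per frame until the count is back within [min_active_, max_active_];
    // once within range, the beam decays back toward search_beam_.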
K2_EVAL(
c_, num_fsas, lambda_set_beam_and_cutoffs, (int32_t i)->void {
float best_loglike = max_per_fsa_data[i],
dynamic_beam = dynamic_beams_data[i];
int32_t active_states = arc_end_scores_row_splits1_data[i + 1] -
arc_end_scores_row_splits1_data[i];
if (active_states <= max_active) {
// Not constrained by max_active...
if (active_states >= min_active || active_states == 0) {
// Neither the max_active nor min_active constraints
// apply. Gradually approach 'beam'
// (Also approach 'beam' if active_states == 0; we might as
// well, since there is nothing to prune here).
dynamic_beam = 0.8 * dynamic_beam + 0.2 * default_beam;
} else {
// We violated the min_active constraint -> increase beam
if (dynamic_beam < default_beam) dynamic_beam = default_beam;
// gradually make the beam larger as long
// as we are below min_active
dynamic_beam *= 1.25;
}
} else {
// We violated the max_active constraint -> decrease beam
if (dynamic_beam > default_beam) dynamic_beam = default_beam;
// Decrease the beam as long as we have more than
// max_active active states.
dynamic_beam *= 0.8;
}
dynamic_beams_data[i] = dynamic_beam;
cutoffs_data[i] = best_loglike - dynamic_beam;
});
return cutoffs;
}
/*
Returns list of arcs on this frame, consisting of all arcs leaving
the states active on 'cur_frame'.
@param [in] t The time-index (on which to look up log-likes),
t >= 0
@param [in] cur_frame The FrameInfo for the current frame; only its
'states' member is expected to be set up on entry.
*/
Ragged<ArcInfo> GetArcs(int32_t t, FrameInfo *cur_frame) {
NVTX_RANGE(K2_FUNC);
Ragged<StateInfo> &states = cur_frame->states;
const StateInfo *state_values = states.values.Data();
// in a_fsas_ (the decoding graphs), maps from state_idx01 to arc_idx01x.
const int32_t *fsa_arc_splits = a_fsas_.shape.RowSplits(2).Data();
int32_t num_states = states.values.Dim();
Array1<int32_t> num_arcs(c_, num_states + 1);
int32_t *num_arcs_data = num_arcs.Data();
// `num_arcs` gives the num-arcs for each state in `states`.
K2_EVAL(
c_, num_states, num_arcs_lambda, (int32_t state_idx01)->void {
int32_t a_fsas_state_idx01 =
state_values[state_idx01].a_fsas_state_idx01,
a_fsas_arc_idx01x = fsa_arc_splits[a_fsas_state_idx01],
a_fsas_arc_idx01x_next =
fsa_arc_splits[a_fsas_state_idx01 + 1],
a_fsas_num_arcs = a_fsas_arc_idx01x_next - a_fsas_arc_idx01x;
num_arcs_data[state_idx01] = a_fsas_num_arcs;
});
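    // Exclusive-sum the per-state arc counts in place, turning `num_arcs`
    // into a row-splits array with num_states + 1 elements.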
ExclusiveSum(num_arcs, &num_arcs);
// initialize shape of array that will hold arcs leaving the active states.
// Its shape is [fsa_index][state][arc]; the top two levels are shared with
// `states`. 'ai' means ArcInfo.
RaggedShape ai_shape =
ComposeRaggedShapes(states.shape, RaggedShape2(&num_arcs, nullptr, -1));
// from state_idx01 (into `states` or `ai_shape`) -> fsa_idx0
const int32_t *ai_row_ids1 = ai_shape.RowIds(1).Data();
// from arc_idx012 (into `ai_shape`) to state_idx01
const int32_t *ai_row_ids2 = ai_shape.RowIds(2).Data();
// from state_idx01 to arc_idx01x
const int32_t *ai_row_splits2 = ai_shape.RowSplits(2).Data();
// from state_idx01 (into a_fsas_) to arc_idx01x (into a_fsas_)
const int32_t *a_fsas_row_splits2 = a_fsas_.shape.RowSplits(2).Data();
const Arc *arcs = a_fsas_.values.Data();
// fsa_idx0 to ind0x (into b_fsas_), which gives the 1st row for this
// sequence.
const int32_t *b_fsas_row_ids1 = b_fsas_.shape.RowIds(1).Data();
const int32_t *b_fsas_row_splits1 = b_fsas_.shape.RowSplits(1).Data();
const float *score_data = b_fsas_.scores.Data();
int32_t scores_num_cols = b_fsas_.scores.Dim1();
auto scores_acc = b_fsas_.scores.Accessor();
Ragged<ArcInfo> ai(ai_shape);
ArcInfo *ai_data = ai.values.Data(); // uninitialized
K2_EVAL(
c_, ai.values.Dim(), ai_lambda, (int32_t ai_arc_idx012)->void {
int32_t ai_state_idx01 = ai_row_ids2[ai_arc_idx012],
ai_fsa_idx0 = ai_row_ids1[ai_state_idx01],
ai_arc_idx01x = ai_row_splits2[ai_state_idx01],
ai_arc_idx2 = ai_arc_idx012 - ai_arc_idx01x;
StateInfo sinfo = state_values[ai_state_idx01];
int32_t a_fsas_arc_idx01x =
a_fsas_row_splits2[sinfo.a_fsas_state_idx01],
a_fsas_arc_idx012 = a_fsas_arc_idx01x + ai_arc_idx2;
Arc arc = arcs[a_fsas_arc_idx012];
int32_t scores_idx0x = b_fsas_row_splits1[ai_fsa_idx0],
scores_idx01 = scores_idx0x + t, // t == ind1 into 'scores'
scores_idx2 =
arc.label + 1; // the +1 is so that -1 can be handled
K2_DCHECK_LT(static_cast<uint32_t>(scores_idx2),
static_cast<uint32_t>(scores_num_cols));
float acoustic_score = scores_acc(scores_idx01, scores_idx2);
ArcInfo ai;
ai.a_fsas_arc_idx012 = a_fsas_arc_idx012;
ai.arc_loglike = acoustic_score + arc.score;
ai.end_loglike =
OrderedIntToFloat(sinfo.forward_loglike) + ai.arc_loglike;
// at least currently, the ArcInfo object's src_state and dest_state
// are idx1's not idx01's, i.e. they don't contain the FSA-index,
          // whereas the ai element is an idx01, so we need to do this to
// convert to an idx01; this relies on the fact that
// sinfo.abs_state_id == arc.src_state
// + a_fsas_fsa_idx0x.
ai.u.dest_a_fsas_state_idx01 =
sinfo.a_fsas_state_idx01 + arc.dest_state - arc.src_state;
ai_data[ai_arc_idx012] = ai;
});
return ai;
}
// Later we may choose to support b_fsas_.Dim0() == 1 and a_fsas_.Dim0() > 1,
// and we'll have to change various bits of code for that to work.
inline int32_t NumFsas() { return b_fsas_.shape.Dim0(); }
/*
Does the forward-propagation (basically: the decoding step) and
returns a newly allocated FrameInfo* object for the next frame.
@param [in] t Time-step that we are processing arcs leaving from;
will be called with t=0, t=1, ...
@param [in] cur_frame FrameInfo object for the states corresponding to
time t; will have its 'states' member set up but not its
'arcs' member (this function will create that).
@return Returns FrameInfo object corresponding to time t+1; will have its
'states' member set up but not its 'arcs' member.
*/
std::unique_ptr<FrameInfo> PropagateForward(int32_t t, FrameInfo *cur_frame) {
NVTX_RANGE("PropagateForward");
int32_t num_fsas = NumFsas();
// Ragged<StateInfo> &states = cur_frame->states;
// arc_info has 3 axes: fsa_id, state, arc.
cur_frame->arcs = GetArcs(t, cur_frame);
Ragged<ArcInfo> &arc_info = cur_frame->arcs;
ArcInfo *ai_data = arc_info.values.Data();
Array1<float> ai_data_array1(c_, cur_frame->arcs.values.Dim());
float *ai_data_array1_data = ai_data_array1.Data();
K2_EVAL(
c_, ai_data_array1.Dim(), lambda_set_ai_data,
(int32_t i)->void { ai_data_array1_data[i] = ai_data[i].end_loglike; });
Ragged<float> ai_loglikes(arc_info.shape, ai_data_array1);
// `cutoffs` is of dimension num_fsas.
Array1<float> cutoffs = GetPruningCutoffs(ai_loglikes);
float *cutoffs_data = cutoffs.Data();
// write certain indexes ( into ai.values) to state_map_.Data(). Keeps
// track of the active states and will allow us to assign a numbering to
// them.
int32_t *ai_row_ids1 = arc_info.shape.RowIds(1).Data(),
*ai_row_ids2 = arc_info.shape.RowIds(2).Data();
auto state_map_acc = state_map_.GetAccessor();
int32_t state_map_fsa_stride = state_map_fsa_stride_;
// renumber_states will be a renumbering that dictates which of the arcs in
// 'ai' correspond to unique states. Only one arc for each dest-state is
// kept (it doesn't matter which one).
Renumbering renumber_states(c_, arc_info.NumElements());
char *keep_this_state_data = renumber_states.Keep().Data();
{
NVTX_RANGE("LambdaSetStateMap");
K2_EVAL(
c_, arc_info.NumElements(), lambda_set_state_map,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
int32_t dest_state_idx01 =
ai_data[arc_idx012].u.dest_a_fsas_state_idx01;
float end_loglike = ai_data[arc_idx012].end_loglike,
cutoff = cutoffs_data[fsa_id];
char keep_this_state = 0; // only one arc entering any state will
// have its 'keep_this_state_data' entry
// set to 1.
if (end_loglike > cutoff) {
int32_t state_map_idx = dest_state_idx01 +
fsa_id * state_map_fsa_stride;
if (state_map_acc.Insert(state_map_idx, arc_idx012))
keep_this_state = 1;
}
keep_this_state_data[arc_idx012] = keep_this_state;
});
}
int32_t num_states = renumber_states.NumNewElems();
// state_reorder_data maps from (state_idx01 on next frame) to (the
// arc_idx012 on this frame which is the source arc which we arbitrarily
// choose as being "responsible" for the creation of that state).
int32_t *state_reorder_data = renumber_states.Old2New().Data();
// state_to_fsa_id maps from an index into the next frame's
// FrameInfo::states.values() vector to the sequence-id (fsa_id) associated
// with it. It should be non-decreasing.
Array1<int32_t> state_to_fsa_id(c_, num_states);
{ // This block sets 'state_to_fsa_id'.
NVTX_RANGE("LambdaSetStateToFsaId");
int32_t *state_to_fsa_id_data = state_to_fsa_id.Data();
K2_EVAL(
c_, arc_info.NumElements(), lambda_state_to_fsa_id,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]],
this_state_j = state_reorder_data[arc_idx012],
next_state_j = state_reorder_data[arc_idx012 + 1];
if (next_state_j > this_state_j) {
state_to_fsa_id_data[this_state_j] = fsa_id;
}
});
K2_DCHECK(IsMonotonic(state_to_fsa_id));
}
std::unique_ptr<FrameInfo> ans = std::make_unique<FrameInfo>();
Array1<int32_t> states_row_splits1(c_, num_fsas + 1);
RowIdsToRowSplits(state_to_fsa_id, &states_row_splits1);
ans->states = Ragged<StateInfo>(
RaggedShape2(&states_row_splits1, &state_to_fsa_id, num_states),
Array1<StateInfo>(c_, num_states));
StateInfo *ans_states_data = ans->states.values.Data();
const int32_t minus_inf_int =
FloatToOrderedInt(-std::numeric_limits<float>::infinity());
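    // Note: forward_loglike is stored as an int whose ordering matches that
    // of the corresponding floats (FloatToOrderedInt), so AtomicMax in
    // lambda_set_arcs_and_states below can take a max over incoming arcs.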
K2_EVAL(
c_, num_states, lambda_init_loglike, (int32_t i)->void {
ans_states_data[i].forward_loglike = minus_inf_int;
});
{
NVTX_RANGE("LambdaModifyStateMap");
// Modify the elements of `state_map` to refer to the indexes into
// `ans->states` / `kept_states_data`, rather than the indexes into
// ai_data. This will decrease some of the values in `state_map`, in
// general.
K2_EVAL(
c_, arc_info.NumElements(), lambda_modify_state_map,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
int32_t dest_state_idx01 =
ai_data[arc_idx012].u.dest_a_fsas_state_idx01;
int32_t this_j = state_reorder_data[arc_idx012],
next_j = state_reorder_data[arc_idx012 + 1];
if (next_j > this_j) {
int32_t state_map_idx = dest_state_idx01 +
fsa_id * state_map_fsa_stride;
int32_t value, *value_addr;
bool ans = state_map_acc.Find(state_map_idx,
&value, &value_addr);
K2_CHECK(ans);
K2_CHECK_EQ(value, arc_idx012);
// Note: this_j is an idx01 into ans->states. previously it
// contained an arc_idx012 (of the entering arc that won the
// race).
*value_addr = this_j;
}
});
}
// We'll set up the data of the kept states below...
StateInfo *kept_states_data = ans->states.values.Data();
{
int32_t *ans_states_row_splits1_data = ans->states.RowSplits(1).Data();
NVTX_RANGE("LambdaSetStates");
K2_EVAL(
c_, arc_info.NumElements(), lambda_set_arcs_and_states,
(int32_t arc_idx012)->void {
int32_t fsa_id = ai_row_ids1[ai_row_ids2[arc_idx012]];
ArcInfo &info = ai_data[arc_idx012];
int32_t dest_a_fsas_state_idx01 = info.u.dest_a_fsas_state_idx01;
int32_t state_map_idx = dest_a_fsas_state_idx01 +
fsa_id * state_map_fsa_stride;
int32_t state_idx01;
if (!state_map_acc.Find(state_map_idx, &state_idx01))
state_idx01 = -1; // The destination state did not survive
// pruning.
int32_t state_idx1;
if (state_idx01 >= 0) {
int32_t state_idx0x = ans_states_row_splits1_data[fsa_id];
state_idx1 = state_idx01 - state_idx0x;
} else {
state_idx1 = -1; // Meaning: invalid.
}
// state_idx1 is the idx1 into ans->states, of the destination
// state.
info.u.dest_info_state_idx1 = state_idx1;
if (state_idx1 < 0)
return;
// multiple threads may write the same value to the address written
// to in the next line.
kept_states_data[state_idx01].a_fsas_state_idx01 =
dest_a_fsas_state_idx01;
int32_t end_loglike_int = FloatToOrderedInt(info.end_loglike);
// Set the forward log-like of the dest state to the largest of any
// of those of the incoming arcs. Note: we initialized this in
// lambda_init_loglike above.
AtomicMax(&(kept_states_data[state_idx01].forward_loglike),
end_loglike_int);
});
}
{
NVTX_RANGE("LambdaResetStateMap");
const int32_t *next_states_row_ids1 = ans->states.shape.RowIds(1).Data();
K2_EVAL(
c_, ans->states.NumElements(), lambda_reset_state_map,
(int32_t state_idx01)->void {
int32_t a_fsas_state_idx01 =
kept_states_data[state_idx01].a_fsas_state_idx01,
fsa_idx0 = next_states_row_ids1[state_idx01];
int32_t state_map_idx = a_fsas_state_idx01 +
fsa_idx0 * state_map_fsa_stride;
state_map_acc.Delete(state_map_idx);
});
}
return ans;
}
/*
Sets backward_loglike fields of StateInfo to the negative of the forward
prob if (this is the final-state or !only_final_probs), else -infinity.
This is used in computing the backward loglikes/scores for purposes of
pruning. This may be done after we're finished decoding/intersecting,
or while we are still decoding.
Note: something similar to this (setting backward-prob == forward-prob) is
also done in PropagateBackward() when we detect final-states. That's needed
because not all sequences have the same length, so some may have reached
their final state earlier. (Note: we only get to the final-state of a_fsas_
if we've reached the final frame of the input, because for non-final frames
we always have -infinity as the log-prob corresponding to the symbol -1.)
While we are still decoding, a background process will do pruning
concurrently with the forward computation, for purposes of reducing memory
usage (and so that most of the pruning can be made concurrent with the
forward computation). In this case we want to avoid pruning away anything
that wouldn't have been pruned away if we were to have waited to the end;
and it turns out that setting the backward probs to the negative of the
forward probs (i.e. for all states, not just final states) accomplishes
this. The issue was mentioned in the "Exact Lattice Generation.." paper and
also in the code for Kaldi's lattice-faster-decoder; search for "As in [3],
to save memory..."
@param [in] cur_frame Frame on which to set the backward probs
*/
void SetBackwardProbsFinal(FrameInfo *cur_frame) {
NVTX_RANGE("SetBackwardProbsFinal");
Ragged<StateInfo> &cur_states = cur_frame->states; // 2 axes: fsa,state
int32_t num_states = cur_states.values.Dim();
if (num_states == 0)
return;
StateInfo *cur_states_data = cur_states.values.Data();
const int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(),
*a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data(),
*cur_states_row_ids1_data = cur_states.RowIds(1).Data();
double minus_inf = -std::numeric_limits<double>::infinity();
K2_EVAL(c_, num_states, lambda_set_backward_prob, (int32_t state_idx01) -> void {
StateInfo *info = cur_states_data + state_idx01;
double backward_loglike,
forward_loglike = OrderedIntToFloat(info->forward_loglike);
        // `forward_loglike - forward_loglike == 0` tests that it is finite
        // (x - x is NaN, not 0, when x is infinite), i.e. not -infinity here.
        if (forward_loglike - forward_loglike == 0) {
// canonically we'd set this to zero, but setting it to the forward
// loglike when this is the final-state (in a_fsas_) has the effect of
// making the (forward+backward) probs equivalent to the logprob minus
// the best-path log-prob, which is convenient for pruning. If this
// is not actually the last frame of this sequence, which can happen
// if this was called before the forward decoding process was
// finished, what we are doing is a form of pruning that is guaranteed
// not to prune anything out that would not have been pruned out if we
// had waited until the real end of the file to do the pruning.
backward_loglike = -forward_loglike;
} else {
backward_loglike = minus_inf;
}
info->backward_loglike = backward_loglike;
});
}
/*
Does backward propagation of log-likes, which means setting the
backward_loglike field of the StateInfo variable (for cur_frame);
and works out which arcs and which states are to be pruned
on cur_frame; this information is output to Array1<char>'s which
are supplied by the caller.
These backward log-likes are normalized in such a way that you can add them
with the forward log-likes to produce the log-likelihood ratio vs the best
path (this will be non-positive). (To do this, for the final state we have
to set the backward log-like to the negative of the forward log-like; see
SetBackwardProbsFinal()).
This function also prunes arc-indexes on `cur_frame` and state-indexes
on `next_frame`.
@param [in] t The time-index (on which to look up log-likes);
equals time index of `cur_frame`; t >= 0
@param [in] cur_frame The FrameInfo for the frame on which we want to
set the forward log-like, and output pruning info
for arcs and states
@param [in] next_frame The next frame's FrameInfo, on which to look
up log-likes for the next frame; the
`backward_loglike` values of states on `next_frame`
are assumed to already be set, either by
SetBackwardProbsFinal() or a previous call to
PropagateBackward().
@param [out] cur_frame_states_keep An array, created by the caller,
to which we'll write 1s for elements of cur_frame->states
which we need to keep, and 0s for others.
@param [out] cur_frame_arcs_keep An array, created by the caller,
to which we'll write 1s for elements of cur_frame->arcs
which we need to keep (because they survived pruning),
and 0s for others.
*/
void PropagateBackward(int32_t t,
FrameInfo *cur_frame,
FrameInfo *next_frame,
Array1<char> *cur_frame_states_keep,
Array1<char> *cur_frame_arcs_keep) {
NVTX_RANGE("PropagateBackward");
int32_t num_states = cur_frame->states.NumElements(),
num_arcs = cur_frame->arcs.NumElements();
K2_CHECK_EQ(num_states, cur_frame_states_keep->Dim());
K2_CHECK_EQ(num_arcs, cur_frame_arcs_keep->Dim());
int32_t *a_fsas_row_ids1_data = a_fsas_.shape.RowIds(1).Data(),
*a_fsas_row_splits1_data = a_fsas_.shape.RowSplits(1).Data();
float minus_inf = -std::numeric_limits<float>::infinity();
Ragged<float> arc_backward_prob(cur_frame->arcs.shape,
Array1<float>(c_, cur_frame->arcs.NumElements()));
float *arc_backward_prob_data = arc_backward_prob.values.Data();
ArcInfo *ai_data = cur_frame->arcs.values.Data();
int32_t *arcs_rowids1 = cur_frame->arcs.shape.RowIds(1).Data(),
*arcs_rowids2 = cur_frame->arcs.shape.RowIds(2).Data(),
*arcs_row_splits1 = cur_frame->arcs.shape.RowSplits(1).Data(),
*arcs_row_splits2 = cur_frame->arcs.shape.RowSplits(2).Data();
float output_beam = output_beam_;
// compute arc backward probs, and set elements of 'keep_cur_arcs_data'
int32_t next_num_states = next_frame->states.TotSize(1);
char *keep_cur_arcs_data = cur_frame_arcs_keep->Data(),
*keep_cur_states_data = cur_frame_states_keep->Data();
const int32_t *next_states_row_splits1_data =
next_frame->states.RowSplits(1).Data();
StateInfo *next_states_data = next_frame->states.values.Data();
StateInfo *cur_states_data = cur_frame->states.values.Data();
K2_EVAL(c_, num_arcs, lambda_set_arc_backward_prob_and_keep,
(int32_t arcs_idx012) -> void {
ArcInfo *arc = ai_data + arcs_idx012;
int32_t state_idx01 = arcs_rowids2[arcs_idx012],
seq_idx0 = arcs_rowids1[state_idx01], // 'seq' == fsa-idx in b
next_states_idx0x = next_states_row_splits1_data[seq_idx0];
// Note: if dest_state_idx1 == -1, dest_state_idx01 has a meaningless
// value below, but it's never referenced.
int32_t dest_state_idx1 = arc->u.dest_info_state_idx1,
dest_state_idx01 = next_states_idx0x + dest_state_idx1;
float backward_loglike = minus_inf;
char keep_this_arc = 0;
if (dest_state_idx1 == -1) {
// dest_state_idx1 == -1 means this arc was already pruned in
// the forward pass.. do nothing.
} else {
float arc_loglike = arc->arc_loglike;
float dest_state_backward_loglike =
next_states_data[dest_state_idx01].backward_loglike;
// 'backward_loglike' is the loglike at the beginning of the arc
backward_loglike = arc_loglike + dest_state_backward_loglike;
float src_state_forward_loglike = OrderedIntToFloat(
cur_states_data[arcs_rowids2[arcs_idx012]].forward_loglike);
// should be <= 0.0, mathematically.
K2_CHECK_LT(backward_loglike, -src_state_forward_loglike + 2.0);
if (backward_loglike + src_state_forward_loglike >= -output_beam) {
keep_this_arc = 1;
} else {
backward_loglike = minus_inf; // Don't let arcs outside beam
                                             // contribute to their start-states'
// backward prob (we'll use that to
// prune the start-states away.)
}
}
keep_cur_arcs_data[arcs_idx012] = keep_this_arc;
arc_backward_prob_data[arcs_idx012] = backward_loglike;
});
/* note, the elements of state_backward_prob that don't have arcs leaving
them will be set to the supplied default. */
Array1<float> state_backward_prob(c_, num_states);
MaxPerSublist(arc_backward_prob, minus_inf, &state_backward_prob);
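    // Each state's backward prob is the max over the backward probs of the
    // arcs leaving it (or -infinity if it has no arcs, or none of them are
    // within the output beam).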
const float *state_backward_prob_data = state_backward_prob.Data();
const int32_t *cur_states_row_ids1 =
cur_frame->states.shape.RowIds(1).Data();
int32_t num_fsas = NumFsas();
K2_DCHECK_EQ(cur_frame->states.shape.Dim0(), num_fsas);
K2_EVAL(
c_, cur_frame->states.NumElements(), lambda_set_state_backward_prob,
(int32_t state_idx01)->void {
StateInfo *info = cur_states_data + state_idx01;
int32_t fsas_state_idx01 = info->a_fsas_state_idx01,
a_fsas_idx0 = a_fsas_row_ids1_data[fsas_state_idx01],
fsas_state_idx0x_next = a_fsas_row_splits1_data[a_fsas_idx0 + 1];
float forward_loglike = OrderedIntToFloat(info->forward_loglike),
backward_loglike;
// `is_final_state` means this is the final-state in a_fsas. this
// implies it's final in b_fsas too, since they both would have seen
// symbols -1.
int32_t is_final_state =
(fsas_state_idx01 + 1 >= fsas_state_idx0x_next);
if (is_final_state) {
// Note: there is only one final-state.
backward_loglike = -forward_loglike;
} else {
backward_loglike = state_backward_prob_data[state_idx01];
}
info->backward_loglike = backward_loglike;
keep_cur_states_data[state_idx01] = (backward_loglike != minus_inf);
});
}
/*
This function does backward propagation and pruning of arcs and states for a
specific time range.
@param [in] begin_t Lowest `t` value to call PropagateBackward() for
and to prune its arcs and states. Require t >= 0.
@param [in] end_t One-past-the-highest `t` value to call PropagateBackward()
and to prune its arcs and states. Require that
`frames_[t+1]` already be set up; this requires at least
end_t <= T.
Arcs on frames t >= end_t and states on frame t > end_t are ignored; the backward
probs on time end_t are set by SetBackwardProbsFinal(), see its documentation
to understand what this does if we haven't yet reached the end of one of the
sequences.
After this function is done, the arcs for `frames_[t]` with begin_t <= t < end_t and
the states for `frames_[t]` with begin_t < t < end_t will have their numbering changed.
(We don't renumber the states on begin_t because that would require the dest-states
of the arcs on time `begin_t - 1` to be modified). TODO: check this...
*/
void PruneTimeRange(int32_t begin_t,
int32_t end_t) {
SetBackwardProbsFinal(frames_[end_t].get());
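    // frames_[end_t] now has backward_loglike set (== -forward_loglike for
    // states whose forward log-like is finite), which seeds the backward
    // recursion over t = end_t - 1, ..., begin_t below.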
ContextPtr cpu = GetCpuContext();
int32_t num_fsas = b_fsas_.shape.Dim0(),
num_t = end_t - begin_t;
Array1<int32_t> old_states_offsets(cpu, num_t + 1),
old_arcs_offsets(cpu, num_t + 1);
int32_t tot_states = 0, tot_arcs = 0;
{
int32_t *old_states_offsets_data = old_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data();
for (int32_t i = 0; i <= num_t; i++) {
int32_t t = begin_t + i;
old_states_offsets_data[i] = tot_states;
old_arcs_offsets_data[i] = tot_arcs;
if (i < num_t) {
tot_states += frames_[t]->arcs.TotSize(1);
tot_arcs += frames_[t]->arcs.TotSize(2);
}
}
}
    // contains respectively: row_splits1_ptrs, row_ids1_ptrs,
    // row_splits2_ptrs, row_ids2_ptrs,
    // old_states_ptrs (really type StateInfo*),
    // old_arcs_ptrs (really type ArcInfo*).
Array1<void*> old_all_ptrs(cpu, num_t * 6);
Renumbering renumber_states(c_, tot_states),
renumber_arcs(c_, tot_arcs);
{
void **all_p = old_all_ptrs.Data();
int32_t **old_row_splits1_ptrs_data = (int32_t**)all_p,
**old_row_ids1_ptrs_data = (int32_t**)all_p + num_t,
**old_row_splits2_ptrs_data = (int32_t**)all_p + 2 * num_t,
**old_row_ids2_ptrs_data = (int32_t**)all_p + 3 * num_t;
StateInfo **old_states_ptrs_data = (StateInfo**)all_p + 4 * num_t;
ArcInfo **old_arcs_ptrs_data = (ArcInfo**)all_p + 5 * num_t;
int32_t *old_states_offsets_data = old_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data();
for (int32_t t = end_t - 1; t >= begin_t; --t) {
int32_t i = t - begin_t;
Array1<char> this_states_keep =
renumber_states.Keep().Arange(old_states_offsets_data[i],
old_states_offsets_data[i + 1]),
this_arcs_keep =
renumber_arcs.Keep().Arange(old_arcs_offsets_data[i],
old_arcs_offsets_data[i + 1]);
FrameInfo *cur_frame = frames_[t].get();
PropagateBackward(t, cur_frame, frames_[t+1].get(),
&this_states_keep, &this_arcs_keep);
old_row_splits1_ptrs_data[i] = cur_frame->arcs.RowSplits(1).Data();
old_row_ids1_ptrs_data[i] = cur_frame->arcs.RowIds(1).Data();
old_row_splits2_ptrs_data[i] = cur_frame->arcs.RowSplits(2).Data();
old_row_ids2_ptrs_data[i] = cur_frame->arcs.RowIds(2).Data();
old_arcs_ptrs_data[i] = cur_frame->arcs.values.Data();
old_states_ptrs_data[i] = cur_frame->states.values.Data();
// We can't discard any states on t == begin_t because: if it is not t ==
// 0, it would be inconvenient to map the dest-states of arcs on t - 1;
// and if it is t == 0, this may remove the start-state, which would make
// it more complex to avoid invalid FSAs (e.g. with an end-state but no
// start-state, or in which we incorrectly interpret a non-start state as
// the start state).
if (i == 0) // t == begin_t
this_states_keep = (char)1; // set all elements of the array
// `states_keep` to 1.
}
}
old_states_offsets = old_states_offsets.To(c_);
old_arcs_offsets = old_arcs_offsets.To(c_);
Array1<int32_t> new_states_offsets = renumber_states.Old2New(true)[old_states_offsets],
new_arcs_offsets = renumber_arcs.Old2New(true)[old_arcs_offsets];
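    // Old2New(true) includes an extra trailing element, so indexing it with
    // the old per-frame offsets (whose last element is the old total) gives
    // the corresponding offsets into the new, post-pruning numbering.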
int32_t new_num_states = renumber_states.NumNewElems(),
new_num_arcs = renumber_arcs.NumNewElems();
// These arrays map to the (t - begin_t) corresponding to this state or arc
// in the new numbering, i.e. the frame index minus begin_t.
Array1<int32_t> new_state_to_frame(c_, new_num_states),
new_arc_to_frame(c_, new_num_arcs);
RowSplitsToRowIds(new_states_offsets, &new_state_to_frame);
RowSplitsToRowIds(new_arcs_offsets, &new_arc_to_frame);
const int32_t *old_states_offsets_data = old_states_offsets.Data(),
*new_states_offsets_data = new_states_offsets.Data(),
*old_arcs_offsets_data = old_arcs_offsets.Data(),
*new_arcs_offsets_data = new_arcs_offsets.Data(),
*new_state_to_frame_data = new_state_to_frame.Data(),
*new_arc_to_frame_data = new_arc_to_frame.Data(),
*states_old2new_data = renumber_states.Old2New().Data(),
*states_new2old_data = renumber_states.New2Old().Data(),
*arcs_old2new_data = renumber_arcs.Old2New().Data(),
*arcs_new2old_data = renumber_arcs.New2Old().Data();
// Allocate the new row_splits and row_ids vectors for the shapes on the
// individual frames, and the new arc-info and state-info.
Array2<int32_t> all_row_splits1(c_, num_t, num_fsas + 1);
auto all_row_splits1_acc = all_row_splits1.Accessor();
Array1<int32_t> all_row_ids1(c_, new_num_states);
// the "+ num_t" below is for the extra element of each row_splits array.
Array1<int32_t> all_row_splits2(c_, new_num_states + num_t);
Array1<int32_t> all_row_ids2(c_, new_num_arcs);
Array1<StateInfo> all_states(c_, new_num_states);
Array1<ArcInfo> all_arcs(c_, new_num_arcs);
int32_t *all_row_ids1_data = all_row_ids1.Data(),
*all_row_ids2_data = all_row_ids2.Data(),
*all_row_splits2_data = all_row_splits2.Data();
StateInfo *all_states_data = all_states.Data();
ArcInfo *all_arcs_data = all_arcs.Data();
old_all_ptrs = old_all_ptrs.To(c_);
void **all_p = old_all_ptrs.Data();
K2_EVAL2(c_, num_t, num_fsas + 1,
lambda_set_new_row_splits1, (int32_t t_offset,
int32_t seq_idx) -> void {
          // note, t_offset is t - begin_t.
int32_t *old_row_splits1 = (int32_t*) all_p[t_offset];
int32_t old_idx0x = old_row_splits1[seq_idx];
// "pos" means position in appended states vector
// old_start_pos means start for this `t`.
int32_t old_start_pos = old_states_offsets_data[t_offset],
old_pos = old_start_pos + old_idx0x,
new_start_pos = states_old2new_data[old_start_pos],
new_pos = states_old2new_data[old_pos],
new_idx0x = new_pos - new_start_pos;
all_row_splits1_acc(t_offset, seq_idx) = new_idx0x;
// TODO: set elem zero of row-splits?
if (seq_idx == 0) {
// We assign the `seq_idx == 0` version of the kernel to set the initial
// zero in each row_splits vector.
all_row_splits2_data[new_pos + t_offset] = 0;
}
});
K2_EVAL(c_, new_num_states, lambda_per_state, (int32_t new_i) -> void {
// new_i is position in appended vector of all states.
int32_t t_offset = new_state_to_frame_data[new_i],
old_state_start_pos = old_states_offsets_data[t_offset],
new_arc_start_pos = new_arcs_offsets_data[t_offset],
old_arc_start_pos = old_arcs_offsets_data[t_offset],
old_i = states_new2old_data[new_i],
old_state_idx01 = old_i - old_state_start_pos;
// this old_states_data is from its FrameInfo::states.
const StateInfo *old_states_data = (StateInfo*)all_p[4 * num_t + t_offset];
const int32_t *old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset],
*old_row_splits2_data = (int32_t*)all_p[2 * num_t + t_offset];
// set the row-ids1 (these contain FSA-ids).
all_row_ids1_data[new_i] = old_row_ids1_data[old_state_idx01];
{ // set the row-splits2.
// We make each kernel responsible for the *next* row_splits entry,
// i.e. for its new_state_idx01 plus one. This solves the problem of no
// kernel being responsible for the last row-splits entry. We
// separately wrote the zeros for the 1st row-splits entry, in a
// previous kernel.
//
// It's safe to use old_state_idx01+1 instead of doing the same mapping
// from new_i+1 that we do from new_i to old_state_idx01, because
// we know this state was kept (because it has a new_i index.)
int32_t old_arc_idx01x_next = old_row_splits2_data[old_state_idx01+1],
old_arc_pos_next = old_arc_idx01x_next + old_arc_start_pos,
new_arc_pos_next = arcs_old2new_data[old_arc_pos_next],
new_arc_idx01x_next = new_arc_pos_next - new_arc_start_pos;
// "+ t_offset" is to compensate for the extra element of each row_splits
// vector. The "+ 1" is about the "next", i.e. each kernel is responsible
// for the next row_splits element, and none is responsible for the initial zero;
// that is set in a previous kernel.
all_row_splits2_data[new_i + t_offset + 1] = new_arc_idx01x_next;
}
all_states_data[new_i] = old_states_data[old_state_idx01];
});
K2_EVAL(c_, new_num_arcs, lambda_set_arcs, (int32_t new_i) -> void {
// new_i is position in appended vector of all arcs
int32_t t_offset = new_arc_to_frame_data[new_i],
new_state_start_pos = new_states_offsets_data[t_offset],
old_state_start_pos = old_states_offsets_data[t_offset],
next_old_state_start_pos = old_states_offsets_data[t_offset + 1],
old_arc_start_pos = old_arcs_offsets_data[t_offset],
old_i = arcs_new2old_data[new_i],
old_arc_idx012 = old_i - old_arc_start_pos;
ArcInfo *old_info_data = (ArcInfo*)all_p[5 * num_t + t_offset];
int32_t *old_row_ids2_data = (int32_t*)all_p[3 * num_t + t_offset],
*old_row_ids1_data = (int32_t*)all_p[1 * num_t + t_offset],
*next_old_row_splits1_data = (int32_t*)all_p[t_offset + 1];
int32_t old_src_state_idx01 = old_row_ids2_data[old_arc_idx012],
fsa_idx0 = old_row_ids1_data[old_src_state_idx01],
old_src_state_pos = old_src_state_idx01 + old_state_start_pos,
new_src_state_pos = states_old2new_data[old_src_state_pos],
new_src_state_idx01 = new_src_state_pos - new_state_start_pos;
all_row_ids2_data[new_i] = new_src_state_idx01;
ArcInfo info = old_info_data[old_arc_idx012];
if (t_offset + 1 == num_t) {
// Do nothing; this is the last frame of the batch of frames that we are
// pruning, so we don't need to renumber the destination-states of the
// arcs leaving it because the next frame's states have not been pruned
// (so the numbering stays the same).
} else {
// idx1 of the state in the next frame's `states` object.
int32_t dest_info_state_idx1 = info.u.dest_info_state_idx1;
// the naming below is unusual; by "pos" we mean position in the old or
// new "all_states" or "all_arcs" vectors, which have all frames appended.
// (the new ones physically exist; the old ones don't, but they are the
// numberings used in renumber_states.Keep() and renumber_arcs.Keep().)
int32_t old_dest_state_idx0x = next_old_row_splits1_data[fsa_idx0],
old_dest_state_idx01 = old_dest_state_idx0x + dest_info_state_idx1,
old_dest_state_idx0x_pos = next_old_state_start_pos + old_dest_state_idx0x,
old_dest_state_idx01_pos = next_old_state_start_pos + old_dest_state_idx01,
new_dest_state_idx0x_pos = states_old2new_data[old_dest_state_idx0x_pos],
new_dest_state_idx01_pos = states_old2new_data[old_dest_state_idx01_pos],
new_dest_state_idx1 = new_dest_state_idx01_pos - new_dest_state_idx0x_pos;
info.u.dest_info_state_idx1 = new_dest_state_idx1;
}
all_arcs_data[new_i] = info;
});
// Now reconstruct the states and arcs for all the frames we pruned, from
// sub-parts of the arrays we just created.
new_states_offsets = new_states_offsets.To(cpu);
new_arcs_offsets = new_arcs_offsets.To(cpu);
new_states_offsets_data = new_states_offsets.Data();
new_arcs_offsets_data = new_arcs_offsets.Data();
for (int32_t i = 0; i < num_t; i++) { // i corresponds to "t_offset".
int32_t state_offset = new_states_offsets_data[i],
next_state_offset = new_states_offsets_data[i + 1],
arc_offset = new_arcs_offsets_data[i],
next_arc_offset = new_arcs_offsets_data[i + 1];
// next line: operator[] into Array2 gives Array1, one row.
Array1<int32_t> row_splits1 = all_row_splits1.Row(i),
row_ids1 = all_row_ids1.Arange(state_offset, next_state_offset),
row_splits2 = all_row_splits2.Arange(state_offset + i, next_state_offset + (i+1)),
row_ids2 = all_row_ids2.Arange(arc_offset, next_arc_offset);
Array1<ArcInfo> arcs = all_arcs.Arange(arc_offset, next_arc_offset);
RaggedShape arcs_shape = RaggedShape3(&row_splits1, &row_ids1, -1,
&row_splits2, &row_ids2, -1);
int32_t t = begin_t + i;
frames_[t]->arcs = Ragged<ArcInfo>(arcs_shape, arcs);
Array1<StateInfo> states = all_states.Arange(state_offset, next_state_offset);
RaggedShape states_shape = GetLayer(arcs_shape, 0);
frames_[t]->states = Ragged<StateInfo>(states_shape, states);
}
}
ContextPtr c_;
FsaVec &a_fsas_; // Note: a_fsas_ has 3 axes.
int32_t a_fsas_stride_; // 1 if we use a different FSA per sequence
// (a_fsas_.Dim0() > 1), 0 if the decoding graph is
// shared (a_fsas_.Dim0() == 1).
DenseFsaVec &b_fsas_;
int32_t T_; // == b_fsas_.MaxSize(1).
float search_beam_;
float output_beam_;
int32_t min_active_;
int32_t max_active_;
Array1<float> dynamic_beams_; // dynamic beams (initially just search_beam_
// but change due to max_active/min_active
// constraints).
int32_t state_map_fsa_stride_; // state_map_fsa_stride_ is a_fsas_.TotSize(1)
// if a_fsas_.Dim0() == 1, else 0.
Hash32 state_map_; // state_map_ maps from:
// key == (state_map_fsa_stride_*n) + a_fsas_state_idx01,
// where n is the fsa_idx, i.e. the index into b_fsas_
// to
// value, where at different stages of PropagateForward(),
// value is an arc_idx012 (into cur_frame->arcs), and
// then later a state_idx01 into the next frame's `state`
// member.
                     // The fsa_idx n is part of the key because, if all the
                     // streams share the same FSA in a_fsas_, we still need
                     // separate entries for each stream. This map is used on
// each frame to compute and store the mapping
// from active states to the position in the
// `states` array. Between frames, all values
// have -1 in them.
std::vector<std::unique_ptr<FrameInfo>> frames_;
// logically an array of bool, of size T_ + 1; for each 0 <= t <= T, after the
// forward pass finishes propagation with cur_frame_ == t, if
// do_pruning_after_[t] is false it will continue as normal; otherwise (if
// true), it will signal `semaphore_`.
std::vector<char> do_pruning_after_;
// For each t for which do_pruning_after_[t] is true, there will be a
// pair (begin_t, end_t) in prune_t_begin_end giving the
// arguments for which we will invoke PruneTimeRange() after the forward-pass
// for time t has completed. The size of this array equals the sum
// of nonzero elements of do_pruning_after_.
std::vector<std::pair<int32_t, int32_t> > prune_t_begin_end_;
// Each time the forward-pass finishes forward processing for a t value for
// which do_pruning_after_[t] is true, it will signal this semaphore; the
// backward-pass thread (which does pruning) will wait on it as many times as
// do_pruning_after_[t] is set to true.
Semaphore backward_semaphore_;
// The function of forward_semaphore_ is to ensure that the backward (pruning)
// pass doesn't "get too far behind" relative to the forward pass, which might
// cause us to use more memory than expected. (Note: the backward pass is
// normally a bit faster than the forward pass, so typically this won't be a
// problem). Each time the backward pass has finished one round of pruning it
  // signals this semaphore. Each time, after the forward pass signals the
// backward pass that it's ready to prune, it waits on this semaphore
// immediately afterward. But because forward_semaphore_ is initialized to 1
// rather than zero, the effect is that the forward pass is waiting for the
// *previous* phase of backward pruning to complete, rather than the current
// one.
k2std::counting_semaphore forward_semaphore_;
};
void IntersectDensePruned(FsaVec &a_fsas, DenseFsaVec &b_fsas,
float search_beam, float output_beam,
int32_t min_active_states, int32_t max_active_states,
FsaVec *out, Array1<int32_t> *arc_map_a,
Array1<int32_t> *arc_map_b) {
NVTX_RANGE("IntersectDensePruned");
FsaVec a_vec = FsaToFsaVec(a_fsas);
MultiGraphDenseIntersectPruned intersector(a_vec, b_fsas, search_beam,
output_beam, min_active_states,
max_active_states);
intersector.Intersect();
intersector.FormatOutput(out, arc_map_a, arc_map_b);
}
} // namespace k2
|
e3dd8f94e87feb9b0aff375a7acdc8bb8c4a922f.hip | // !!! This is a file automatically generated by hipify!!!
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008, 2009 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
Redistribution and use of HOOMD-blue, in source and binary forms, with or
without modification, are permitted, provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of HOOMD-blue's
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR
ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*! \file DriverTersoffGPU.cu
\brief Defines the driver functions for computing all types of three-body forces on the GPU
*/
#include "DriverTersoffGPU.cuh"
#include "EvaluatorTersoff.h"
hipError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args,
const tersoff_params *d_params)
{
return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args,
d_params);
}
| e3dd8f94e87feb9b0aff375a7acdc8bb8c4a922f.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008, 2009 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
Redistribution and use of HOOMD-blue, in source and binary forms, with or
without modification, are permitted, provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of HOOMD-blue's
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR
ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*! \file DriverTersoffGPU.cu
\brief Defines the driver functions for computing all types of three-body forces on the GPU
*/
#include "DriverTersoffGPU.cuh"
#include "EvaluatorTersoff.h"
cudaError_t gpu_compute_tersoff_forces(const tersoff_args_t& pair_args,
const tersoff_params *d_params)
{
return gpu_compute_triplet_forces<EvaluatorTersoff>(pair_args,
d_params);
}
|
ad6626facfb7a186bda8484d65f8c943097f968e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 4
#define TW 1
#define TC 4
#define C 64
#define N 32
#define H 7
#define W 7
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
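// Auto-generated (TVM-style) 3x3 convolution kernel for the C=64, N=32,
// H=W=7 problem defined above: each thread block computes one 7-wide output
// row for a group of 8 output channels, staging input and filter tiles in
// shared memory.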
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[168];
__shared__ float kernel_shared[192];
float pad_temp_shared_local[8];
float kernel_shared_local[32];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
compute_local[((ff_c_init + 2))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
#pragma unroll
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 12; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 84) + (((int)threadIdx.x) * 12)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21) / 7) + ((int)blockIdx.y))) && ((((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21) / 7) + ((int)blockIdx.y)) < 8)) && (1 <= (rx_outer + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)))) && ((rx_outer + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)) < 8)) ? data[((((((((rc_outer * 392) + (((int)threadIdx.z) * 196)) + ((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 21) * 49)) + (((int)blockIdx.y) * 7)) + rx_outer) + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21)) - 8))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 14; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 24)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 64) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 14)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 192) {
if (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 96) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 14)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 4608) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 24) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 24) * 3)) + rx_outer))];
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 8; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[((((ax1 * 21) + (ry_inner_outer * 7)) + ((int)threadIdx.x)))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 8; ++ax11) {
kernel_shared_local[(((ax0 * 8) + ax11))] = kernel_shared[(((((((int)threadIdx.z) * 48) + (ax0 * 24)) + (ax11 * 3)) + ry_inner_outer))];
kernel_shared_local[((((ax0 * 8) + ax11) + 16))] = kernel_shared[((((((((int)threadIdx.z) * 48) + (ax0 * 24)) + (ax11 * 3)) + ry_inner_outer) + 96))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 8; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 2))] = (compute_local[((ff_c + 2))] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
}
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)blockIdx.y) * 7)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
compute[(((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)blockIdx.y) * 7)) + ((int)threadIdx.x)) + 196))] = compute_local[((ff_inner_inner_inner + 2))];
}
}
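// The three wrapper classes below (ConvGemm, ConvWinogradeNon, ConvFFT) run
// the same convolution through cuDNN with the IMPLICIT_GEMM,
// WINOGRAD_NONFUSED and FFT forward algorithms respectively, presumably as
// reference baselines for the generated kernel above.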
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
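// ConvFFT mirrors ConvGemm, differing only in the forward algorithm: CUDNN_CONVOLUTION_FWD_ALGO_FFT.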
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
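// Cooperatively stages a TC x (TH+2) x WPAD input tile into shared memory, leaving a one-column
// halo on each side (the "+ 1" column offset). Case 1 (h_offset == 1) keeps the top padding row
// zeroed for tiles at the image border; case 0 starts loading one row above the output tile.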
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
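// Writes a thread's TH x TW partial sums to global memory with atomicAdd. The nested switch on
// (write_h, write_w) instantiates fully unrolled stores only for the tile shapes that can occur at
// the bottom/right image border (with TW == 1 here, write_w is always 1).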
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 3:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 4:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 4; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
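// Custom direct 3x3 convolution: each block covers one (channel-tile, row-tile) pair, each thread
// owns one output channel (n == lane_id) and one TH x TW output column; the padded input tile is
// staged in dynamic shared memory and the nine filter taps per (c, n) are kept in registers.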
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
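// Fully unrolled 3x3 stencil for a TH = 4, TW = 1 output column: shared-memory rows 0..5 feed
// output rows 0..3, each input row contributing to up to three vertically adjacent outputs.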
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
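// Benchmark driver: runs the three cuDNN algorithms (implicit GEMM, non-fused Winograd, FFT), the
// generated baseline kernel and the custom conv2d kernel on the same random input, times each with
// event timers, and appends one CSV row with the absolute times and the ratios relative to time_tdc.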
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,4);
dim3 block(7,1,2);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| ad6626facfb7a186bda8484d65f8c943097f968e.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 4
#define TW 1
#define TC 4
#define C 64
#define N 32
#define H 7
#define W 7
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
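// Tiling configuration: each output tile covers TH x TW spatial positions over TC input channels;
// TCS/THS/TWS count the tiles along the channel/height/width axes, and WPAD = TWS*TW + 2 is the
// padded shared-memory row width that leaves room for the 3x3 halo.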
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
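// Auto-generated baseline kernel (presumably TVM output, given the default_function_kernel0 name
// and the time_tvm timer below): stages input and filter tiles through pad_temp_shared /
// kernel_shared and accumulates four outputs per thread in registers.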
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[168];
__shared__ float kernel_shared[192];
float pad_temp_shared_local[8];
float kernel_shared_local[32];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
compute_local[((ff_c_init + 2))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
#pragma unroll
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 12; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
pad_temp_shared[((((((int)threadIdx.z) * 84) + (((int)threadIdx.x) * 12)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= (((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21) / 7) + ((int)blockIdx.y))) && ((((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21) / 7) + ((int)blockIdx.y)) < 8)) && (1 <= (rx_outer + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)))) && ((rx_outer + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 7)) < 8)) ? data[((((((((rc_outer * 392) + (((int)threadIdx.z) * 196)) + ((((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 21) * 49)) + (((int)blockIdx.y) * 7)) + rx_outer) + (((((int)threadIdx.x) * 12) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 21)) - 8))] : 0.000000e+00f);
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 14; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 24)) < 8) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 3)) < 64) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 14)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 192) {
if (((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 96) {
kernel_shared[((((((int)threadIdx.z) * 96) + (((int)threadIdx.x) * 14)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 4608) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 24) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.x) * 14) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 24) * 3)) + rx_outer))];
}
}
}
}
}
__syncthreads();
#pragma unroll
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 8; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[((((ax1 * 21) + (ry_inner_outer * 7)) + ((int)threadIdx.x)))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 8; ++ax11) {
kernel_shared_local[(((ax0 * 8) + ax11))] = kernel_shared[(((((((int)threadIdx.z) * 48) + (ax0 * 24)) + (ax11 * 3)) + ry_inner_outer))];
kernel_shared_local[((((ax0 * 8) + ax11) + 16))] = kernel_shared[((((((((int)threadIdx.z) * 48) + (ax0 * 24)) + (ax11 * 3)) + ry_inner_outer) + 96))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 8; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 2))] = (compute_local[((ff_c + 2))] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
}
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)blockIdx.y) * 7)) + ((int)threadIdx.x)))] = compute_local[(ff_inner_inner_inner)];
compute[(((((((((int)blockIdx.z) * 392) + (((int)threadIdx.z) * 98)) + (ff_inner_inner_inner * 49)) + (((int)blockIdx.y) * 7)) + ((int)threadIdx.x)) + 196))] = compute_local[((ff_inner_inner_inner + 2))];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 2:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 2; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 3:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 3; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
case 4:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 4; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 4 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 5 * WPAD + tw_id * TW + 2]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,4);
dim3 block(7,1,2);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/2080Ti-layers-eval-oracle.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
8f1003f02f550bacc8171f0654a491ae355531ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel
{
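// TPointHist: warp-local shared-memory histogram of (target-sum, weight-sum) pairs for four
// features packed as 8-bit bins in one ui32. OUTER_HIST_BITS_COUNT partitions the high bin bits
// across warps (each warp only accepts bins matching its BlockId), and INNER_HIST_BITS_COUNT
// controls how the remaining bins are laid out across the per-warp sub-histograms, so one
// 32 * BLOCK_SIZE float buffer can cover larger bin counts.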
template<int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCK_SIZE>
struct TPointHist
{
volatile float* __restrict__ Buffer;
int BlockId;
__forceinline__ __device__ int SliceOffset()
{
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff)
{
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE)
{
buff[i] = 0;
}
Buffer = buff + SliceOffset();
BlockId = (threadIdx.x / 32) & ((1 << OUTER_HIST_BITS_COUNT) - 1);
__syncthreads();
}
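// AddPoint: scatter one sample into this warp's histogram slice. The packed ci holds four 8-bit
// feature bins; even/odd thread pairs write the (target, weight) pair in swapped order via
// offset0/offset1 to avoid bank conflicts, and the boolean 'pass' zeroes contributions to bins
// owned by a different warp slice.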
__device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++)
{
short f = (threadIdx.x + (i << 1)) & 6;
short bin = bfe(ci, 24 - (f << 2), 8);
bool pass = (bin >> (5 + INNER_HIST_BITS_COUNT)) == BlockId;
int offset0 = f + flag;
int offset1 = f + !flag;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
const int tmp = (((bin >> INNER_HIST_BITS_COUNT) & 31) << 5) + 8 * (bin & mask);
offset0 += tmp;
offset1 += tmp;
if (INNER_HIST_BITS_COUNT > 0)
{
#pragma unroll
for (int k = 0; k < (1 << INNER_HIST_BITS_COUNT); ++k)
{
if (((threadIdx.x >> 3) & ((1 << INNER_HIST_BITS_COUNT) - 1)) == k)
{
Buffer[offset0] += (flag ? t : w) * pass;
Buffer[offset1] += (flag ? w : t) * pass;
}
}
} else {
Buffer[offset0] += (flag ? t : w) * pass;
Buffer[offset1] += (flag ? w : t) * pass;
}
}
}
//After the reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce()
{
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int warpHistCount = warpCount >> OUTER_HIST_BITS_COUNT;
const int fold = (threadIdx.x >> 3) & 31;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
const int binOffset = ((fold >> INNER_HIST_BITS_COUNT) << 5) + 8 * (fold & mask);
const int offset = (threadIdx.x & 7) + binOffset;
const float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int outerBits = 0; outerBits < 1 << (OUTER_HIST_BITS_COUNT); ++outerBits)
{
#pragma unroll
for (int innerBits = 0; innerBits < (1 << (INNER_HIST_BITS_COUNT)); ++innerBits)
{
float sum = 0.0f;
const int innerOffset = innerBits << (10 - INNER_HIST_BITS_COUNT);
const int tmp = innerOffset + offset;
{
#pragma unroll
for (int hist = 0; hist < warpHistCount; ++hist)
{
const int warpOffset = ((hist << OUTER_HIST_BITS_COUNT) + outerBits) * 1024;
const int tmp2 = tmp + warpOffset;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist)
{
sum += buffer[tmp2 + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
}
__syncthreads();
if (threadIdx.x < 256)
{
Buffer[threadIdx.x + 256 * (innerBits | (outerBits << INNER_HIST_BITS_COUNT))] = sum;
}
}
}
__syncthreads();
}
};
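// ComputeHistogram: streams the block's slice of samples (strided by STRIPE_SIZE *
// BLOCKS_PER_FEATURE) through THist, using N-way batched __ldg loads for the bulk of the data and
// a scalar tail loop, then reduces the per-warp histograms into shared memory.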
template<int STRIPE_SIZE, int OUTER_UNROLL, int N, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0);
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE;
if (dsSize)
{
int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
weight += i;
target += i;
indices += i;
#pragma unroll OUTER_UNROLL
for (int j = 0; j < blocked_iteration_count; ++j)
{
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++)
{
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_w[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k)
{
local_ci[k] = __ldg(cindex + local_index[k]);
local_w[k] = __ldg(weight + stripe * k);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k)
{
hist.AddPoint(local_ci[k], local_wt[k], local_w[k]);
}
i += stripe * N;
indices += stripe * N;
target += stripe * N;
weight += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k)
{
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float w = __ldg(weight);
float wt = __ldg(target);
hist.AddPoint(ci, wt, w);
i += stripe;
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
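// ComputeHistogram2: two-samples-per-thread variant with 64-bit (uint2 / float2) loads. The first
// block of each feature group handles the unaligned head (up to the next 128-element boundary) and
// the sub-32-element tail, so the main loop only sees aligned, evenly sized data.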
template<int STRIPE_SIZE, int OUTER_UNROLL, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram2(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
if (dsSize)
{
//first: align memory accesses; the first block of each feature group loads the leading 128 - (offset % 128) elements.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
for (; (colId < lastId); colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + colId);
const float wt = __ldg(target + colId);
hist.AddPoint(ci, wt, w);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
weight += lastId;
}
//now align the end: handle the unaligned tail of up to 31 elements.
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; colId < unalignedTail; colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + tailOffset + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + tailOffset + colId);
const float wt = __ldg(target + tailOffset + colId);
hist.AddPoint(ci, wt, w);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE * 2;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32);
weight += i;
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OUTER_UNROLL
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
const float2 localWeight = __ldg((float2* )(weight));
hist.AddPoint(firstBin, localTarget.x, localWeight.x);
hist.AddPoint(secondBin, localTarget.y, localWeight.y);
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
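// ComputeSplitPropertiesPass: builds the histogram for up to four packed features with the chosen
// OUTER/INNER bit split, then pairs of threads (one per target/weight component) flush their
// 32-bin slices of the reduced histogram to binSumsForPart, atomically when a feature's work is
// spread over several blocks (BLOCKS_PER_FEATURE > 1).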
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem)
{
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = (BLOCK_SIZE >> OUTER_HIST_BITS_COUNT);
const int histBlockCount = 1 << OUTER_HIST_BITS_COUNT;
if (USE_64_BIT_LOAD)
{
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 1;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
}
else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
int fid = (threadIdx.x / 64);
int fold = (threadIdx.x / 2) & 31;
#pragma unroll
for (int upperBits = 0; upperBits < (1 << (OUTER_HIST_BITS_COUNT + INNER_HIST_BITS_COUNT)); ++upperBits)
{
const int binOffset = upperBits << 5;
if (fid < fCount && fold < min((int) feature[fid].Folds - binOffset, 32))
{
int w = threadIdx.x & 1;
const float val = smem[fold * 8 + 2 * fid + w + 256 * upperBits];
if (abs(val) > 1e-20f)
{
if (BLOCKS_PER_FEATURE > 1)
{
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, val);
} else
{
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, val);
}
}
}
}
}
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation; for this step 64-bit loads are almost 2x faster.
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, false);
} else {
DECLARE_PASS(1, 2, M, false);
}
}
}
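// TPointHistHalfByte: shared-memory histogram for features quantized to 4 bits (at most 16 folds),
// eight features packed per ui32; used by both the half-byte and the binary split-properties kernels.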
template<int BLOCK_SIZE>
struct TPointHistHalfByte
{
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset()
{
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 16;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff)
{
const int HIST_SIZE = 16 * BLOCK_SIZE;
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE)
{
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 8; i++)
{
const short f = (threadIdx.x + (i << 1)) & 14;
short bin = bfe(ci, 28 - (f << 1), 4);
bin <<= 5;
bin += f;
const int offset0 = bin + flag;
const int offset1 = bin + !flag;
Buffer[offset0] += (flag ? t : w);
Buffer[offset1] += (flag ? w : t);
}
}
__device__ void Reduce()
{
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
{
const int fold = (threadIdx.x >> 5) & 15;
const int sumOffset = threadIdx.x & 31;
float sum = 0.0;
if (threadIdx.x < 512)
{
float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId)
{
const int warpOffset = 512 * warpId;
sum += buffer[warpOffset + sumOffset + 32 * fold];
}
}
__syncthreads();
if (threadIdx.x < 512)
{
Buffer[threadIdx.x] = sum;
}
}
__syncthreads();
const int fold = (threadIdx.x >> 4) & 15;
float sum = 0.0f;
if (threadIdx.x < 256)
{
const int histEntryId = (threadIdx.x & 15);
sum = Buffer[32 * fold + histEntryId] + Buffer[32 * fold + histEntryId + 16];
}
__syncthreads();
if (threadIdx.x < 256)
{
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
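// ComputeSplitPropertiesBImpl: histogram pass for binary features, 32 features per block. The
// final loop folds the 16 nibble counters down to one (target, weight) sum per feature by masking
// on that feature's bit inside its 4-feature group.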
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BLOCK_SIZE];
if (partition->Size)
{
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = FULL_PASS;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices,
partition->Offset, partition->Size,
target, weight,
cindex,
&counters[0]);
}
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++)
{
if (!(i & fMask))
{
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f)
{
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
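// Heuristic: replicate each feature's work over more blocks (powers of two, capped at 64) while
// the grid would otherwise under-utilize the SMs and the per-block slice stays large enough.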
inline ui32 EstimateBlockPerFeatureMultiplier(dim3 numBlocks, ui32 dsSize) {
ui32 multiplier = 1;
while ((numBlocks.x * numBlocks.y * min(numBlocks.z, 4) * multiplier < TArchProps::SMCount()) &&
((dsSize / multiplier) > 10000) && (multiplier < 64)) {
multiplier *= 2;
}
return multiplier;
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream)
{
if (nbCount)
{
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, binFeatureCount, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
const int scanBlockSize = 256;
dim3 scanBlocks;
scanBlocks.x = (nbCount * 32 + scanBlockSize - 1) / scanBlockSize;
scanBlocks.y = histPartCount;
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partCount / 2) * binFeatureCount * 2) * foldCount;
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> > (nbFeatures, nbCount, binFeatureCount, binSums +
scanOffset);
if (!fullPass)
{
UpdatePointwiseHistograms(binSums, binFeatureCount, partCount, foldCount, 2, partition, stream);
}
}
}
template<int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, bCount
);
} else
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, bCount
);
}
};
void ComputeHist2Binary(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
float* binSums, bool fullPass,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount)
{
        #define COMPUTE(k) \
            RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
                                                      partition, binSums, fullPass, stream, numBlocks);
            if (multiplier == 1)
{
COMPUTE(1)
} else if (multiplier == 2)
{
COMPUTE(2)
} else if (multiplier == 4)
{
COMPUTE(4)
} else if (multiplier == 8)
{
COMPUTE(8);
} else if (multiplier == 16)
{
COMPUTE(16)
} else if (multiplier == 32)
{
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
if (!fullPass)
{
UpdatePointwiseHistograms(binSums, bCount, partsCount, foldCount, 2, partition, stream);
}
}
}
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
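        // Every M consecutive blocks in x cooperate on the same group of up to
        // 8 half-byte features.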
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
//
__shared__ float smem[16 * BLOCK_SIZE];
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad)
{
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices, partition->Offset, partition->Size,
target, weight,
cindex, smem);
}
__syncthreads();
const int fid = (threadIdx.x / 32);
const int fold = (threadIdx.x / 2) & 15;
const int w = threadIdx.x & 1;
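        // Output layout: one warp per feature, 16 folds x 2 statistics (w) per warp.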
if (fid < fCount && fold < feature[fid].Folds)
{
const float result = smem[fold * 16 + 2 * fid + w];
if (abs(result) > 1e-20)
{
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result);
} else
{
binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result;
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount);
}
}
void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, int halfByteFeaturesCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream)
{
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount)
{
#define COMPUTE(k)\
RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
weight, indices, partition, binSums, binFeatureCount,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
const int scanBlockSize = 256;
dim3 scanBlocks;
scanBlocks.x = static_cast<ui32>((halfByteFeaturesCount * 32 + scanBlockSize - 1) / scanBlockSize);
scanBlocks.y = static_cast<ui32>(histCount);
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partsCount / 2) * binFeatureCount * 2) * foldCount;
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> >
(halfByteFeatures, halfByteFeaturesCount, binFeatureCount,
binSums + scanOffset);
if (!fullPass) {
UpdatePointwiseHistograms(binSums, binFeatureCount, partsCount, foldCount, 2, partition, stream);
}
}
}
__global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits)
{
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
const ui32 idx = LdgWithFallback(docIndices, i);
const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1;
dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits));
}
}
void UpdateFoldBins(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits, TCudaStream stream)
{
const ui32 blockSize = 256;
const ui32 numBlocks = CeilDivide(size, blockSize);
UpdateBinsImpl << < numBlocks, blockSize, 0, stream >> > (dstBins, bins, docIndices, size, loadBit, foldBits);
}
}
| 8f1003f02f550bacc8171f0654a491ae355531ef.cu | #include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
namespace NKernel
{
template<int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCK_SIZE>
struct TPointHist
{
volatile float* __restrict__ Buffer;
int BlockId;
__forceinline__ __device__ int SliceOffset()
{
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff)
{
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE)
{
buff[i] = 0;
}
Buffer = buff + SliceOffset();
BlockId = (threadIdx.x / 32) & ((1 << OUTER_HIST_BITS_COUNT) - 1);
__syncthreads();
}
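        // Scatter one (target, weight) pair into the per-warp histogram slice: f walks
        // the four byte-features starting at a thread-dependent offset, bin is the
        // 8-bit feature value, and even/odd threads write the two statistics into the
        // adjacent counters in opposite order.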
__device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++)
{
short f = (threadIdx.x + (i << 1)) & 6;
short bin = bfe(ci, 24 - (f << 2), 8);
bool pass = (bin >> (5 + INNER_HIST_BITS_COUNT)) == BlockId;
int offset0 = f + flag;
int offset1 = f + !flag;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
const int tmp = (((bin >> INNER_HIST_BITS_COUNT) & 31) << 5) + 8 * (bin & mask);
offset0 += tmp;
offset1 += tmp;
if (INNER_HIST_BITS_COUNT > 0)
{
#pragma unroll
for (int k = 0; k < (1 << INNER_HIST_BITS_COUNT); ++k)
{
if (((threadIdx.x >> 3) & ((1 << INNER_HIST_BITS_COUNT) - 1)) == k)
{
Buffer[offset0] += (flag ? t : w) * pass;
Buffer[offset1] += (flag ? w : t) * pass;
}
}
} else {
Buffer[offset0] += (flag ? t : w) * pass;
Buffer[offset1] += (flag ? w : t) * pass;
}
}
}
        //After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce()
{
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int warpHistCount = warpCount >> OUTER_HIST_BITS_COUNT;
const int fold = (threadIdx.x >> 3) & 31;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
const int binOffset = ((fold >> INNER_HIST_BITS_COUNT) << 5) + 8 * (fold & mask);
const int offset = (threadIdx.x & 7) + binOffset;
const float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int outerBits = 0; outerBits < 1 << (OUTER_HIST_BITS_COUNT); ++outerBits)
{
#pragma unroll
for (int innerBits = 0; innerBits < (1 << (INNER_HIST_BITS_COUNT)); ++innerBits)
{
float sum = 0.0f;
const int innerOffset = innerBits << (10 - INNER_HIST_BITS_COUNT);
const int tmp = innerOffset + offset;
{
#pragma unroll
for (int hist = 0; hist < warpHistCount; ++hist)
{
const int warpOffset = ((hist << OUTER_HIST_BITS_COUNT) + outerBits) * 1024;
const int tmp2 = tmp + warpOffset;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist)
{
sum += buffer[tmp2 + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
}
__syncthreads();
if (threadIdx.x < 256)
{
Buffer[threadIdx.x + 256 * (innerBits | (outerBits << INNER_HIST_BITS_COUNT))] = sum;
}
}
}
__syncthreads();
}
};
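    // Accumulates the histogram for one feature group: the BLOCKS_PER_FEATURE
    // cooperating blocks walk the index array in interleaved stripes, with the inner
    // loop unrolled N-fold over __ldg loads.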
template<int STRIPE_SIZE, int OUTER_UNROLL, int N, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE, 0);
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE;
if (dsSize)
{
int i = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
int iteration_count = (dsSize - i + (stripe - 1)) / stripe;
int blocked_iteration_count = ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N;
weight += i;
target += i;
indices += i;
#pragma unroll OUTER_UNROLL
for (int j = 0; j < blocked_iteration_count; ++j)
{
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++)
{
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_w[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k)
{
local_ci[k] = __ldg(cindex + local_index[k]);
local_w[k] = __ldg(weight + stripe * k);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k)
{
hist.AddPoint(local_ci[k], local_wt[k], local_w[k]);
}
i += stripe * N;
indices += stripe * N;
target += stripe * N;
weight += stripe * N;
}
for (int k = blocked_iteration_count * N; k < iteration_count; ++k)
{
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float w = __ldg(weight);
float wt = __ldg(target);
hist.AddPoint(ci, wt, w);
i += stripe;
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
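    // Variant of ComputeHistogram that reads two elements per thread through
    // uint2/float2 loads; the first block of each group handles the unaligned head
    // (up to the next 128-element boundary) and the 32-element tail with scalar loads.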
template<int STRIPE_SIZE, int OUTER_UNROLL, int HIST_BLOCK_COUNT, int BLOCKS_PER_FEATURE, typename THist>
__forceinline__ __device__ void ComputeHistogram2(
const ui32* __restrict__ indices,
int offset, int dsSize,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ cindex, float* __restrict__ result) {
weight += offset;
target += offset;
indices += offset;
THist hist(result);
if (dsSize)
{
            // first: align global memory accesses; the leading 128 - (offset % 128)
            // elements are handled here so the main loop starts on a 128-element boundary.
{
int lastId = min(dsSize, 128 - (offset & 127));
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
for (; (colId < lastId); colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + colId);
const float wt = __ldg(target + colId);
hist.AddPoint(ci, wt, w);
}
}
dsSize = max(dsSize - lastId, 0);
indices += lastId;
target += lastId;
weight += lastId;
}
//now lets align end
const int unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0)
{
int colId = (threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32;
const int tailOffset = dsSize - unalignedTail;
for (; colId < unalignedTail; colId += blockDim.x / HIST_BLOCK_COUNT)
{
const int index = __ldg(indices + tailOffset + colId);
const ui32 ci = __ldg(cindex + index);
const float w = __ldg(weight + tailOffset + colId);
const float wt = __ldg(target + tailOffset + colId);
hist.AddPoint(ci, wt, w);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BLOCKS_PER_FEATURE) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
target += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
weight += (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2;
const int stripe = STRIPE_SIZE * BLOCKS_PER_FEATURE * 2;
dsSize = max(dsSize - (blockIdx.x % BLOCKS_PER_FEATURE) * STRIPE_SIZE * 2, 0);
if (dsSize) {
int iterCount;
{
const int i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32 / HIST_BLOCK_COUNT) * 32);
weight += i;
target += i;
indices += i;
iterCount = (dsSize - i + (stripe - 1)) / stripe;
}
#pragma unroll OUTER_UNROLL
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
const float2 localWeight = __ldg((float2* )(weight));
hist.AddPoint(firstBin, localTarget.x, localWeight.x);
hist.AddPoint(secondBin, localTarget.y, localWeight.y);
indices += stripe;
target += stripe;
weight += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem)
{
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = (BLOCK_SIZE >> OUTER_HIST_BITS_COUNT);
const int histBlockCount = 1 << OUTER_HIST_BITS_COUNT;
if (USE_64_BIT_LOAD)
{
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 1;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
}
else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
int fid = (threadIdx.x / 64);
int fold = (threadIdx.x / 2) & 31;
#pragma unroll
for (int upperBits = 0; upperBits < (1 << (OUTER_HIST_BITS_COUNT + INNER_HIST_BITS_COUNT)); ++upperBits)
{
const int binOffset = upperBits << 5;
if (fid < fCount && fold < min((int) feature[fid].Folds - binOffset, 32))
{
int w = threadIdx.x & 1;
const float val = smem[fold * 8 + 2 * fid + w + 256 * upperBits];
if (abs(val) > 1e-20f)
{
if (BLOCKS_PER_FEATURE > 1)
{
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, val);
} else
{
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold + binOffset) * 2 + w, val);
}
}
}
}
}
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
        // CatBoost always uses direct loads on the first pass of histogram calculation,
        // and for this step 64-bit loads are almost 2x faster.
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
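        // Pick the TPointHist layout from the widest feature in this 4-feature group:
        // up to 32 bins fit the base histogram, wider features need extra inner/outer
        // bits (and fall back to 32-bit loads).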
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, false);
} else {
DECLARE_PASS(1, 2, M, false);
}
}
}
template<int BLOCK_SIZE>
struct TPointHistHalfByte
{
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset()
{
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 16;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff)
{
const int HIST_SIZE = 16 * BLOCK_SIZE;
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE)
{
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w)
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 8; i++)
{
const short f = (threadIdx.x + (i << 1)) & 14;
short bin = bfe(ci, 28 - (f << 1), 4);
bin <<= 5;
bin += f;
const int offset0 = bin + flag;
const int offset1 = bin + !flag;
Buffer[offset0] += (flag ? t : w);
Buffer[offset1] += (flag ? w : t);
}
}
__device__ void Reduce()
{
Buffer -= SliceOffset();
const int warpCount = BLOCK_SIZE >> 5;
{
const int fold = (threadIdx.x >> 5) & 15;
const int sumOffset = threadIdx.x & 31;
float sum = 0.0;
if (threadIdx.x < 512)
{
float* __restrict__ buffer = const_cast<float*>(Buffer);
#pragma unroll
for (int warpId = 0; warpId < warpCount; ++warpId)
{
const int warpOffset = 512 * warpId;
sum += buffer[warpOffset + sumOffset + 32 * fold];
}
}
__syncthreads();
if (threadIdx.x < 512)
{
Buffer[threadIdx.x] = sum;
}
}
__syncthreads();
const int fold = (threadIdx.x >> 4) & 15;
float sum = 0.0f;
if (threadIdx.x < 256)
{
const int histEntryId = (threadIdx.x & 15);
sum = Buffer[32 * fold + histEntryId] + Buffer[32 * fold + histEntryId + 16];
}
__syncthreads();
if (threadIdx.x < 256)
{
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, float* __restrict__ binSums, int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BLOCK_SIZE];
if (partition->Size)
{
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = FULL_PASS;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 1;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 1;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices,
partition->Offset, partition->Size,
target, weight,
cindex,
&counters[0]);
}
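        // Reduce the shared half-byte histogram into per-feature sums: four binary
        // features share one 4-bit group, fMask selects this feature's bit, and each
        // (fid, w) thread pair accumulates the bins in which that bit is zero.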
ui32 w = threadIdx.x & 1;
ui32 fid = (threadIdx.x >> 1);
if (fid < fCount)
{
const int groupId = fid / 4;
uchar fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
            #pragma unroll
for (int i = 0; i < 16; i++)
{
if (!(i & fMask))
{
sum += counters[i * 16 + 2 * groupId + w];
}
}
if (abs(sum) > 1e-20f)
{
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex) * 2 + w, sum);
} else
{
binSums[(feature[fid].FirstFoldIndex) * 2 + w] = sum;
}
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
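    // Heuristic: let several blocks cooperate on one feature group (power-of-two
    // multiplier, capped at 64) when the grid alone cannot occupy all SMs and the
    // partition is still large enough to keep the extra blocks busy.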
inline ui32 EstimateBlockPerFeatureMultiplier(dim3 numBlocks, ui32 dsSize) {
ui32 multiplier = 1;
while ((numBlocks.x * numBlocks.y * min(numBlocks.z, 4) * multiplier < TArchProps::SMCount()) &&
((dsSize / multiplier) > 10000) && (multiplier < 64)) {
multiplier *= 2;
}
return multiplier;
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream)
{
if (nbCount)
{
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, binFeatureCount, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
const int scanBlockSize = 256;
dim3 scanBlocks;
scanBlocks.x = (nbCount * 32 + scanBlockSize - 1) / scanBlockSize;
scanBlocks.y = histPartCount;
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partCount / 2) * binFeatureCount * 2) * foldCount;
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> > (nbFeatures, nbCount, binFeatureCount, binSums +
scanOffset);
if (!fullPass)
{
UpdatePointwiseHistograms(binSums, binFeatureCount, partCount, foldCount, 2, partition, stream);
}
}
}
template<int BLOCK_SIZE, int BLOCKS_PER_FEATURE_COUNT>
void RunComputeHist2BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, bCount
);
} else
{
ComputeSplitPropertiesBImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
bFeatures, bCount, cindex, target, weight, indices, partition, binSums, bCount
);
}
};
void ComputeHist2Binary(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
float* binSums, bool fullPass,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (bCount)
{
        #define COMPUTE(k) \
            RunComputeHist2BinaryKernel<blockSize, k>(bFeatures, bCount, cindex, target, weight, indices, \
                                                      partition, binSums, fullPass, stream, numBlocks);
            if (multiplier == 1)
{
COMPUTE(1)
} else if (multiplier == 2)
{
COMPUTE(2)
} else if (multiplier == 4)
{
COMPUTE(4)
} else if (multiplier == 8)
{
COMPUTE(8);
} else if (multiplier == 16)
{
COMPUTE(16)
} else if (multiplier == 32)
{
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
if (!fullPass)
{
UpdatePointwiseHistograms(binSums, bCount, partsCount, foldCount, 2, partition, stream);
}
}
}
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount)
{
TPartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
//
__shared__ float smem[16 * BLOCK_SIZE];
using THist = TPointHistHalfByte<BLOCK_SIZE>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad)
{
#if __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram2 < BLOCK_SIZE, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem);
} else {
#if __CUDA_ARCH__ <= 300
const int INNER_UNROLL = 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 1;
#endif
ComputeHistogram < BLOCK_SIZE, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > (
indices, partition->Offset, partition->Size,
target, weight,
cindex, smem);
}
__syncthreads();
const int fid = (threadIdx.x / 32);
const int fold = (threadIdx.x / 2) & 15;
const int w = threadIdx.x & 1;
if (fid < fCount && fold < feature[fid].Folds)
{
const float result = smem[fold * 16 + 2 * fid + w];
if (abs(result) > 1e-20)
{
if (M > 1)
{
atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result);
} else
{
binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result;
}
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass)
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, true,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else
{
ComputeSplitPropertiesHalfByteImpl < BLOCK_SIZE, false,
BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount);
}
}
void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, int halfByteFeaturesCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
ui32 size,
const TDataPartition* partition, ui32 partsCount, ui32 foldCount,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream)
{
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const int histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
const int blockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (halfByteFeaturesCount)
{
#define COMPUTE(k)\
RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
weight, indices, partition, binSums, binFeatureCount,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
const int scanBlockSize = 256;
dim3 scanBlocks;
scanBlocks.x = static_cast<ui32>((halfByteFeaturesCount * 32 + scanBlockSize - 1) / scanBlockSize);
scanBlocks.y = static_cast<ui32>(histCount);
scanBlocks.z = foldCount;
const int scanOffset = fullPass ? 0 : ((partsCount / 2) * binFeatureCount * 2) * foldCount;
ScanHistogramsImpl<scanBlockSize, 2> << < scanBlocks, scanBlockSize, 0, stream >> >
(halfByteFeatures, halfByteFeaturesCount, binFeatureCount,
binSums + scanOffset);
if (!fullPass) {
UpdatePointwiseHistograms(binSums, binFeatureCount, partsCount, foldCount, 2, partition, stream);
}
}
}
__global__ void UpdateBinsImpl(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits)
{
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
const ui32 idx = LdgWithFallback(docIndices, i);
const ui32 bit = (LdgWithFallback(bins, idx) >> loadBit) & 1;
dstBins[i] = dstBins[i] | (bit << (loadBit + foldBits));
}
}
void UpdateFoldBins(ui32* dstBins, const ui32* bins, const ui32* docIndices, ui32 size,
ui32 loadBit, ui32 foldBits, TCudaStream stream)
{
const ui32 blockSize = 256;
const ui32 numBlocks = CeilDivide(size, blockSize);
UpdateBinsImpl << < numBlocks, blockSize, 0, stream >> > (dstBins, bins, docIndices, size, loadBit, foldBits);
}
}
|
67d4d53ffd1b5a4382b9414719838739ce0bd62b.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* Example kernels for transposing a rectangular host array using a variety of
* optimizations, including shared memory, unrolling, and memory padding.
*/
// Some kernels assume square blocks
#define BDIMX 16
#define BDIMY BDIMX
#define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL))
#define IPAD 2
void initialData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void printData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%3.0f ", in[i]);
}
printf("\n");
return;
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
int index = INDEX(i, j, cols);
if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
match = 0;
printf("different on (%d, %d) (offset=%d) element in "
"transposed matrix: host %f gpu %f\n", i, j, index,
hostRef[index], gpuRef[index]);
break;
}
}
if (!match) break;
}
if (!match) printf("Arrays do not match.\n\n");
}
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
for (int iy = 0; iy < nrows; ++iy)
{
for (int ix = 0; ix < ncols; ++ix)
{
out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)];
}
}
}
__global__ void copyGmem(float *out, float *in, const int nrows, const int ncols)
{
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
{
// NOTE this is a transpose, not a copy
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
__global__ void naiveGmem(float *out, float *in, const int nrows, const int ncols)
{
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
__global__ void naiveGmemUnroll(float *out, float *in, const int nrows,
const int ncols)
{
// Pretend there are twice as many blocks in the x direction
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
if (row < nrows)
{
if (col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
col += blockDim.x;
if (col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
}
__global__ void transposeSmem(float *out, float *in, int nrows, int ncols)
{
// static shared memory
__shared__ float tile[BDIMY][BDIMX];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[threadIdx.y][threadIdx.x] = in[offset];
}
// thread index in transposed block
unsigned int bidx, irow, icol;
bidx = threadIdx.y * blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// NOTE - need to transpose row and col on block and thread-block level:
// 1. swap blocks x-y
// 2. swap thread x-y assignment (irow and icol calculations above)
// note col still has continuous threadIdx.x -> coalesced gst
col = blockIdx.y * blockDim.y + icol;
row = blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
// NOTE nrows is stride of result, row and col are transposed
unsigned int transposed_offset = INDEX(row, col, nrows);
// thread synchronization
__syncthreads();
// NOTE invert sizes for write check
if (row < ncols && col < nrows)
{
// store data to global memory from shared memory
out[transposed_offset] = tile[icol][irow]; // NOTE icol,irow not irow,icol
}
}
__global__ void transposeSmemUnroll(float *out, float *in, const int nrows,
const int ncols)
{
    // static 2D shared memory
__shared__ float tile[BDIMY][BDIMX * 2];
// coordinate in original matrix
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
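    // 2x unrolling: each thread handles two elements one block-width apart in x.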
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[threadIdx.y][threadIdx.x] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[threadIdx.y][blockDim.x + threadIdx.x] = in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[irow][icol];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[irow][blockDim.x + icol];
}
}
__global__ void transposeSmemUnrollPad(float *out, float *in, const int nrows,
const int ncols)
{
    // static 2D shared memory with padding
__shared__ float tile[BDIMY][BDIMX * 2 + IPAD];
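    // IPAD extra columns skew each tile row across shared-memory banks so the
    // column-wise reads during the transposed store avoid bank conflicts.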
// coordinate in original matrix
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[threadIdx.y][threadIdx.x] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[threadIdx.y][blockDim.x + threadIdx.x] = in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[irow][icol];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[irow][blockDim.x + icol];
}
}
__global__ void transposeSmemUnrollPadDyn (float *out, float *in, const int nrows,
const int ncols)
{
// dynamic shared memory
extern __shared__ float tile[];
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[INDEX(threadIdx.y, threadIdx.x, BDIMX * 2 + IPAD)] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[INDEX(threadIdx.y, blockDim.x + threadIdx.x, BDIMX * 2 + IPAD)] =
in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[INDEX(irow, icol, BDIMX * 2 + IPAD)];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[INDEX(irow, blockDim.x + icol, BDIMX * 2 + IPAD)];
}
}
__global__ void transposeSmemPad(float *out, float *in, int nrows, int ncols)
{
// static shared memory with padding
__shared__ float tile[BDIMY][BDIMX + IPAD];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int bidx, irow, icol;
bidx = threadIdx.y * blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[threadIdx.y][threadIdx.x] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[irow][icol];
}
}
__global__ void transposeSmemDyn(float *out, float *in, int nrows, int ncols)
{
// dynamic shared memory
extern __shared__ float tile[];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int row_idx, col_idx, irow, icol;
row_idx = threadIdx.y * blockDim.x + threadIdx.x;
irow = row_idx / blockDim.y;
icol = row_idx % blockDim.y;
col_idx = irow * blockDim.x + icol;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[row_idx] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[col_idx];
}
}
__global__ void transposeSmemPadDyn(float *out, float *in, int nrows, int ncols)
{
    // dynamic shared memory with padding
extern __shared__ float tile[];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
unsigned int col_idx = irow * (blockDim.x + IPAD) + icol;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[row_idx] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[col_idx];
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool iprint = 0;
    // set up array size (4096 x 4096 by default)
int nrows = 1 << 12;
int ncols = 1 << 12;
if (argc > 1) iprint = atoi(argv[1]);
if (argc > 2) nrows = atoi(argv[2]);
if (argc > 3) ncols = atoi(argv[3]);
printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// execution configuration
dim3 block (BDIMX, BDIMY);
/*
* Map CUDA blocks/threads to output space. Map rows in output to same
* x-value in CUDA, columns to same y-value.
*/
dim3 grid ((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2 ((grid.x + 2 - 1) / 2, grid.y);
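    // grid2 halves the x-dimension because the unrolled kernels process two tiles
    // per thread block.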
// allocate host memory
float *h_A = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host array
initialData(h_A, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_A, nrows, ncols);
// allocate device memory
float *d_A, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    // transpose gmem: copyGmem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
double iStart = seconds();
hipLaunchKernelGGL(( copyGmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
double iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, nrows * ncols);
float ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) /
iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("copyGmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
    // transpose gmem: naiveGmem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( naiveGmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("naiveGmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
    // transpose gmem unroll: naiveGmemUnroll
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( naiveGmemUnroll), dim3(grid2), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("naiveGmemUnroll elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid2.x, grid2.y, block.x,
block.y, ibnd);
    // transpose smem: transposeSmem
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmem), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
    // transpose smem pad: transposeSmemPad
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemPad), dim3(grid), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemPad elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
    // transpose smem dyn: transposeSmemDyn
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemDyn), dim3(grid), dim3(block), BDIMX*BDIMY*sizeof(float), 0, d_C, d_A, nrows,
ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemDyn elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
"effective bandwidth %f GB\n", iElaps, grid.x, grid.y, block.x,
block.y, ibnd);
    // transpose smem pad dyn: transposeSmemPadDyn
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemPadDyn), dim3(grid), dim3(block), (BDIMX + IPAD) * BDIMY * sizeof(float), 0,
d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemPadDyn elapsed %f sec <<< grid (%d,%d) block "
"(%d,%d)>>> effective bandwidth %f GB\n", iElaps, grid.x, grid.y,
block.x, block.y, ibnd);
    // transpose smem unroll: transposeSmemUnroll
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemUnroll), dim3(grid2), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemUnroll elapsed %f sec <<< grid (%d,%d) block "
"(%d,%d)>>> effective bandwidth %f GB\n", iElaps, grid2.x, grid2.y,
block.x, block.y, ibnd);
    // transpose smem unroll pad: transposeSmemUnrollPad
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemUnrollPad), dim3(grid2), dim3(block), 0, 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemUnrollPad elapsed %f sec <<< grid (%d,%d) block "
"(%d,%d)>>> effective bandwidth %f GB\n", iElaps, grid2.x, grid2.y,
block.x, block.y, ibnd);
    // transpose smem unroll pad dyn: transposeSmemUnrollPadDyn
CHECK(hipMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
hipLaunchKernelGGL(( transposeSmemUnrollPadDyn), dim3(grid2), dim3(block), (BDIMX * 2 + IPAD) * BDIMY *
sizeof(float), 0, d_C, d_A, nrows, ncols);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
ibnd = 2 * ncells * sizeof(float) / (1024.0 * 1024.0 * 1024.0) / iElaps;
ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
printf("transposeSmemUnrollPadDyn elapsed %f sec <<< grid (%d,%d) block "
"(%d,%d)>>> effective bandwidth %f GB\n", iElaps, grid2.x, grid2.y,
block.x, block.y, ibnd);
// free host and device memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
| 67d4d53ffd1b5a4382b9414719838739ce0bd62b.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* Example kernels for transposing a rectangular host array using a variety of
* optimizations, including shared memory, unrolling, and memory padding.
*/
// Some kernels assume square blocks
#define BDIMX 16
#define BDIMY BDIMX
#define INDEX(ROW, COL, INNER) ((ROW) * (INNER) + (COL))
#define IPAD 2
void initialData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 10.0f;
}
return;
}
void printData(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%3.0f ", in[i]);
}
printf("\n");
return;
}
void checkResult(float *hostRef, float *gpuRef, int rows, int cols)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < rows; i++)
{
for (int j = 0; j < cols; j++)
{
int index = INDEX(i, j, cols);
if (abs(hostRef[index] - gpuRef[index]) > epsilon) {
match = 0;
printf("different on (%d, %d) (offset=%d) element in "
"transposed matrix: host %f gpu %f\n", i, j, index,
hostRef[index], gpuRef[index]);
break;
}
}
if (!match) break;
}
if (!match) printf("Arrays do not match.\n\n");
}
void transposeHost(float *out, float *in, const int nrows, const int ncols)
{
for (int iy = 0; iy < nrows; ++iy)
{
for (int ix = 0; ix < ncols; ++ix)
{
out[INDEX(ix, iy, nrows)] = in[INDEX(iy, ix, ncols)];
}
}
}
__global__ void copyGmem(float *out, float *in, const int nrows, const int ncols)
{
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
{
// NOTE this is a transpose, not a copy
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
__global__ void naiveGmem(float *out, float *in, const int nrows, const int ncols)
{
// matrix coordinate (ix,iy)
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
// transpose with boundary test
if (row < nrows && col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
__global__ void naiveGmemUnroll(float *out, float *in, const int nrows,
const int ncols)
{
// Pretend there are twice as many blocks in the x direction
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
if (row < nrows)
{
if (col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
col += blockDim.x;
if (col < ncols)
{
out[INDEX(col, row, nrows)] = in[INDEX(row, col, ncols)];
}
}
}
__global__ void transposeSmem(float *out, float *in, int nrows, int ncols)
{
// static shared memory
__shared__ float tile[BDIMY][BDIMX];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[threadIdx.y][threadIdx.x] = in[offset];
}
// thread index in transposed block
unsigned int bidx, irow, icol;
bidx = threadIdx.y * blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// NOTE - need to transpose row and col on block and thread-block level:
// 1. swap blocks x-y
// 2. swap thread x-y assignment (irow and icol calculations above)
// note col still has continuous threadIdx.x -> coalesced gst
col = blockIdx.y * blockDim.y + icol;
row = blockIdx.x * blockDim.x + irow;
// linear global memory index for transposed matrix
// NOTE nrows is stride of result, row and col are transposed
unsigned int transposed_offset = INDEX(row, col, nrows);
// thread synchronization
__syncthreads();
// NOTE invert sizes for write check
if (row < ncols && col < nrows)
{
// store data to global memory from shared memory
out[transposed_offset] = tile[icol][irow]; // NOTE icol,irow not irow,icol
}
}
__global__ void transposeSmemUnroll(float *out, float *in, const int nrows,
const int ncols)
{
// static 1D shared memory
__shared__ float tile[BDIMY][BDIMX * 2];
// coordinate in original matrix
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[threadIdx.y][threadIdx.x] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[threadIdx.y][blockDim.x + threadIdx.x] = in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[irow][icol];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[irow][blockDim.x + icol];
}
}
__global__ void transposeSmemUnrollPad(float *out, float *in, const int nrows,
const int ncols)
{
// static 1D shared memory with padding
__shared__ float tile[BDIMY][BDIMX * 2 + IPAD];
// coordinate in original matrix
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[threadIdx.y][threadIdx.x] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[threadIdx.y][blockDim.x + threadIdx.x] = in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[irow][icol];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[irow][blockDim.x + icol];
}
}
__global__ void transposeSmemUnrollPadDyn (float *out, float *in, const int nrows,
const int ncols)
{
// dynamic shared memory
extern __shared__ float tile[];
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = (2 * blockIdx.x * blockDim.x) + threadIdx.x;
unsigned int row2 = row;
unsigned int col2 = col + blockDim.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
unsigned int offset2 = INDEX(row2, col2, ncols);
// thread index in transposed block
unsigned int bidx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int irow = bidx / blockDim.y;
unsigned int icol = bidx % blockDim.y;
// coordinate in transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
unsigned int transposed_offset2 = INDEX(col2, row2, nrows);
if (row < nrows && col < ncols)
{
tile[INDEX(threadIdx.y, threadIdx.x, BDIMX * 2 + IPAD)] = in[offset];
}
if (row2 < nrows && col2 < ncols)
{
tile[INDEX(threadIdx.y, blockDim.x + threadIdx.x, BDIMX * 2 + IPAD)] =
in[offset2];
}
__syncthreads();
if (row < nrows && col < ncols)
{
out[transposed_offset] = tile[INDEX(irow, icol, BDIMX * 2 + IPAD)];
}
if (row2 < nrows && col2 < ncols)
{
out[transposed_offset2] = tile[INDEX(irow, blockDim.x + icol, BDIMX * 2 + IPAD)];
}
}
__global__ void transposeSmemPad(float *out, float *in, int nrows, int ncols)
{
// static shared memory with padding
__shared__ float tile[BDIMY][BDIMX + IPAD];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int bidx, irow, icol;
bidx = threadIdx.y * blockDim.x + threadIdx.x;
irow = bidx / blockDim.y;
icol = bidx % blockDim.y;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[threadIdx.y][threadIdx.x] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[irow][icol];
}
}
__global__ void transposeSmemDyn(float *out, float *in, int nrows, int ncols)
{
// dynamic shared memory
extern __shared__ float tile[];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int row_idx, col_idx, irow, icol;
row_idx = threadIdx.y * blockDim.x + threadIdx.x;
irow = row_idx / blockDim.y;
icol = row_idx % blockDim.y;
col_idx = irow * blockDim.x + icol;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[row_idx] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[col_idx];
}
}
__global__ void transposeSmemPadDyn(float *out, float *in, int nrows, int ncols)
{
// static shared memory with padding
extern __shared__ float tile[];
// coordinate in original matrix
unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
// linear global memory index for original matrix
unsigned int offset = INDEX(row, col, ncols);
// thread index in transposed block
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
unsigned int row_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
unsigned int col_idx = irow * (blockDim.x + IPAD) + icol;
// linear global memory index for transposed matrix
unsigned int transposed_offset = INDEX(col, row, nrows);
// transpose with boundary test
if (row < nrows && col < ncols)
{
// load data from global memory to shared memory
tile[row_idx] = in[offset];
// thread synchronization
__syncthreads();
// store data to global memory from shared memory
out[transposed_offset] = tile[col_idx];
}
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting transpose at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool iprint = 0;
    // set up array size (default 4096 x 4096)
int nrows = 1 << 12;
int ncols = 1 << 12;
if (argc > 1) iprint = atoi(argv[1]);
if (argc > 2) nrows = atoi(argv[2]);
if (argc > 3) ncols = atoi(argv[3]);
printf(" with matrix nrows %d ncols %d\n", nrows, ncols);
size_t ncells = nrows * ncols;
size_t nBytes = ncells * sizeof(float);
// execution configuration
dim3 block (BDIMX, BDIMY);
/*
* Map CUDA blocks/threads to output space. Map rows in output to same
* x-value in CUDA, columns to same y-value.
*/
dim3 grid ((ncols + block.x - 1) / block.x, (nrows + block.y - 1) / block.y);
dim3 grid2 ((grid.x + 2 - 1) / 2, grid.y);
// allocate host memory
float *h_A = (float *)malloc(nBytes);
float *hostRef = (float *)malloc(nBytes);
float *gpuRef = (float *)malloc(nBytes);
// initialize host array
initialData(h_A, nrows * ncols);
// transpose at host side
transposeHost(hostRef, h_A, nrows, ncols);
// allocate device memory
float *d_A, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    // transpose via plain global memory accesses (copyGmem)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
double iStart = seconds();
copyGmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
double iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, nrows * ncols);
    float ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("copyGmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    // transpose via plain global memory accesses (naiveGmem)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
naiveGmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("naiveGmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    // transpose via global memory with 2-way unrolling (naiveGmemUnroll)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
naiveGmemUnroll<<<grid2, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("naiveGmemUnroll elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid2.x, grid2.y, block.x,
           block.y, ibnd);
    // transpose staged through shared memory (transposeSmem)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmem<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmem elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    // transpose through padded shared memory (transposeSmemPad)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemPad<<<grid, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemPad elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    // transpose through dynamic shared memory (transposeSmemDyn)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemDyn<<<grid, block, BDIMX*BDIMY*sizeof(float)>>>(d_C, d_A, nrows,
ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemDyn elapsed %f sec <<< grid (%d,%d) block (%d,%d)>>> "
           "effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y, block.x,
           block.y, ibnd);
    // transpose through padded dynamic shared memory (transposeSmemPadDyn)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemPadDyn<<<grid, block, (BDIMX + IPAD) * BDIMY * sizeof(float)>>>(
d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemPadDyn elapsed %f sec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n", iElaps, grid.x, grid.y,
           block.x, block.y, ibnd);
    // transpose through shared memory with 2-way unrolling (transposeSmemUnroll)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemUnroll<<<grid2, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemUnroll elapsed %f sec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n", iElaps, grid2.x, grid2.y,
           block.x, block.y, ibnd);
    // transpose through padded shared memory with unrolling (transposeSmemUnrollPad)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemUnrollPad<<<grid2, block>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemUnrollPad elapsed %f sec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n", iElaps, grid2.x, grid2.y,
           block.x, block.y, ibnd);
    // transpose through padded dynamic shared memory with unrolling (transposeSmemUnrollPadDyn)
CHECK(cudaMemset(d_C, 0, nBytes));
memset(gpuRef, 0, nBytes);
iStart = seconds();
transposeSmemUnrollPadDyn<<<grid2, block, (BDIMX * 2 + IPAD) * BDIMY *
sizeof(float)>>>(d_C, d_A, nrows, ncols);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
if(iprint) printData(gpuRef, ncells);
checkResult(hostRef, gpuRef, ncols, nrows);
    ibnd = 2 * ncells * sizeof(float) / 1e9 / iElaps;
    printf("transposeSmemUnrollPadDyn elapsed %f sec <<< grid (%d,%d) block "
           "(%d,%d)>>> effective bandwidth %f GB/s\n", iElaps, grid2.x, grid2.y,
           block.x, block.y, ibnd);
// free host and device memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_C));
free(h_A);
free(hostRef);
free(gpuRef);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
|
427bb58377d3297755aefac8655efc129c987ba7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__device__ int ilog2Device(int x) {
int lg = 0;
while (x >>= 1) {
++lg;
}
return lg;
}
__device__ int ilog2ceilDevice(int x) {
return x == 1 ? 0 : ilog2Device(x - 1) + 1;
}
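        // One step of the naive (Hillis-Steele) inclusive scan: on step d, every
        // element at index >= 2^(d-1) adds the element 2^(d-1) positions to its
        // left; the remaining elements are copied through. idata is read and
        // odata is written, so elements never race within a step.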
__global__ void plusp(int n, int *idata, int *odata ,int d)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
if (idx >= (1 << (d - 1)))
{
odata[idx] = idata[idx-(1 << (d - 1))] + idata[idx];
}
else
{
odata[idx] = idata[idx];
}
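            // Note: this barrier is not strictly needed - the kernel uses no shared
            // memory, and ordering across blocks comes from the separate kernel
            // launches in scan(); it is also only reached by threads with idx < n.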
__syncthreads();
}
}
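        // Copy the partial result in odata back into idata so the next scan step
        // reads the updated values (a simple alternative to swapping the pointers).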
__global__ void resetidata(int n, int *idata, int *odata)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
idata[idx] = odata[idx];
}
}
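        // Shift the inclusive scan in idata one slot to the right (with 0 in the
        // first position) to produce the exclusive scan in odata.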
__global__ void toExclusive(int n, int *idata, int *odata)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
odata[0] = 0;
if (idx > 0)
{
odata[idx] = idata[idx - 1];
}
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
if (n <= 0)return;
odata[0] = 0;
int blocksize = 1024;
dim3 blocknum = (n + blocksize - 1) / blocksize;
int *dev_idata, *dev_odata;
hipMalloc((void**) & dev_idata, n * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed!");
hipMalloc((void**)& dev_odata, n * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy failed!");
timer().startGpuTimer();
// TODO
int dmax = ilog2ceil(n);
for (int d = 1; d <= dmax; ++d)
{
plusp << <blocknum, blocksize >> > (n, dev_idata, dev_odata,d);
resetidata << <blocknum, blocksize >> > (n, dev_idata, dev_odata);
}
toExclusive << < blocknum, blocksize >> > (n, dev_idata, dev_odata);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy failed!");
hipFree(dev_idata);
hipFree(dev_odata);
}
}
}
| 427bb58377d3297755aefac8655efc129c987ba7.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
__device__ int ilog2Device(int x) {
int lg = 0;
while (x >>= 1) {
++lg;
}
return lg;
}
__device__ int ilog2ceilDevice(int x) {
return x == 1 ? 0 : ilog2Device(x - 1) + 1;
}
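        // One step of the naive (Hillis-Steele) inclusive scan: on step d, every
        // element at index >= 2^(d-1) adds the element 2^(d-1) positions to its
        // left; the remaining elements are copied through. idata is read and
        // odata is written, so elements never race within a step.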
__global__ void plusp(int n, int *idata, int *odata ,int d)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
if (idx >= (1 << (d - 1)))
{
odata[idx] = idata[idx-(1 << (d - 1))] + idata[idx];
}
else
{
odata[idx] = idata[idx];
}
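            // Note: this barrier is not strictly needed - the kernel uses no shared
            // memory, and ordering across blocks comes from the separate kernel
            // launches in scan(); it is also only reached by threads with idx < n.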
__syncthreads();
}
}
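        // Copy the partial result in odata back into idata so the next scan step
        // reads the updated values (a simple alternative to swapping the pointers).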
__global__ void resetidata(int n, int *idata, int *odata)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
idata[idx] = odata[idx];
}
}
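        // Shift the inclusive scan in idata one slot to the right (with 0 in the
        // first position) to produce the exclusive scan in odata.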
__global__ void toExclusive(int n, int *idata, int *odata)
{
int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx < n)
{
odata[0] = 0;
if (idx > 0)
{
odata[idx] = idata[idx - 1];
}
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
if (n <= 0)return;
odata[0] = 0;
int blocksize = 1024;
dim3 blocknum = (n + blocksize - 1) / blocksize;
int *dev_idata, *dev_odata;
cudaMalloc((void**) & dev_idata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed!");
cudaMalloc((void**)& dev_odata, n * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy failed!");
timer().startGpuTimer();
// TODO
int dmax = ilog2ceil(n);
for (int d = 1; d <= dmax; ++d)
{
plusp << <blocknum, blocksize >> > (n, dev_idata, dev_odata,d);
resetidata << <blocknum, blocksize >> > (n, dev_idata, dev_odata);
}
toExclusive << < blocknum, blocksize >> > (n, dev_idata, dev_odata);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy failed!");
cudaFree(dev_idata);
cudaFree(dev_odata);
}
}
}
|
2860cb7f52df273860724ab1f603ec8d9da83076.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// Implementation of the device methods used by the CT-INT submatrix walker.
#include "dca/phys/dca_step/cluster_solver/ctint/walker/kernels_interface.hpp"
#include <array>
#include <cassert>
#include "dca/platform/dca_gpu.h"
#include "dca/util/cuda_blocks.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
namespace details {
template <typename Real>
__global__ void setRightSectorToIdKernel(Real* m, const int ldm, const int n0, const int n_max) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y + n0;
if (i >= n_max || j >= n_max)
return;
m[i + ldm * j] = (i == j) ? 1. : 0.;
}
template <typename Real>
void setRightSectorToId(Real* m, const int ldm, const int n0, const int n_max, hipStream_t stream) {
auto blocks = dca::util::getBlockSize(n_max, n_max - n0);
hipLaunchKernelGGL(( setRightSectorToIdKernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, m, ldm, n0, n_max);
}
template void setRightSectorToId(float*, const int, const int, const int, hipStream_t);
template void setRightSectorToId(double*, const int, const int, const int, hipStream_t);
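// computeGLeftKernel: for the first n_init columns, writes
// G(i, j) = (M(i, j) * f[j] - delta_ij) / (f[j] - 1), looping over rows with a grid stride.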
template <typename Real>
__global__ void computeGLeftKernel(MatrixView<Real> G, const MatrixView<Real> M,
const Real* __restrict__ f, int n_init) {
const int i_t = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (j >= n_init)
return;
const Real factor = 1. / (f[j] - 1);
const Real fj = f[j];
for (int i = i_t; i < G.nrRows(); i += stride)
G(i, j) = (M(i, j) * fj - Real(i == j)) * factor;
}
template <typename Real>
void computeGLeft(MatrixView<Real>& G, const MatrixView<Real>& M, const Real* f, int n_init,
hipStream_t stream) {
if (n_init == 0)
return;
const int n = G.nrRows();
constexpr int thread_j = 4;
constexpr int thread_i = 64;
dim3 threads(thread_i, thread_j);
dim3 blocks(::max(n / (10 * thread_i), 1), util::ceilDiv(n_init, thread_j));
hipLaunchKernelGGL(( computeGLeftKernel), dim3(blocks), dim3(threads), 0, stream, G, M, f, n_init);
}
template void computeGLeft(MatrixView<float>&, const MatrixView<float>&, const float*, int,
hipStream_t);
template void computeGLeft(MatrixView<double>&, const MatrixView<double>&, const double*, int,
hipStream_t);
template <typename Real>
__global__ void multiplyByFColFactorKernel(MatrixView<Real> M, const Real* f_vals) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= M.nrRows() || j >= M.nrCols())
return;
const Real factor = f_vals[j] - 1.;
M(i, j) *= factor;
}
template <typename Real>
void multiplyByFColFactor(MatrixView<Real>& M, const Real* f_vals, hipStream_t stream) {
if (M.nrCols() == 0 || M.nrRows() == 0)
return;
const auto blocks = dca::util::getBlockSize(M.nrRows(), M.nrCols());
hipLaunchKernelGGL(( multiplyByFColFactorKernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, M, f_vals);
}
template void multiplyByFColFactor(MatrixView<float>&, const float*, hipStream_t);
template void multiplyByFColFactor(MatrixView<double>&, const double*, hipStream_t);
template <typename Real>
__global__ void multiplyByInverseFFactorKernel(const MatrixView<Real> m_in, MatrixView<Real> m_out,
const Real* f_vals) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= m_in.nrRows() || j >= m_in.nrCols())
return;
const Real factor = -(f_vals[i] - 1.);
m_out(i, j) = factor * m_in(i, j);
}
template <typename Real>
void multiplyByInverseFFactor(const MatrixView<Real>& m_in, MatrixView<Real>& m_out,
const Real* f_vals, hipStream_t stream) {
assert(m_in.nrRows() == m_out.nrRows() && m_in.nrCols() == m_out.nrCols());
if (m_in.nrCols() == 0 || m_in.nrRows() == 0)
return;
const auto blocks = dca::util::getBlockSize(m_in.nrRows(), m_out.nrCols());
hipLaunchKernelGGL(( multiplyByInverseFFactorKernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, m_in, m_out, f_vals);
}
template void multiplyByInverseFFactor(const MatrixView<float>&, MatrixView<float>&, const float*,
hipStream_t);
template void multiplyByInverseFFactor(const MatrixView<double>&, MatrixView<double>&,
const double*, hipStream_t);
template <typename Real>
__global__ void divideByGammaFactorKernel(MatrixView<Real> m,
const std::pair<int, Real>* gamma_indices,
const int n_indices) {
// TODO: loop over a number of j indices.
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= n_indices || j >= m.nrCols())
return;
const int p = gamma_indices[i].first;
assert(p < m.nrRows());
m(p, j) /= 1. + gamma_indices[i].second;
}
template <typename Real>
void divideByGammaFactor(MatrixView<Real> m, const std::pair<int, Real>* gamma_indices,
const int n_indices, hipStream_t stream) {
const auto blocks = dca::util::getBlockSize(n_indices, m.nrCols());
hipLaunchKernelGGL(( divideByGammaFactorKernel), dim3(blocks[0]), dim3(blocks[1]), 0, stream, m, gamma_indices, n_indices);
}
template void divideByGammaFactor(MatrixView<float>, const std::pair<int, float>*, const int,
hipStream_t);
template void divideByGammaFactor(MatrixView<double>, const std::pair<int, double>*, const int,
hipStream_t);
} // namespace details
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
| 2860cb7f52df273860724ab1f603ec8d9da83076.cu | // Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.md for citation guidelines, if DCA++ is used for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// Implementation of the device methods used by the CT-INT submatrix walker.
#include "dca/phys/dca_step/cluster_solver/ctint/walker/kernels_interface.hpp"
#include <array>
#include <cassert>
#include "dca/platform/dca_gpu.h"
#include "dca/util/cuda_blocks.hpp"
namespace dca {
namespace phys {
namespace solver {
namespace ctint {
namespace details {
template <typename Real>
__global__ void setRightSectorToIdKernel(Real* m, const int ldm, const int n0, const int n_max) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y + n0;
if (i >= n_max || j >= n_max)
return;
m[i + ldm * j] = (i == j) ? 1. : 0.;
}
template <typename Real>
void setRightSectorToId(Real* m, const int ldm, const int n0, const int n_max, cudaStream_t stream) {
auto blocks = dca::util::getBlockSize(n_max, n_max - n0);
setRightSectorToIdKernel<<<blocks[0], blocks[1], 0, stream>>>(m, ldm, n0, n_max);
}
template void setRightSectorToId(float*, const int, const int, const int, cudaStream_t);
template void setRightSectorToId(double*, const int, const int, const int, cudaStream_t);
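// computeGLeftKernel: for the first n_init columns, writes
// G(i, j) = (M(i, j) * f[j] - delta_ij) / (f[j] - 1), looping over rows with a grid stride.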
template <typename Real>
__global__ void computeGLeftKernel(MatrixView<Real> G, const MatrixView<Real> M,
const Real* __restrict__ f, int n_init) {
const int i_t = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (j >= n_init)
return;
const Real factor = 1. / (f[j] - 1);
const Real fj = f[j];
for (int i = i_t; i < G.nrRows(); i += stride)
G(i, j) = (M(i, j) * fj - Real(i == j)) * factor;
}
template <typename Real>
void computeGLeft(MatrixView<Real>& G, const MatrixView<Real>& M, const Real* f, int n_init,
cudaStream_t stream) {
if (n_init == 0)
return;
const int n = G.nrRows();
constexpr int thread_j = 4;
constexpr int thread_i = 64;
dim3 threads(thread_i, thread_j);
dim3 blocks(std::max(n / (10 * thread_i), 1), util::ceilDiv(n_init, thread_j));
computeGLeftKernel<<<blocks, threads, 0, stream>>>(G, M, f, n_init);
}
template void computeGLeft(MatrixView<float>&, const MatrixView<float>&, const float*, int,
cudaStream_t);
template void computeGLeft(MatrixView<double>&, const MatrixView<double>&, const double*, int,
cudaStream_t);
template <typename Real>
__global__ void multiplyByFColFactorKernel(MatrixView<Real> M, const Real* f_vals) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= M.nrRows() || j >= M.nrCols())
return;
const Real factor = f_vals[j] - 1.;
M(i, j) *= factor;
}
template <typename Real>
void multiplyByFColFactor(MatrixView<Real>& M, const Real* f_vals, cudaStream_t stream) {
if (M.nrCols() == 0 || M.nrRows() == 0)
return;
const auto blocks = dca::util::getBlockSize(M.nrRows(), M.nrCols());
multiplyByFColFactorKernel<<<blocks[0], blocks[1], 0, stream>>>(M, f_vals);
}
template void multiplyByFColFactor(MatrixView<float>&, const float*, cudaStream_t);
template void multiplyByFColFactor(MatrixView<double>&, const double*, cudaStream_t);
template <typename Real>
__global__ void multiplyByInverseFFactorKernel(const MatrixView<Real> m_in, MatrixView<Real> m_out,
const Real* f_vals) {
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= m_in.nrRows() || j >= m_in.nrCols())
return;
const Real factor = -(f_vals[i] - 1.);
m_out(i, j) = factor * m_in(i, j);
}
template <typename Real>
void multiplyByInverseFFactor(const MatrixView<Real>& m_in, MatrixView<Real>& m_out,
const Real* f_vals, cudaStream_t stream) {
assert(m_in.nrRows() == m_out.nrRows() && m_in.nrCols() == m_out.nrCols());
if (m_in.nrCols() == 0 || m_in.nrRows() == 0)
return;
const auto blocks = dca::util::getBlockSize(m_in.nrRows(), m_out.nrCols());
multiplyByInverseFFactorKernel<<<blocks[0], blocks[1], 0, stream>>>(m_in, m_out, f_vals);
}
template void multiplyByInverseFFactor(const MatrixView<float>&, MatrixView<float>&, const float*,
cudaStream_t);
template void multiplyByInverseFFactor(const MatrixView<double>&, MatrixView<double>&,
const double*, cudaStream_t);
template <typename Real>
__global__ void divideByGammaFactorKernel(MatrixView<Real> m,
const std::pair<int, Real>* gamma_indices,
const int n_indices) {
// TODO: loop over a number of j indices.
const int i = threadIdx.x + blockDim.x * blockIdx.x;
const int j = threadIdx.y + blockDim.y * blockIdx.y;
if (i >= n_indices || j >= m.nrCols())
return;
const int p = gamma_indices[i].first;
assert(p < m.nrRows());
m(p, j) /= 1. + gamma_indices[i].second;
}
template <typename Real>
void divideByGammaFactor(MatrixView<Real> m, const std::pair<int, Real>* gamma_indices,
const int n_indices, cudaStream_t stream) {
const auto blocks = dca::util::getBlockSize(n_indices, m.nrCols());
divideByGammaFactorKernel<<<blocks[0], blocks[1], 0, stream>>>(m, gamma_indices, n_indices);
}
template void divideByGammaFactor(MatrixView<float>, const std::pair<int, float>*, const int,
cudaStream_t);
template void divideByGammaFactor(MatrixView<double>, const std::pair<int, double>*, const int,
cudaStream_t);
} // namespace details
} // namespace ctint
} // namespace solver
} // namespace phys
} // namespace dca
|
d1a7c68941c3c143f8deaee70d7015b3d7d190c2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Yuri Gorokhov
* lab 8 - grid configurations continued
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "../include/cuda_util.h"
#define PROX 6
#define SHARED_MEM_PER_BLOCK 4000
#define THREADS 2048
#ifndef GRID_Y
#define GRID_Y 4
#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 3 * SHARED_MEM_PER_BLOCK / 4
#endif
__global__ void sum_kernel();
int main(void) {
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
dim3 grid(1,GRID_Y);
hipLaunchKernelGGL(( sum_kernel), dim3(grid), dim3(THREADS / GRID_Y), 0, 0, );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("\nProcessors: %i\nShared mem per block: %i", PROX, SHARED_MEM_PER_BLOCK);
printf("\nGrid: 1x%i array of blocks, %i threads per block, S=%i -> %f\n", GRID_Y, THREADS / GRID_Y, ARRAY_SIZE, elapsedTime);
}
__global__ void sum_kernel() {
__shared__ int filler[ARRAY_SIZE];
filler[threadIdx.x % 16] = 0;
int result = 0;
for(int i = 1; i <= 1000; i++) {
result += i;
}
__syncthreads();
}
| d1a7c68941c3c143f8deaee70d7015b3d7d190c2.cu | /**
* Yuri Gorokhov
* lab 8 - grid configurations continued
*/
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include "../include/cuda_util.h"
#define PROX 6
#define SHARED_MEM_PER_BLOCK 4000
#define THREADS 2048
#ifndef GRID_Y
#define GRID_Y 4
#endif
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 3 * SHARED_MEM_PER_BLOCK / 4
#endif
__global__ void sum_kernel();
int main(void) {
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
dim3 grid(1,GRID_Y);
sum_kernel<<<grid, THREADS / GRID_Y>>>();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\nProcessors: %i\nShared mem per block: %i", PROX, SHARED_MEM_PER_BLOCK);
printf("\nGrid: 1x%i array of blocks, %i threads per block, S=%i -> %f\n", GRID_Y, THREADS / GRID_Y, ARRAY_SIZE, elapsedTime);
}
__global__ void sum_kernel() {
__shared__ int filler[ARRAY_SIZE];
filler[threadIdx.x % 16] = 0;
int result = 0;
for(int i = 1; i <= 1000; i++) {
result += i;
}
__syncthreads();
}
|
42a9524411e8288b12b1e03a8183598ea8febcb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_rsqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
    hipMalloc(&result, XSIZE*YSIZE*sizeof(double));
    double *x = NULL;
    hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
    hipFree(0);
    hipLaunchKernelGGL((vec_rsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
hipDeviceSynchronize();
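    // warm-up: 10 untimed launches so the timed loop below excludes one-time startup cost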
    for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((vec_rsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
    }
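    // timed region: launch the kernel 1000 times (no device sync inside the loop);
    // the elapsed time is printed in microseconds below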
auto start = steady_clock::now();
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((vec_rsqrt), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
    }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 42a9524411e8288b12b1e03a8183598ea8febcb5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_rsqrt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
    cudaMalloc(&result, XSIZE*YSIZE*sizeof(double));
    double *x = NULL;
    cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_rsqrt<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
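    // warm-up: 10 untimed launches so the timed loop below excludes one-time startup cost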
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_rsqrt<<<gridBlock,threadBlock>>>(n,result,x);
}
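    // timed region: launch the kernel 1000 times (no device sync inside the loop);
    // the elapsed time is printed in microseconds below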
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_rsqrt<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ade65853f1d32319bdf6170ed5822e8709154af7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/interaction_layer.hpp"
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/tensor.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <type_traits>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
using namespace nvcuda;
template <uint x>
struct Log2 {
static constexpr uint value = 1 + Log2<x / 2>::value;
};
template <>
struct Log2<1> {
static constexpr uint value = 0;
};
struct __align__(8) half4 {
half2 vals[2];
};
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(
const __half *__restrict bottom_mlp_input, const __half *__restrict emb_input,
__half *__restrict output, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp,
uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps) {
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
// for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (uint i = 0; i < num_rows; ++i) {
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
(shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
}
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
gmem_output[idx] = shmem[idx];
}
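  // Pairwise feature interactions: compute the lower triangle of X * X^T with 16x16 WMMA
  // tiles, where X is the (num_rows x num_cols) matrix of bottom-MLP + embedding rows
  // staged in shared memory above.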
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractFwdKernel(const __half *__restrict bottom_mlp_input,
const __half *__restrict emb_input, __half *__restrict output,
uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding,
uint smem_elems_per_warp, uint smem_rows_per_warp,
uint output_size, uint num_row_steps, uint num_col_steps) {
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
if (lane_id < (num_cols >> 2)) {
// for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (int i = 0; i < num_rows; ++i) {
((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
}
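  // Pairwise feature interactions: compute the lower triangle of X * X^T with 16x16 WMMA
  // tiles, where X is the (num_rows x num_cols) matrix of bottom-MLP + embedding rows
  // staged in shared memory above.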
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
}
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractBwdKernelNonAligned(
const __half *__restrict upstream_grad, half __restrict *bottom_mlp_grad,
half __restrict *emb_grad, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size,
uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems,
uint input_stride, uint num_row_steps, uint num_col_steps, uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
// uint gmem_input_sample_offset = sample_id * sample_size;
// const half *gmem_input = &input[gmem_input_sample_offset];
uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
// Interaction Gradient
// const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
// half *gmem_grad = &grad[gmem_grad_sample_offset];
half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
// Bottom MLP gradient
// half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
// const half *gmem_row_ptr = &gmem_input[row * num_cols];
const half *gmem_row_ptr =
(row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = gmem_row_ptr[idx];
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
smem_row_ptr[idx] = __float2half(0);
}
}
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
for (uint idx = lane_id; idx < num_cols_after_padding; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = __float2half(0);
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
// gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) +
// lane_id]);
half *gmem_grad = (i == 0) ? gmem_bottom_mlp_grad : gmem_emb_grad;
uint idx = (i == 0) ? gmem_grad_col : ((i - 1) * num_cols + gmem_grad_col);
half val = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
gmem_grad[idx] = (i == 0) ? (val + gmem_ugrad[idx]) : val;
}
}
}
// for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
// gmem_mlp_grad[idx] = gmem_ugrad[idx];
// }
}
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractBwdKernel(const __half *__restrict upstream_grad,
half __restrict *bottom_mlp_grad, half __restrict *emb_grad,
uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding,
uint sample_size, uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride, uint input_size_elems,
uint input_stride, uint num_row_steps, uint num_col_steps,
uint row_tiles_per_step, uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
// uint gmem_input_sample_offset = sample_id * sample_size;
// const half *gmem_input = &input[gmem_input_sample_offset];
uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
// Interaction Gradient
// const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
// half *gmem_grad = &grad[gmem_grad_sample_offset];
half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
// Bottom MLP gradient
// half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += THREADS_IN_WARP) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
}
uint offset = (interaction_ugrad_size >> 3) << 3;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
// const half *gmem_row_ptr = &gmem_input[row * num_cols];
const half *gmem_row_ptr =
(row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
((half4 *)smem_row_ptr)[lane_id] = zeros;
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col_base = (col_step << TILE_DIM_LOG_2);
uint gmem_grad_col = gmem_grad_col_base + lane_id;
if (gmem_grad_col < num_cols) {
if (lane_id < 8) {
((__half2 *)(gmem_bottom_mlp_grad + gmem_grad_col_base))[lane_id] =
__hadd2(__float22half2_rn(((float2 *)smem_out)[lane_id]),
((__half2 *)(gmem_ugrad + gmem_grad_col_base))[lane_id]);
}
for (uint i = 0; i < num_rows - 1; i++) {
half val = __float2half(smem_out[((i + 1) << TILE_DIM_LOG_2) + lane_id]);
gmem_emb_grad[i * num_cols + gmem_grad_col] = val;
}
}
}
}
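// Host-side launcher for the fused dot-interaction forward pass. It rounds the row count up to
// 32 and the column count up to a multiple of 16, sizes the per-warp shared-memory staging area,
// and dispatches the float2-vectorized kernel when num_cols and output_size are both multiples
// of 8, otherwise the non-aligned fallback. One warp processes one sample.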
inline void dotBasedInteractFwd(const void *bottom_mlp_input, const void *emb_input, void *output,
uint batch_size, uint num_rows, uint num_cols,
hipStream_t stream) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint warps_per_threadblock = 4;
const uint threadblock_size = warps_per_threadblock * 32;
const uint kPaddingSize = 1;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
uint num_row_steps = num_row_tiles / kRowTilesPerStep;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
const uint K_BLOCKS = 8;
const uint M_BLOCKS = 2;
const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF);
// multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a
// tile
const uint smem_rows_per_warp = M_BLOCKS << 4;
const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE;
const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC);
const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2; // output in FP32
const uint smem_elems_per_warp = (smem_elems_per_warp_mat > smem_elems_per_warp_acc)
? smem_elems_per_warp_mat
: smem_elems_per_warp_acc;
uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + kPaddingSize;
bool float4_predicate = !((num_cols & 7) || (output_size & 7));
if (float4_predicate) {
hipLaunchKernelGGL(( dotBasedInteractFwdKernel<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2, kTileDim,
kTileDimLog2>)
, dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock), dim3(threadblock_size),
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream,
(const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
} else {
hipLaunchKernelGGL(( dotBasedInteractFwdKernelNonAligned<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2,
kTileDim, kTileDimLog2>)
, dim3((batch_size + warps_per_threadblock - 1) / warps_per_threadblock), dim3(threadblock_size),
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream,
(const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
}
}
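// Host-side launcher for the fused dot-interaction backward pass. Per-warp shared memory holds
// the padded input rows plus a staging area reused for both the 2D upstream-gradient matrix and
// the FP32 accumulator tiles; the vectorized kernel is chosen only when the padded interaction
// gradient size and num_cols are multiples of 8.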
inline void dotBasedInteractBwd(void *upstream_grad, void *bottom_mlp_grad, void *emb_grad,
uint batch_size, uint num_rows, uint num_cols,
hipStream_t stream) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint mem_skew_size = 8;
const uint kPaddingSize = 1;
const uint kWarpsPerBlock = 4;
const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value;
const uint kNumThreads = kWarpsPerBlock * kWarpSize;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
uint row_tiles_per_step = num_rows > kTileDim ? kRowTilesPerStep : 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
// 2D ugrad size and stride
uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride;
uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
uint interaction_ugrad_size_with_padding = interaction_ugrad_size + kPaddingSize;
// in_out place size and stride
uint input_stride = num_cols_after_padding + mem_skew_size;
uint input_size_elems = num_rows_after_padding * input_stride;
uint input_size_bytes = input_size_elems * sizeof(half);
// sample size
uint sample_size = num_rows * num_cols;
// output size
uint output_size_elems = kTileDim * kTileDim * kRowTilesPerStep * kColTilesPerStep;
uint output_size_bytes = output_size_elems * sizeof(float);
// staging area size
uint staging_area_size_bytes = output_size_bytes > interaction_ugrad_2D_size_bytes
? output_size_bytes
: interaction_ugrad_2D_size_bytes;
// Shared memory size
uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
uint shared_mem_size_bytes = kWarpsPerBlock * shared_mem_per_warp_size_byte;
uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2;
uint num_row_steps = num_row_tiles / row_tiles_per_step;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
if (float4_predicate) {
hipLaunchKernelGGL(( dotBasedInteractBwdKernel<kWarpsPerBlock, kNumThreads, kRowTilesPerStep, kColTilesPerStep,
kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2>)
, dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, stream,
(const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
interaction_ugrad_size, interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
shared_mem_per_warp_size_byte);
} else {
hipLaunchKernelGGL(( dotBasedInteractBwdKernelNonAligned<kWarpsPerBlock, kNumThreads, kRowTilesPerStep,
kColTilesPerStep, kWarpSize, kWarpSizeLog2, kTileDim,
kTileDimLog2>)
, dim3(num_blocks), dim3(kNumThreads), shared_mem_size_bytes, stream,
(const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
interaction_ugrad_size, interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
shared_mem_per_warp_size_byte);
}
}
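// Concatenates the bottom-MLP row (slot 0) and the n_emb embedding rows into one wide row per
// sample when forward == true; when forward == false it scatters the concatenated gradient back,
// accumulating into the bottom-MLP slot and overwriting the embedding slots. blockIdx.x selects
// the input slot and blockIdx.y strides over samples.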
template <typename T>
__global__ void concat_kernel(bool forward, T *out, T *in_mlp, T *in_emb, const int h,
const int out_w, const int in_w, const int n_emb) {
const int n_ins = 1 + n_emb;
if (blockIdx.x < n_ins) {
T *in = (blockIdx.x == 0) ? in_mlp : in_emb + (blockIdx.x - 1) * in_w;
for (int bid = blockIdx.y; bid < h; bid += gridDim.y) {
int in_idx_base = (blockIdx.x == 0) ? bid * in_w : bid * in_w * n_emb;
for (int tid = threadIdx.x; tid < in_w; tid += blockDim.x) {
int in_idx = in_idx_base + tid;
int out_idx = bid * out_w + blockIdx.x * in_w + tid;
if (forward) {
out[out_idx] = in[in_idx];
} else {
in[in_idx] = (blockIdx.x == 0) ? (in[in_idx] + out[out_idx]) : out[out_idx];
}
}
}
}
}
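// Gathers the strictly upper-triangular entries of each sample's n_ins x n_ins interaction
// matrix into shared memory and writes the output row as [in0 features | flattened triangle],
// leaving the trailing padding element of each output row untouched.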
template <typename T>
__global__ void gather_concat_fprop_kernel(T *out, const T *in0, const T *mat, const int h,
const int n_ins, const int w) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
int g_in_idx_base = bid * n_ins * n_ins;
for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
if (col > row) {
int idx_in_blk = row * n_ins + col;
int g_in_idx = g_in_idx_base + idx_in_blk;
int s_idx = (col * (col - 1) / 2) + row;
s_buf[s_idx] = mat[g_in_idx];
}
}
}
__syncthreads();
int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
int g_out_idx_base = bid * out_len;
for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
int g_out_idx = g_out_idx_base + tid;
T value = (tid < w) ? in0[bid * w + tid] : s_buf[tid - w];
out[g_out_idx] = value;
}
__syncthreads();
}
}
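// Computes dst = src + src^T for each sample's n_ins x n_ins matrix through a shared-memory
// tile. The in-tile index swap yields the true transpose only when the whole matrix lies inside
// a single thread block, i.e. n_ins <= 32 with the 32x32 blocks used by bprop below.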
template <typename T>
__global__ void transpose_and_add(const T *src, T *dst, const int h, const int n_ins) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.z; bid < h; bid += gridDim.z) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = bid * n_ins * n_ins + y * n_ins + x;
int sid_n = threadIdx.y * blockDim.x + threadIdx.x;
int sid_t = threadIdx.x * blockDim.y + threadIdx.y;
if (x < n_ins && y < n_ins) {
s_buf[sid_n] = src[gid];
}
__syncthreads();
if (x < n_ins && y < n_ins) {
dst[gid] = s_buf[sid_n] + s_buf[sid_t];
}
__syncthreads();
}
}
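// Inverse of the gather/concat forward step: splits the output gradient back into the in0
// gradient and the packed triangular interaction gradient, then scatters the latter into the
// full n_ins x n_ins matrix with zeros on and below the diagonal.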
template <typename T>
__global__ void gather_concat_bprop_kernel(const T *out, T *in0, T *mat, const int h,
const int n_ins, const int w) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
int g_out_idx_base = bid * out_len;
for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
int g_out_idx = g_out_idx_base + tid;
T val = out[g_out_idx];
if (tid < w) {
in0[bid * w + tid] = val;
} else {
s_buf[tid - w] = val;
}
}
__syncthreads();
int g_in_idx_base = bid * n_ins * n_ins;
for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
int idx_in_blk = row * n_ins + col;
int g_in_idx = g_in_idx_base + idx_in_blk;
int s_idx = (col * (col - 1) / 2) + row;
mat[g_in_idx] = (col > row) ? s_buf[s_idx] : T(0);
}
}
__syncthreads();
}
}
} // anonymous namespace
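// InteractionLayer: validates the 2D bottom-MLP input (batch, width) and the 3D embedding input
// (batch, slots, width). For the float path it allocates the concat, pairwise-matrix and scratch
// tensors used by the concat -> batched GEMM -> gather pipeline; the __half path relies on the
// fused WMMA kernels above and needs no intermediate tensors.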
template <typename T>
InteractionLayer<T>::InteractionLayer(std::shared_ptr<Tensor<T>> in_bottom_mlp_tensor,
std::shared_ptr<Tensor<T>> in_embeddings,
std::shared_ptr<Tensor<T>> &out_tensor,
const std::shared_ptr<GeneralBuffer<T>> &blobs_buff,
hipblasHandle_t cublas_handle, bool use_mixed_precision,
int device_id)
: cublas_handle_(cublas_handle),
use_mixed_precision_(use_mixed_precision),
n_sms_(0),
Layer(device_id) {
try {
CudaDeviceContext context(get_device_id());
auto first_in_dims = in_bottom_mlp_tensor->get_dims();
auto second_in_dims = in_embeddings->get_dims();
if (first_in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Input Bottom MLP must be a 2D tensor");
}
if (in_bottom_mlp_tensor->get_format() != TensorFormat_t::HW ||
in_embeddings->get_format() != TensorFormat_t::HSW) {
CK_THROW_(Error_t::WrongInput, "TensorFormat_t is invalid");
}
if (second_in_dims.size() != 3) {
CK_THROW_(Error_t::WrongInput, "Input Embeddings must be a 3D tensor");
}
if (first_in_dims[0] != second_in_dims[0]) {
CK_THROW_(Error_t::WrongInput, "the input tensors' batch sizes must be the same");
}
if (first_in_dims[1] != second_in_dims[2]) {
CK_THROW_(Error_t::WrongInput, "the input tensors' widths must be the same");
}
TensorFormat_t format = TensorFormat_t::HW;
size_t n_ins = 1 + second_in_dims[1];
if (std::is_same<T, __half>::value == false) {
size_t concat_dims_width = first_in_dims[1] + second_in_dims[1] * second_in_dims[2];
std::vector<size_t> concat_dims = {first_in_dims[0], concat_dims_width};
internal_tensors_.emplace_back(new Tensor<T>(concat_dims, blobs_buff, format));
std::vector<size_t> mat_dims = {first_in_dims[0], n_ins * n_ins};
internal_tensors_.emplace_back(new Tensor<T>(mat_dims, blobs_buff, format));
internal_tensors_.emplace_back(new Tensor<T>(concat_dims, blobs_buff, format));
}
int concat_len = n_ins * (n_ins + 1) / 2 - n_ins;
std::vector<size_t> out_dims = {first_in_dims[0], first_in_dims[1] + concat_len + 1};
out_tensor.reset(new Tensor<T>(out_dims, blobs_buff, format));
in_tensors_.emplace_back(in_bottom_mlp_tensor);
in_tensors_.emplace_back(in_embeddings);
out_tensors_.emplace_back(out_tensor);
int device = get_device_id();
CK_CUDA_THROW_(hipDeviceGetAttribute(&n_sms_, hipDeviceAttributeMultiprocessorCount, device));
assert(n_sms_ > 0);
} catch (const std::runtime_error &rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
template <typename T>
InteractionLayer<T>::~InteractionLayer() {}
template <typename T>
void InteractionLayer<T>::fprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(hipblasSetStream(cublas_handle_, stream));
// phase 0: concat
T *concat = internal_tensors_[0]->get_ptr();
T *in_mlp = in_tensors_[0]->get_ptr();
T *in_emb = in_tensors_[1]->get_ptr();
const int h = internal_tensors_[0]->get_dims()[0];
const int out_w = internal_tensors_[0]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
dim3 grid0(n_ins, n_sms_, 1);
dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, stream, true, concat, in_mlp, in_emb, h, out_w, in_w, n_emb);
// phase 1: matmul
const int batch_count = h;
T *mat = internal_tensors_[1]->get_ptr();
const int m = n_ins;
const int n = n_ins;
const int k = in_w;
float alpha = 1.0f;
float beta = 0.0f;
long long int stride_a = n * k;
long long int stride_b = k * m;
long long int stride_c = n * m;
hipDataType a_type = HIP_R_32F;
hipDataType b_type = HIP_R_32F;
hipDataType c_type = HIP_R_32F;
hipDataType compute_type = HIP_R_32F;
  // hipBLAS only defines HIPBLAS_GEMM_DEFAULT (hipify leaves CUBLAS_GEMM_DEFAULT_TENSOR_OP
  // unconverted, which does not compile against hipBLAS), so the default GEMM algorithm is used
  // for both precision modes.
  hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
CK_CUBLAS_THROW_(hipblasGemmStridedBatchedEx(cublas_handle_, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k,
&alpha, concat, a_type, k, stride_a, concat, b_type,
k, stride_b, &beta, mat, c_type, n, stride_c,
batch_count, compute_type, algo));
// phase 2: gather & concat
T *in0 = in_tensors_[0]->get_ptr();
T *gather = out_tensors_[0]->get_ptr();
dim3 grid1(n_sms_ * 8, 1, 1);
dim3 block1(16, 16, 1);
size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
hipLaunchKernelGGL(( gather_concat_fprop_kernel), dim3(grid1), dim3(block1), smem_size, stream, gather, in0, mat, h, n_ins,
in_w);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
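// __half specialization: bypasses the concat/GEMM/gather pipeline above and runs the fused
// WMMA-based forward kernel directly on the bottom-MLP and embedding tensors.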
template <>
void InteractionLayer<__half>::fprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(hipblasSetStream(cublas_handle_, stream));
// __half* concat = internal_tensors_[0]->get_ptr();
__half *in_mlp = in_tensors_[0]->get_ptr();
__half *in_emb = in_tensors_[1]->get_ptr();
__half *output = out_tensors_[0]->get_ptr();
const int h = in_tensors_[0]->get_dims()[0];
// const int out_w = internal_tensors_[0]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
// dim3 grid0(n_ins, n_sms_, 1);
// dim3 block0(((in_w <= 128)? 128 : ((in_w <= 256)? 256 : 512)), 1, 1);
//hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, stream, true, concat, in_mlp, in_emb,
// h, out_w,
// in_w, n_emb);
dotBasedInteractFwd(in_mlp, in_emb, output, h, n_ins, in_w, stream);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template <typename T>
void InteractionLayer<T>::bprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(hipblasSetStream(cublas_handle_, stream));
// phase 0:
T *gather = out_tensors_[0]->get_ptr();
T *in0 = in_tensors_[0]->get_ptr();
T *mat = internal_tensors_[1]->get_ptr();
const int h = internal_tensors_[0]->get_dims()[0];
const int n_ins = 1 + in_tensors_[1]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
dim3 grid1(n_sms_ * 8, 1, 1);
dim3 block1(16, 16, 1);
size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
hipLaunchKernelGGL(( gather_concat_bprop_kernel), dim3(grid1), dim3(block1), smem_size, stream, gather, in0, mat, h, n_ins,
in_w);
// phase 1:
const int batch_count = h;
T *concat = internal_tensors_[0]->get_ptr();
T *concat_tmp = internal_tensors_[2]->get_ptr();
const int m = n_ins;
const int n = in_w;
const int k = n_ins;
T alpha = 1.0f;
T beta = 0.0f;
long long int stride_a = n * k;
long long int stride_b = k * m;
long long int stride_c = n * m;
hipDataType a_type = HIP_R_32F;
hipDataType b_type = HIP_R_32F;
hipDataType c_type = HIP_R_32F;
hipDataType compute_type = HIP_R_32F;
  // As in fprop: hipBLAS exposes no tensor-op algorithm enum, so the default is used here.
  hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT;
// mat = mat + T(mat)
{
dim3 block(32, 32, 1);
dim3 grid((n_ins + block.x - 1) / block.x, (n_ins + block.y - 1) / block.y, h);
size_t smem_size = sizeof(T) * block.x * block.y;
hipLaunchKernelGGL(( transpose_and_add), dim3(grid), dim3(block), smem_size, stream, mat, mat, h, n_ins);
}
CK_CUBLAS_THROW_(hipblasGemmStridedBatchedEx(cublas_handle_, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k,
&alpha, concat, a_type, n, stride_a, mat, b_type, k,
stride_b, &beta, concat_tmp, c_type, n, stride_c,
batch_count, compute_type, algo));
// phase 2:
T *in_mlp = in_tensors_[0]->get_ptr();
T *in_emb = in_tensors_[1]->get_ptr();
const int out_w = internal_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
dim3 grid0(n_ins, n_sms_, 1);
dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, stream, false, concat_tmp, in_mlp, in_emb, h, out_w, in_w,
n_emb);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
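// __half specialization of the backward pass: the fused kernel reads the forward inputs from the
// same in_tensors_ buffers it writes the bottom-MLP and embedding gradients into, so no
// intermediate concat/matrix tensors are needed.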
template <>
void InteractionLayer<__half>::bprop(hipStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(hipblasSetStream(cublas_handle_, stream));
__half *up_grad = out_tensors_[0]->get_ptr();
__half *mlp_grad = in_tensors_[0]->get_ptr();
__half *emb_grad = in_tensors_[1]->get_ptr();
// __half* out_grad = internal_tensors_[2]->get_ptr();
const int h = in_tensors_[0]->get_dims()[0];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
const int in_w = in_tensors_[0]->get_dims()[1];
// const int out_w = internal_tensors_[0]->get_dims()[1];
dotBasedInteractBwd(up_grad, mlp_grad, emb_grad, h, n_ins, in_w, stream);
// dim3 grid0(n_ins, n_sms_, 1);
// dim3 block0(((in_w <= 128)? 128 : ((in_w <= 256)? 256 : 512)), 1, 1);
//hipLaunchKernelGGL(( concat_kernel), dim3(grid0), dim3(block0), 0, stream, false, out_grad, mlp_grad, emb_grad,
// h, out_w,
// in_w, n_emb);
#ifndef NDEBUG
hipDeviceSynchronize();
CK_CUDA_THROW_(hipGetLastError());
#endif
}
template class InteractionLayer<float>;
template class InteractionLayer<__half>;
} // namespace HugeCTR
| ade65853f1d32319bdf6170ed5822e8709154af7.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/layers/interaction_layer.hpp"
#include "HugeCTR/include/common.hpp"
#include "HugeCTR/include/tensor.hpp"
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <type_traits>
#ifndef NDEBUG
#include <iostream>
#endif
namespace HugeCTR {
namespace {
using namespace nvcuda;
template <uint x>
struct Log2 {
static constexpr uint value = 1 + Log2<x / 2>::value;
};
template <>
struct Log2<1> {
static constexpr uint value = 0;
};
struct __align__(8) half4 {
half2 vals[2];
};
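// Forward interaction kernel (non-vectorized fallback): one warp per sample. The bottom-MLP row
// and the (num_rows - 1) embedding rows are staged in shared memory, zero-padded to tile
// multiples, and multiplied by their own transpose with WMMA; the output row is the bottom-MLP
// features followed by the strictly lower-triangular interaction terms plus one padding half.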
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(
const __half *__restrict bottom_mlp_input, const __half *__restrict emb_input,
__half *__restrict output, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp,
uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps) {
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
// for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (uint i = 0; i < num_rows; ++i) {
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
(shmem + i * SMEM_STRIDE)[idx] = sample_input[idx];
}
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
gmem_output[idx] = shmem[idx];
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
}
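// Same computation as the non-aligned kernel above, but rows are loaded and stored with float2
// vector accesses; the caller guarantees num_cols and output_size are multiples of 8.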
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS,
uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2,
uint TILE_DIM, uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractFwdKernel(const __half *__restrict bottom_mlp_input,
const __half *__restrict emb_input, __half *__restrict output,
uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding,
uint smem_elems_per_warp, uint smem_rows_per_warp,
uint output_size, uint num_row_steps, uint num_col_steps) {
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
int lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
extern __shared__ half shmem_dynamic[];
half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp);
// const half *sample_input = input + num_rows * num_cols * sample_id;
const half *sample_bottom_mlp_input = bottom_mlp_input + num_cols * sample_id;
const half *sample_emp_input = emb_input + (num_rows - 1) * num_cols * sample_id;
const half *sample_input = sample_bottom_mlp_input;
if (lane_id < (num_cols >> 2)) {
// for (int i = 0; i < num_rows; ++i, sample_input += num_cols) {
for (int i = 0; i < num_rows; ++i) {
((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id];
sample_input = (i == 0) ? sample_emp_input : (sample_input + num_cols);
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (int i = 0; i < num_rows; ++i) {
(shmem + i * SMEM_STRIDE)[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
for (int i = num_rows; i < num_rows_after_padding; i++) {
((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros;
}
}
__syncwarp();
half *gmem_output = output + output_size * sample_id;
if (lane_id < (num_cols >> 2)) {
((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id];
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS];
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::fill_fragment(acc[i][j], 0);
}
}
for (int k_step = 0; k_step < num_col_steps; k_step++) {
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS];
for (int j = 0; j < M_BLOCKS; j++) {
int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16;
const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16);
wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE);
wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE);
}
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]);
}
}
}
float *shmem_store = reinterpret_cast<float *>(shmem);
for (int i = 0; i < M_BLOCKS; i++) {
for (int j = 0; j < M_BLOCKS; j++) {
float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16);
wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major);
}
}
half *gmem_interact_output = gmem_output + num_cols;
int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp;
int srcLine = 0;
for (int i = 0; i < num_rows; ++i, ++srcLine) {
if (i == ((M_BLOCKS - 1) * 16)) {
srcLine += lastRowBlockOffset;
}
if (lane_id < i) {
uint offset = (i * (i - 1)) >> 1;
gmem_interact_output[offset + lane_id] =
__float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]);
}
}
// Padding
if (lane_id == 0) {
gmem_output[output_size - 1] = __float2half(0);
}
}
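// Backward interaction kernel (non-vectorized fallback): one warp per sample. The packed 1D
// upstream interaction gradient is expanded into a symmetric 2D matrix in shared memory and
// multiplied by the staged forward inputs with WMMA. The forward inputs are read from
// bottom_mlp_grad / emb_grad, i.e. those buffers still hold the activations on entry and are
// overwritten in place; the bottom-MLP gradient additionally adds the direct upstream slice.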
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractBwdKernelNonAligned(
const __half *__restrict upstream_grad, half __restrict *bottom_mlp_grad,
half __restrict *emb_grad, uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size,
uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems,
uint input_stride, uint num_row_steps, uint num_col_steps, uint row_tiles_per_step,
uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
// uint gmem_input_sample_offset = sample_id * sample_size;
// const half *gmem_input = &input[gmem_input_sample_offset];
uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
// Interaction Gradient
// const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
// half *gmem_grad = &grad[gmem_grad_sample_offset];
half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
// Bottom MLP gradient
// half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
// const half *gmem_row_ptr = &gmem_input[row * num_cols];
const half *gmem_row_ptr =
(row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = gmem_row_ptr[idx];
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
smem_row_ptr[idx] = __float2half(0);
}
}
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
for (uint idx = lane_id; idx < num_cols_after_padding; idx += THREADS_IN_WARP) {
smem_row_ptr[idx] = __float2half(0);
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id;
if (gmem_grad_col < num_cols) {
for (uint i = 0; i < num_rows; i++) {
// gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) +
// lane_id]);
half *gmem_grad = (i == 0) ? gmem_bottom_mlp_grad : gmem_emb_grad;
uint idx = (i == 0) ? gmem_grad_col : ((i - 1) * num_cols + gmem_grad_col);
half val = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]);
gmem_grad[idx] = (i == 0) ? (val + gmem_ugrad[idx]) : val;
}
}
}
// for (uint idx = lane_id; idx < num_cols; idx += THREADS_IN_WARP) {
// gmem_mlp_grad[idx] = gmem_ugrad[idx];
// }
}
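// Vectorized variant of the backward kernel above (float4/float2/half2 accesses); the caller
// guarantees the padded interaction-gradient size and num_cols are multiples of 8.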
template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP,
uint COL_TILES_PER_STEP, uint THREADS_IN_WARP, uint THREADS_IN_WARP_LOG_2, uint TILE_DIM,
uint TILE_DIM_LOG_2>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractBwdKernel(const __half *__restrict upstream_grad,
half __restrict *bottom_mlp_grad, half __restrict *emb_grad,
uint batch_size, uint num_rows, uint num_cols,
uint num_rows_after_padding, uint num_cols_after_padding,
uint sample_size, uint interaction_ugrad_size,
uint interaction_ugrad_size_with_padding,
uint interaction_ugrad_2D_size_elems,
uint interaction_ugrad_2D_stride, uint input_size_elems,
uint input_stride, uint num_row_steps, uint num_col_steps,
uint row_tiles_per_step, uint shared_mem_per_warp_size_byte) {
extern __shared__ half shared_mem[];
uint warp_id = (threadIdx.x >> THREADS_IN_WARP_LOG_2);
uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id;
if (sample_id >= batch_size) {
return;
}
uint lane_id = threadIdx.x & (THREADS_IN_WARP - 1);
// ">> 1" to convert to half pointer
uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1);
half *smem_in = &shared_mem[smem_warp_offset];
half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems];
float *smem_out = reinterpret_cast<float *>(smem_temp);
// Global memory pointers for the current sample
// Input
// uint gmem_input_sample_offset = sample_id * sample_size;
// const half *gmem_input = &input[gmem_input_sample_offset];
uint gmem_bottom_mlp_input_sample_offset = sample_id * num_cols;
uint gmem_emb_input_sample_offset = sample_id * (num_rows - 1) * num_cols;
const half *gmem_bottom_mlp_input = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
const half *gmem_emb_input = &emb_grad[gmem_emb_input_sample_offset];
// Interaction Gradient
// const uint &gmem_grad_sample_offset = gmem_input_sample_offset;
// half *gmem_grad = &grad[gmem_grad_sample_offset];
half *gmem_bottom_mlp_grad = &bottom_mlp_grad[gmem_bottom_mlp_input_sample_offset];
half *gmem_emb_grad = &emb_grad[gmem_emb_input_sample_offset];
// Bottom MLP gradient
// half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols];
// Upstream gradient vector
uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding);
const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset];
// Upstream gradient vector for interactions
const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols];
// upstream grad -> shared memory (place in input section temporarily)
#pragma unroll
for (uint idx = lane_id; idx < (interaction_ugrad_size >> 3); idx += THREADS_IN_WARP) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_ugrad_interactions)[idx];
}
uint offset = (interaction_ugrad_size >> 3) << 3;
for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += THREADS_IN_WARP) {
smem_in[idx] = gmem_ugrad_interactions[idx];
}
__syncwarp();
// Form the 2D ugrad matrix.
if (lane_id < num_rows_after_padding) {
uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1);
uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride;
for (uint row = 0; row < num_rows; row++) {
half ugrad_val = __float2half(0.0f);
if (row < lane_id && lane_id < num_rows) {
ugrad_val = smem_in[ugrad_flat_index + row];
smem_temp[ugrad_offset_1 + row] = ugrad_val;
}
if (row <= lane_id && lane_id < num_rows_after_padding) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val;
}
}
for (uint row = num_rows; row < num_rows_after_padding; row++) {
smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f);
}
}
__syncwarp();
// Input -> Shared Memory
if (lane_id < (num_cols >> 2)) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
// const half *gmem_row_ptr = &gmem_input[row * num_cols];
const half *gmem_row_ptr =
(row == 0) ? gmem_bottom_mlp_input : &gmem_emb_input[(row - 1) * num_cols];
((float2 *)smem_row_ptr)[lane_id] = ((float2 *)gmem_row_ptr)[lane_id];
}
}
uint idx = lane_id + num_cols;
if (idx < num_cols_after_padding) {
for (uint row = 0; row < num_rows; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
smem_row_ptr[idx] = __float2half(0);
}
}
half4 zeros;
zeros.vals[0].x = __float2half(0);
zeros.vals[0].y = __float2half(0);
zeros.vals[1].x = __float2half(0);
zeros.vals[1].y = __float2half(0);
if (lane_id < (num_cols_after_padding >> 2)) {
#pragma unroll 2
for (uint row = num_rows; row < num_rows_after_padding; row++) {
half *smem_row_ptr = &smem_in[row * input_stride];
((half4 *)smem_row_ptr)[lane_id] = zeros;
}
}
__syncwarp();
wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
a[ROW_TILES_PER_STEP][ROW_TILES_PER_STEP];
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2);
wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride);
}
}
wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP];
wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major>
b[ROW_TILES_PER_STEP];
for (int col_step = 0; col_step < num_col_steps; col_step++) {
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2);
wmma::fill_fragment(acc[i], 0);
wmma::load_matrix_sync(b[i], tile_ptr, input_stride);
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
for (uint j = 0; j < ROW_TILES_PER_STEP; j++) {
wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]);
}
}
for (uint i = 0; i < ROW_TILES_PER_STEP; i++) {
float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM;
wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major);
}
__syncwarp();
uint gmem_grad_col_base = (col_step << TILE_DIM_LOG_2);
uint gmem_grad_col = gmem_grad_col_base + lane_id;
if (gmem_grad_col < num_cols) {
if (lane_id < 8) {
((__half2 *)(gmem_bottom_mlp_grad + gmem_grad_col_base))[lane_id] =
__hadd2(__float22half2_rn(((float2 *)smem_out)[lane_id]),
((__half2 *)(gmem_ugrad + gmem_grad_col_base))[lane_id]);
}
for (uint i = 0; i < num_rows - 1; i++) {
half val = __float2half(smem_out[((i + 1) << TILE_DIM_LOG_2) + lane_id]);
gmem_emb_grad[i * num_cols + gmem_grad_col] = val;
}
}
}
}
inline void dotBasedInteractFwd(const void *bottom_mlp_input, const void *emb_input, void *output,
uint batch_size, uint num_rows, uint num_cols,
cudaStream_t stream) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint warps_per_threadblock = 4;
const uint threadblock_size = warps_per_threadblock * 32;
const uint kPaddingSize = 1;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
uint num_row_steps = num_row_tiles / kRowTilesPerStep;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
const uint K_BLOCKS = 8;
const uint M_BLOCKS = 2;
const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF);
// multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a
// tile
const uint smem_rows_per_warp = M_BLOCKS << 4;
const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE;
const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 8 : 0;
const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC);
const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2; // output in FP32
const uint smem_elems_per_warp = (smem_elems_per_warp_mat > smem_elems_per_warp_acc)
? smem_elems_per_warp_mat
: smem_elems_per_warp_acc;
uint output_size = num_cols + (num_rows * (num_rows - 1) >> 1) + kPaddingSize;
bool float4_predicate = !((num_cols & 7) || (output_size & 7));
if (float4_predicate) {
dotBasedInteractFwdKernel<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2, kTileDim,
kTileDimLog2>
<<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock, threadblock_size,
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream>>>(
(const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
} else {
dotBasedInteractFwdKernelNonAligned<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS,
SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2,
kTileDim, kTileDimLog2>
<<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock, threadblock_size,
warps_per_threadblock * smem_elems_per_warp * sizeof(__half), stream>>>(
(const __half *)bottom_mlp_input, (const __half *)emb_input, (half *)output, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp,
smem_rows_per_warp, output_size, num_row_steps, num_col_steps);
}
}
inline void dotBasedInteractBwd(void *upstream_grad, void *bottom_mlp_grad, void *emb_grad,
uint batch_size, uint num_rows, uint num_cols,
cudaStream_t stream) {
const uint kWarpSize = 32;
const uint kWarpSizeLog2 = Log2<kWarpSize>::value;
const uint kTileDim = 16;
const uint kTileDimLog2 = Log2<kTileDim>::value;
const uint mem_skew_size = 8;
const uint kPaddingSize = 1;
const uint kWarpsPerBlock = 4;
const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value;
const uint kNumThreads = kWarpsPerBlock * kWarpSize;
const uint kRowTilesPerStep = 2;
const uint kColTilesPerStep = 1;
uint row_tiles_per_step = num_rows > kTileDim ? kRowTilesPerStep : 1;
// num tiles
uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2;
uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2;
// number of rows and columns after padding
uint num_rows_after_padding = kTileDim << 1;
uint num_cols_after_padding = num_col_tiles << kTileDimLog2;
// 2D ugrad size and stride
uint interaction_ugrad_2D_stride = num_rows_after_padding + mem_skew_size;
uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride;
uint interaction_ugrad_2D_size_bytes = interaction_ugrad_2D_size_elems * sizeof(half);
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1;
uint interaction_ugrad_size_with_padding = interaction_ugrad_size + kPaddingSize;
// in_out place size and stride
uint input_stride = num_cols_after_padding + mem_skew_size;
uint input_size_elems = num_rows_after_padding * input_stride;
uint input_size_bytes = input_size_elems * sizeof(half);
// sample size
uint sample_size = num_rows * num_cols;
// output size
uint output_size_elems = kTileDim * kTileDim * kRowTilesPerStep * kColTilesPerStep;
uint output_size_bytes = output_size_elems * sizeof(float);
// staging area size
uint staging_area_size_bytes = output_size_bytes > interaction_ugrad_2D_size_bytes
? output_size_bytes
: interaction_ugrad_2D_size_bytes;
// Shared memory size
uint shared_mem_per_warp_size_byte = input_size_bytes + staging_area_size_bytes;
uint shared_mem_size_bytes = kWarpsPerBlock * shared_mem_per_warp_size_byte;
uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2;
uint num_row_steps = num_row_tiles / row_tiles_per_step;
uint num_col_steps = num_col_tiles / kColTilesPerStep;
bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7));
if (float4_predicate) {
dotBasedInteractBwdKernel<kWarpsPerBlock, kNumThreads, kRowTilesPerStep, kColTilesPerStep,
kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2>
<<<num_blocks, kNumThreads, shared_mem_size_bytes, stream>>>(
(const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
interaction_ugrad_size, interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
shared_mem_per_warp_size_byte);
} else {
dotBasedInteractBwdKernelNonAligned<kWarpsPerBlock, kNumThreads, kRowTilesPerStep,
kColTilesPerStep, kWarpSize, kWarpSizeLog2, kTileDim,
kTileDimLog2>
<<<num_blocks, kNumThreads, shared_mem_size_bytes, stream>>>(
(const half *)upstream_grad, (half *)bottom_mlp_grad, (half *)emb_grad, batch_size,
num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size,
interaction_ugrad_size, interaction_ugrad_size_with_padding,
interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems,
input_stride, num_row_steps, num_col_steps, row_tiles_per_step,
shared_mem_per_warp_size_byte);
}
}
template <typename T>
__global__ void concat_kernel(bool forward, T *out, T *in_mlp, T *in_emb, const int h,
const int out_w, const int in_w, const int n_emb) {
const int n_ins = 1 + n_emb;
if (blockIdx.x < n_ins) {
T *in = (blockIdx.x == 0) ? in_mlp : in_emb + (blockIdx.x - 1) * in_w;
for (int bid = blockIdx.y; bid < h; bid += gridDim.y) {
int in_idx_base = (blockIdx.x == 0) ? bid * in_w : bid * in_w * n_emb;
for (int tid = threadIdx.x; tid < in_w; tid += blockDim.x) {
int in_idx = in_idx_base + tid;
int out_idx = bid * out_w + blockIdx.x * in_w + tid;
if (forward) {
out[out_idx] = in[in_idx];
} else {
in[in_idx] = (blockIdx.x == 0) ? (in[in_idx] + out[out_idx]) : out[out_idx];
}
}
}
}
}
template <typename T>
__global__ void gather_concat_fprop_kernel(T *out, const T *in0, const T *mat, const int h,
const int n_ins, const int w) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
int g_in_idx_base = bid * n_ins * n_ins;
for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
if (col > row) {
int idx_in_blk = row * n_ins + col;
int g_in_idx = g_in_idx_base + idx_in_blk;
int s_idx = (col * (col - 1) / 2) + row;
s_buf[s_idx] = mat[g_in_idx];
}
}
}
__syncthreads();
int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
int g_out_idx_base = bid * out_len;
for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
int g_out_idx = g_out_idx_base + tid;
T value = (tid < w) ? in0[bid * w + tid] : s_buf[tid - w];
out[g_out_idx] = value;
}
__syncthreads();
}
}
template <typename T>
__global__ void transpose_and_add(const T *src, T *dst, const int h, const int n_ins) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.z; bid < h; bid += gridDim.z) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int gid = bid * n_ins * n_ins + y * n_ins + x;
int sid_n = threadIdx.y * blockDim.x + threadIdx.x;
int sid_t = threadIdx.x * blockDim.y + threadIdx.y;
if (x < n_ins && y < n_ins) {
s_buf[sid_n] = src[gid];
}
__syncthreads();
if (x < n_ins && y < n_ins) {
dst[gid] = s_buf[sid_n] + s_buf[sid_t];
}
__syncthreads();
}
}
template <typename T>
__global__ void gather_concat_bprop_kernel(const T *out, T *in0, T *mat, const int h,
const int n_ins, const int w) {
extern __shared__ T s_buf[];
for (int bid = blockIdx.x; bid < h; bid += gridDim.x) {
int tid_base = threadIdx.y * blockDim.x + threadIdx.x;
int out_len = w + (n_ins * (n_ins + 1) / 2 - n_ins) + 1;
int g_out_idx_base = bid * out_len;
for (int tid = tid_base; tid < out_len - 1; tid += blockDim.y * blockDim.x) {
int g_out_idx = g_out_idx_base + tid;
T val = out[g_out_idx];
if (tid < w) {
in0[bid * w + tid] = val;
} else {
s_buf[tid - w] = val;
}
}
__syncthreads();
int g_in_idx_base = bid * n_ins * n_ins;
for (int row = threadIdx.y; row < n_ins; row += blockDim.y) {
for (int col = threadIdx.x; col < n_ins; col += blockDim.x) {
int idx_in_blk = row * n_ins + col;
int g_in_idx = g_in_idx_base + idx_in_blk;
int s_idx = (col * (col - 1) / 2) + row;
mat[g_in_idx] = (col > row) ? s_buf[s_idx] : T(0);
}
}
__syncthreads();
}
}
} // anonymous namespace
template <typename T>
InteractionLayer<T>::InteractionLayer(std::shared_ptr<Tensor<T>> in_bottom_mlp_tensor,
std::shared_ptr<Tensor<T>> in_embeddings,
std::shared_ptr<Tensor<T>> &out_tensor,
const std::shared_ptr<GeneralBuffer<T>> &blobs_buff,
cublasHandle_t cublas_handle, bool use_mixed_precision,
int device_id)
: cublas_handle_(cublas_handle),
use_mixed_precision_(use_mixed_precision),
n_sms_(0),
Layer(device_id) {
try {
CudaDeviceContext context(get_device_id());
auto first_in_dims = in_bottom_mlp_tensor->get_dims();
auto second_in_dims = in_embeddings->get_dims();
if (first_in_dims.size() != 2) {
CK_THROW_(Error_t::WrongInput, "Input Bottom MLP must be a 2D tensor");
}
if (in_bottom_mlp_tensor->get_format() != TensorFormat_t::HW ||
in_embeddings->get_format() != TensorFormat_t::HSW) {
CK_THROW_(Error_t::WrongInput, "TensorFormat_t is invalid");
}
if (second_in_dims.size() != 3) {
CK_THROW_(Error_t::WrongInput, "Input Embeddings must be a 3D tensor");
}
if (first_in_dims[0] != second_in_dims[0]) {
CK_THROW_(Error_t::WrongInput, "the input tensors' batch sizes must be the same");
}
if (first_in_dims[1] != second_in_dims[2]) {
CK_THROW_(Error_t::WrongInput, "the input tensors' widths must be the same");
}
TensorFormat_t format = TensorFormat_t::HW;
size_t n_ins = 1 + second_in_dims[1];
if (std::is_same<T, __half>::value == false) {
size_t concat_dims_width = first_in_dims[1] + second_in_dims[1] * second_in_dims[2];
std::vector<size_t> concat_dims = {first_in_dims[0], concat_dims_width};
internal_tensors_.emplace_back(new Tensor<T>(concat_dims, blobs_buff, format));
std::vector<size_t> mat_dims = {first_in_dims[0], n_ins * n_ins};
internal_tensors_.emplace_back(new Tensor<T>(mat_dims, blobs_buff, format));
internal_tensors_.emplace_back(new Tensor<T>(concat_dims, blobs_buff, format));
}
int concat_len = n_ins * (n_ins + 1) / 2 - n_ins;
std::vector<size_t> out_dims = {first_in_dims[0], first_in_dims[1] + concat_len + 1};
out_tensor.reset(new Tensor<T>(out_dims, blobs_buff, format));
in_tensors_.emplace_back(in_bottom_mlp_tensor);
in_tensors_.emplace_back(in_embeddings);
out_tensors_.emplace_back(out_tensor);
int device = get_device_id();
CK_CUDA_THROW_(cudaDeviceGetAttribute(&n_sms_, cudaDevAttrMultiProcessorCount, device));
assert(n_sms_ > 0);
} catch (const std::runtime_error &rt_err) {
std::cerr << rt_err.what() << std::endl;
throw;
}
}
template <typename T>
InteractionLayer<T>::~InteractionLayer() {}
template <typename T>
void InteractionLayer<T>::fprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(cublasSetStream(cublas_handle_, stream));
// phase 0: concat
T *concat = internal_tensors_[0]->get_ptr();
T *in_mlp = in_tensors_[0]->get_ptr();
T *in_emb = in_tensors_[1]->get_ptr();
const int h = internal_tensors_[0]->get_dims()[0];
const int out_w = internal_tensors_[0]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
dim3 grid0(n_ins, n_sms_, 1);
dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
concat_kernel<<<grid0, block0, 0, stream>>>(true, concat, in_mlp, in_emb, h, out_w, in_w, n_emb);
// phase 1: matmul
const int batch_count = h;
T *mat = internal_tensors_[1]->get_ptr();
const int m = n_ins;
const int n = n_ins;
const int k = in_w;
float alpha = 1.0f;
float beta = 0.0f;
long long int stride_a = n * k;
long long int stride_b = k * m;
long long int stride_c = n * m;
cudaDataType_t a_type = CUDA_R_32F;
cudaDataType_t b_type = CUDA_R_32F;
cudaDataType_t c_type = CUDA_R_32F;
cudaDataType_t compute_type = CUDA_R_32F;
cublasGemmAlgo_t algo =
use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : CUBLAS_GEMM_DEFAULT;
CK_CUBLAS_THROW_(cublasGemmStridedBatchedEx(cublas_handle_, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k,
&alpha, concat, a_type, k, stride_a, concat, b_type,
k, stride_b, &beta, mat, c_type, n, stride_c,
batch_count, compute_type, algo));
// phase 2: gather & concat
T *in0 = in_tensors_[0]->get_ptr();
T *gather = out_tensors_[0]->get_ptr();
dim3 grid1(n_sms_ * 8, 1, 1);
dim3 block1(16, 16, 1);
size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
gather_concat_fprop_kernel<<<grid1, block1, smem_size, stream>>>(gather, in0, mat, h, n_ins,
in_w);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <>
void InteractionLayer<__half>::fprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(cublasSetStream(cublas_handle_, stream));
// __half* concat = internal_tensors_[0]->get_ptr();
__half *in_mlp = in_tensors_[0]->get_ptr();
__half *in_emb = in_tensors_[1]->get_ptr();
__half *output = out_tensors_[0]->get_ptr();
const int h = in_tensors_[0]->get_dims()[0];
// const int out_w = internal_tensors_[0]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
// dim3 grid0(n_ins, n_sms_, 1);
// dim3 block0(((in_w <= 128)? 128 : ((in_w <= 256)? 256 : 512)), 1, 1);
// concat_kernel<<<grid0, block0, 0, stream>>>(true, concat, in_mlp, in_emb,
// h, out_w,
// in_w, n_emb);
dotBasedInteractFwd(in_mlp, in_emb, output, h, n_ins, in_w, stream);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <typename T>
void InteractionLayer<T>::bprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(cublasSetStream(cublas_handle_, stream));
// phase 0:
T *gather = out_tensors_[0]->get_ptr();
T *in0 = in_tensors_[0]->get_ptr();
T *mat = internal_tensors_[1]->get_ptr();
const int h = internal_tensors_[0]->get_dims()[0];
const int n_ins = 1 + in_tensors_[1]->get_dims()[1];
const int in_w = in_tensors_[0]->get_dims()[1];
dim3 grid1(n_sms_ * 8, 1, 1);
dim3 block1(16, 16, 1);
size_t smem_size = sizeof(T) * (n_ins * (n_ins + 1) / 2 - n_ins);
gather_concat_bprop_kernel<<<grid1, block1, smem_size, stream>>>(gather, in0, mat, h, n_ins,
in_w);
// phase 1:
const int batch_count = h;
T *concat = internal_tensors_[0]->get_ptr();
T *concat_tmp = internal_tensors_[2]->get_ptr();
const int m = n_ins;
const int n = in_w;
const int k = n_ins;
T alpha = 1.0f;
T beta = 0.0f;
long long int stride_a = n * k;
long long int stride_b = k * m;
long long int stride_c = n * m;
cudaDataType_t a_type = CUDA_R_32F;
cudaDataType_t b_type = CUDA_R_32F;
cudaDataType_t c_type = CUDA_R_32F;
cudaDataType_t compute_type = CUDA_R_32F;
cublasGemmAlgo_t algo =
use_mixed_precision_ ? CUBLAS_GEMM_DEFAULT_TENSOR_OP : CUBLAS_GEMM_DEFAULT;
// mat = mat + T(mat)
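  // (Inferred from the forward pass rather than stated in the original source:
  // since fprop formed mat = concat * concat^T and kept only one triangle, the
  // gradient w.r.t. concat is (G + G^T) * concat; symmetrising `mat` here lets
  // the single GEMM below compute exactly that product.)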
{
dim3 block(32, 32, 1);
dim3 grid((n_ins + block.x - 1) / block.x, (n_ins + block.y - 1) / block.y, h);
size_t smem_size = sizeof(T) * block.x * block.y;
transpose_and_add<<<grid, block, smem_size, stream>>>(mat, mat, h, n_ins);
}
CK_CUBLAS_THROW_(cublasGemmStridedBatchedEx(cublas_handle_, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k,
&alpha, concat, a_type, n, stride_a, mat, b_type, k,
stride_b, &beta, concat_tmp, c_type, n, stride_c,
batch_count, compute_type, algo));
// phase 2:
T *in_mlp = in_tensors_[0]->get_ptr();
T *in_emb = in_tensors_[1]->get_ptr();
const int out_w = internal_tensors_[0]->get_dims()[1];
const int n_emb = in_tensors_[1]->get_dims()[1];
dim3 grid0(n_ins, n_sms_, 1);
dim3 block0(((in_w <= 128) ? 128 : ((in_w <= 256) ? 256 : 512)), 1, 1);
concat_kernel<<<grid0, block0, 0, stream>>>(false, concat_tmp, in_mlp, in_emb, h, out_w, in_w,
n_emb);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template <>
void InteractionLayer<__half>::bprop(cudaStream_t stream) {
CudaDeviceContext context(get_device_id());
CK_CUBLAS_THROW_(cublasSetStream(cublas_handle_, stream));
__half *up_grad = out_tensors_[0]->get_ptr();
__half *mlp_grad = in_tensors_[0]->get_ptr();
__half *emb_grad = in_tensors_[1]->get_ptr();
// __half* out_grad = internal_tensors_[2]->get_ptr();
const int h = in_tensors_[0]->get_dims()[0];
const int n_emb = in_tensors_[1]->get_dims()[1];
const int n_ins = 1 + n_emb;
const int in_w = in_tensors_[0]->get_dims()[1];
// const int out_w = internal_tensors_[0]->get_dims()[1];
dotBasedInteractBwd(up_grad, mlp_grad, emb_grad, h, n_ins, in_w, stream);
// dim3 grid0(n_ins, n_sms_, 1);
// dim3 block0(((in_w <= 128)? 128 : ((in_w <= 256)? 256 : 512)), 1, 1);
// concat_kernel<<<grid0, block0, 0, stream>>>(false, out_grad, mlp_grad, emb_grad,
// h, out_w,
// in_w, n_emb);
#ifndef NDEBUG
cudaDeviceSynchronize();
CK_CUDA_THROW_(cudaGetLastError());
#endif
}
template class InteractionLayer<float>;
template class InteractionLayer<__half>;
} // namespace HugeCTR
|
ba0e4a377ab58dc307761753202f54c8c14f215f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/common/utils.hpp>
#include <groupby/hash/groupby_kernels.cuh>
#include <cudf/aggregation.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/aggregation/result_cache.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/groupby.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <memory>
#include <utility>
namespace cudf {
namespace groupby {
namespace detail {
namespace hash {
namespace {
// This is a temporary fix due to a compiler bug; we can revert to constexpr
// once CUDA 10.2 becomes RAPIDS's minimum compiler version
#if 0
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
constexpr std::array<aggregation::Kind, 7> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX,
aggregation::COUNT_VALID, aggregation::COUNT_ALL,
aggregation::ARGMIN, aggregation::ARGMAX};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
#endif
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
bool constexpr is_hash_aggregation(aggregation::Kind t)
{
  // this is a temporary fix due to a compiler bug; we can revert to constexpr
  // once CUDA 10.2 becomes RAPIDS's minimum compiler version
// return array_contains(hash_aggregations, t);
return (t == aggregation::SUM) or (t == aggregation::MIN) or (t == aggregation::MAX) or
(t == aggregation::COUNT_VALID) or (t == aggregation::COUNT_ALL) or
(t == aggregation::ARGMIN) or (t == aggregation::ARGMAX);
}
// flatten aggs, keeping only those that can be computed in a single pass
std::tuple<table_view, std::vector<aggregation::Kind>, std::vector<size_t>>
flatten_single_pass_aggs(std::vector<aggregation_request> const& requests)
{
std::vector<column_view> columns;
std::vector<aggregation::Kind> agg_kinds;
std::vector<size_t> col_ids;
for (size_t i = 0; i < requests.size(); i++) {
auto const& request = requests[i];
auto const& agg_v = request.aggregations;
auto insert_agg = [&agg_kinds, &columns, &col_ids, &request, i](aggregation::Kind k) {
agg_kinds.push_back(k);
columns.push_back(request.values);
col_ids.push_back(i);
};
for (auto&& agg : agg_v) {
if (is_hash_aggregation(agg->kind)) {
if (is_fixed_width(request.values.type()) or agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
insert_agg(agg->kind);
} else if (request.values.type().id() == type_id::STRING) {
// For string type, only ARGMIN, ARGMAX, MIN, and MAX are supported
if (agg->kind == aggregation::ARGMIN or agg->kind == aggregation::ARGMAX) {
insert_agg(agg->kind);
} else if (agg->kind == aggregation::MIN) {
insert_agg(aggregation::ARGMIN);
} else if (agg->kind == aggregation::MAX) {
insert_agg(aggregation::ARGMAX);
}
}
}
}
}
return std::make_tuple(table_view(columns), std::move(agg_kinds), std::move(col_ids));
}
/**
* @brief Gather sparse results into dense using `gather_map` and add to
 * `dense_results`
*
* @see groupby_null_templated()
*/
void sparse_to_dense_results(std::vector<aggregation_request> const& requests,
cudf::detail::result_cache const& sparse_results,
cudf::detail::result_cache* dense_results,
rmm::device_vector<size_type> const& gather_map,
size_type map_size,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
for (size_t i = 0; i < requests.size(); i++) {
auto const& agg_v = requests[i].aggregations;
auto const& col = requests[i].values;
// Given an aggregation, this will get the result from sparse_results and
// convert and return dense, compacted result
auto to_dense_agg_result =
[&sparse_results, &gather_map, map_size, i, mr, stream](auto const& agg) {
auto s = sparse_results.get_result(i, agg);
auto dense_result_table = cudf::detail::gather(
table_view({s}), gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
return std::move(dense_result_table->release()[0]);
};
// Enables conversion of ARGMIN/ARGMAX into MIN/MAX
auto transformed_result = [&col, to_dense_agg_result, mr, stream](auto const& agg_kind) {
auto transformed_agg = std::make_unique<aggregation>(agg_kind);
auto arg_result = to_dense_agg_result(*transformed_agg);
// We make a view of ARG(MIN/MAX) result without a null mask and gather
// using this map. The values in data buffer of ARG(MIN/MAX) result
// corresponding to null values was initialized to ARG(MIN/MAX)_SENTINEL
// which is an out of bounds index value (-1) and causes the gathered
// value to be null.
column_view null_removed_map(
data_type(type_to_id<size_type>()),
arg_result->size(),
static_cast<void const*>(arg_result->view().template data<size_type>()));
auto transformed_result =
cudf::detail::gather(table_view({col}),
null_removed_map,
arg_result->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE
: cudf::detail::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
return std::move(transformed_result->release()[0]);
};
for (auto&& agg : agg_v) {
auto const& agg_ref = *agg;
if (agg->kind == aggregation::COUNT_VALID or agg->kind == aggregation::COUNT_ALL) {
dense_results->add_result(i, agg_ref, to_dense_agg_result(agg_ref));
} else if (col.type().id() == type_id::STRING and
(agg->kind == aggregation::MAX or agg->kind == aggregation::MIN)) {
if (agg->kind == aggregation::MAX) {
dense_results->add_result(i, agg_ref, transformed_result(aggregation::ARGMAX));
} else if (agg->kind == aggregation::MIN) {
dense_results->add_result(i, agg_ref, transformed_result(aggregation::ARGMIN));
}
} else if (sparse_results.has_result(i, agg_ref)) {
dense_results->add_result(i, agg_ref, to_dense_agg_result(agg_ref));
}
}
}
}
/**
* @brief Construct hash map that uses row comparator and row hasher on
* `d_keys` table and stores indices
*/
template <bool keys_have_nulls>
auto create_hash_map(table_device_view const& d_keys,
null_policy include_null_keys,
hipStream_t stream = 0)
{
size_type constexpr unused_key{std::numeric_limits<size_type>::max()};
size_type constexpr unused_value{std::numeric_limits<size_type>::max()};
using map_type = concurrent_unordered_map<size_type,
size_type,
row_hasher<default_hash, keys_have_nulls>,
row_equality_comparator<keys_have_nulls>>;
using allocator_type = typename map_type::allocator_type;
bool const null_keys_are_equal{include_null_keys == null_policy::INCLUDE};
row_hasher<default_hash, keys_have_nulls> hasher{d_keys};
row_equality_comparator<keys_have_nulls> rows_equal{d_keys, d_keys, null_keys_are_equal};
return map_type::create(compute_hash_table_size(d_keys.num_rows()),
unused_key,
unused_value,
hasher,
rows_equal,
allocator_type(),
stream);
}
/**
* @brief Computes all aggregations from `requests` that require a single pass
* over the data and stores the results in `sparse_results`
*
* @see groupby_null_templated()
*/
template <bool keys_have_nulls, typename Map>
void compute_single_pass_aggs(table_view const& keys,
std::vector<aggregation_request> const& requests,
cudf::detail::result_cache* sparse_results,
Map& map,
null_policy include_null_keys,
hipStream_t stream)
{
// flatten the aggs to a table that can be operated on by aggregate_row
table_view flattened_values;
std::vector<aggregation::Kind> aggs;
std::vector<size_t> col_ids;
std::tie(flattened_values, aggs, col_ids) = flatten_single_pass_aggs(requests);
// make table that will hold sparse results
std::vector<std::unique_ptr<column>> sparse_columns;
std::transform(flattened_values.begin(),
flattened_values.end(),
aggs.begin(),
std::back_inserter(sparse_columns),
[stream](auto const& col, auto const& agg) {
bool nullable =
(agg == aggregation::COUNT_VALID or agg == aggregation::COUNT_ALL)
? false
: col.has_nulls();
auto mask_flag = (nullable) ? mask_state::ALL_NULL : mask_state::UNALLOCATED;
return make_fixed_width_column(
cudf::detail::target_type(col.type(), agg), col.size(), mask_flag, stream);
});
table sparse_table(std::move(sparse_columns));
mutable_table_view table_view = sparse_table.mutable_view();
cudf::detail::initialize_with_identity(table_view, aggs, stream);
// prepare to launch kernel to do the actual aggregation
auto d_sparse_table = mutable_table_device_view::create(sparse_table);
auto d_values = table_device_view::create(flattened_values);
rmm::device_vector<aggregation::Kind> d_aggs(aggs);
bool skip_key_rows_with_nulls = keys_have_nulls and include_null_keys == null_policy::EXCLUDE;
if (skip_key_rows_with_nulls) {
auto row_bitmask{bitmask_and(keys, rmm::mr::get_current_device_resource(), stream)};
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
keys.num_rows(),
hash::compute_single_pass_aggs<true, Map>{map,
keys.num_rows(),
*d_values,
*d_sparse_table,
d_aggs.data().get(),
static_cast<bitmask_type*>(row_bitmask.data())});
} else {
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
keys.num_rows(),
hash::compute_single_pass_aggs<false, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(), nullptr});
}
// Add results back to sparse_results cache
auto sparse_result_cols = sparse_table.release();
for (size_t i = 0; i < aggs.size(); i++) {
// Note that the cache will make a copy of this temporary aggregation
auto agg = std::make_unique<aggregation>(aggs[i]);
sparse_results->add_result(col_ids[i], *agg, std::move(sparse_result_cols[i]));
}
}
/**
* @brief Computes and returns a device vector containing all populated keys in
* `map`.
*/
template <typename Map>
std::pair<rmm::device_vector<size_type>, size_type> extract_populated_keys(Map map,
size_type num_keys,
hipStream_t stream = 0)
{
rmm::device_vector<size_type> populated_keys(num_keys);
auto get_key = [] __device__(auto const& element) {
size_type key, value;
thrust::tie(key, value) = element;
return key;
};
auto end_it = thrust::copy_if(
rmm::exec_policy(stream)->on(stream),
thrust::make_transform_iterator(map.data(), get_key),
thrust::make_transform_iterator(map.data() + map.capacity(), get_key),
populated_keys.begin(),
[unused_key = map.get_unused_key()] __device__(size_type key) { return key != unused_key; });
size_type map_size = end_it - populated_keys.begin();
return std::make_pair(std::move(populated_keys), map_size);
}
/**
* @brief Computes groupby using hash table.
*
* First, we create a hash table that stores the indices of unique rows in
* `keys`. The upper limit on the number of values in this map is the number
* of rows in `keys`.
*
* To store the results of aggregations, we create temporary sparse columns
* which have the same size as input value columns. Using the hash map, we
* determine the location within the sparse column to write the result of the
* aggregation into.
*
* The sparse column results of all aggregations are stored into the cache
* `sparse_results`. This enables the use of previously calculated results in
* other aggregations.
*
* All the aggregations which can be computed in a single pass are computed
* first, in a combined kernel. Then using these results, aggregations that
* require multiple passes, will be computed.
*
* Finally, using the hash map, we generate a vector of indices of populated
* values in sparse result columns. Then, for each aggregation originally
* requested in `requests`, we gather sparse results into a column of dense
* results using the aforementioned index vector. Dense results are stored into
* the in/out parameter `cache`.
*
*/
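/*
 * Small illustrative example (not part of the original source): with
 * keys = {1, 2, 1, 3, 2} and a SUM request on values = {10, 20, 30, 40, 50},
 * the hash map keeps one representative row index per unique key, e.g.
 * {1 -> 0, 2 -> 1, 3 -> 3}. The sparse SUM column then holds {40, 70, _, 40, _}
 * (populated only at rows 0, 1 and 3), extract_populated_keys() yields the
 * gather map {0, 1, 3}, and gathering produces the dense keys {1, 2, 3} with
 * dense sums {40, 70, 40}.
 */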
template <bool keys_have_nulls>
std::unique_ptr<table> groupby_null_templated(table_view const& keys,
std::vector<aggregation_request> const& requests,
cudf::detail::result_cache* cache,
null_policy include_null_keys,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto d_keys = table_device_view::create(keys);
auto map = create_hash_map<keys_have_nulls>(*d_keys, include_null_keys, stream);
// Cache of sparse results where the location of aggregate value in each
// column is indexed by the hash map
cudf::detail::result_cache sparse_results(requests.size());
// Compute all single pass aggs first
compute_single_pass_aggs<keys_have_nulls>(
keys, requests, &sparse_results, *map, include_null_keys, stream);
// Now continue with remaining multi-pass aggs
// <placeholder>
// Extract the populated indices from the hash map and create a gather map.
// Gathering using this map from sparse results will give dense results.
rmm::device_vector<size_type> gather_map;
size_type map_size;
std::tie(gather_map, map_size) = extract_populated_keys(*map, keys.num_rows(), stream);
// Compact all results from sparse_results and insert into cache
sparse_to_dense_results(requests, sparse_results, cache, gather_map, map_size, stream, mr);
return cudf::detail::gather(
keys, gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys, std::vector<aggregation_request> const& requests)
{
return std::all_of(requests.begin(), requests.end(), [](aggregation_request const& r) {
return std::all_of(r.aggregations.begin(), r.aggregations.end(), [](auto const& a) {
return is_hash_aggregation(a->kind);
});
});
}
// Hash-based groupby
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
table_view const& keys,
std::vector<aggregation_request> const& requests,
null_policy include_null_keys,
hipStream_t stream,
rmm::mr::device_memory_resource* mr)
{
cudf::detail::result_cache cache(requests.size());
std::unique_ptr<table> unique_keys;
if (has_nulls(keys)) {
unique_keys =
groupby_null_templated<true>(keys, requests, &cache, include_null_keys, stream, mr);
} else {
unique_keys =
groupby_null_templated<false>(keys, requests, &cache, include_null_keys, stream, mr);
}
return std::make_pair(std::move(unique_keys), extract_results(requests, cache));
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace cudf
| ba0e4a377ab58dc307761753202f54c8c14f215f.cu | /*
* Copyright (c) 2019-20, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <groupby/common/utils.hpp>
#include <groupby/hash/groupby_kernels.cuh>
#include <cudf/aggregation.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/aggregation/aggregation.cuh>
#include <cudf/detail/aggregation/aggregation.hpp>
#include <cudf/detail/aggregation/result_cache.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/detail/groupby.hpp>
#include <cudf/detail/replace.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/groupby.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table.hpp>
#include <cudf/table/table_device_view.cuh>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/traits.hpp>
#include <hash/concurrent_unordered_map.cuh>
#include <memory>
#include <utility>
namespace cudf {
namespace groupby {
namespace detail {
namespace hash {
namespace {
// This is a temporary fix due to a compiler bug; we can revert to constexpr
// once CUDA 10.2 becomes RAPIDS's minimum compiler version
#if 0
/**
* @brief List of aggregation operations that can be computed with a hash-based
* implementation.
*/
constexpr std::array<aggregation::Kind, 7> hash_aggregations{
aggregation::SUM, aggregation::MIN, aggregation::MAX,
aggregation::COUNT_VALID, aggregation::COUNT_ALL,
aggregation::ARGMIN, aggregation::ARGMAX};
template <class T, size_t N>
constexpr bool array_contains(std::array<T, N> const& haystack, T needle) {
for (auto i = 0u; i < N; ++i) {
if (haystack[i] == needle) return true;
}
return false;
}
#endif
/**
* @brief Indicates whether the specified aggregation operation can be computed
* with a hash-based implementation.
*
* @param t The aggregation operation to verify
* @return true `t` is valid for a hash based groupby
* @return false `t` is invalid for a hash based groupby
*/
bool constexpr is_hash_aggregation(aggregation::Kind t)
{
  // this is a temporary fix due to a compiler bug; we can revert to constexpr
  // once CUDA 10.2 becomes RAPIDS's minimum compiler version
// return array_contains(hash_aggregations, t);
return (t == aggregation::SUM) or (t == aggregation::MIN) or (t == aggregation::MAX) or
(t == aggregation::COUNT_VALID) or (t == aggregation::COUNT_ALL) or
(t == aggregation::ARGMIN) or (t == aggregation::ARGMAX);
}
// flatten aggs, keeping only those that can be computed in a single pass
std::tuple<table_view, std::vector<aggregation::Kind>, std::vector<size_t>>
flatten_single_pass_aggs(std::vector<aggregation_request> const& requests)
{
std::vector<column_view> columns;
std::vector<aggregation::Kind> agg_kinds;
std::vector<size_t> col_ids;
for (size_t i = 0; i < requests.size(); i++) {
auto const& request = requests[i];
auto const& agg_v = request.aggregations;
auto insert_agg = [&agg_kinds, &columns, &col_ids, &request, i](aggregation::Kind k) {
agg_kinds.push_back(k);
columns.push_back(request.values);
col_ids.push_back(i);
};
for (auto&& agg : agg_v) {
if (is_hash_aggregation(agg->kind)) {
if (is_fixed_width(request.values.type()) or agg->kind == aggregation::COUNT_VALID or
agg->kind == aggregation::COUNT_ALL) {
insert_agg(agg->kind);
} else if (request.values.type().id() == type_id::STRING) {
// For string type, only ARGMIN, ARGMAX, MIN, and MAX are supported
if (agg->kind == aggregation::ARGMIN or agg->kind == aggregation::ARGMAX) {
insert_agg(agg->kind);
} else if (agg->kind == aggregation::MIN) {
insert_agg(aggregation::ARGMIN);
} else if (agg->kind == aggregation::MAX) {
insert_agg(aggregation::ARGMAX);
}
}
}
}
}
return std::make_tuple(table_view(columns), std::move(agg_kinds), std::move(col_ids));
}
/**
* @brief Gather sparse results into dense using `gather_map` and add to
 * `dense_results`
*
* @see groupby_null_templated()
*/
void sparse_to_dense_results(std::vector<aggregation_request> const& requests,
cudf::detail::result_cache const& sparse_results,
cudf::detail::result_cache* dense_results,
rmm::device_vector<size_type> const& gather_map,
size_type map_size,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
for (size_t i = 0; i < requests.size(); i++) {
auto const& agg_v = requests[i].aggregations;
auto const& col = requests[i].values;
// Given an aggregation, this will get the result from sparse_results and
// convert and return dense, compacted result
auto to_dense_agg_result =
[&sparse_results, &gather_map, map_size, i, mr, stream](auto const& agg) {
auto s = sparse_results.get_result(i, agg);
auto dense_result_table = cudf::detail::gather(
table_view({s}), gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
return std::move(dense_result_table->release()[0]);
};
// Enables conversion of ARGMIN/ARGMAX into MIN/MAX
auto transformed_result = [&col, to_dense_agg_result, mr, stream](auto const& agg_kind) {
auto transformed_agg = std::make_unique<aggregation>(agg_kind);
auto arg_result = to_dense_agg_result(*transformed_agg);
// We make a view of ARG(MIN/MAX) result without a null mask and gather
// using this map. The values in data buffer of ARG(MIN/MAX) result
// corresponding to null values was initialized to ARG(MIN/MAX)_SENTINEL
// which is an out of bounds index value (-1) and causes the gathered
// value to be null.
column_view null_removed_map(
data_type(type_to_id<size_type>()),
arg_result->size(),
static_cast<void const*>(arg_result->view().template data<size_type>()));
auto transformed_result =
cudf::detail::gather(table_view({col}),
null_removed_map,
arg_result->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE
: cudf::detail::out_of_bounds_policy::NULLIFY,
cudf::detail::negative_index_policy::NOT_ALLOWED,
mr,
stream);
return std::move(transformed_result->release()[0]);
};
for (auto&& agg : agg_v) {
auto const& agg_ref = *agg;
if (agg->kind == aggregation::COUNT_VALID or agg->kind == aggregation::COUNT_ALL) {
dense_results->add_result(i, agg_ref, to_dense_agg_result(agg_ref));
} else if (col.type().id() == type_id::STRING and
(agg->kind == aggregation::MAX or agg->kind == aggregation::MIN)) {
if (agg->kind == aggregation::MAX) {
dense_results->add_result(i, agg_ref, transformed_result(aggregation::ARGMAX));
} else if (agg->kind == aggregation::MIN) {
dense_results->add_result(i, agg_ref, transformed_result(aggregation::ARGMIN));
}
} else if (sparse_results.has_result(i, agg_ref)) {
dense_results->add_result(i, agg_ref, to_dense_agg_result(agg_ref));
}
}
}
}
/**
* @brief Construct hash map that uses row comparator and row hasher on
* `d_keys` table and stores indices
*/
template <bool keys_have_nulls>
auto create_hash_map(table_device_view const& d_keys,
null_policy include_null_keys,
cudaStream_t stream = 0)
{
size_type constexpr unused_key{std::numeric_limits<size_type>::max()};
size_type constexpr unused_value{std::numeric_limits<size_type>::max()};
using map_type = concurrent_unordered_map<size_type,
size_type,
row_hasher<default_hash, keys_have_nulls>,
row_equality_comparator<keys_have_nulls>>;
using allocator_type = typename map_type::allocator_type;
bool const null_keys_are_equal{include_null_keys == null_policy::INCLUDE};
row_hasher<default_hash, keys_have_nulls> hasher{d_keys};
row_equality_comparator<keys_have_nulls> rows_equal{d_keys, d_keys, null_keys_are_equal};
return map_type::create(compute_hash_table_size(d_keys.num_rows()),
unused_key,
unused_value,
hasher,
rows_equal,
allocator_type(),
stream);
}
/**
* @brief Computes all aggregations from `requests` that require a single pass
* over the data and stores the results in `sparse_results`
*
* @see groupby_null_templated()
*/
template <bool keys_have_nulls, typename Map>
void compute_single_pass_aggs(table_view const& keys,
std::vector<aggregation_request> const& requests,
cudf::detail::result_cache* sparse_results,
Map& map,
null_policy include_null_keys,
cudaStream_t stream)
{
// flatten the aggs to a table that can be operated on by aggregate_row
table_view flattened_values;
std::vector<aggregation::Kind> aggs;
std::vector<size_t> col_ids;
std::tie(flattened_values, aggs, col_ids) = flatten_single_pass_aggs(requests);
// make table that will hold sparse results
std::vector<std::unique_ptr<column>> sparse_columns;
std::transform(flattened_values.begin(),
flattened_values.end(),
aggs.begin(),
std::back_inserter(sparse_columns),
[stream](auto const& col, auto const& agg) {
bool nullable =
(agg == aggregation::COUNT_VALID or agg == aggregation::COUNT_ALL)
? false
: col.has_nulls();
auto mask_flag = (nullable) ? mask_state::ALL_NULL : mask_state::UNALLOCATED;
return make_fixed_width_column(
cudf::detail::target_type(col.type(), agg), col.size(), mask_flag, stream);
});
table sparse_table(std::move(sparse_columns));
mutable_table_view table_view = sparse_table.mutable_view();
cudf::detail::initialize_with_identity(table_view, aggs, stream);
// prepare to launch kernel to do the actual aggregation
auto d_sparse_table = mutable_table_device_view::create(sparse_table);
auto d_values = table_device_view::create(flattened_values);
rmm::device_vector<aggregation::Kind> d_aggs(aggs);
bool skip_key_rows_with_nulls = keys_have_nulls and include_null_keys == null_policy::EXCLUDE;
if (skip_key_rows_with_nulls) {
auto row_bitmask{bitmask_and(keys, rmm::mr::get_current_device_resource(), stream)};
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
keys.num_rows(),
hash::compute_single_pass_aggs<true, Map>{map,
keys.num_rows(),
*d_values,
*d_sparse_table,
d_aggs.data().get(),
static_cast<bitmask_type*>(row_bitmask.data())});
} else {
thrust::for_each_n(
rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
keys.num_rows(),
hash::compute_single_pass_aggs<false, Map>{
map, keys.num_rows(), *d_values, *d_sparse_table, d_aggs.data().get(), nullptr});
}
// Add results back to sparse_results cache
auto sparse_result_cols = sparse_table.release();
for (size_t i = 0; i < aggs.size(); i++) {
// Note that the cache will make a copy of this temporary aggregation
auto agg = std::make_unique<aggregation>(aggs[i]);
sparse_results->add_result(col_ids[i], *agg, std::move(sparse_result_cols[i]));
}
}
/**
* @brief Computes and returns a device vector containing all populated keys in
* `map`.
*/
template <typename Map>
std::pair<rmm::device_vector<size_type>, size_type> extract_populated_keys(Map map,
size_type num_keys,
cudaStream_t stream = 0)
{
rmm::device_vector<size_type> populated_keys(num_keys);
auto get_key = [] __device__(auto const& element) {
size_type key, value;
thrust::tie(key, value) = element;
return key;
};
auto end_it = thrust::copy_if(
rmm::exec_policy(stream)->on(stream),
thrust::make_transform_iterator(map.data(), get_key),
thrust::make_transform_iterator(map.data() + map.capacity(), get_key),
populated_keys.begin(),
[unused_key = map.get_unused_key()] __device__(size_type key) { return key != unused_key; });
size_type map_size = end_it - populated_keys.begin();
return std::make_pair(std::move(populated_keys), map_size);
}
/**
* @brief Computes groupby using hash table.
*
* First, we create a hash table that stores the indices of unique rows in
* `keys`. The upper limit on the number of values in this map is the number
* of rows in `keys`.
*
* To store the results of aggregations, we create temporary sparse columns
* which have the same size as input value columns. Using the hash map, we
* determine the location within the sparse column to write the result of the
* aggregation into.
*
* The sparse column results of all aggregations are stored into the cache
* `sparse_results`. This enables the use of previously calculated results in
* other aggregations.
*
* All the aggregations which can be computed in a single pass are computed
* first, in a combined kernel. Then using these results, aggregations that
* require multiple passes, will be computed.
*
* Finally, using the hash map, we generate a vector of indices of populated
* values in sparse result columns. Then, for each aggregation originally
* requested in `requests`, we gather sparse results into a column of dense
* results using the aforementioned index vector. Dense results are stored into
* the in/out parameter `cache`.
*
*/
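/*
 * Small illustrative example (not part of the original source): with
 * keys = {1, 2, 1, 3, 2} and a SUM request on values = {10, 20, 30, 40, 50},
 * the hash map keeps one representative row index per unique key, e.g.
 * {1 -> 0, 2 -> 1, 3 -> 3}. The sparse SUM column then holds {40, 70, _, 40, _}
 * (populated only at rows 0, 1 and 3), extract_populated_keys() yields the
 * gather map {0, 1, 3}, and gathering produces the dense keys {1, 2, 3} with
 * dense sums {40, 70, 40}.
 */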
template <bool keys_have_nulls>
std::unique_ptr<table> groupby_null_templated(table_view const& keys,
std::vector<aggregation_request> const& requests,
cudf::detail::result_cache* cache,
null_policy include_null_keys,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
auto d_keys = table_device_view::create(keys);
auto map = create_hash_map<keys_have_nulls>(*d_keys, include_null_keys, stream);
// Cache of sparse results where the location of aggregate value in each
// column is indexed by the hash map
cudf::detail::result_cache sparse_results(requests.size());
// Compute all single pass aggs first
compute_single_pass_aggs<keys_have_nulls>(
keys, requests, &sparse_results, *map, include_null_keys, stream);
// Now continue with remaining multi-pass aggs
// <placeholder>
// Extract the populated indices from the hash map and create a gather map.
// Gathering using this map from sparse results will give dense results.
rmm::device_vector<size_type> gather_map;
size_type map_size;
std::tie(gather_map, map_size) = extract_populated_keys(*map, keys.num_rows(), stream);
// Compact all results from sparse_results and insert into cache
sparse_to_dense_results(requests, sparse_results, cache, gather_map, map_size, stream, mr);
return cudf::detail::gather(
keys, gather_map.begin(), gather_map.begin() + map_size, false, mr, stream);
}
} // namespace
/**
* @brief Indicates if a set of aggregation requests can be satisfied with a
* hash-based groupby implementation.
*
* @param keys The table of keys
* @param requests The set of columns to aggregate and the aggregations to
* perform
* @return true A hash-based groupby should be used
* @return false A hash-based groupby should not be used
*/
bool can_use_hash_groupby(table_view const& keys, std::vector<aggregation_request> const& requests)
{
return std::all_of(requests.begin(), requests.end(), [](aggregation_request const& r) {
return std::all_of(r.aggregations.begin(), r.aggregations.end(), [](auto const& a) {
return is_hash_aggregation(a->kind);
});
});
}
// Hash-based groupby
std::pair<std::unique_ptr<table>, std::vector<aggregation_result>> groupby(
table_view const& keys,
std::vector<aggregation_request> const& requests,
null_policy include_null_keys,
cudaStream_t stream,
rmm::mr::device_memory_resource* mr)
{
cudf::detail::result_cache cache(requests.size());
std::unique_ptr<table> unique_keys;
if (has_nulls(keys)) {
unique_keys =
groupby_null_templated<true>(keys, requests, &cache, include_null_keys, stream, mr);
} else {
unique_keys =
groupby_null_templated<false>(keys, requests, &cache, include_null_keys, stream, mr);
}
return std::make_pair(std::move(unique_keys), extract_results(requests, cache));
}
} // namespace hash
} // namespace detail
} // namespace groupby
} // namespace cudf
|
9a0c52dfd5328101d8fce79d137c6547d55e15f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <time.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char**argv) {
Timer timer;
hipError_t cuda_ret;
time_t t;
float* Ad, *Bd, *Cd;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
    /* Initializes random number generator */
srand((unsigned) time(&t));
float* A_h = (float*) malloc(n*sizeof(float));
for (unsigned int i=0; i < n; i++) { A_h[i] = (rand()%100)/100.00; }
float* B_h = (float*) malloc(n*sizeof(float));
for (unsigned int i=0; i < n; i++) { B_h[i] = (rand()%100)/100.00; }
float* C_h = (float*) malloc(n*sizeof(float));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
if (hipSuccess!=hipMalloc((void**)&Ad, n*sizeof(float)))
{
printf("Error in memory allocation/n");
exit(-1);
}
if (hipSuccess!=hipMalloc((void**)&Bd, n*sizeof(float)))
{
printf("Error allocating memory\n");
exit(-1);
}
if (hipSuccess!=hipMalloc((void**)&Cd, n*sizeof(float)))
{
printf("Error allocating memory\n");
exit(-1);
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
int size=n*sizeof(float);
if (hipSuccess != hipMemcpy(Ad,A_h, size, hipMemcpyHostToDevice))
{
printf("Error copying memory to data\n");
exit(-1);
}
if (hipSuccess != hipMemcpy(Bd,B_h, size, hipMemcpyHostToDevice))
{
printf("Error copying memory to data\n");
exit(-1);
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
dim3 dimBlock(16,16,1);
    dim3 dimGrid((n + 16 - 1)/16,1,1);
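    // Launch-geometry note (an assumption, since kernel.hip is not shown here): if
    // vecAddKernel indexes elements as blockIdx.x*blockDim.x + threadIdx.x, only the
    // 16 threads along x of each 16x16 block address distinct elements, so the grid
    // needs ceil(n/16) blocks in x to cover all n elements.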
hipLaunchKernelGGL(( vecAddKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad,Bd, Cd, n);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables to host -------------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
if (hipSuccess != hipMemcpy(C_h,Cd, size, hipMemcpyDeviceToHost))
{
printf("Error copying data to host");
exit(-1);
}
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
if (hipSuccess != hipFree(Ad))
{
printf("Error releasing cuda");
exit(-1);
}
if (hipSuccess != hipFree(Bd))
{
printf("Error releasing cuda");
exit(-1);
}
if (hipSuccess != hipFree(Cd))
{
printf("Error releasing cuda");
exit(-1);
}
return 0;
}
| 9a0c52dfd5328101d8fce79d137c6547d55e15f5.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <time.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char**argv) {
Timer timer;
cudaError_t cuda_ret;
time_t t;
float* Ad, *Bd, *Cd;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int n;
if(argc == 1) {
n = 10000;
} else if(argc == 2) {
n = atoi(argv[1]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./vecadd # Vector of size 10,000 is used"
"\n Usage: ./vecadd <m> # Vector of size m is used"
"\n");
exit(0);
}
    /* Initializes random number generator */
srand((unsigned) time(&t));
float* A_h = (float*) malloc(n*sizeof(float));
for (unsigned int i=0; i < n; i++) { A_h[i] = (rand()%100)/100.00; }
float* B_h = (float*) malloc(n*sizeof(float));
for (unsigned int i=0; i < n; i++) { B_h[i] = (rand()%100)/100.00; }
float* C_h = (float*) malloc(n*sizeof(float));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Vector size = %u\n", n);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
if (cudaSuccess!=cudaMalloc((void**)&Ad, n*sizeof(float)))
{
printf("Error in memory allocation/n");
exit(-1);
}
if (cudaSuccess!=cudaMalloc((void**)&Bd, n*sizeof(float)))
{
printf("Error allocating memory\n");
exit(-1);
}
if (cudaSuccess!=cudaMalloc((void**)&Cd, n*sizeof(float)))
{
printf("Error allocating memory\n");
exit(-1);
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
int size=n*sizeof(float);
if (cudaSuccess != cudaMemcpy(Ad,A_h, size, cudaMemcpyHostToDevice))
{
printf("Error copying memory to data\n");
exit(-1);
}
if (cudaSuccess != cudaMemcpy(Bd,B_h, size, cudaMemcpyHostToDevice))
{
printf("Error copying memory to data\n");
exit(-1);
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
dim3 dimBlock(16,16,1);
    dim3 dimGrid((n + 16 - 1)/16,1,1);
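    // Launch-geometry note (an assumption, since kernel.cu is not shown here): if
    // vecAddKernel indexes elements as blockIdx.x*blockDim.x + threadIdx.x, only the
    // 16 threads along x of each 16x16 block address distinct elements, so the grid
    // needs ceil(n/16) blocks in x to cover all n elements.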
vecAddKernel<<<dimGrid,dimBlock>>>(Ad,Bd, Cd, n);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
    // Copy device variables to host -------------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
if (cudaSuccess != cudaMemcpy(C_h,Cd, size, cudaMemcpyDeviceToHost))
{
printf("Error copying data to host");
exit(-1);
}
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, n);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
if (cudaSuccess != cudaFree(Ad))
{
printf("Error releasing cuda");
exit(-1);
}
if (cudaSuccess != cudaFree(Bd))
{
printf("Error releasing cuda");
exit(-1);
}
if (cudaSuccess != cudaFree(Cd))
{
printf("Error releasing cuda");
exit(-1);
}
return 0;
}
|
34f088562c7c42cdf6d930e47bf4401a9ecc56e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "la8cuda.cuh"
#include "la8coeffs.h"
#define BLOCK_SIZE2 263 // (8 down, 6 up)/2
#define BLOCK_SECT2 256
#define BLOCK_SIZE_ML2 278 //(24 down, 20 up)/2
#define BLOCK_SIZE_ML2B 274 //(20 down, 16 up)/2
// block sect remains the same.
#define BLOCK_SECT_ML2 256
/***********************************************************
Lifted Least Asymmetric 8 code - in CUDA! With shared memory
***********************************************************/
int LA8CUDA_sh(real* x_d, uint len, short int sense, uint nlevels){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
// however, in the case of an error, it will return 0 - because any strictly positive integer would be valid. & nlevels is unsigned.
switch(sense){
case 1:
return(fLA8CUDAsh(x_d,len,1,nlevels));
case 0:
return(bLA8CUDAsh(x_d,len,1<<(nlevels-1)));
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
}
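/*
  Host-side usage sketch (illustrative only; assumes `real` is the float/double
  typedef from la8cuda.cuh, that len/nlevels satisfy check_len_levels(), and
  omits error checking):
    real *x_d;
    hipMalloc((void **)&x_d, len*sizeof(real));
    hipMemcpy(x_d, x_h, len*sizeof(real), hipMemcpyHostToDevice);
    LA8CUDA_sh(x_d, len, 1, nlevels);   // forward transform, in place
    LA8CUDA_sh(x_d, len, 0, nlevels);   // inverse transform, in place
    hipMemcpy(x_h, x_d, len*sizeof(real), hipMemcpyDeviceToHost);
    hipFree(x_d);
*/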
int fLA8CUDAsh(real* x_d, uint len, uint skip, uint nlevels){
if(skip < (1 << nlevels)){
hipError_t cuderr;
int threadsPerBlock = BLOCK_SIZE2;
int blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
int res;
uint k = 8; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// printveccu<<<1,1>>>(bdrs,lenb);
// hipDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
hipLaunchKernelGGL(( LA8_kernel_shared_f), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in LA8 sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
// hipDeviceSynchronize();
hipFree(bdrs);
res=fLA8CUDAsh(x_d,len,skip<<1,nlevels);
hipDeviceSynchronize();
return(res);
}
return(0);
}
int bLA8CUDAsh(real* x_d, uint len, uint skip){
if(skip > 0){
hipError_t cuderr;
int threadsPerBlock = BLOCK_SIZE2;
int blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
int res;
uint k = 8; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// printveccu<<<1,1>>>(bdrs,lenb);
// hipDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
hipLaunchKernelGGL(( LA8_kernel_shared_b), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in LA8 sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
// hipDeviceSynchronize();
hipFree(bdrs);
res=bLA8CUDAsh(x_d,len,skip>>1);
hipDeviceSynchronize();
return(res);
}
return(0);
}
/*
Shared memory has following structure:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| | | |
|_ _ _ _ _ _|_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|_ _ |
|<----8---->|<---------(2*BLOCK_SECT2 )--------------->|--6-|
|<----------------( 2 * BLOCK_SIZE2 )------------------->|
Where of the 8 spaces below, we need 7; of the 6 spaces above, we need 5.
But our memory structure dictates that we use even values of 'i'
*/
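/*
  Worked example of the index mapping below (derived from
  i = (BLOCK_SECT2*blockIdx.x + threadIdx.x - 4)*2*skip and ish = 2*threadIdx.x),
  for blockIdx.x = 0, skip = 1, BLOCK_SECT2 = 256, BLOCK_SIZE2 = 263:
    threadIdx.x = 0..3     -> ish = 0..6,     i = -8..-2   (8 lower boundary values)
    threadIdx.x = 4        -> ish = 8,        i = 0        (first interior pair)
    threadIdx.x = 259      -> ish = 518,      i = 510      (last interior pair)
    threadIdx.x = 260..262 -> ish = 520..524, i = 512..516 (6 upper boundary values)
  Each thread loads the pair (ish, ish+1), so interior threads 4..259 cover the
  2*BLOCK_SECT2 = 512 values written back at the end of the kernel.
*/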
__global__ void LA8_kernel_shared_f(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT2 * blockIdx.x + threadIdx.x -4)*skip<<1;
// i = -8*skip, -6*skip, -4*skip, -2*skip 0, 2*skip, ... , len-2*skip, len, len + 2*skip, len + 4*skip, len + 6*skip
// for each block, we have, e.g. i = -8*skip, -6*skip, -4*skip, -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT2-2*skip, 2*skip*BLOCK_SECT2, 2*skip*BLOCK_SECT2+2*skip, 2*skip*BLOCK_SECT2+4*skip, 2*skip*BLOCK_SECT2+4*skip
int ish = (threadIdx.x)<<1;
// ish = 0, 2,... , 2*BLOCK_SIZE2-2, 2*BLOCK_SIZE2
__shared__ real x_work[BLOCK_SIZE2<<1];
// real switchsd;
// printf("\nthreadIdx.x=%i,blockIdx.x=%i,i=%i,ish=%i\n",threadIdx.x,blockIdx.x,i,ish);
// // First, we copy x into shared array x_work
// // NB - conditioning on threadIdx.x, not ish!
// if(threadIdx.x < 6){ //ish == 0, 2, 4, 6, 8, 10
// if(i<(int)len){
// // we are filling the shared block with lower boundary points
// // printf("here2 thread%i\n",threadIdx.x);
// // x_work[ish] = x[i];
// // x_work[ish+1] = x[i + skip];
// x_work[ish] = bdrs[ish + blockIdx.x*12];
// x_work[ish+1] = bdrs[1 + ish + blockIdx.x*12];
// }
// }
// if((threadIdx.x >= 6) && (threadIdx.x < BLOCK_SECT2+6)){
// // needs to be conditional on i and BLOCK_SECT2
// if(i < (int)len){
// // we fill the central block of shared memory (no boundary coeffs)
// // printf("here3a thread%i\n",threadIdx.x);
// x_work[ish] = x[i];
// x_work[ish+1] = x[i + skip];
// }
// else if(i==(int)len){
// // this happens when len < BLOCK_SECT2
// // we have to deal with upper boundary points
// // printf("here3b thread%i\n",threadIdx.x);
// // x_work[ish] = x[0];
// // x_work[ish+1] = x[skip];
// x_work[ish] = bdrs[6+(blockIdx.x*12)];
// x_work[ish+1] = bdrs[7+(blockIdx.x*12)];
// }
// }
// else if(threadIdx.x == BLOCK_SECT2+4){
// if(i<=(int)len){
// x_work[ish] = bdrs[6+(blockIdx.x*12)];
// x_work[ish+1] = bdrs[7+(blockIdx.x*12)];
// }
// }
// x_work[ish] = get_wvt_shared(x,bdrs,len,skip,i,ish,threadIdx.x, blockIdx.x,1);
// x_work[ish+1] = get_wvt_shared(x,bdrs,len,skip,i,ish,threadIdx.x, blockIdx.x,0);
// x_work[ish] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,1);
// x_work[ish+1] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,0);
write_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,x_work);
__syncthreads();
// in the lifting cycles below, we keep some threads idle so that we only update
// the intermediate coefficients that we need in the subsequent steps
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=2)&&(ish<=((BLOCK_SECT2<<1)+12))){ //keep 2 threads idle
//printf("herecyc1 thread%i\n",threadIdx.x);
lift_cyc_1(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-1]*CL0 + x_work[ish+1]*CL1;
// d1[l] = x[2l+1] + q11*x[2l] + q12*x[2l+2]
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=2)&&(ish<=((BLOCK_SECT2<<1)+12))){ //keep 2 threads idle
// printf("herecyc2 thread%i\n",threadIdx.x);
lift_cyc_2(x_work,ish,1,FWD);
// x_work[ish-1] = x_work[ish-1] + x_work[ish]*CL2 + x_work[ish+2]*CL3;
// s1[l] = x[2l] + q21*d1[l] + q22*d1[l+1]
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=2)&&(ish<((BLOCK_SECT2<<1)+8))){ //keep 3 threads idle
// printf("herecyc3 thread%i\n",threadIdx.x);
lift_cyc_3(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-1]*CL4 + x_work[ish+1]*CL5;
// d2[l] = d1[l] + q31*s1[l] + q32*s1[l+1]
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=4)&&(ish<((BLOCK_SECT2<<1)+8))){ //keep 4 threads idle
lift_cyc_4(x_work,ish,1,FWD);
// x_work[ish-1] = x_work[ish-1] + x_work[ish-2]*CL6 + x_work[ish]*CL7;
// s2[l] = s1[l] + q41*d2[l-1] + q42*d2[l]
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=8)&&(ish<((BLOCK_SECT2<<1)+8))){ // keep 6 threads idle
lift_cyc_5(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-5]*CL8 + x_work[ish-3]*CL9 + x_work[ish-1]*CL10;
// d3[l] = d2[l] + s1*K^2*s2[l-2] + s2*K^2*s2[l-1] + s3*K^2*s2[l]
}
__syncthreads();
// //6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - done below
// if((ish>=8)&&(ish<(BLOCK_SECT2<<1+8))){
// switchsd = x_work[ish]*CL12;
// //s3[l] = (K)*s2[l]
// x_work[ish] = x_work[ish+1]*CL11;
// // d4[l] = (1/K)*d3[l]
// x_work[ish+1] = switchsd;
// }
// __syncthreads();
// We do last lifting cycle at the same time as writing back to global memory.
// Involves a switch in coefficients because of the derived lifting algo.
if((ish>=8) && (ish<(BLOCK_SECT2<<1)+8) && (i<(int)len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish+1]*CL11;
x[i + skip] = x_work[ish]*CL12;
}
__syncthreads();
}
// same memory diagram as for the forward transform
__global__ void LA8_kernel_shared_b(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT2 * blockIdx.x + threadIdx.x -4)*skip<<1;
// i = -8*skip, -6*skip, -4*skip, -2*skip 0, 2*skip, ... , len-2*skip, len, len + 2*skip, len + 4*skip, len + 6*skip
// for each block, we have, e.g. i = -8*skip, -6*skip, -4*skip, -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT2-2*skip, 2*skip*BLOCK_SECT2, 2*skip*BLOCK_SECT2+2*skip, 2*skip*BLOCK_SECT2+4*skip, 2*skip*BLOCK_SECT2+4*skip
int ish = (threadIdx.x)<<1;
// ish = 0, 2,... , 2*BLOCK_SIZE2-2, 2*BLOCK_SIZE2
__shared__ real x_work[BLOCK_SIZE2<<1];
// printf("\nthreadIdx.x=%i,blockIdx.x=%i,i=%i,ish=%i\n",threadIdx.x,blockIdx.x,i,ish);
x_work[ish] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,0) * CL11;
x_work[ish+1] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,1) * CL12;
// above, we do the last lifting cycle at the same time as reading the coefficients
// we have switched the odd/even coefficients above because our derived lifting algo
// requires that
__syncthreads();
// in the lifting cycles below, we keep some threads idle so that we only update
// the intermediate coefficients that we need in the subsequent steps
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=6)&&(ish<((BLOCK_SECT2<<1)+14))){ // keep 3 threads idle
lift_cyc_5(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-5]*CL8 - x_work[ish-3]*CL9 - x_work[ish-1]*CL10;
// d3[l] = d2[l] - s1*K^2*s2[l-2] - s2*K^2*s2[l-1] - s3*K^2*s2[l]
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=6)&&(ish<((BLOCK_SECT2<<1)+14))){ //keep 4 threads idle
lift_cyc_4(x_work,ish,1,BWD);
// x_work[ish-1] = x_work[ish-1] - x_work[ish-2]*CL6 - x_work[ish]*CL7;
// s2[l] = s1[l] - q41*d2[l-1] - q42*d2[l]
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=8)&&(ish<((BLOCK_SECT2<<1)+12))){ //keep 5 threads idle
// printf("herecyc3 thread%i\n",threadIdx.x);
lift_cyc_3(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-1]*CL4 - x_work[ish+1]*CL5;
// d2[l] = d1[l] - q31*s1[l] - q32*s1[l+1]
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=6)&&(ish<((BLOCK_SECT2<<1)+12))){ //keep 6 threads idle
// printf("herecyc2 thread%i\n",threadIdx.x);
lift_cyc_2(x_work,ish,1,BWD);
// x_work[ish-1] = x_work[ish-1] - x_work[ish]*CL2 - x_work[ish+2]*CL3;
// s1[l] = x[2l] - q21*d1[l] - q22*d1[l+1]
}
__syncthreads();
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if((ish>=8)&&(ish<((BLOCK_SECT2<<1)+10))){ //keep 7 threads idle
//printf("herecyc1 thread%i\n",threadIdx.x);
lift_cyc_1(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-1]*CL0 - x_work[ish+1]*CL1;
// d1[l] = x[2l+1] - q11*x[2l] - q12*x[2l+2]
}
__syncthreads();
// We write back to global memory.
if((ish>=8) && (ish<(BLOCK_SECT2<<1)+8) && (i<(int)len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skip] = x_work[ish+1];
}
__syncthreads();
}
// ########################################################################
// Now we have a kernel that performs 2 levels of transform
// ########################################################################
/*
Shared memory has following, new, structure:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| | | |
|_ _ _ _ |_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ |_ _ _ _ _ _ |
|<--24-->|<---------(2*BLOCK_SECT_ML2)--------------->|<---20----->|
|<---------------( 2 * BLOCK_SIZE_ML2 )------------------------->|
indices...
1, 2, 3, ..., k, k1, ..., k17
used in 1st level of transform (where k=2*BLOCK_SECT_ML2+22) k1 is k+1 etc
then indices...
8, 9, 10, ..., k, k1, ..., k11
used in 2nd level of transform
First & last 4/6 coefficients contain shared memory boundary coefficients* for the transform levels.
*shared memory boundary coefficients: for the first & last shared memory blocks, they hold periodic boundary coefficient points; for all other blocks, they hold the boundary coefficients of the previous/following memory block.
The threads point to (via the variable ish):
(ish is actual index in shared memory)
(skipwork = 1)
|0 1 ... 23 |24 25 ... l |l1 l2 ... l20 |
where l is BLOCK_SECT_ML2+1
(skipwork = 2)
|0 ... 11 |12 ... m |m1 ... m10 |
where m is floor[(BLOCK_SECT_ML2+1)/2]
*/
// above is probably not quite right! :D
int LA8CUDA_sh_ml2(real* x_d, uint len, short int sense, uint nlevels){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
  // however, in the case of an error, it will return 0 - because any strictly positive integer would be valid. & nlevels is unsigned.
switch(sense){
case 1:
return(fLA8CUDAsh_ml2(x_d,len,1,nlevels));
case 0:
return(bLA8CUDAsh_ml2(x_d,len,1<<(nlevels-1)));
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
}
int fLA8CUDAsh_ml2(real* x_d, uint len, uint skip, uint nlevels){
if(skip < (1 << nlevels)){
hipError_t cuderr;
int threadsPerBlock;
int blocksPerGrid;
uint levels=1; //leave at 1. This is initialisation for level variable!
uint k1 = 8; //# border coeffs needed for single level kernel
uint k2 = 24; //# coeffs needed for 2 level kernel
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
while((levels+1<=2)&&((skip<<(levels+1))<=(1 << nlevels))&&(len/skip>k2)){ // levels+1<=k gives L, #levels to loop over
levels+=1;
}
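// the loop above picks how many transform levels this launch will fuse:
// levels stays 1 when only one level remains, or when the data at this skip
// is too short for the 2-level kernel, and becomes 2 otherwise.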
if (levels==1){
// deal with bdrs
uint k = k1; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
hipLaunchKernelGGL(( LA8_kernel_shared_f), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
hipFree(bdrs);
}
else{
// deal with bdrs
real* bdrs; // vector of boundary points - ensures independence of loops
uint k = k2;
uint lenb = max((len*k)/(skip*BLOCK_SECT_ML2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip,bdrs,lenb,k,BLOCK_SECT_ML2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE_ML2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT_ML2 - 1) / BLOCK_SECT_ML2;
// printf("\nlevels=2");
// printveccu<<<1,1>>>(bdrs,lenb);
// hipDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
hipLaunchKernelGGL(( LA8_kernel_shared_f_ml2), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
hipFree(bdrs);
}
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in fLA8 MLsh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
hipDeviceSynchronize();
return(fLA8CUDAsh_ml2(x_d,len,skip<<levels,nlevels));
}
return(0);
}
int bLA8CUDAsh_ml2(real* x_d, uint len, uint skip){
if(skip > 0){
hipError_t cuderr;
int threadsPerBlock;
int blocksPerGrid;
real* bdrs;
uint levels=1; //leave at 1. This is initialisation for level variable!
uint k1 = 8; //# border coeffs needed for single level kernel
uint k2 = 20; //# coeffs needed for 2 level kernel
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
while((levels+1<=2)&&((skip>>levels)>0)&&(len/skip>k2)){ // levels+1<=k gives L, #levels to loop over
levels+=1;
}
if (levels==1){
// deal with bdrs
uint k = k1; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
hipLaunchKernelGGL(( LA8_kernel_shared_b), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
hipFree(bdrs);
}
else{
// deal with bdrs
real* bdrs; // vector of boundary points - ensures independence of loops
uint k = k2;
uint lenb = max((len*k*2)/(skip*BLOCK_SECT_ML2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
hipMalloc((void **)&bdrs,lenb*sizeof(real));
hipLaunchKernelGGL(( get_bdrs_sh_k), dim3(bPG_bd), dim3(tPB_bd), 0, 0, x_d,len,skip/2,bdrs,lenb,k,BLOCK_SECT_ML2); //we copy the boundary points into a vector
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE_ML2B;
blocksPerGrid =(len/skip + BLOCK_SECT_ML2 - 1) / BLOCK_SECT_ML2;
// printf("\nlevels=2");
// printveccu<<<1,1>>>(bdrs,lenb);
// hipDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
hipLaunchKernelGGL(( LA8_kernel_shared_b_ml2), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_d,len,skip,bdrs,lenb);
hipFree(bdrs);
}
cuderr = hipGetLastError();
if (cuderr != hipSuccess)
{
fprintf(stderr, "CUDA error in transform in bLA8 MLsh (error code %s)!\n", hipGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
hipDeviceSynchronize();
return(bLA8CUDAsh_ml2(x_d,len,skip>>levels));
}
return(0);
}
__global__ void LA8_kernel_shared_f_ml2(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT_ML2 * blockIdx.x + threadIdx.x -12)*skip << 1;
// i = -24*skip,..., -2*skip, 0, 2*skip, ... , len-2*skip, len, len +2, ... , len + 16*skip, len + 18*skip
// for each block, we have, e.g. i = -24*skip, ..., -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT_ML2-2*skip, 2*BLOCK_SECT_ML2*skip, 2*skip*BLOCK_SECT_ML2+2*skip, ..., 2*skip*BLOCK_SECT_ML2+20*skip
uint ish = (threadIdx.x)<<1;
uint ishlast = (BLOCK_SIZE_ML2-1)<<1;
uint li;
// ish = 0, 2,... , 2*BLOCK_SECT_ML2-2, 2*BLOCK_SECT_ML2, 2*BLOCK_SECT_ML2+2, 2*BLOCK_SECT_ML2+4, 2*BLOCK_SECT_ML2+6, +8, ... , +38
__shared__ real x_work[BLOCK_SIZE_ML2<<1];
uint skipwork = 1;
write_wvt_shared_gen(x,bdrs,len,skip,i,ish,24,20,BLOCK_SECT2,x_work);
__syncthreads();
for(li = 0; li < 2; li++){
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-2)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-8)&&(ish>=12)) ){
lift_cyc_1(x_work,ish,skipwork,FWD);
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-2)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-8)&&(ish>=12)) ){
lift_cyc_2(x_work,ish,skipwork,FWD);
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-14)&&(ish>=12)) ){
lift_cyc_3(x_work,ish,skipwork,FWD);
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=6)) ||
((li==1)&&(ish<=ishlast-16)&&(ish>=16)) ){
lift_cyc_4(x_work,ish,skipwork,FWD);
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=10)) ||
((li==1)&&(ish<=ishlast-20)&&(ish>=24)) ){
lift_cyc_5(x_work,ish,skipwork,FWD);
}
__syncthreads();
//6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-20)&&(ish>=22)) ){
lift_cyc_6(x_work,ish,skipwork,FWD);
}
__syncthreads();
if(li==0){ ish=ish<<1; skipwork=skipwork<<1; } // after level 1: double ish and the working stride so level 2 works on every other pair
}
// Now transform level is done. We copy shared array x_work back into x
ish = (threadIdx.x)<<1;
if( (ish>=24) && (ish<2*BLOCK_SECT_ML2+24) && (i<len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skip] = x_work[ish+1];
}
}
__global__ void LA8_kernel_shared_b_ml2(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT_ML2 * blockIdx.x + threadIdx.x -10)*skip;
// i = -24*skip,..., -2*skip, 0, 2*skip, ... , len-2*skip, len, len +2, ... , len + 16*skip, len + 18*skip
// for each block, we have, e.g. i = -24*skip, ..., -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT_ML2-2*skip, 2*BLOCK_SECT_ML2*skip, 2*skip*BLOCK_SECT_ML2+2*skip, ..., 2*skip*BLOCK_SECT_ML2+20*skip
uint skipbl = skip/2; // skip value for second layer. Used in filling shared mem & filling global memory vector.
uint ish = (threadIdx.x)<<1;
uint ishlast = min(len/skipbl+20+16,(BLOCK_SIZE_ML2-1)<<1); //size of shared vec x_work
uint li;
// ish = 0, 2,... , 2*BLOCK_SECT_ML2-2, 2*BLOCK_SECT_ML2, 2*BLOCK_SECT_ML2+2, 2*BLOCK_SECT_ML2+4, 2*BLOCK_SECT_ML2+6, +8, ... , +38
__shared__ real x_work[BLOCK_SIZE_ML2<<1];
uint skipwork = 2;
write_wvt_shared_gen(x,bdrs,len,skipbl,i,ish,20,16,BLOCK_SECT2,x_work);
__syncthreads();
ish=ish<<1;
for(li = 0; li < 2; li++){
//6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=0)) ||
((li==1)&&(ish<=ishlast-10)&&(ish>=14)) ){
lift_cyc_6(x_work,ish,skipwork,BWD);
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=12)) ||
((li==1)&&(ish<=ishlast-10)&&(ish>=14)) ){
lift_cyc_5(x_work,ish,skipwork,BWD);
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=12)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=18)) ){
lift_cyc_4(x_work,ish,skipwork,BWD);
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=18)) ){
lift_cyc_3(x_work,ish,skipwork,BWD);
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=20)) ){
lift_cyc_2(x_work,ish,skipwork,BWD);
}
__syncthreads();
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-10)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-16)&&(ish>=20)) ){
lift_cyc_1(x_work,ish,skipwork,BWD);
}
__syncthreads();
if(li==0){ ish=ish>>1; skipwork=skipwork>>1; } // after the coarser level: halve ish and the working stride for the finer level
}
// Now transform level is done. We copy shared array x_work back into x
ish = (threadIdx.x)<<1;
if( (ish>=20) && (ish<2*BLOCK_SECT_ML2+20) && (i<len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skipbl] = x_work[ish+1];
}
}
static __device__ void lift_cyc_1(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-sk]*CL0 + xsh[ish+sk]*CL1;
else xsh[ish] = xsh[ish] - xsh[ish-sk]*CL0 - xsh[ish+sk]*CL1;
// d1[l] = x[2l+1] + q11*x[2l] + q12*x[2l+2]
}
static __device__ void lift_cyc_2(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish-sk] = xsh[ish-sk] + xsh[ish]*CL2 + xsh[ish+2*sk]*CL3;
else xsh[ish-sk] = xsh[ish-sk] - xsh[ish]*CL2 - xsh[ish+2*sk]*CL3;
// s1[l] = x[2l] + q21*d1[l] + q22*d1[l+1]
}
static __device__ void lift_cyc_3(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-sk]*CL4 + xsh[ish+sk]*CL5;
else xsh[ish] = xsh[ish] - xsh[ish-sk]*CL4 - xsh[ish+sk]*CL5;
// d2[l] = d1[l] + q31*s1[l] + q32*s1[l+1]
}
static __device__ void lift_cyc_4(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish-sk] = xsh[ish-sk] + xsh[ish-2*sk]*CL6 + xsh[ish]*CL7;
else xsh[ish-sk] = xsh[ish-sk] - xsh[ish-2*sk]*CL6 - xsh[ish]*CL7;
// s2[l] = s1[l] + q41*d2[l-1] + q42*d2[l]
}
static __device__ void lift_cyc_5(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-5*sk]*CL8 + xsh[ish-3*sk]*CL9 + xsh[ish-sk]*CL10;
else xsh[ish] = xsh[ish] - xsh[ish-5*sk]*CL8 - xsh[ish-3*sk]*CL9 - xsh[ish-sk]*CL10;
// d3[l] = d2[l] + s1*K^2*s2[l-2] + s2*K^2*s2[l-1] + s3*K^2*s2[l]
}
static __device__ void lift_cyc_6(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD){
real switchsd = xsh[ish]*CL12;
xsh[ish] = xsh[ish+sk]*CL11;
xsh[ish+sk] = switchsd;
}
else{
real switchsd = xsh[ish]*CL12;
xsh[ish] = xsh[ish+sk]*CL11;
xsh[ish+sk] = switchsd;
}
//s3[l] = (K)*s2[l]
// d4[l] = (1/K)*d3[l]
}
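// The six cycles above form the lifting factorisation of the LA8 (least
// asymmetric, 8-tap) wavelet filter: cycles 1-5 are the predict/update steps
// and cycle 6 is the final scale-and-swap by CL11 and CL12. The CL* constants
// are the lifting coefficients supplied by the accompanying coefficients header.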
// // no longer used!
// __device__ double get_wvt_shared(real* x, real* bdrs, const uint len, const uint skip, const int i, const int ish, const short isskip){
// // First, we copy x into shared array x_work
// // NB - conditioning on threadIdx.x, not ish!
// if(threadIdx.x < 6){ //ish == 0, 2, 4, 6, 8, 10
// if(i<(int)len){
// // we are filling the shared block with lower boundary points
// // printf("here2 thread%i\n",threadIdx.x);
// // x_work[ish] = x[i];
// // x_work[ish+1] = x[i + skip];
// if(isskip) return(bdrs[ish + blockIdx.x*12]);
// else return(bdrs[1 + ish + blockIdx.x*12]);
// }
// }
// if((threadIdx.x >= 6) && (threadIdx.x < BLOCK_SECT2+6)){
// // needs to be conditional on i and BLOCK_SECT2
// if(i < (int)len){
// // we fill the central block of shared memory (no boundary coeffs)
// // printf("here3a thread%i\n",threadIdx.x);
// if(isskip) return(x[i]);
// else return(x[i + skip]);
// }
// else if(i==(int)len){
// // this happens when len < BLOCK_SECT2
// // we have to deal with upper boundary points
// // printf("here3b thread%i\n",threadIdx.x);
// // x_work[ish] = x[0];
// // x_work[ish+1] = x[skip];
// if(isskip) return(bdrs[6+(blockIdx.x*12)]);
// else return(bdrs[7+(blockIdx.x*12)]);
// }
// }
// else if(threadIdx.x == BLOCK_SECT2+4){
// if(i<=(int)len){
// if(isskip) return(bdrs[6+(blockIdx.x*12)]);
// else return(bdrs[7+(blockIdx.x*12)]);
// }
// }
// return(9999);
// }
int LA8CUDA_sh_ml2_streams(real* x_h, real* x_d, uint len, short int sense, uint nlevels, hipStream_t stream){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
uint ret;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
// however, in the case of an error, it will return 0 - because any strictly positive integer would be valid, and nlevels is unsigned.
hipMemcpyAsync(x_d,x_h,len*sizeof(real),HTD,stream);
switch(sense){
case 1:
ret = fLA8CUDAsh_ml2(x_d,len,1,nlevels);
break;
case 0:
ret = bLA8CUDAsh_ml2(x_d,len,1<<(nlevels-1));
break;
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
hipMemcpyAsync(x_h,x_d,len*sizeof(real),DTH,stream);
// we copy x_d back into x_h
// we have to do this after the DWT, as the transform is in-place
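// note: for these stream-ordered async copies to actually overlap with other
// work, x_h would normally need to be pinned (page-locked) host memory, e.g.
// allocated with hipHostMalloc; with ordinary pageable memory the copies may
// effectively behave synchronously.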
return(ret);
}
| 34f088562c7c42cdf6d930e47bf4401a9ecc56e6.cu | #include "la8cuda.cuh"
#include "la8coeffs.h"
#define BLOCK_SIZE2 263 // (8 down, 6 up)/2
#define BLOCK_SECT2 256
#define BLOCK_SIZE_ML2 278 //(24 down, 20 up)/2
#define BLOCK_SIZE_ML2B 274 //(20 down, 16 up)/2
// block sect remains the same.
#define BLOCK_SECT_ML2 256
/***********************************************************
Lifted Least Asymmetric 8 code - in CUDA! With shared memory
***********************************************************/
int LA8CUDA_sh(real* x_d, uint len, short int sense, uint nlevels){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
// however, in the case of an error, it will return 0 - because any strictly positive integer would be valid. & nlevels is unsigned.
switch(sense){
case 1:
return(fLA8CUDAsh(x_d,len,1,nlevels));
case 0:
return(bLA8CUDAsh(x_d,len,1<<(nlevels-1)));
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
}
int fLA8CUDAsh(real* x_d, uint len, uint skip, uint nlevels){
if(skip < (1 << nlevels)){
cudaError_t cuderr;
int threadsPerBlock = BLOCK_SIZE2;
int blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
int res;
uint k = 8; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// printveccu<<<1,1>>>(bdrs,lenb);
// cudaDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
LA8_kernel_shared_f<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in LA8 sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
// cudaDeviceSynchronize();
cudaFree(bdrs);
res=fLA8CUDAsh(x_d,len,skip<<1,nlevels);
cudaDeviceSynchronize();
return(res);
}
return(0);
}
int bLA8CUDAsh(real* x_d, uint len, uint skip){
if(skip > 0){
cudaError_t cuderr;
int threadsPerBlock = BLOCK_SIZE2;
int blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
int res;
uint k = 8; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// printveccu<<<1,1>>>(bdrs,lenb);
// cudaDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
LA8_kernel_shared_b<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in LA8 sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
// cudaDeviceSynchronize();
cudaFree(bdrs);
res=bLA8CUDAsh(x_d,len,skip>>1);
cudaDeviceSynchronize();
return(res);
}
return(0);
}
/*
Shared memory has following structure:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| | | |
|_ _ _ _ _ _|_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _|_ _ |
|<----8---->|<---------(2*BLOCK_SECT2 )--------------->|--6-|
|<----------------( 2 * BLOCK_SIZE2 )------------------->|
Where of the 8 spaces below, we need 7; of the 6 spaces above, we need 5.
But our memory structure dictates that we use even values of 'i'
*/
__global__ void LA8_kernel_shared_f(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT2 * blockIdx.x + threadIdx.x -4)*skip<<1;
// i = -8*skip, -6*skip, -4*skip, -2*skip 0, 2*skip, ... , len-2*skip, len, len + 2*skip, len + 4*skip, len + 6*skip
// for each block, we have, e.g. i = -8*skip, -6*skip, -4*skip, -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT2-2*skip, 2*skip*BLOCK_SECT2, 2*skip*BLOCK_SECT2+2*skip, 2*skip*BLOCK_SECT2+4*skip, 2*skip*BLOCK_SECT2+4*skip
int ish = (threadIdx.x)<<1;
// ish = 0, 2,... , 2*BLOCK_SIZE2-2, 2*BLOCK_SIZE2
__shared__ real x_work[BLOCK_SIZE2<<1];
// real switchsd;
// printf("\nthreadIdx.x=%i,blockIdx.x=%i,i=%i,ish=%i\n",threadIdx.x,blockIdx.x,i,ish);
// // First, we copy x into shared array x_work
// // NB - conditioning on threadIdx.x, not ish!
// if(threadIdx.x < 6){ //ish == 0, 2, 4, 6, 8, 10
// if(i<(int)len){
// // we are filling the shared block with lower boundary points
// // printf("here2 thread%i\n",threadIdx.x);
// // x_work[ish] = x[i];
// // x_work[ish+1] = x[i + skip];
// x_work[ish] = bdrs[ish + blockIdx.x*12];
// x_work[ish+1] = bdrs[1 + ish + blockIdx.x*12];
// }
// }
// if((threadIdx.x >= 6) && (threadIdx.x < BLOCK_SECT2+6)){
// // needs to be conditional on i and BLOCK_SECT2
// if(i < (int)len){
// // we fill the central block of shared memory (no boundary coeffs)
// // printf("here3a thread%i\n",threadIdx.x);
// x_work[ish] = x[i];
// x_work[ish+1] = x[i + skip];
// }
// else if(i==(int)len){
// // this happens when len < BLOCK_SECT2
// // we have to deal with upper boundary points
// // printf("here3b thread%i\n",threadIdx.x);
// // x_work[ish] = x[0];
// // x_work[ish+1] = x[skip];
// x_work[ish] = bdrs[6+(blockIdx.x*12)];
// x_work[ish+1] = bdrs[7+(blockIdx.x*12)];
// }
// }
// else if(threadIdx.x == BLOCK_SECT2+4){
// if(i<=(int)len){
// x_work[ish] = bdrs[6+(blockIdx.x*12)];
// x_work[ish+1] = bdrs[7+(blockIdx.x*12)];
// }
// }
// x_work[ish] = get_wvt_shared(x,bdrs,len,skip,i,ish,threadIdx.x, blockIdx.x,1);
// x_work[ish+1] = get_wvt_shared(x,bdrs,len,skip,i,ish,threadIdx.x, blockIdx.x,0);
// x_work[ish] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,1);
// x_work[ish+1] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,0);
write_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,x_work);
__syncthreads();
// in the lifting cycles below, we keep some threads idle so that we only update
// the intermediate coefficients that we need in the subsequent steps
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=2)&&(ish<=(BLOCK_SECT2<<1+12))){ //keep 2 threads idle
//printf("herecyc1 thread%i\n",threadIdx.x);
lift_cyc_1(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-1]*CL0 + x_work[ish+1]*CL1;
// d1[l] = x[2l+1] + q11*x[2l] + q12*x[2l+2]
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=2)&&(ish<=(BLOCK_SECT2<<1+12))){ //keep 2 threads idle
// printf("herecyc2 thread%i\n",threadIdx.x);
lift_cyc_2(x_work,ish,1,FWD);
// x_work[ish-1] = x_work[ish-1] + x_work[ish]*CL2 + x_work[ish+2]*CL3;
// s1[l] = x[2l] + q21*d1[l] + q22*d1[l+1]
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=2)&&(ish<(BLOCK_SECT2<<1+8))){ //keep 3 threads idle
// printf("herecyc3 thread%i\n",threadIdx.x);
lift_cyc_3(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-1]*CL4 + x_work[ish+1]*CL5;
// d2[l] = d1[l] + q31*s1[l] + q32*s1[l+1]
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=4)&&(ish<(BLOCK_SECT2<<1+8))){ //keep 4 threads idle
lift_cyc_4(x_work,ish,1,FWD);
// x_work[ish-1] = x_work[ish-1] + x_work[ish-2]*CL6 + x_work[ish]*CL7;
// s2[l] = s1[l] + q41*d2[l-1] + q42*d2[l]
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=8)&&(ish<(BLOCK_SECT2<<1+8))){ // keep 6 threads idle
lift_cyc_5(x_work,ish,1,FWD);
// x_work[ish] = x_work[ish] + x_work[ish-5]*CL8 + x_work[ish-3]*CL9 + x_work[ish-1]*CL10;
// d3[l] = d2[l] + s1*K^2*s2[l-2] + s2*K^2*s2[l-1] + s3*K^2*s2[l]
}
__syncthreads();
// //6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - done below
// if((ish>=8)&&(ish<(BLOCK_SECT2<<1+8))){
// switchsd = x_work[ish]*CL12;
// //s3[l] = (K)*s2[l]
// x_work[ish] = x_work[ish+1]*CL11;
// // d4[l] = (1/K)*d3[l]
// x_work[ish+1] = switchsd;
// }
// __syncthreads();
// We do last lifting cycle at the same time as writing back to global memory.
// Involves a switch in coefficients because of the derived lifting algo.
if((ish>=8) && (ish<(BLOCK_SECT2<<1)+8) && (i<(int)len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish+1]*CL11;
x[i + skip] = x_work[ish]*CL12;
}
__syncthreads();
}
// same memory diagram as for the forward transform
__global__ void LA8_kernel_shared_b(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT2 * blockIdx.x + threadIdx.x -4)*skip<<1;
// i = -8*skip, -6*skip, -4*skip, -2*skip 0, 2*skip, ... , len-2*skip, len, len + 2*skip, len + 4*skip, len + 6*skip
// for each block, we have, e.g. i = -8*skip, -6*skip, -4*skip, -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT2-2*skip, 2*skip*BLOCK_SECT2, 2*skip*BLOCK_SECT2+2*skip, 2*skip*BLOCK_SECT2+4*skip, 2*skip*BLOCK_SECT2+4*skip
int ish = (threadIdx.x)<<1;
// ish = 0, 2,... , 2*BLOCK_SIZE2-2, 2*BLOCK_SIZE2
__shared__ real x_work[BLOCK_SIZE2<<1];
// printf("\nthreadIdx.x=%i,blockIdx.x=%i,i=%i,ish=%i\n",threadIdx.x,blockIdx.x,i,ish);
x_work[ish] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,0) * CL11;
x_work[ish+1] = get_wvt_shared_gen(x,bdrs,len,skip,i,ish,8,6,BLOCK_SECT2,1) * CL12;
// above, we do the last lifting cycle at the same time as reading the coefficients
// we have switched the odd/even coefficients above because our derived lifting algo
// requires that
__syncthreads();
// in the lifting cycles below, we keep some threads idle so that we only update
// the intermediate coefficients that we need in the subsequent steps
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=6)&&(ish<(BLOCK_SECT2<<1+14))){ // keep 3 threads idle
lift_cyc_5(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-5]*CL8 - x_work[ish-3]*CL9 - x_work[ish-1]*CL10;
// d3[l] = d2[l] - s1*K^2*s2[l-2] - s2*K^2*s2[l-1] - s3*K^2*s2[l]
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=6)&&(ish<(BLOCK_SECT2<<1+14))){ //keep 4 threads idle
lift_cyc_4(x_work,ish,1,BWD);
// x_work[ish-1] = x_work[ish-1] - x_work[ish-2]*CL6 - x_work[ish]*CL7;
// s2[l] = s1[l] - q41*d2[l-1] - q42*d2[l]
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=8)&&(ish<(BLOCK_SECT2<<1+12))){ //keep 5 threads idle
// printf("herecyc3 thread%i\n",threadIdx.x);
lift_cyc_3(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-1]*CL4 - x_work[ish+1]*CL5;
// d2[l] = d1[l] - q31*s1[l] - q32*s1[l+1]
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=6)&&(ish< (BLOCK_SECT2<<1+12))){ //keep 6 threads idle
// printf("herecyc2 thread%i\n",threadIdx.x);
lift_cyc_2(x_work,ish,1,BWD);
// x_work[ish-1] = x_work[ish-1] - x_work[ish]*CL2 - x_work[ish+2]*CL3;
// s1[l] = x[2l] - q21*d1[l] - q22*d1[l+1]
}
__syncthreads();
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if((ish>=8)&&(ish<(BLOCK_SECT2<<1+10))){ //keep 7 threads idle
//printf("herecyc1 thread%i\n",threadIdx.x);
lift_cyc_1(x_work,ish,1,BWD);
// x_work[ish] = x_work[ish] - x_work[ish-1]*CL0 - x_work[ish+1]*CL1;
// d1[l] = x[2l+1] - q11*x[2l] - q12*x[2l+2]
}
__syncthreads();
// We write back to global memory.
if((ish>=8) && (ish<(BLOCK_SECT2<<1)+8) && (i<(int)len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skip] = x_work[ish+1];
}
__syncthreads();
}
// ########################################################################
// Now we have a kernel that performs 2 levels of transform
// ########################################################################
/*
Shared memory has following, new, structure:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
| | | |
|_ _ _ _ |_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ |_ _ _ _ _ _ |
|<--24-->|<---------(2*BLOCK_SECT_ML2)--------------->|<---20----->|
|<---------------( 2 * BLOCK_SIZE_ML2 )------------------------->|
indices...
1, 2, 3, ..., k, k1, ..., k17
used in 1st level of transform (where k=2*BLOCK_SECT_ML2+22) k1 is k+1 etc
then indices...
8, 9, 10, ..., k, k1, ..., k11
used in 2nd level of transform
First & last 4/6 coefficients contain shared memory boundary coefficients* for the transform levels.
*shared memory boundary coefficients: for the first & last shared memory blocks, they hold periodic boundary coefficient points; for all other blocks, they hold the boundary coefficients of the previous/following memory block.
The threads point to (via the variable ish):
(ish is actual index in shared memory)
(skipwork = 1)
|0 1 ... 23 |24 25 ... l |l1 l2 ... l20 |
where l is BLOCK_SECT_ML2+1
(skipwork = 2)
|0 ... 11 |12 ... m |m1 ... m10 |
where m is floor[(BLOCK_SECT_ML2+1)/2]
*/
// above is probably not quite right! :D
int LA8CUDA_sh_ml2(real* x_d, uint len, short int sense, uint nlevels){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
// however, in the case of an error, it will return 0 - because any strictly positive integer would be valid, and nlevels is unsigned.
switch(sense){
case 1:
return(fLA8CUDAsh_ml2(x_d,len,1,nlevels));
case 0:
return(bLA8CUDAsh_ml2(x_d,len,1<<(nlevels-1)));
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
}
int fLA8CUDAsh_ml2(real* x_d, uint len, uint skip, uint nlevels){
if(skip < (1 << nlevels)){
cudaError_t cuderr;
int threadsPerBlock;
int blocksPerGrid;
uint levels=1; //leave at 1. This is initialisation for level variable!
uint k1 = 8; //# border coeffs needed for single level kernel
uint k2 = 24; //# coeffs needed for 2 level kernel
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
while((levels+1<=2)&&((skip<<(levels+1))<=(1 << nlevels))&&(len/skip>k2)){ // levels+1<=k gives L, #levels to loop over
levels+=1;
}
if (levels==1){
// deal with bdrs
uint k = k1; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
LA8_kernel_shared_f<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cudaFree(bdrs);
}
else{
// deal with bdrs
real* bdrs; // vector of boundary points - ensures independence of loops
uint k = k2;
uint lenb = max((len*k)/(skip*BLOCK_SECT_ML2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip,bdrs,lenb,k,BLOCK_SECT_ML2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE_ML2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT_ML2 - 1) / BLOCK_SECT_ML2;
// printf("\nlevels=2");
// printveccu<<<1,1>>>(bdrs,lenb);
// cudaDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
LA8_kernel_shared_f_ml2<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cudaFree(bdrs);
}
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in fLA8 MLsh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
cudaDeviceSynchronize();
return(fLA8CUDAsh_ml2(x_d,len,skip<<levels,nlevels));
}
return(0);
}
int bLA8CUDAsh_ml2(real* x_d, uint len, uint skip){
if(skip > 0){
cudaError_t cuderr;
int threadsPerBlock;
int blocksPerGrid;
real* bdrs;
uint levels=1; //leave at 1. This is initialisation for level variable!
uint k1 = 8; //# border coeffs needed for single level kernel
uint k2 = 20; //# coeffs needed for 2 level kernel
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
while((levels+1<=2)&&((skip>>levels)>0)&&(len/skip>k2)){ // levels+1<=k gives L, #levels to loop over
levels+=1;
}
if (levels==1){
// deal with bdrs
uint k = k1; // max(above,below) # border coeffs obtained
real* bdrs; // vector of boundary points - ensures independence of loops
uint lenb = max((len*k)/(skip*BLOCK_SECT2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip,bdrs,lenb,k,BLOCK_SECT2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE2;
blocksPerGrid =(len/(skip<<1) + BLOCK_SECT2 - 1) / BLOCK_SECT2;
LA8_kernel_shared_b<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cudaFree(bdrs);
}
else{
// deal with bdrs
real* bdrs; // vector of boundary points - ensures independence of loops
uint k = k2;
uint lenb = max((len*k*2)/(skip*BLOCK_SECT_ML2),2*k); // length of bdrs vector
int tPB_bd = BS_BD;
int bPG_bd = max(((lenb/(2*k)) + BS_BD - 1) / BS_BD,1);
cudaMalloc((void **)&bdrs,lenb*sizeof(real));
get_bdrs_sh_k<<<bPG_bd, tPB_bd>>>(x_d,len,skip/2,bdrs,lenb,k,BLOCK_SECT_ML2); //we copy the boundary points into a vector
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in get boundaries sh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
threadsPerBlock = BLOCK_SIZE_ML2B;
blocksPerGrid =(len/skip + BLOCK_SECT_ML2 - 1) / BLOCK_SECT_ML2;
// printf("\nlevels=2");
// printveccu<<<1,1>>>(bdrs,lenb);
// cudaDeviceSynchronize();
// printf("\n### threadsperblock = %i, blockspergrid = %i ####\n",threadsPerBlock,blocksPerGrid);
LA8_kernel_shared_b_ml2<<<blocksPerGrid, threadsPerBlock>>>(x_d,len,skip,bdrs,lenb);
cudaFree(bdrs);
}
cuderr = cudaGetLastError();
if (cuderr != cudaSuccess)
{
fprintf(stderr, "CUDA error in transform in bLA8 MLsh (error code %s)!\n", cudaGetErrorString(cuderr));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// //print stuff...
// printf("CUDA: len=%u,skip=%u\n",len,skip);
// printveccu<<<1,1>>>(x_d,len);
cudaDeviceSynchronize();
return(bLA8CUDAsh_ml2(x_d,len,skip>>levels));
}
return(0);
}
__global__ void LA8_kernel_shared_f_ml2(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT_ML2 * blockIdx.x + threadIdx.x -12)*skip << 1;
// i = -24*skip,..., -2*skip, 0, 2*skip, ... , len-2*skip, len, len +2, ... , len + 16*skip, len + 18*skip
// for each block, we have, e.g. i = -24*skip, ..., -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT_ML2-2*skip, 2*BLOCK_SECT_ML2*skip, 2*skip*BLOCK_SECT_ML2+2*skip, ..., 2*skip*BLOCK_SECT_ML2+20*skip
uint ish = (threadIdx.x)<<1;
uint ishlast = (BLOCK_SIZE_ML2-1)<<1;
uint li;
// ish = 0, 2,... , 2*BLOCK_SECT_ML2-2, 2*BLOCK_SECT_ML2, 2*BLOCK_SECT_ML2+2, 2*BLOCK_SECT_ML2+4, 2*BLOCK_SECT_ML2+6, +8, ... , +38
__shared__ real x_work[BLOCK_SIZE_ML2<<1];
uint skipwork = 1;
write_wvt_shared_gen(x,bdrs,len,skip,i,ish,24,20,BLOCK_SECT2,x_work);
__syncthreads();
for(li = 0; li < 2; li++){
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-2)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-8)&&(ish>=12)) ){
lift_cyc_1(x_work,ish,skipwork,FWD);
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-2)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-8)&&(ish>=12)) ){
lift_cyc_2(x_work,ish,skipwork,FWD);
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-14)&&(ish>=12)) ){
lift_cyc_3(x_work,ish,skipwork,FWD);
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=6)) ||
((li==1)&&(ish<=ishlast-16)&&(ish>=16)) ){
lift_cyc_4(x_work,ish,skipwork,FWD);
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=10)) ||
((li==1)&&(ish<=ishlast-20)&&(ish>=24)) ){
lift_cyc_5(x_work,ish,skipwork,FWD);
}
__syncthreads();
//6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=4)) ||
((li==1)&&(ish<=ishlast-20)&&(ish>=22)) ){
lift_cyc_6(x_work,ish,skipwork,FWD);
}
__syncthreads();
if(li==0){ ish=ish<<1; skipwork=skipwork<<1; } // after level 1: double ish and the working stride so level 2 works on every other pair
}
// Now transform level is done. We copy shared array x_work back into x
ish = (threadIdx.x)<<1;
if( (ish>=24) && (ish<2*BLOCK_SECT_ML2+24) && (i<len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skip] = x_work[ish+1];
}
}
__global__ void LA8_kernel_shared_b_ml2(real* x, const uint len, const uint skip, real* bdrs, const uint lenb){
int i = (BLOCK_SECT_ML2 * blockIdx.x + threadIdx.x -10)*skip;
// i = -24*skip,..., -2*skip, 0, 2*skip, ... , len-2*skip, len, len +2, ... , len + 16*skip, len + 18*skip
// for each block, we have, e.g. i = -24*skip, ..., -2*skip, 0, 2*skip, ..., 2*skip*BLOCK_SECT_ML2-2*skip, 2*BLOCK_SECT_ML2*skip, 2*skip*BLOCK_SECT_ML2+2*skip, ..., 2*skip*BLOCK_SECT_ML2+20*skip
uint skipbl = skip/2; // skip value for second layer. Used in filling shared mem & filling global memory vector.
uint ish = (threadIdx.x)<<1;
uint ishlast = min(len/skipbl+20+16,(BLOCK_SIZE_ML2-1)<<1); //size of shared vec x_work
uint li;
// ish = 0, 2,... , 2*BLOCK_SECT_ML2-2, 2*BLOCK_SECT_ML2, 2*BLOCK_SECT_ML2+2, 2*BLOCK_SECT_ML2+4, 2*BLOCK_SECT_ML2+6, +8, ... , +38
__shared__ real x_work[BLOCK_SIZE_ML2<<1];
uint skipwork = 2;
write_wvt_shared_gen(x,bdrs,len,skipbl,i,ish,20,16,BLOCK_SECT2,x_work);
__syncthreads();
ish=ish<<1;
for(li = 0; li < 2; li++){
//6th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=0)) ||
((li==1)&&(ish<=ishlast-10)&&(ish>=14)) ){
lift_cyc_6(x_work,ish,skipwork,BWD);
}
__syncthreads();
//5th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=12)) ||
((li==1)&&(ish<=ishlast-10)&&(ish>=14)) ){
lift_cyc_5(x_work,ish,skipwork,BWD);
}
__syncthreads();
//4th cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-4)&&(ish>=12)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=18)) ){
lift_cyc_4(x_work,ish,skipwork,BWD);
}
__syncthreads();
//3rd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-6)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=18)) ){
lift_cyc_3(x_work,ish,skipwork,BWD);
}
__syncthreads();
//2nd cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-8)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-12)&&(ish>=20)) ){
lift_cyc_2(x_work,ish,skipwork,BWD);
}
__syncthreads();
//1st cycle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if( ((li==0)&&(ish<=ishlast-10)&&(ish>=16)) ||
((li==1)&&(ish<=ishlast-16)&&(ish>=20)) ){
lift_cyc_1(x_work,ish,skipwork,BWD);
}
__syncthreads();
if(li==0){ ish=ish>>1; skipwork=skipwork>>1; } // after the coarser level: halve ish and the working stride for the finer level
}
// Now transform level is done. We copy shared array x_work back into x
ish = (threadIdx.x)<<1;
if( (ish>=20) && (ish<2*BLOCK_SECT_ML2+20) && (i<len)){
// printf("hereputback thread%i\n",threadIdx.x);
x[i] = x_work[ish];
x[i + skipbl] = x_work[ish+1];
}
}
static __device__ void lift_cyc_1(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-sk]*CL0 + xsh[ish+sk]*CL1;
else xsh[ish] = xsh[ish] - xsh[ish-sk]*CL0 - xsh[ish+sk]*CL1;
// d1[l] = x[2l+1] + q11*x[2l] + q12*x[2l+2]
}
static __device__ void lift_cyc_2(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish-sk] = xsh[ish-sk] + xsh[ish]*CL2 + xsh[ish+2*sk]*CL3;
else xsh[ish-sk] = xsh[ish-sk] - xsh[ish]*CL2 - xsh[ish+2*sk]*CL3;
// s1[l] = x[2l] + q21*d1[l] + q22*d1[l+1]
}
static __device__ void lift_cyc_3(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-sk]*CL4 + xsh[ish+sk]*CL5;
else xsh[ish] = xsh[ish] - xsh[ish-sk]*CL4 - xsh[ish+sk]*CL5;
// d2[l] = d1[l] + q31*s1[l] + q32*s1[l+1]
}
static __device__ void lift_cyc_4(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish-sk] = xsh[ish-sk] + xsh[ish-2*sk]*CL6 + xsh[ish]*CL7;
else xsh[ish-sk] = xsh[ish-sk] - xsh[ish-2*sk]*CL6 - xsh[ish]*CL7;
// s2[l] = s1[l] + q41*d2[l-1] + q42*d2[l]
}
static __device__ void lift_cyc_5(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD) xsh[ish] = xsh[ish] + xsh[ish-5*sk]*CL8 + xsh[ish-3*sk]*CL9 + xsh[ish-sk]*CL10;
else xsh[ish] = xsh[ish] - xsh[ish-5*sk]*CL8 - xsh[ish-3*sk]*CL9 - xsh[ish-sk]*CL10;
// d3[l] = d2[l] + s1*K^2*s2[l-2] + s2*K^2*s2[l-1] + s3*K^2*s2[l]
}
static __device__ void lift_cyc_6(real* xsh, const int ish, const uint sk, const short int sense){
if(sense == FWD){
real switchsd = xsh[ish]*CL12;
xsh[ish] = xsh[ish+sk]*CL11;
xsh[ish+sk] = switchsd;
}
else{
real switchsd = xsh[ish]*CL12;
xsh[ish] = xsh[ish+sk]*CL11;
xsh[ish+sk] = switchsd;
}
//s3[l] = (K)*s2[l]
// d4[l] = (1/K)*d3[l]
}
// // no longer used!
// __device__ double get_wvt_shared(real* x, real* bdrs, const uint len, const uint skip, const int i, const int ish, const short isskip){
// // First, we copy x into shared array x_work
// // NB - conditioning on threadIdx.x, not ish!
// if(threadIdx.x < 6){ //ish == 0, 2, 4, 6, 8, 10
// if(i<(int)len){
// // we are filling the shared block with lower boundary points
// // printf("here2 thread%i\n",threadIdx.x);
// // x_work[ish] = x[i];
// // x_work[ish+1] = x[i + skip];
// if(isskip) return(bdrs[ish + blockIdx.x*12]);
// else return(bdrs[1 + ish + blockIdx.x*12]);
// }
// }
// if((threadIdx.x >= 6) && (threadIdx.x < BLOCK_SECT2+6)){
// // needs to be conditional on i and BLOCK_SECT2
// if(i < (int)len){
// // we fill the central block of shared memory (no boundary coeffs)
// // printf("here3a thread%i\n",threadIdx.x);
// if(isskip) return(x[i]);
// else return(x[i + skip]);
// }
// else if(i==(int)len){
// // this happens when len < BLOCK_SECT2
// // we have to deal with upper boundary points
// // printf("here3b thread%i\n",threadIdx.x);
// // x_work[ish] = x[0];
// // x_work[ish+1] = x[skip];
// if(isskip) return(bdrs[6+(blockIdx.x*12)]);
// else return(bdrs[7+(blockIdx.x*12)]);
// }
// }
// else if(threadIdx.x == BLOCK_SECT2+4){
// if(i<=(int)len){
// if(isskip) return(bdrs[6+(blockIdx.x*12)]);
// else return(bdrs[7+(blockIdx.x*12)]);
// }
// }
// return(9999);
// }
int LA8CUDA_sh_ml2_streams(real* x_h, real* x_d, uint len, short int sense, uint nlevels, cudaStream_t stream){
// sense '1' is forwards, '0' is backwards, anything else is sideways
uint filterlength=8;
uint ret;
nlevels = check_len_levels(len,nlevels,filterlength);
if(nlevels == 0) return(1); //NB nlevels=0 when calling this function means that check_len_levels will calculate the maximum number of levels - in which case it will return this number
// however, in the case of an error, it will return 0 - because any strictly positive integer would be valid, and nlevels is unsigned.
cudaMemcpyAsync(x_d,x_h,len*sizeof(real),HTD,stream);
switch(sense){
case 1:
ret = fLA8CUDAsh_ml2(x_d,len,1,nlevels);
break;
case 0:
ret = bLA8CUDAsh_ml2(x_d,len,1<<(nlevels-1));
break;
default:
printf("\nSense must be 1 for forward or 0 for backwards. We don't do sideways.\n");
return(1);
}
cudaMemcpyAsync(x_h,x_d,len*sizeof(real),DTH,stream);
// we copy x_d back into x_h
// we have to do this after the DWT, as the transform is in-place
return(ret);
}
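// ---------------------------------------------------------------------------
// Hypothetical usage sketch, added for illustration only (not part of the
// original library). It assumes `real` is the floating-point type defined in
// la8cuda.cuh and that len is a power of two, as check_len_levels() requires.
// It shows a forward + inverse round trip through the 2-level wrappers.
// ---------------------------------------------------------------------------
int example_LA8_sh_ml2_roundtrip(real* x_h, uint len, uint nlevels){
  int ret;
  real* x_d;
  cudaMalloc((void **)&x_d, len*sizeof(real));
  cudaMemcpy(x_d, x_h, len*sizeof(real), cudaMemcpyHostToDevice);
  // forward transform, in place on the device vector
  ret = LA8CUDA_sh_ml2(x_d, len, 1, nlevels);
  // inverse transform, recovering the original signal (up to rounding)
  if(ret == 0) ret = LA8CUDA_sh_ml2(x_d, len, 0, nlevels);
  cudaMemcpy(x_h, x_d, len*sizeof(real), cudaMemcpyDeviceToHost);
  cudaFree(x_d);
  return(ret);
}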
|
0c0dff7d1abe1cd78979a2bb2f582cad81aaefbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file : main.cu
* @brief : main driver file for Examples using the cuRAND device API to generate pseudorandom numbers using either XORWOW or MRG32k3a generators
* @details : This program uses the device CURAND API. The purpose of these examples is explore scope and compiling and modularity/separation issues with CURAND
*
* @author : Ernest Yeung <[email protected]>
* @date : 20180109
* @ref : http://docs.nvidia.com/cuda/hiprand/device-api-overview.html#device-api-example
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -lcurand main.cu ./gens2distri/XORMRGgens.cu -o main
* */
#include "./gens2distri/XORMRGgens.h" //
#include <iostream>
/* ********** functions to setup device GPU ********** */
/** @fn get_maxGridSize
 * @brief get maxGridSize[0] (the maximum number of thread blocks along the x-dimension of a (thread) grid) of a single device GPU
* */
size_t get_maxGridSize() {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
size_t MAXGRIDSIZE;
if (count>0) {
hipGetDeviceProperties(&prop, 0);
MAXGRIDSIZE = prop.maxGridSize[0];
return MAXGRIDSIZE;
} else { return EXIT_FAILURE; }
};
/** @fn generate_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_kernel(hiprandState_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
unsigned int x; //
/* Copy state to local memory for efficiency */
hiprandState_t localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = hiprand(&localState);
/* Check if low bit set */ // i.e. if it's odd or not
if (x & 1) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_kernel(hiprandStatePhilox4_32_10_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
unsigned int x; //
/* Copy state to local memory for efficiency */
hiprandStatePhilox4_32_10_t localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = hiprand(&localState);
/* Check if low bit set */ // i.e. if it's odd or not
if (x & 1) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_uniform_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_uniform_kernel(hiprandState_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
float x;
/* Copy state to local memory for efficiency */
hiprandState_t localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = hiprand_uniform(&localState);
/* Check if > .5 */
if (x > .5) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_uniform_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_uniform_kernel(hiprandStatePhilox4_32_10_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0; // reset per result cell, as in the other kernels above
float x;
/* Copy state to local memory for efficiency */
hiprandStatePhilox4_32_10_t localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = hiprand_uniform(&localState);
/* Check if > .5 */
if (x > .5) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
int main(int argc, char* argv[])
{
/* ***** (thread) grid,block dims ***** */
/* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
* determined here */
size_t MAXGRIDSIZE = get_maxGridSize();
unsigned int M_x = 1<<8; // M_x = number of threads in x-direction, in a single block, i.e. blocksize; 2^8=256
unsigned int L = 1<<18; // doesn't output correct values for n = 1<<39
unsigned int MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
// note: depending on the device's maxGridSize, fewer than L threads may be launched (e.g. 1/4 of L when maxGridSize.x is 65535); the grid-stride loops in the kernels cover the remaining cells
unsigned int N_x = min( MAX_BLOCKS, ((L + M_x - 1)/ M_x));
/* ***** END of (thread) grid,block dims ***** */
// Use structs devStatesXOR, devStatesMRG, devStatesPhilox4_32_10_t to automate process of setting up curandStates
devStatesXOR devstatesXOR = { L, N_x, M_x } ;
devStatesMRG devstatesMRG = { L, N_x, M_x } ;
devStatesPhilox4_32_10_t devstatesPhilox4_32_10_t = { L, N_x, M_x } ;
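	// only the XORWOW states are drawn from by the kernels launched below;
	// the MRG32k3a and Philox states are set up but not used in this main()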
// set the sampleCount
constexpr const int sampleCount = 10000;
/* Allocate space for results on host */
auto hostResults = std::make_unique<unsigned int[]>(L);
/* Allocate space for results on device */
// custom deleter for unsigned int array, as a lambda function
auto del_devResults_lambda_main=[&](unsigned int* devResults) {hipFree(devResults); };
std::unique_ptr<unsigned int[],decltype(del_devResults_lambda_main)> devResults(nullptr, del_devResults_lambda_main);
hipMallocManaged((void **)&devResults, L*sizeof(unsigned int));
/* Set results to 0 */
hipMemset(devResults.get(), 0, L*sizeof(unsigned int) );
/* Generate and use pseudo-random */
/* this will test if we have low bit set, i.e. odd numbers */
for (int i=0; i < 50; i++) {
hipLaunchKernelGGL(( generate_kernel), dim3(N_x),dim3(M_x), 0, 0, devstatesXOR.devStates.get(), sampleCount, devResults.get(), L );
}
/* Copy device memory to host */
hipMemcpy(hostResults.get(), devResults.get(), L * sizeof(unsigned int), hipMemcpyDeviceToHost);
/* Show results */
unsigned long long int total = 0;
for (int i =0; i < L; i++) {
total += hostResults[i];
}
std::cout << "Fraction with low bit set was " << (float)total / (L * sampleCount * 50.0f) << std::endl;
/* Set results to 0 */
hipMemset(devResults.get(), 0, L * sizeof(unsigned int));
/* Generate and use uniform pseudo-random */
for (int i=0; i<50; i++) {
hipLaunchKernelGGL(( generate_uniform_kernel), dim3(N_x),dim3(M_x), 0, 0, devstatesXOR.devStates.get(), sampleCount, devResults.get(), L);
}
/* Copy device memory to host */
hipMemcpy(hostResults.get(), devResults.get(), L * sizeof(unsigned int), hipMemcpyDeviceToHost);
/* Show result */
total =0;
for (int i=0; i < L; i++) {
total += hostResults[i];
}
std::cout << "Fraction of uniforms > 0.5 was " << (float) total / ( (float) L * sampleCount * 50.0f ) << std::endl;
}
| 0c0dff7d1abe1cd78979a2bb2f582cad81aaefbc.cu | /**
* @file : main.cu
* @brief : main driver file for Examples using the cuRAND device API to generate pseudorandom numbers using either XORWOW or MRG32k3a generators
* @details : This program uses the device CURAND API. The purpose of these examples is explore scope and compiling and modularity/separation issues with CURAND
*
* @author : Ernest Yeung <[email protected]>
* @date : 20180109
* @ref : http://docs.nvidia.com/cuda/curand/device-api-overview.html#device-api-example
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -lcurand main.cu ./gens2distri/XORMRGgens.cu -o main
* */
#include "./gens2distri/XORMRGgens.h" //
#include <iostream>
/* ********** functions to setup device GPU ********** */
/** @fn get_maxGridSize
 * @brief get maxGridSize[0] (the maximum number of thread blocks along the x-dimension of a (thread) grid) of a single device GPU
* */
size_t get_maxGridSize() {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
size_t MAXGRIDSIZE;
if (count>0) {
cudaGetDeviceProperties(&prop, 0);
MAXGRIDSIZE = prop.maxGridSize[0];
return MAXGRIDSIZE;
} else { return EXIT_FAILURE; }
};
/** @fn generate_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_kernel(curandState *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
unsigned int x; //
/* Copy state to local memory for efficiency */
curandState localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = curand(&localState);
/* Check if low bit set */ // i.e. if it's odd or not
if (x & 1) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_kernel(curandStatePhilox4_32_10_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
unsigned int x; //
/* Copy state to local memory for efficiency */
curandStatePhilox4_32_10_t localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = curand(&localState);
/* Check if low bit set */ // i.e. if it's odd or not
if (x & 1) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_uniform_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_uniform_kernel(curandState *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
unsigned int count = 0;
float x;
/* Copy state to local memory for efficiency */
curandState localState = state[idx];
/* Generate pseudo-random unsigned ints */
for (int i=0; i<n; i++) {
x = curand_uniform(&localState);
/* Check if > .5 */
if (x > .5) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
/** @fn generate_uniform_kernel
* @param n - for each thread, generate n random unsigned ints; 1 reason to do this is to utilize the compute of a thread
* */
__global__ void generate_uniform_kernel(curandStatePhilox4_32_10_t *state,
int n,
unsigned int *result, const unsigned long int L)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
    for (int idx = id; idx < L; idx += blockDim.x * gridDim.x ) {
        unsigned int count = 0;  // reset per element (as in the other kernels) so counts don't carry over between grid-stride iterations
        float x;
/* Copy state to local memory for efficiency */
curandStatePhilox4_32_10_t localState = state[idx];
        /* Generate pseudo-random uniform floats in (0, 1] */
for (int i=0; i<n; i++) {
x = curand_uniform(&localState);
/* Check if > .5 */
if (x > .5) {
count++;
}
}
/* Copy state back to global memory */
state[idx] = localState;
result[idx] += count;
}
}
int main(int argc, char* argv[])
{
/* ***** (thread) grid,block dims ***** */
/* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
* determined here */
size_t MAXGRIDSIZE = get_maxGridSize();
unsigned int M_x = 1<<8; // M_x = number of threads in x-direction, in a single block, i.e. blocksize; 2^8=256
    unsigned int L = 1<<18; // number of RNG states / result slots; doesn't output correct values for L as large as 1<<39 (that shift overflows a 32-bit int anyway)
unsigned int MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
    // N_x is capped at MAX_BLOCKS; with L = 1<<18 and M_x = 256 this comes out to (L + M_x - 1)/M_x = 1024 blocks (exactly L threads), and if the cap ever applies, the grid-stride loops in the kernels still cover all L elements
unsigned int N_x = min( MAX_BLOCKS, ((L + M_x - 1)/ M_x));
/* ***** END of (thread) grid,block dims ***** */
// Use structs devStatesXOR, devStatesMRG, devStatesPhilox4_32_10_t to automate process of setting up curandStates
devStatesXOR devstatesXOR = { L, N_x, M_x } ;
devStatesMRG devstatesMRG = { L, N_x, M_x } ;
devStatesPhilox4_32_10_t devstatesPhilox4_32_10_t = { L, N_x, M_x } ;
// set the sampleCount
constexpr const int sampleCount = 10000;
/* Allocate space for results on host */
auto hostResults = std::make_unique<unsigned int[]>(L);
/* Allocate space for results on device */
// custom deleter for unsigned int array, as a lambda function
auto del_devResults_lambda_main=[&](unsigned int* devResults) {cudaFree(devResults); };
    // allocate into a raw pointer first, then hand ownership to the unique_ptr;
    // casting &devResults (the address of the smart-pointer object itself) to void** would not be valid
    unsigned int* devResults_raw = nullptr;
    cudaMallocManaged((void **)&devResults_raw, L*sizeof(unsigned int));
    std::unique_ptr<unsigned int[],decltype(del_devResults_lambda_main)> devResults(devResults_raw, del_devResults_lambda_main);
/* Set results to 0 */
cudaMemset(devResults.get(), 0, L*sizeof(unsigned int) );
/* Generate and use pseudo-random */
/* this will test if we have low bit set, i.e. odd numbers */
for (int i=0; i < 50; i++) {
generate_kernel<<<N_x,M_x>>>(devstatesXOR.devStates.get(), sampleCount, devResults.get(), L );
}
/* Copy device memory to host */
cudaMemcpy(hostResults.get(), devResults.get(), L * sizeof(unsigned int), cudaMemcpyDeviceToHost);
/* Show results */
unsigned long long int total = 0;
for (int i =0; i < L; i++) {
total += hostResults[i];
}
std::cout << "Fraction with low bit set was " << (float)total / (L * sampleCount * 50.0f) << std::endl;
/* Set results to 0 */
cudaMemset(devResults.get(), 0, L * sizeof(unsigned int));
/* Generate and use uniform pseudo-random */
for (int i=0; i<50; i++) {
generate_uniform_kernel<<<N_x,M_x>>>(devstatesXOR.devStates.get(), sampleCount, devResults.get(), L);
}
/* Copy device memory to host */
cudaMemcpy(hostResults.get(), devResults.get(), L * sizeof(unsigned int), cudaMemcpyDeviceToHost);
/* Show result */
total =0;
for (int i=0; i < L; i++) {
total += hostResults[i];
}
std::cout << "Fraction of uniforms > 0.5 was " << (float) total / ( (float) L * sampleCount * 50.0f ) << std::endl;
}
|
4b0ab19f7ca4a2bb30d746e63de25490c30a8f66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
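//As a worked example of the weighting: a pure-green pixel (R,G,B) = (0,255,0)
//maps to I = .587f * 255, roughly 150, noticeably brighter than the naive
//average 255/3 = 85, reflecting the eye's stronger response to green.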
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
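  // i.e. tid = (linear block index within the grid) * (threads per block) + (linear thread index within the block)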
int tid = (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x;
if (tid < numRows * numCols){
    greyImage[tid]=.299f*rgbaImage[tid].x+.587f*rgbaImage[tid].y+.114f*rgbaImage[tid].z; // single-precision constants, per the note above
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
  //8x8 thread blocks are launched below, with enough blocks in each dimension to cover the whole image (extra threads fail the kernel's bounds check and do nothing)
const dim3 blockSize(8, 8, 1); //TODO
const dim3 gridSize( numRows/8+1, numCols/8+1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 4b0ab19f7ca4a2bb30d746e63de25490c30a8f66.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
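//As a worked example of the weighting: a pure-green pixel (R,G,B) = (0,255,0)
//maps to I = .587f * 255, roughly 150, noticeably brighter than the naive
//average 255/3 = 85, reflecting the eye's stronger response to green.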
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
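  // i.e. tid = (linear block index within the grid) * (threads per block) + (linear thread index within the block)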
int tid = (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y+threadIdx.y*blockDim.x+threadIdx.x;
if (tid < numRows * numCols){
    greyImage[tid]=.299f*rgbaImage[tid].x+.587f*rgbaImage[tid].y+.114f*rgbaImage[tid].z; // single-precision constants, per the note above
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
  //8x8 thread blocks are launched below, with enough blocks in each dimension to cover the whole image (extra threads fail the kernel's bounds check and do nothing)
const dim3 blockSize(8, 8, 1); //TODO
const dim3 gridSize( numRows/8+1, numCols/8+1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
9c9ada577a0be3522dde39f131356c95164bee00.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( hipMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const DevMem2D_<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if __CUDA_ARCH__ >= 200
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
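            // each thread-row owns one shared row laid out as
            // [ HALO_SIZE left-halo tiles | PATCH_PER_BLOCK main tiles | HALO_SIZE right-halo tiles ],
            // with every tile BLOCK_DIM_X elements wide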
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, hipStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
hipLaunchKernelGGL(( linearRowFilter<KSIZE, T, D>), dim3(grid), dim3(block), 0, stream, src, dst, anchor, brd);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream)
{
typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, hipStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<short3, float3>(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<int , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<float , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
| 9c9ada577a0be3522dde39f131356c95164bee00.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float kernel[], int ksize)
{
cudaSafeCall( cudaMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float)) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const DevMem2D_<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if __CUDA_ARCH__ >= 200
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
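            // each thread-row owns one shared row laid out as
            // [ HALO_SIZE left-halo tiles | PATCH_PER_BLOCK main tiles | HALO_SIZE right-halo tiles ],
            // with every tile BLOCK_DIM_X elements wide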
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, cudaStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
linearRowFilter<KSIZE, T, D><<<grid, block, 0, stream>>>(src, dst, anchor, brd);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream)
{
typedef void (*caller_t)(DevMem2D_<T> src, DevMem2D_<D> dst, int anchor, int cc, cudaStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize);
callers[brd_type][ksize]((DevMem2D_<T>)src, (DevMem2D_<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<short3, float3>(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<int , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<float , float >(DevMem2Db src, DevMem2Db dst, const float kernel[], int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
|
c501658d7dfdaae6f8dd6a88249469066f4ca7de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kmeans_parallel.cuh"
#include "announce.hh"
void KMeans::main(DataPoint* const centroids, DataPoint* const data) {
cudaAssert (
hipHostRegister(data, DataSize*sizeof(DataPoint), hipHostRegisterPortable)
);
cudaAssert (
hipHostRegister(centroids, KSize*sizeof(DataPoint), hipHostRegisterPortable)
);
auto newCentroids = new DataPoint[KSize];
bool* isSame = new bool(true);
cudaAssert (
hipHostRegister(newCentroids, KSize*sizeof(DataPoint), hipHostRegisterPortable)
);
cudaAssert (
hipHostRegister(isSame, sizeof(bool), hipHostRegisterPortable)
);
//study(deviceQuery());
int numThread_labeling = 4; /*TODO get from study*/
int numBlock_labeling = ceil((float)DataSize / numThread_labeling);
    int threshold = 3;  // fixed iteration cap for now (the convergence check below is commented out)
    while(threshold-- > 0) {
hipLaunchKernelGGL(( KMeans::labeling), dim3(numBlock_labeling), dim3(numThread_labeling), 0, 0, centroids, data);
//hipDeviceSynchronize();
//announce.Labels(data);
hipLaunchKernelGGL(( resetNewCentroids), dim3(KSize),dim3(FeatSize), 0, 0, newCentroids);
hipLaunchKernelGGL(( KMeans::updateCentroidAccum), dim3(numBlock_labeling),dim3(numThread_labeling), 0, 0, newCentroids, data);
hipLaunchKernelGGL(( KMeans::updateCentroidDivide), dim3(KSize), dim3(FeatSize), 0, 0, newCentroids);
hipLaunchKernelGGL(( KMeans::checkIsSame), dim3(KSize), dim3(FeatSize), 0, 0, isSame, centroids, newCentroids);
//hipDeviceSynchronize();
//if(isSame)
//break;
hipLaunchKernelGGL(( memcpyCentroid), dim3(KSize),dim3(FeatSize), 0, 0, centroids, newCentroids);
}
hipDeviceSynchronize();
cudaAssert( hipPeekAtLastError());
announce.Labels(data);
cudaAssert( hipHostUnregister(data) );
cudaAssert( hipHostUnregister(centroids) );
cudaAssert( hipHostUnregister(newCentroids) );
cudaAssert( hipHostUnregister(isSame) );
delete[] newCentroids;
delete isSame;
}
/// labeling ////////////////////////////////////////////////////////////////////////////////////
__global__
void KMeans::labeling(const DataPoint* const centroids, DataPoint* const data) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= DataSize)
return;
const DataPoint* centroidPtr = centroids;
DataPoint threadData = data[idx];
Label_T minDistLabel = 0;
Data_T minDistSQR = MaxDataValue;
for(int i=0; i!=KSize; ++i) {
Data_T currDistSQR = Labeling::euclideanDistSQR(threadData.value, centroidPtr->value);
if(minDistSQR > currDistSQR) {
minDistLabel = i;
minDistSQR = currDistSQR;
}
centroidPtr++;
}
data[idx].label = minDistLabel;
}
__device__
Data_T KMeans::Labeling::euclideanDistSQR (const Data_T* const lhs, const Data_T* const rhs) {
const Data_T* valuePtrLHS = lhs;
const Data_T* valuePtrRHS = rhs;
Data_T distSQR = 0;
for(int featIdx=0; featIdx!=FeatSize; ++featIdx) {
Data_T dist = *valuePtrLHS - *valuePtrRHS;
distSQR += dist*dist;
valuePtrLHS++;
valuePtrRHS++;
}
return distSQR;
}
/// update centroids //////////////////////////////////////////////////////////////////////////////
__global__
void KMeans::updateCentroidAccum(DataPoint* const centroids, const DataPoint* const data) {
const int dataIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(dataIdx >= DataSize)
return;
const int centroidIdx = data[dataIdx].label;
    atomicAdd(&(centroids[centroidIdx].label), 1); // newCentroids' label field was reset to 0, so it is reused here as the per-centroid member count (labelSize)
Update::addValuesLtoR(data[dataIdx].value, centroids[centroidIdx].value);
}
__global__
void KMeans::updateCentroidDivide(DataPoint* const centroids) {
centroids[blockIdx.x].value[threadIdx.x] /= centroids[blockIdx.x].label;
}
__device__
void KMeans::Update::addValuesLtoR(const Data_T* const lhs, Data_T* const rhs) {
const Data_T* lhsPtr = lhs;
Data_T* rhsPtr = rhs;
for(int featIdx=0; featIdx!=FeatSize; ++featIdx)
atomicAdd(rhsPtr++, *(lhsPtr++));
}
void study(const std::vector<DeviceQuery>& devices) {
/*
* According to the CUDA C Best Practice Guide.
* 1. Thread per block should be a multiple of 32(warp size)
* 2. A minimum of 64 threads per block should be used.
* 3. Between 128 and 256 thread per block is a better choice
     * 4. Use several (3 to 4) small thread blocks rather than one large thread block
*/
/*
* sizeof DataPoint
* = 4(float) * 200(feature size) + 4(label, int)
* = 804 byte
* =>register memory per thread
* = 832 byte { 804 + 8(pointer) + 8(two int) + 8(size_t) + 4(Data_T) }
* =>register count per thread
* = 832/4 = 208
*
* sizeof Centroid
* = DataPoint x 10
* = 8040 byte
*
* memory per block (* NOT SHARED MEMORY *)
* = 804 * 64
* = 51456 byte
*
* total global memory size = 8112 MBytes
* number of registers per block = 65536
*/
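    // e.g. with the numbers above: 65536 registers per block / 208 registers per thread
    // is roughly 315 threads per block; rounding down to a multiple of the 32-thread
    // warp size (best practice 1 above) would give 288 threads per block.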
Count_T numRegisterPerKernel_labeling = 208;
MemSize_L sizeDataPoint = sizeof(DataPoint);
MemSize_L sizeCentroids = sizeDataPoint * KSize;
for(auto device : devices) {
assert(sizeCentroids < device.totalConstMem);
std::cout << "Device["<<device.index<<"]" << std::endl;
Count_T maxThreadsPerBlock = device.numRegPerBlock / numRegisterPerKernel_labeling;
std::cout <<"max threads per block(labeling) : " << maxThreadsPerBlock << std::endl;
std::cout <<"max threads per block(update) : " << maxThreadsPerBlock << std::endl;
std::cout <<"max threads per block(check) : " << maxThreadsPerBlock << std::endl;
std::cout << device.numRegPerBlock / 208.0 << std::endl;
std::cout << device.threadsPerBlock << std::endl;
std::cout << device.threadsPerMultiprocesser << std::endl;
}
} | c501658d7dfdaae6f8dd6a88249469066f4ca7de.cu | #include "kmeans_parallel.cuh"
#include "announce.hh"
void KMeans::main(DataPoint* const centroids, DataPoint* const data) {
cudaAssert (
cudaHostRegister(data, DataSize*sizeof(DataPoint), cudaHostRegisterPortable)
);
cudaAssert (
cudaHostRegister(centroids, KSize*sizeof(DataPoint), cudaHostRegisterPortable)
);
auto newCentroids = new DataPoint[KSize];
bool* isSame = new bool(true);
cudaAssert (
cudaHostRegister(newCentroids, KSize*sizeof(DataPoint), cudaHostRegisterPortable)
);
cudaAssert (
cudaHostRegister(isSame, sizeof(bool), cudaHostRegisterPortable)
);
//study(deviceQuery());
int numThread_labeling = 4; /*TODO get from study*/
int numBlock_labeling = ceil((float)DataSize / numThread_labeling);
    int threshold = 3;  // fixed iteration cap for now (the convergence check below is commented out)
    while(threshold-- > 0) {
KMeans::labeling<<<numBlock_labeling, numThread_labeling>>>(centroids, data);
//cudaDeviceSynchronize();
//announce.Labels(data);
resetNewCentroids<<<KSize,FeatSize>>>(newCentroids);
KMeans::updateCentroidAccum<<<numBlock_labeling,numThread_labeling>>>(newCentroids, data);
KMeans::updateCentroidDivide<<<KSize, FeatSize>>>(newCentroids);
KMeans::checkIsSame<<<KSize, FeatSize>>>(isSame, centroids, newCentroids);
//cudaDeviceSynchronize();
//if(isSame)
//break;
memcpyCentroid<<<KSize,FeatSize>>>(centroids, newCentroids);
}
cudaDeviceSynchronize();
cudaAssert( cudaPeekAtLastError());
announce.Labels(data);
cudaAssert( cudaHostUnregister(data) );
cudaAssert( cudaHostUnregister(centroids) );
cudaAssert( cudaHostUnregister(newCentroids) );
cudaAssert( cudaHostUnregister(isSame) );
delete[] newCentroids;
delete isSame;
}
/// labeling ////////////////////////////////////////////////////////////////////////////////////
__global__
void KMeans::labeling(const DataPoint* const centroids, DataPoint* const data) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= DataSize)
return;
const DataPoint* centroidPtr = centroids;
DataPoint threadData = data[idx];
Label_T minDistLabel = 0;
Data_T minDistSQR = MaxDataValue;
for(int i=0; i!=KSize; ++i) {
Data_T currDistSQR = Labeling::euclideanDistSQR(threadData.value, centroidPtr->value);
if(minDistSQR > currDistSQR) {
minDistLabel = i;
minDistSQR = currDistSQR;
}
centroidPtr++;
}
data[idx].label = minDistLabel;
}
__device__
Data_T KMeans::Labeling::euclideanDistSQR (const Data_T* const lhs, const Data_T* const rhs) {
const Data_T* valuePtrLHS = lhs;
const Data_T* valuePtrRHS = rhs;
Data_T distSQR = 0;
for(int featIdx=0; featIdx!=FeatSize; ++featIdx) {
Data_T dist = *valuePtrLHS - *valuePtrRHS;
distSQR += dist*dist;
valuePtrLHS++;
valuePtrRHS++;
}
return distSQR;
}
/// update centroids //////////////////////////////////////////////////////////////////////////////
__global__
void KMeans::updateCentroidAccum(DataPoint* const centroids, const DataPoint* const data) {
const int dataIdx = blockIdx.x * blockDim.x + threadIdx.x;
if(dataIdx >= DataSize)
return;
const int centroidIdx = data[dataIdx].label;
    atomicAdd(&(centroids[centroidIdx].label), 1); // newCentroids' label field was reset to 0, so it is reused here as the per-centroid member count (labelSize)
Update::addValuesLtoR(data[dataIdx].value, centroids[centroidIdx].value);
}
__global__
void KMeans::updateCentroidDivide(DataPoint* const centroids) {
centroids[blockIdx.x].value[threadIdx.x] /= centroids[blockIdx.x].label;
}
__device__
void KMeans::Update::addValuesLtoR(const Data_T* const lhs, Data_T* const rhs) {
const Data_T* lhsPtr = lhs;
Data_T* rhsPtr = rhs;
for(int featIdx=0; featIdx!=FeatSize; ++featIdx)
atomicAdd(rhsPtr++, *(lhsPtr++));
}
void study(const std::vector<DeviceQuery>& devices) {
/*
* According to the CUDA C Best Practice Guide.
* 1. Thread per block should be a multiple of 32(warp size)
* 2. A minimum of 64 threads per block should be used.
* 3. Between 128 and 256 thread per block is a better choice
     * 4. Use several (3 to 4) small thread blocks rather than one large thread block
*/
/*
* sizeof DataPoint
* = 4(float) * 200(feature size) + 4(label, int)
* = 804 byte
* =>register memory per thread
* = 832 byte { 804 + 8(pointer) + 8(two int) + 8(size_t) + 4(Data_T) }
* =>register count per thread
* = 832/4 = 208
*
* sizeof Centroid
* = DataPoint x 10
* = 8040 byte
*
* memory per block (* NOT SHARED MEMORY *)
* = 804 * 64
* = 51456 byte
*
* total global memory size = 8112 MBytes
* number of registers per block = 65536
*/
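    // e.g. with the numbers above: 65536 registers per block / 208 registers per thread
    // is roughly 315 threads per block; rounding down to a multiple of the 32-thread
    // warp size (best practice 1 above) would give 288 threads per block.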
Count_T numRegisterPerKernel_labeling = 208;
MemSize_L sizeDataPoint = sizeof(DataPoint);
MemSize_L sizeCentroids = sizeDataPoint * KSize;
for(auto device : devices) {
assert(sizeCentroids < device.totalConstMem);
std::cout << "Device["<<device.index<<"]" << std::endl;
Count_T maxThreadsPerBlock = device.numRegPerBlock / numRegisterPerKernel_labeling;
std::cout <<"max threads per block(labeling) : " << maxThreadsPerBlock << std::endl;
std::cout <<"max threads per block(update) : " << maxThreadsPerBlock << std::endl;
std::cout <<"max threads per block(check) : " << maxThreadsPerBlock << std::endl;
std::cout << device.numRegPerBlock / 208.0 << std::endl;
std::cout << device.threadsPerBlock << std::endl;
std::cout << device.threadsPerMultiprocesser << std::endl;
}
} |
f3900f5c3083febe12eb2a7c2ef886d7e74ac124.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include "cuda_particle.h"
#include <helper_cuda.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
__constant__ bin_struct _bins;
real *_force_send_e;
real *_force_send_w;
real *_force_send_n;
real *_force_send_s;
real *_force_send_t;
real *_force_send_b;
real *_force_recv_e;
real *_force_recv_w;
real *_force_recv_n;
real *_force_recv_s;
real *_force_recv_t;
real *_force_recv_b;
extern "C"
void cuda_part_malloc_host(void)
{
// Flags in cuda_bluebottle.cu:cuda_dom_malloc_host since they are needed even
// without particles
checkCudaErrors(hipHostMalloc(&phase, dom[rank].Gcc.s3b * sizeof(int)));
cpumem += dom[rank].Gcc.s3b * sizeof(int);
checkCudaErrors(hipHostMalloc(&phase_shell, dom[rank].Gcc.s3b * sizeof(int)));
cpumem += dom[rank].Gcc.s3b * sizeof(int);
}
extern "C"
void cuda_part_malloc_dev(void)
{
//printf("N%d >> Allocating device particle memory...\n", rank);
// Flags in cuda_bluebottle.cu:cuda_dom_malloc_dev since they are needed even
// without particles
// Phase
checkCudaErrors(hipMalloc(&_phase, dom[rank].Gcc.s3b * sizeof(int)));
gpumem += dom[rank].Gcc.s3b * sizeof(int);
checkCudaErrors(hipMalloc(&_phase_shell, dom[rank].Gcc.s3b * sizeof(int)));
gpumem += dom[rank].Gcc.s3b * sizeof(int);
// Allocate device variables
if (NPARTS > 0) {
checkCudaErrors(hipMalloc(&_parts, nparts * sizeof(part_struct)));
    gpumem += nparts * sizeof(part_struct);
checkCudaErrors(hipMemcpyToSymbol(_bins, &bins, sizeof(bin_struct)));
checkCudaErrors(hipMalloc(&_bin_start, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
checkCudaErrors(hipMalloc(&_bin_end, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
checkCudaErrors(hipMalloc(&_bin_count, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
}
/* These arrays are allocated/free'd in their functions, but listed here for
* reference
* _part_ind
* _part_bin
* _send_parts_{e,w}
* _recv_parts_{e,w}
*/
/* For pointers to pointers, if we need to go back... */
// https://stackoverflow.com/questions/26111794/how-to-use-pointer-to-pointer-
// in-cuda
// https://stackoverflow.com/questions/15113960/cuda-allocating-array-of-
// pointers-to-images-and-the-images
// https://stackoverflow.com/questions/23609770/cuda-double-pointer-memory-copy
// -->https://stackoverflow.com/questions/27931630/copying-array-of-pointers-
// into-device-memory-and-back-cuda
}
extern "C"
void cuda_part_push(void)
{
if (NPARTS > 0) {
checkCudaErrors(hipMemcpy(_parts, parts, nparts * sizeof(part_struct),
hipMemcpyHostToDevice));
}
checkCudaErrors(hipMemcpy(_phase, phase, dom[rank].Gcc.s3b * sizeof(int),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phase_shell, phase_shell, dom[rank].Gcc.s3b * sizeof(int),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_u, flag_u, dom[rank].Gfx.s3b * sizeof(int),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_v, flag_v, dom[rank].Gfy.s3b * sizeof(int),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_flag_w, flag_w, dom[rank].Gfz.s3b * sizeof(int),
hipMemcpyHostToDevice));
}
extern "C"
void cuda_part_pull(void)
{
/* Declare temporary part structure and nparts_subdom */
part_struct *_tmp_parts;
nparts_subdom = 0;
/* Re-allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Set ghost bin count to zero (GFZ indexed) */
hipLaunchKernelGGL(( zero_ghost_bins_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_count);
hipLaunchKernelGGL(( zero_ghost_bins_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _bin_count);
hipLaunchKernelGGL(( zero_ghost_bins_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_count);
/* Allocate memory to find bin offset target indices in tmp part_struct */
int *_bin_offset;
checkCudaErrors(hipMalloc(&_bin_offset, bins.Gcc.s3b * sizeof(int)));
/* Prefix scan _bin_count to find target indices in tmp part_struct */
thrust::device_ptr<int> t_bin_count(_bin_count);
thrust::device_ptr<int> t_bin_offset(_bin_offset);
thrust::exclusive_scan(t_bin_count, t_bin_count + bins.Gcc.s3b, t_bin_offset);
/* Reduce bin_count to find nparts in subdomain (ghost bins are zero'd) */
nparts_subdom = thrust::reduce(t_bin_count, t_bin_count + bins.Gcc.s3b,
0., thrust::plus<int>());
/* Allocate new device part struct (no ghost particles) */
checkCudaErrors(hipMalloc(&_tmp_parts, nparts_subdom * sizeof(part_struct)));
/* Copy subdom parts to tmp part_struct (only in subdom, so [in, jn]) */
// thread over inner bins (no ghost bins)
tx = bins.Gcc.in * (bins.Gcc.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.in >= MAX_THREADS_DIM);
ty = bins.Gcc.jn * (bins.Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jn >= MAX_THREADS_DIM);
bx = (int) ceil((real) bins.Gcc.in / (real) tx);
by = (int) ceil((real) bins.Gcc.jn / (real) ty);
dim3 bin_num_kn(bx, by);
dim3 bin_dim_kn(tx, ty);
hipLaunchKernelGGL(( copy_subdom_parts), dim3(bin_num_kn), dim3(bin_dim_kn), 0, 0, _tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _bin_offset);
hipFree(_bin_offset);
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_subdom = 0;
checkCudaErrors(hipMalloc(&_tmp_parts, nparts_subdom * sizeof(part_struct)));
}
/* Allocate new host parts with nparts in subdom */
free(parts);
parts = (part_struct*) malloc(nparts_subdom * sizeof(part_struct));
// Pull from device
checkCudaErrors(hipMemcpy(parts, _tmp_parts, nparts_subdom * sizeof(part_struct),
hipMemcpyDeviceToHost));
// Free
hipFree(_tmp_parts);
hipFree(_part_ind);
hipFree(_part_bin);
// Double check the number of particles is correct
int reduce_parts = 0;
MPI_Allreduce(&nparts_subdom, &reduce_parts, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (reduce_parts != NPARTS) {
printf("N%d >> Something went wrong. NPARTS = %d, but %d exist\n",
rank, NPARTS, reduce_parts);
printf("N%d >> Has %d parts\n", rank, nparts_subdom);
exit(EXIT_FAILURE);
}
}
extern "C"
void cuda_part_pull_debug(void)
{
// Pull ALL particles, including ghosts
// Allocate new host part_struct with new nparts
free(parts);
parts = (part_struct*) malloc(nparts * sizeof(part_struct));
// Pull all particles from device
checkCudaErrors(hipMemcpy(parts, _parts, nparts * sizeof(part_struct),
hipMemcpyDeviceToHost));
}
extern "C"
void cuda_part_free(void)
{
//printf("N%d >> Freeing device particle memory...\n", rank);
// Flags in cuda_dom_free
checkCudaErrors(hipHostFree(phase));
checkCudaErrors(hipHostFree(phase_shell));
checkCudaErrors(hipFree(_phase));
checkCudaErrors(hipFree(_phase_shell));
if (NPARTS > 0) {
checkCudaErrors(hipFree(_parts));
checkCudaErrors(hipFree(_bin_start));
checkCudaErrors(hipFree(_bin_end));
checkCudaErrors(hipFree(_bin_count));
}
}
extern "C"
void cuda_transfer_parts_i(void)
{
//printf("N%d >> Transfering parts in i, nparts = %d\n", rank, nparts);
/* Transfer particles east and west
* * Bin the particles, indexing with `i` varying slowest
* * Sort particles by their bin
* * Find start and end of each bin's particles
* * Find number of particles in each bin
* * Find number of particles in _is & _ie planes. These need to be sent W/E
   * * Communicate these numbers east and west. Each process now knows how many
* to send and recv
* * Allocate memory for particle send and recv
* * Copy particles into sending arrays. Each bin can find the offset target
* index for its particles by performing a prefix scan.
* * Communicate particles east and west, send -> recv
* * Recv'd parts exist in the ghost bins and replace whatever existed there
* at the last time step. Sum the particles in _isb & _ieb and subtract
   * from nparts. This, plus the number of particles recv'd from E/W, is the
* number of new particles
* * Allocate temp part structure to hold all new particles.
* * Reduce bin_count from _is->_ie to find nparts that we will keep
   * Prefix scan from _is -> _ie to find offset index for particle copy to
* temp struct
* * Backfill recv'd particles to the end of the temp array
* * Repeat process for j, k to take care of edge, corner. Indexing will be
* different to take advantage of memory coalescence and the prefix scan
* two steps back
*/
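  /* Worked example of the accounting above (hypothetical numbers): suppose
   * this rank holds nparts = 10, of which 2 sit in the _isb ghost bins and 1
   * in the _ieb ghost bins from the previous exchange, and it receives 3
   * particles from the WEST and 0 from the EAST. Then
   *   nparts_new = 10 + (3 + 0) - (2 + 1) = 10,
   * the 7 interior particles are copied to the front of the temporary array
   * using the prefix-scan offsets, and the 3 received particles are
   * backfilled behind them. */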
/* NOTE
* cuda-memcheck occasionally produces the error "bulk_kernel_by_value: an
* illegal memory address was encountered" error on a (thrust) call to
* hipDeviceSynchronize. This doesn't seem to be reliably reproducible
* (occurs on any of the several thrust calls in this function). This does
* not seem to affect the results in any way, but should be further
* investigated. See bug id 008.
*/
/* Init execution config -- thread over east/west faces */
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate memory */
// These are realloc'd every time
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(hipMalloc(&_offset_e, bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_w, bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_i), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
s1b = bins.Gcc.jnb;
s2b = s1b * bins.Gcc.knb;
// East
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ie plane
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // no parts to send
nparts_send[EAST] = 0;
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West
offset = GFX_LOC(bins.Gcc._is, 0, 0, s1b, s2b);
if (dom[rank].w != MPI_PROC_NULL) {
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
nparts_send[WEST] = 0;
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
nparts_recv[WEST] = 0; // init
nparts_recv[EAST] = 0;
mpi_send_nparts_i();
/* Allocate memory for send and receiving particles */
// NOTE: If no particles need to be sent/received in a given direction, this
// allocates a memory location with size zero which returns a null device
// pointer. If this is passed to MPI_Win_create(base, ...) as the base in
// CUDA 9.0, it causes MPI to hang. This was not an issue in CUDA 7.5
//
// The fix involves fooling MPI by allocating a very small amount of dummy
// information if no particles are to be sent. This gives the location a valid
  // memory pointer, rather than a null pointer. The MPI communication still knows
// that the allocated window size and info to be sent is zero, and nothing is
// unpacked because that is wrapped in an if-statement already. This doesn't
  // affect most cases, where particles are communicated in every direction at
  // every time step; it will only affect extremely dilute cases.
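  //
  // Illustrative sketch of the allocation expression below (hypothetical
  // counts): nparts_send[EAST] = 5 gives send_alloc_e = 5*1 + 0 = 5, while
  // nparts_send[EAST] = 0 gives send_alloc_e = 0*0 + 1 = 1, i.e. a one-element
  // dummy buffer whose only purpose is to hand MPI_Win_create a non-null base.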
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
checkCudaErrors(hipMalloc(&_send_parts_e, send_alloc_e * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_send_parts_w, send_alloc_w * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_e, recv_alloc_e * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_w, recv_alloc_w * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[EAST] > 0) {
hipLaunchKernelGGL(( pack_parts_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _send_parts_e, _parts, _offset_e,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_e, 0., send_alloc_e * sizeof(part_struct));
}
if (nparts_send[WEST] > 0) {
hipLaunchKernelGGL(( pack_parts_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _send_parts_w, _parts, _offset_w,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_w, 0., send_alloc_w * sizeof(part_struct));
}
hipDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_i();
/* Find number of particles currently in the EAST/WEST ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// East
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
if (dom[rank].e != MPI_PROC_NULL) {
nparts_ghost[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_ghost[EAST] = 0;
}
// West
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
if (dom[rank].w != MPI_PROC_NULL) {
nparts_ghost[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_ghost[WEST] = 0;
}
} else { // no parts
nparts_ghost[EAST] = 0;
nparts_ghost[WEST] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[EAST] + nparts_recv[WEST]
- nparts_ghost[EAST] - nparts_ghost[WEST];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(hipMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of [_is, _ie] of _bin_count */
int *_offset_all;
checkCudaErrors(hipMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_offset_all(_offset_all);
// Scan over bin_count[_is->_ie, j, k]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_i;
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_i,
t_bin_count + bins.Gcc.s2b_i + size,
t_offset_all + bins.Gcc.s2b_i);
/* copy bins of particles to tmp_parts */
hipLaunchKernelGGL(( copy_central_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
hipFree(_offset_all);
} else { // no (old) parts
// Do not need to copy or prefix scan
}
/* Copy ghost particles received from WEST */
if (nparts_recv[WEST] > 0) {
t_nparts = nparts_recv[WEST] * (nparts_recv[WEST] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[WEST] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[WEST] / (real) t_nparts);
dim3 dim_nparts_w(t_nparts);
dim3 num_nparts_w(b_nparts);
offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_w), dim3(dim_nparts_w), 0, 0, _tmp_parts, _recv_parts_w,
nparts_recv[WEST], offset, WEST, _DOM);
} else { // nparts_recv[WEST] <= 0
// Do nothing
}
/* Copy ghost particles received from EAST */
if (nparts_recv[EAST] > 0) {
t_nparts = nparts_recv[EAST] * (nparts_recv[EAST] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[EAST] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[EAST] / (real) t_nparts);
dim3 dim_nparts_e(t_nparts);
dim3 num_nparts_e(b_nparts);
offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST]
+ nparts_recv[WEST];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_e), dim3(dim_nparts_e), 0, 0, _tmp_parts, _recv_parts_e,
nparts_recv[EAST], offset, EAST, _DOM);
  } else { // nparts_recv[EAST] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[EAST] + nparts_recv[WEST];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST];
// correct_periodic_boundaries_i<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
// }
// Free memory
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_e);
hipFree(_offset_w);
hipFree(_send_parts_e);
hipFree(_send_parts_w);
hipFree(_recv_parts_e);
hipFree(_recv_parts_w);
hipFree(_tmp_parts);
}
extern "C"
void cuda_transfer_parts_j(void)
{
  // Steps are the same as in cuda_transfer_parts_i, except we index with 'j'
// varying the slowest
/* Init execution config */
// thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(hipMalloc(&_offset_n, bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_s, bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_j), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
s1b = bins.Gcc.knb;
s2b = s1b * bins.Gcc.inb;
// North
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
if (dom[rank].n != MPI_PROC_NULL) {
// _bin_count is indexed with j varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _je plane
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[NORTH] = 0;
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// South
offset = GFY_LOC(0, bins.Gcc._js, 0, s1b, s2b);
if (dom[rank].s != MPI_PROC_NULL) {
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[SOUTH] = 0;
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Send number of parts to north/south */
nparts_recv[SOUTH] = 0; // init
nparts_recv[NORTH] = 0;
mpi_send_nparts_j();
/* Allocate memory for send and receiving particles */
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(hipMalloc(&_send_parts_n, send_alloc_n * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_send_parts_s, send_alloc_s * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_n, recv_alloc_n * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_s, recv_alloc_s * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[NORTH] > 0) {
hipLaunchKernelGGL(( pack_parts_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _send_parts_n, _parts, _offset_n,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_n, 0., send_alloc_n * sizeof(part_struct));
}
if (nparts_send[SOUTH] > 0) {
hipLaunchKernelGGL(( pack_parts_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _send_parts_s, _parts, _offset_s,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_s, 0., send_alloc_s * sizeof(part_struct));
}
hipDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_j();
/* Find number of particles currently in the NORTH/SOUTH ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// North
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
if (dom[rank].n != MPI_PROC_NULL) {
nparts_ghost[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_ghost[NORTH] = 0;
}
// South
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
if (dom[rank].s != MPI_PROC_NULL) {
nparts_ghost[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_ghost[SOUTH] = 0;
}
} else { // no parts
nparts_ghost[NORTH] = 0;
nparts_ghost[SOUTH] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[NORTH] + nparts_recv[SOUTH]
- nparts_ghost[NORTH] - nparts_ghost[SOUTH];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(hipMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of ALL of _bin_count */
int *_offset_all;
checkCudaErrors(hipMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
// Scan over bin_count[i, _js->_je, k]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_j;
thrust::device_ptr<int> t_offset_all(_offset_all);
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_j,
t_bin_count + bins.Gcc.s2b_j + size,
t_offset_all + bins.Gcc.s2b_j);
/* copy bins of particles to tmp_parts */
hipLaunchKernelGGL(( copy_central_bin_parts_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
hipFree(_offset_all);
} else { // no (old) parts
// Do nothing
}
  /* Copy ghost particles received from SOUTH */
if (nparts_recv[SOUTH] > 0) {
t_nparts = nparts_recv[SOUTH] * (nparts_recv[SOUTH] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[SOUTH] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[SOUTH] / (real) t_nparts);
dim3 dim_nparts_s(t_nparts);
dim3 num_nparts_s(b_nparts);
offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_s), dim3(dim_nparts_s), 0, 0, _tmp_parts, _recv_parts_s,
nparts_recv[SOUTH], offset, SOUTH, _DOM);
} else { // nparts_recv[SOUTH] <= 0
// Do nothing
}
/* Copy ghost particles received from NORTH */
if (nparts_recv[NORTH] > 0) {
t_nparts = nparts_recv[NORTH] * (nparts_recv[NORTH] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[NORTH] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[NORTH] / (real) t_nparts);
dim3 dim_nparts_n(t_nparts);
dim3 num_nparts_n(b_nparts);
offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH]
+ nparts_recv[SOUTH];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_n), dim3(dim_nparts_n), 0, 0, _tmp_parts, _recv_parts_n,
nparts_recv[NORTH], offset, NORTH, _DOM);
} else { // nparts_recv[NORTH] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[NORTH] + nparts_recv[SOUTH];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH];
// correct_periodic_boundaries_j<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
// }
// Free memory
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_n);
hipFree(_offset_s);
hipFree(_send_parts_n);
hipFree(_send_parts_s);
hipFree(_recv_parts_n);
hipFree(_recv_parts_s);
hipFree(_tmp_parts);
}
extern "C"
void cuda_transfer_parts_k(void)
{
  // Steps are the same as in cuda_transfer_parts_i, except we index with 'k'
// varying the slowest
/* Init execution config */
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.inb;
int s2b = s1b * bins.Gcc.jnb;
int offset;
/* Allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(hipMalloc(&_offset_t, bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_b, bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
//_part_bin = thrust::raw_pointer_cast(t_part_bin);
//_part_ind = thrust::raw_pointer_cast(t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
// Top
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ke plane
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[TOP] = 0;
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// Bottom
offset = GFZ_LOC(0, 0, bins.Gcc._ks, s1b, s2b);
if (dom[rank].b != MPI_PROC_NULL) {
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[BOTTOM] = 0;
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Send number of parts to top/bottom */
nparts_recv[TOP] = 0; // init
nparts_recv[BOTTOM] = 0;
mpi_send_nparts_k();
/* Allocate memory for send and receiving particles */
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(hipMalloc(&_send_parts_t, send_alloc_t * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_send_parts_b, send_alloc_b * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_t, recv_alloc_t * sizeof(part_struct)));
checkCudaErrors(hipMalloc(&_recv_parts_b, recv_alloc_b * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[TOP] > 0) {
hipLaunchKernelGGL(( pack_parts_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _send_parts_t, _parts, _offset_t,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_t, 0., send_alloc_t * sizeof(part_struct));
}
if (nparts_send[BOTTOM] > 0) {
hipLaunchKernelGGL(( pack_parts_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _send_parts_b, _parts, _offset_b,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//hipMemset(_send_parts_b, 0., send_alloc_b * sizeof(part_struct));
}
hipDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_k();
/* Find number of particles currently in the TOP/BOTTOM ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// TOP
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
if (dom[rank].t != MPI_PROC_NULL) {
nparts_ghost[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_ghost[TOP] = 0;
}
// BOTTOM
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
if (dom[rank].b != MPI_PROC_NULL) {
nparts_ghost[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_ghost[BOTTOM] = 0;
}
} else { // no parts
nparts_ghost[TOP] = 0;
nparts_ghost[BOTTOM] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[TOP] + nparts_recv[BOTTOM]
- nparts_ghost[TOP] - nparts_ghost[BOTTOM];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(hipMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of ALL of _bin_count */
int *_offset_all;
checkCudaErrors(hipMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
    // Scan over bin_count[i, j, _ks->_ke]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_k;
thrust::device_ptr<int> t_offset_all(_offset_all);
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_k,
t_bin_count + bins.Gcc.s2b_k + size,
t_offset_all + bins.Gcc.s2b_k);
/* copy bins of particles to tmp_parts */
hipLaunchKernelGGL(( copy_central_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
hipFree(_offset_all);
} else { // no (old) parts
// Do nothing
}
  /* Copy ghost particles received from BOTTOM */
if (nparts_recv[BOTTOM] > 0) {
t_nparts = nparts_recv[BOTTOM] * (nparts_recv[BOTTOM] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[BOTTOM] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[BOTTOM] / (real) t_nparts);
dim3 dim_nparts_b(t_nparts);
dim3 num_nparts_b(b_nparts);
offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_b), dim3(dim_nparts_b), 0, 0, _tmp_parts, _recv_parts_b,
nparts_recv[BOTTOM], offset, BOTTOM, _DOM);
} else { // nparts_recv[BOTTOM] <= 0
// Do nothing
}
/* Copy ghost particles received from TOP */
if (nparts_recv[TOP] > 0) {
t_nparts = nparts_recv[TOP] * (nparts_recv[TOP] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[TOP] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[TOP] / (real) t_nparts);
dim3 dim_nparts_t(t_nparts);
dim3 num_nparts_t(b_nparts);
offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP]
+ nparts_recv[BOTTOM];
hipLaunchKernelGGL(( copy_ghost_bin_parts), dim3(num_nparts_t), dim3(dim_nparts_t), 0, 0, _tmp_parts, _recv_parts_t,
nparts_recv[TOP], offset, TOP, _DOM);
} else { // nparts_recv[TOP] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[TOP] + nparts_recv[BOTTOM];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP];
// correct_periodic_boundaries_k<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
//
// }
// Free memory
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_t);
hipFree(_offset_b);
hipFree(_send_parts_t);
hipFree(_send_parts_b);
hipFree(_recv_parts_t);
hipFree(_recv_parts_b);
hipFree(_tmp_parts);
}
extern "C"
void cuda_move_parts()
{
//printf("N%d >> Moving parts (nparts %d)\n", rank, nparts);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
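  /* Overview of the flow below:
   *  1) compute collision, spring, and wall forces (binning the particles
   *     first when nparts > 1 so particle-particle contacts can be found)
   *  2) communicate partial forces across subdomain faces
   *  3) move_parts_a updates velocities and accelerations
   *  4) forces are rebuilt with the updated velocities and communicated again
   *  5) move_parts_b updates positions
   * Note that when nparts == 0, t_nparts is 0 and the ceil() above evaluates
   * 0/0; the resulting config is never used, since every launch below sits
   * inside the nparts > 0 guard. */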
if (nparts > 0) {
real eps = 0.01; // compact support parameter
if (nparts == 1) {
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
hipLaunchKernelGGL(( move_parts_a), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
} else if (nparts > 1) {
/* Initialize forces to zero */
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
/* Allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
/* Reset memory */
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
//count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
// _bin_count);
/* Deal with particle-particle collisions */
hipLaunchKernelGGL(( collision_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
/* Calculate wall collision forces */
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free _part_bin, _part_ind (re-malloc'd in comm functions) */
hipFree(_part_ind);
hipFree(_part_bin);
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
/*** Update velocities and accelerations ***/
hipLaunchKernelGGL(( move_parts_a), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
/* Re-alloc memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind2(_part_ind);
thrust::device_ptr<int> t_part_bin2(_part_bin);
/* Reset memory */
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
//checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
//thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin2, t_part_bin2 + nparts, t_part_ind2);
/* Find start and ending index of each bin */
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Initialize forces to zero */
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
/* Deal with particle-particle collisions */
hipLaunchKernelGGL(( collision_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
/* Calculate wall collision forces */
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free memory */
hipFree(_part_ind);
hipFree(_part_bin);
} // end if (nparts > 1)
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
/* Move particles */
hipLaunchKernelGGL(( move_parts_b), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
} // end if (nparts > 0)
}
extern "C"
void cuda_move_parts_sub()
{
//printf("N%d >> Moving parts (sub-Lamb's iteration) (nparts %d)\n", rank, nparts);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
real eps = 0.01; // compact support parameter
if (nparts == 0) {
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
} else if (nparts == 1) {
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
hipLaunchKernelGGL(( move_parts_a), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
} else if (nparts > 1) {
/* Initialize forces to zero */
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
/* Allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
/* Reset memory */
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
//count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
// _bin_count);
/* Deal with particle-particle collisions */
hipLaunchKernelGGL(( collision_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
/* Calculate wall collision forces */
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free _part_bin, _part_ind (re-malloc'd in comm functions) */
checkCudaErrors(hipFree(_part_ind));
checkCudaErrors(hipFree(_part_bin));
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k(); // uses bin_fill_k
/*** Update velocities and accelerations ***/
hipLaunchKernelGGL(( move_parts_a), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
/* Re-alloc memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind2(_part_ind);
thrust::device_ptr<int> t_part_bin2(_part_bin);
/* Reset memory */
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
//checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
//thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin2, t_part_bin2 + nparts, t_part_ind2);
/* Find start and ending index of each bin */
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Initialize forces to zero */
hipLaunchKernelGGL(( collision_init), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts);
/* Deal with particle-particle collisions */
hipLaunchKernelGGL(( collision_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
hipLaunchKernelGGL(( spring_parts), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _DOM);
/* Calculate wall collision forces */
hipLaunchKernelGGL(( collision_walls), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free memory */
checkCudaErrors(hipFree(_part_ind));
checkCudaErrors(hipFree(_part_bin));
} // end if (nparts > 1)
}
extern "C"
void cuda_update_part_velocity()
{
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
if (nparts > 0) {
hipLaunchKernelGGL(( move_parts_a), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
}
}
extern "C"
void cuda_update_part_position()
{
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
if (nparts > 0) {
hipLaunchKernelGGL(( move_parts_b), dim3(num_nparts), dim3(dim_nparts), 0, 0, _parts, nparts, dt, g, gradP,
rho_f);
}
}
extern "C"
void cuda_build_cages(void)
{
/* Reset flag_{u,v,w} to fluid */
hipLaunchKernelGGL(( reset_flag_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _flag_u);
hipLaunchKernelGGL(( reset_flag_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _flag_v);
hipLaunchKernelGGL(( reset_flag_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _flag_w);
/* Reset phase, phase_shell to fluid */
if (NPARTS > 0) {
hipLaunchKernelGGL(( reset_phases), dim3(blocks.Gcc.num_knb), dim3(blocks.Gcc.dim_knb), 0, 0, _phase, _phase_shell);
/* Init exec configuration */
int tx = 0.5*MAX_THREADS_DIM;
int ty = 0.5*MAX_THREADS_DIM;
int tz = 0.5*MAX_THREADS_DIM;
real itx = 1./tx;
real ity = 1./ty;
real itz = 1./tz;
int cage_dim[3];
int *_cage_dim;
checkCudaErrors(hipMalloc(&_cage_dim, 3 * sizeof(int)));
/* build phase */
for (int n = 0; n < nparts; n++) {
// Set up cage extents
// _parts is different than parts, so we need to do this device-side
// and copy back to get exec config
hipLaunchKernelGGL(( cage_setup), dim3(1),dim3(1), 0, 0, _parts, n, _cage_dim);
hipMemcpy(cage_dim, _cage_dim, 3 * sizeof(int), hipMemcpyDeviceToHost);
int bx = (int) ceil((real) cage_dim[0] * itx);
int by = (int) ceil((real) cage_dim[1] * ity);
int bz = (int) ceil((real) cage_dim[2] * itz);
dim3 dimb_3(tx, ty, tz);
dim3 numb_3(bx, by, bz);
if (bx > 0 && by > 0 && bz > 0) {
hipLaunchKernelGGL(( build_phase), dim3(numb_3), dim3(dimb_3), 0, 0, _parts, n, _cage_dim, _phase,
_phase_shell, _DOM, _bc);
}
}
/* build phase_shell (needs phase to exist) */
for (int n = 0; n < nparts; n++) {
// Set up cage extents
// _parts is different than parts, so we need to do this device-side
// and copy back to get exec config
hipLaunchKernelGGL(( cage_setup), dim3(1),dim3(1), 0, 0, _parts, n, _cage_dim);
hipMemcpy(cage_dim, _cage_dim, 3 * sizeof(int), hipMemcpyDeviceToHost);
int bx = (int) ceil((real) cage_dim[0] * itx);
int by = (int) ceil((real) cage_dim[1] * ity);
int bz = (int) ceil((real) cage_dim[2] * itz);
dim3 dimb_3(tx, ty, tz);
dim3 numb_3(bx, by, bz);
if (bx > 0 && by > 0 && bz > 0) {
hipLaunchKernelGGL(( build_phase_shell), dim3(numb_3), dim3(dimb_3), 0, 0, _parts, n, _cage_dim, _phase,
_phase_shell, _DOM, _bc);
}
}
hipFree(_cage_dim);
//phase_shell_x<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(_parts, _phase, _phase_shell);
//phase_shell_y<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(_parts, _phase, _phase_shell);
//phase_shell_z<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_parts, _phase, _phase_shell);
/* Build flags from phase, phase_shell */
// Need phase shell at ghost cells, but not flag
hipLaunchKernelGGL(( cage_flag_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _flag_u, _phase, _phase_shell);
hipLaunchKernelGGL(( cage_flag_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _flag_v, _phase, _phase_shell);
hipLaunchKernelGGL(( cage_flag_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _flag_w, _phase, _phase_shell);
}
/* Flag external boundaries
* * Only for non-periodic conditions
* * Only if subdomain is on domain boundary
*/
// i direction
if (bc.pW != PERIODIC && bc.pE != PERIODIC) {
if (dom[rank].I == DOM.Is) {
hipLaunchKernelGGL(( flag_external_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _flag_u,
dom[rank].Gfx._is);
}
if (dom[rank].I == DOM.Ie) {
hipLaunchKernelGGL(( flag_external_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _flag_u,
dom[rank].Gfx._ie);
}
}
// j direction
if (bc.pS != PERIODIC && bc.pN != PERIODIC) {
if (dom[rank].J == DOM.Js) {
hipLaunchKernelGGL(( flag_external_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _flag_v,
dom[rank].Gfy._js);
}
if (dom[rank].J == DOM.Je) {
hipLaunchKernelGGL(( flag_external_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _flag_v,
dom[rank].Gfy._je);
}
}
// k direction
if (bc.pB != PERIODIC && bc.pT != PERIODIC) {
if (dom[rank].K == DOM.Ks) {
hipLaunchKernelGGL(( flag_external_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _flag_w,
dom[rank].Gfz._ks);
}
if (dom[rank].K == DOM.Ke) {
hipLaunchKernelGGL(( flag_external_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _flag_w,
dom[rank].Gfz._ke);
}
}
/* Fill in flag_{u,v,w} ghost cells for periodic boundary conditions -- only
necessary with particles bc of cage */
// Do this exactly like we do ghost cell exchanges -- since dom[rank].e will
// be MPI_PROC_NULL if need be, we don't need to worry about exchanging over
// periodic boundaries
}
extern "C"
void cuda_part_BC(void)
{
//printf("N%d >> Applying particle boundary conditions to u...\n", rank);
// u
hipLaunchKernelGGL(( part_BC_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _u, _phase, _flag_u,
_parts, nu, nparts);
// v
hipLaunchKernelGGL(( part_BC_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _v, _phase, _flag_v,
_parts, nu, nparts);
// w
hipLaunchKernelGGL(( part_BC_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _w, _phase, _flag_w,
_parts, nu, nparts);
}
extern "C"
void cuda_part_BC_star(void)
{
// u
hipLaunchKernelGGL(( part_BC_u), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _u_star, _phase,
_flag_u, _parts, nu, nparts);
// v
hipLaunchKernelGGL(( part_BC_v), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _v_star, _phase,
_flag_v, _parts, nu, nparts);
// w
hipLaunchKernelGGL(( part_BC_w), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _w_star, _phase,
_flag_w, _parts, nu, nparts);
}
extern "C"
void cuda_part_BC_p(void)
{
hipLaunchKernelGGL(( part_BC_p), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _p0, _rhs_p, _phase,
_phase_shell, _parts, mu, nu, dt, dt0, gradP, rho_f, nparts, s_beta, s_ref, g);
}
extern "C"
void cuda_part_p_fill(void)
{
hipLaunchKernelGGL(( part_BC_p_fill), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _p, _phase, _parts,
mu, nu, rho_f, gradP, nparts, s_beta, s_ref, g);
}
extern "C"
void cuda_parts_internal(void)
{
if (nparts > 0) {
hipLaunchKernelGGL(( internal_u), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, _parts, _flag_u,
_phase, nparts);
hipLaunchKernelGGL(( internal_v), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, _parts, _flag_v,
_phase, nparts);
hipLaunchKernelGGL(( internal_w), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, _parts, _flag_w,
_phase, nparts);
}
}
extern "C"
void cuda_update_part_forces_i(void)
{
/* Outline of communication
* The following need to be communicated before move_parts_{a,b}
* * kFx, kFy, kFz -- subdom + ghost, but same
* * iFx, iFy, iFz -- subdom
* * iLx, iLy, iLz -- subdom
* * iSt, St -- subdom
* This communication is similar to the communication of partial sums during
* the Lebedev quadrature (see cuda_physalis.cu:cuda_partial_sum_i)
* 1) All particles in the outer computational bin plane need to be sent,
* for example the (j,k) planes at _bins.Gcc.{_is, _ie}.
* 2) Bin the particles using i indexing to find _bin_{start,end,count}
* 3) Reduce _bin_count at _is, _ie to find nparts_send_{e,w}
* 4) Communicate nparts_send_{e,w} with appropriate subdom to find
* nparts_recv_{e,w}
* 5) Excl. prefix scan bin_count over _is, _ie to find destination index for
* packed particle data
* 6) Allocate send and recv array
   * 7) Pack send array using destination offsets
* 8) Communicate send->recv
* 9) Excl. prefix over _isb, _ieb to find unpacking indices
* 10) Unpack
* 11) Repeat for j, k
*/
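  /* Illustrative sketch of steps 3)-5) and 9) above (hypothetical counts):
   * with i varying slowest, the _ie plane occupies bins.Gcc.s2b_i contiguous
   * entries of _bin_count starting at GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b).
   * If that slice reads {1, 0, 2, 0}, the reduction gives nparts_send[EAST] = 3
   * and the exclusive scan gives packing offsets {0, 1, 1, 3}, so the bin
   * holding 2 particles writes them at packed-particle slots 1 and 2 of
   * _force_send_e. The same scan over the _ieb ghost plane after the exchange
   * provides the unpacking offsets. */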
/* Initialize execution config */
// Thread over east/west faces
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.jnb; // custom strides
int s2b = s1b * bins.Gcc.knb;
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(hipMalloc(&_offset_e, bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_w, bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_i), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
// East: _ie, _ieb
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest
// Do reduction over bin_count, given correct starting offset of _ie plane
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Also determine number of parts to recv */
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
nparts_recv[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else { // no parts to send or recv
nparts_send[EAST] = 0;
nparts_recv[EAST] = 0;
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West: _is, _isb
if (dom[rank].w != MPI_PROC_NULL) {
// nparts_send
offset = GFX_LOC(bins.Gcc._is, 0, 0, s1b, s2b);
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
// send offsets
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
// nparts_recv
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
nparts_recv[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_send[WEST] = 0;
nparts_recv[WEST] = 0;
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
nparts_recv[EAST] = 0;
nparts_recv[WEST] = 0;
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
//nparts_recv[WEST] = 0; // init
//nparts_recv[EAST] = 0;
//mpi_send_nparts_i();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_e[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
// See accompanying note at the same location in cuda_transfer_parts_i
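  //   e.g., the iFy component of the n-th packed particle is stored at
  //   _force_send_e[4 + 9*n]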
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
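  // (the x*(x > 0) + (x == 0) pattern guarantees an allocation of at least one
  // element, so MPI is never handed a null device pointer -- see the note at
  // this location in cuda_transfer_parts_i)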
checkCudaErrors(hipMalloc(&_force_send_e, send_alloc_e*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_send_w, send_alloc_w*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_e, recv_alloc_e*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_w, recv_alloc_w*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[EAST] > 0) {
hipLaunchKernelGGL(( pack_forces_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _force_send_e, _offset_e,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//hipMemset(_force_send_e, 0., send_alloc_e * n_send * sizeof(real));
}
if (nparts_send[WEST] > 0) {
hipLaunchKernelGGL(( pack_forces_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _force_send_w, _offset_w,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//hipMemset(_force_send_w, 0., send_alloc_w * n_send * sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_i();
/* Find offsets in ghost bins */
if (nparts > 0) {
// East: _ieb
if (dom[rank].e != MPI_PROC_NULL) {
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[EAST] > 0) {
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West: _isb plane
if (dom[rank].w != MPI_PROC_NULL) {
if (nparts_recv[WEST] > 0) {
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[EAST] > 0) {
hipLaunchKernelGGL(( unpack_forces_e), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _force_recv_e, _offset_e,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[WEST] > 0) {
hipLaunchKernelGGL(( unpack_forces_w), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _force_recv_w, _offset_w,
_bin_start, _bin_count, _part_ind, _parts);
}
    hipDeviceSynchronize(); // ensure unpacking is complete
} else { // nparts <= 0
hipMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
hipMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Free */
hipFree(_force_send_e);
hipFree(_force_send_w);
hipFree(_force_recv_e);
hipFree(_force_recv_w);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_e);
hipFree(_offset_w);
}
extern "C"
void cuda_update_part_forces_j(void)
{
//printf("N%d >> Updating particle forces in j... (nparts %d)\n", rank, nparts);
/* Communication follows same pattern as cuda_update_part_forces_i */
/* Initialize execution config */
// Thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
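  // i.e., t_nparts = min(nparts, MAX_THREADS_1D) threads per block and
  // b_nparts blocks, enough to cover all nparts particles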
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.knb;
int s2b = s1b * bins.Gcc.inb;
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(hipMalloc(&_offset_n, bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_s, bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_j), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_j), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
    // North: _je, _jeb
if (dom[rank].n != MPI_PROC_NULL) {
// _bin_count is indexed with j varying slowest
// nparts_send
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
// send offsets
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// nparts_recv
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
nparts_recv[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
    } else { // no parts to send or recv
nparts_send[NORTH] = 0;
nparts_recv[NORTH] = 0;
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// SOUTH: _js planes
if (dom[rank].s != MPI_PROC_NULL) {
// nparts_send
offset = GFY_LOC(0, bins.Gcc._js, 0, s1b, s2b);
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
// nparts_recv
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
nparts_recv[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_send[SOUTH] = 0;
nparts_recv[SOUTH] = 0;
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
nparts_recv[NORTH] = 0;
nparts_recv[SOUTH] = 0;
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Send number of parts to NORTH/SOUTH */
// origin target
// nparts_send[SOUTH] -> nparts_recv[NORTH]
// nparts_recv[SOUTH] <- nparts_send[NORTH]
//nparts_recv[SOUTH] = 0; // init
//nparts_recv[NORTH] = 0;
//mpi_send_nparts_j();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_n[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(hipMalloc(&_force_send_n, send_alloc_n*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_send_s, send_alloc_s*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_n, recv_alloc_n*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_s, recv_alloc_s*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[NORTH] > 0) {
hipLaunchKernelGGL(( pack_forces_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _force_send_n, _offset_n,
_bin_start, _bin_count, _part_ind, _parts);
} else {
hipMemset(_force_send_n, 0., send_alloc_n*n_send*sizeof(real));
}
if (nparts_send[SOUTH] > 0) {
hipLaunchKernelGGL(( pack_forces_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _force_send_s, _offset_s,
_bin_start, _bin_count, _part_ind, _parts);
} else {
hipMemset(_force_send_s, 0., send_alloc_s*n_send*sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_j();
/* Find offsets in ghost bins */
if (nparts > 0) {
// NORTH: _jeb
if (dom[rank].n != MPI_PROC_NULL) {
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[NORTH] > 0) {
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
    } else { // no parts to recv
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// SOUTH: _jsb plane
if (dom[rank].s != MPI_PROC_NULL) {
if (nparts_recv[SOUTH] > 0) {
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[NORTH] > 0) {
hipLaunchKernelGGL(( unpack_forces_n), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _force_recv_n, _offset_n,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[SOUTH] > 0) {
hipLaunchKernelGGL(( unpack_forces_s), dim3(bin_num_jnb), dim3(bin_dim_jnb), 0, 0, _force_recv_s, _offset_s,
_bin_start, _bin_count, _part_ind, _parts);
}
    hipDeviceSynchronize(); // ensure unpacking is complete
} else { // nparts <= 0
hipMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
hipMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Free */
hipFree(_force_send_n);
hipFree(_force_send_s);
hipFree(_force_recv_n);
hipFree(_force_recv_s);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_n);
hipFree(_offset_s);
}
extern "C"
void cuda_update_part_forces_k(void)
{
//printf("N%d >> Updating particle forces in k... (nparts %d)\n", rank, nparts);
/* Communication follows same pattern as cuda_update_part_forces_i */
/* Initialize execution config */
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.inb;
int s2b = s1b * bins.Gcc.jnb;
int offset;
/* Allocate */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(hipMalloc(&_offset_t, bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(hipMalloc(&_offset_b, bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_k), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_k), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
// TOP: _ke
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest
// nparts_send
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// nparts_recv
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
nparts_recv[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
    } else { // no parts to send or recv
nparts_send[TOP] = 0;
nparts_recv[TOP] = 0;
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// BOTTOM: _ks planes
if (dom[rank].b != MPI_PROC_NULL) {
// nparts_send
offset = GFZ_LOC(0, 0, bins.Gcc._ks, s1b, s2b);
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
// nparts_recv
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
nparts_recv[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_send[BOTTOM] = 0;
nparts_recv[BOTTOM] = 0;
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
nparts_recv[TOP] = 0;
nparts_recv[BOTTOM] = 0;
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Send number of parts to TOP/BOTTOM */
// origin target
// nparts_send[BOTTOM] -> nparts_recv[TOP]
// nparts_recv[BOTTOM] <- nparts_send[TOP]
//nparts_recv[BOTTOM] = 0; // init
//nparts_recv[TOP] = 0;
//mpi_send_nparts_k();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_t[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(hipMalloc(&_force_send_t, send_alloc_t*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_send_b, send_alloc_b*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_t, recv_alloc_t*n_send*sizeof(real)));
checkCudaErrors(hipMalloc(&_force_recv_b, recv_alloc_b*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[TOP] > 0) {
hipLaunchKernelGGL(( pack_forces_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _force_send_t, _offset_t,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//hipMemset(_force_send_t, 0., send_alloc_t * n_send * sizeof(real));
}
if (nparts_send[BOTTOM] > 0) {
hipLaunchKernelGGL(( pack_forces_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _force_send_b, _offset_b,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//hipMemset(_force_send_b, 0., send_alloc_b * n_send * sizeof(real));
}
hipDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_k();
if (nparts > 0) {
/* Find offsets in ghost bins */
// TOP: _keb
if (dom[rank].t != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
    } else { // no parts to recv
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// BOTTOM: _ksb plane
if (dom[rank].b != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
if (nparts_recv[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[TOP] > 0) {
hipLaunchKernelGGL(( unpack_forces_t), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _force_recv_t, _offset_t,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[BOTTOM] > 0) {
hipLaunchKernelGGL(( unpack_forces_b), dim3(bin_num_knb), dim3(bin_dim_knb), 0, 0, _force_recv_b, _offset_b,
_bin_start, _bin_count, _part_ind, _parts);
}
    hipDeviceSynchronize(); // ensure unpacking is complete
} else {
hipMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
hipMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Free */
hipFree(_force_send_t);
hipFree(_force_send_b);
hipFree(_force_recv_t);
hipFree(_force_recv_b);
hipFree(_part_ind);
hipFree(_part_bin);
hipFree(_offset_t);
hipFree(_offset_b);
}
| f3900f5c3083febe12eb2a7c2ef886d7e74ac124.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <cuda.h>
#include <thrust/sort.h>
#include "cuda_particle.h"
#include <helper_cuda.h>
#include <thrust/scan.h>
#include <thrust/device_ptr.h>
__constant__ bin_struct _bins;
real *_force_send_e;
real *_force_send_w;
real *_force_send_n;
real *_force_send_s;
real *_force_send_t;
real *_force_send_b;
real *_force_recv_e;
real *_force_recv_w;
real *_force_recv_n;
real *_force_recv_s;
real *_force_recv_t;
real *_force_recv_b;
extern "C"
void cuda_part_malloc_host(void)
{
// Flags in cuda_bluebottle.cu:cuda_dom_malloc_host since they are needed even
// without particles
checkCudaErrors(cudaMallocHost(&phase, dom[rank].Gcc.s3b * sizeof(int)));
cpumem += dom[rank].Gcc.s3b * sizeof(int);
checkCudaErrors(cudaMallocHost(&phase_shell, dom[rank].Gcc.s3b * sizeof(int)));
cpumem += dom[rank].Gcc.s3b * sizeof(int);
}
extern "C"
void cuda_part_malloc_dev(void)
{
//printf("N%d >> Allocating device particle memory...\n", rank);
// Flags in cuda_bluebottle.cu:cuda_dom_malloc_dev since they are needed even
// without particles
// Phase
checkCudaErrors(cudaMalloc(&_phase, dom[rank].Gcc.s3b * sizeof(int)));
gpumem += dom[rank].Gcc.s3b * sizeof(int);
checkCudaErrors(cudaMalloc(&_phase_shell, dom[rank].Gcc.s3b * sizeof(int)));
gpumem += dom[rank].Gcc.s3b * sizeof(int);
// Allocate device variables
if (NPARTS > 0) {
checkCudaErrors(cudaMalloc(&_parts, nparts * sizeof(part_struct)));
    gpumem += nparts * sizeof(part_struct);
checkCudaErrors(cudaMemcpyToSymbol(_bins, &bins, sizeof(bin_struct)));
checkCudaErrors(cudaMalloc(&_bin_start, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
checkCudaErrors(cudaMalloc(&_bin_end, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
checkCudaErrors(cudaMalloc(&_bin_count, bins.Gcc.s3b * sizeof(int)));
gpumem += bins.Gcc.s3b * sizeof(int);
}
/* These arrays are allocated/free'd in their functions, but listed here for
* reference
* _part_ind
* _part_bin
* _send_parts_{e,w}
* _recv_parts_{e,w}
*/
/* For pointers to pointers, if we need to go back... */
// https://stackoverflow.com/questions/26111794/how-to-use-pointer-to-pointer-
// in-cuda
// https://stackoverflow.com/questions/15113960/cuda-allocating-array-of-
// pointers-to-images-and-the-images
// https://stackoverflow.com/questions/23609770/cuda-double-pointer-memory-copy
// -->https://stackoverflow.com/questions/27931630/copying-array-of-pointers-
// into-device-memory-and-back-cuda
}
extern "C"
void cuda_part_push(void)
{
if (NPARTS > 0) {
checkCudaErrors(cudaMemcpy(_parts, parts, nparts * sizeof(part_struct),
cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMemcpy(_phase, phase, dom[rank].Gcc.s3b * sizeof(int),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phase_shell, phase_shell, dom[rank].Gcc.s3b * sizeof(int),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_u, flag_u, dom[rank].Gfx.s3b * sizeof(int),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_v, flag_v, dom[rank].Gfy.s3b * sizeof(int),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_flag_w, flag_w, dom[rank].Gfz.s3b * sizeof(int),
cudaMemcpyHostToDevice));
}
extern "C"
void cuda_part_pull(void)
{
/* Declare temporary part structure and nparts_subdom */
part_struct *_tmp_parts;
nparts_subdom = 0;
/* Re-allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
if (nparts > 0) {
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
/* Find each particle's bin */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
_bin_count);
/* Set ghost bin count to zero (GFZ indexed) */
zero_ghost_bins_i<<<bin_num_inb, bin_dim_inb>>>(_bin_count);
zero_ghost_bins_j<<<bin_num_jnb, bin_dim_jnb>>>(_bin_count);
zero_ghost_bins_k<<<bin_num_knb, bin_dim_knb>>>(_bin_count);
/* Allocate memory to find bin offset target indices in tmp part_struct */
int *_bin_offset;
checkCudaErrors(cudaMalloc(&_bin_offset, bins.Gcc.s3b * sizeof(int)));
/* Prefix scan _bin_count to find target indices in tmp part_struct */
thrust::device_ptr<int> t_bin_count(_bin_count);
thrust::device_ptr<int> t_bin_offset(_bin_offset);
thrust::exclusive_scan(t_bin_count, t_bin_count + bins.Gcc.s3b, t_bin_offset);
/* Reduce bin_count to find nparts in subdomain (ghost bins are zero'd) */
nparts_subdom = thrust::reduce(t_bin_count, t_bin_count + bins.Gcc.s3b,
0., thrust::plus<int>());
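    // Since the ghost bins were zeroed above, this sum counts only the
    // particles owned by this subdomain; ghost copies are excluded.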
/* Allocate new device part struct (no ghost particles) */
checkCudaErrors(cudaMalloc(&_tmp_parts, nparts_subdom * sizeof(part_struct)));
/* Copy subdom parts to tmp part_struct (only in subdom, so [in, jn]) */
// thread over inner bins (no ghost bins)
tx = bins.Gcc.in * (bins.Gcc.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.in >= MAX_THREADS_DIM);
ty = bins.Gcc.jn * (bins.Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jn >= MAX_THREADS_DIM);
bx = (int) ceil((real) bins.Gcc.in / (real) tx);
by = (int) ceil((real) bins.Gcc.jn / (real) ty);
dim3 bin_num_kn(bx, by);
dim3 bin_dim_kn(tx, ty);
copy_subdom_parts<<<bin_num_kn, bin_dim_kn>>>(_tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _bin_offset);
cudaFree(_bin_offset);
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_subdom = 0;
checkCudaErrors(cudaMalloc(&_tmp_parts, nparts_subdom * sizeof(part_struct)));
}
/* Allocate new host parts with nparts in subdom */
free(parts);
parts = (part_struct*) malloc(nparts_subdom * sizeof(part_struct));
// Pull from device
checkCudaErrors(cudaMemcpy(parts, _tmp_parts, nparts_subdom * sizeof(part_struct),
cudaMemcpyDeviceToHost));
// Free
cudaFree(_tmp_parts);
cudaFree(_part_ind);
cudaFree(_part_bin);
// Double check the number of particles is correct
int reduce_parts = 0;
MPI_Allreduce(&nparts_subdom, &reduce_parts, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
if (reduce_parts != NPARTS) {
printf("N%d >> Something went wrong. NPARTS = %d, but %d exist\n",
rank, NPARTS, reduce_parts);
printf("N%d >> Has %d parts\n", rank, nparts_subdom);
exit(EXIT_FAILURE);
}
}
extern "C"
void cuda_part_pull_debug(void)
{
// Pull ALL particles, including ghosts
// Allocate new host part_struct with new nparts
free(parts);
parts = (part_struct*) malloc(nparts * sizeof(part_struct));
// Pull all particles from device
checkCudaErrors(cudaMemcpy(parts, _parts, nparts * sizeof(part_struct),
cudaMemcpyDeviceToHost));
}
extern "C"
void cuda_part_free(void)
{
//printf("N%d >> Freeing device particle memory...\n", rank);
// Flags in cuda_dom_free
checkCudaErrors(cudaFreeHost(phase));
checkCudaErrors(cudaFreeHost(phase_shell));
checkCudaErrors(cudaFree(_phase));
checkCudaErrors(cudaFree(_phase_shell));
if (NPARTS > 0) {
checkCudaErrors(cudaFree(_parts));
checkCudaErrors(cudaFree(_bin_start));
checkCudaErrors(cudaFree(_bin_end));
checkCudaErrors(cudaFree(_bin_count));
}
}
extern "C"
void cuda_transfer_parts_i(void)
{
//printf("N%d >> Transfering parts in i, nparts = %d\n", rank, nparts);
/* Transfer particles east and west
* * Bin the particles, indexing with `i` varying slowest
* * Sort particles by their bin
* * Find start and end of each bin's particles
* * Find number of particles in each bin
* * Find number of particles in _is & _ie planes. These need to be sent W/E
* * Communicate these number east and west. Each process now knows how many
* to send and recv
* * Allocate memory for particle send and recv
* * Copy particles into sending arrays. Each bin can find the offset target
* index for its particles by performing a prefix scan.
* * Communicate particles east and west, send -> recv
* * Recv'd parts exist in the ghost bins and replace whatever existed there
* at the last time step. Sum the particles in _isb & _ieb and subtract
 *    from nparts. This, plus the number of particles recv'd from E/W, is the
* number of new particles
* * Allocate temp part structure to hold all new particles.
* * Reduce bin_count from _is->_ie to find nparts that we will keep
 * * Prefix scan from _is -> _ie to find offset index for particle copy to
* temp struct
* * Backfill recv'd particles to the end of the temp array
* * Repeat process for j, k to take care of edge, corner. Indexing will be
* different to take advantage of memory coalescence and the prefix scan
* two steps back
*/
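  /* A minimal, non-compiled sketch (kept inside #if 0) of the bin/sort/scan
   * pattern described above, using hypothetical data: five particles falling
   * into bins 2, 0, 2, 1, 2. Sorting the indices by bin groups each bin's
   * particles contiguously, and an exclusive prefix scan of the per-bin
   * counts yields each bin's packing offset into a send buffer. The _ex_*
   * names are illustrative only and exist nowhere else in this file. */
#if 0
  {
    int h_bin[5] = {2, 0, 2, 1, 2};   // hypothetical bin id of each particle
    int h_ind[5] = {0, 1, 2, 3, 4};   // particle indices
    int h_cnt[3] = {1, 1, 3};         // particles per bin (bins 0, 1, 2)
    int *_ex_bin, *_ex_ind, *_ex_cnt, *_ex_off;
    cudaMalloc(&_ex_bin, 5 * sizeof(int));
    cudaMalloc(&_ex_ind, 5 * sizeof(int));
    cudaMalloc(&_ex_cnt, 3 * sizeof(int));
    cudaMalloc(&_ex_off, 3 * sizeof(int));
    cudaMemcpy(_ex_bin, h_bin, 5 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(_ex_ind, h_ind, 5 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(_ex_cnt, h_cnt, 3 * sizeof(int), cudaMemcpyHostToDevice);
    thrust::device_ptr<int> t_ex_bin(_ex_bin);
    thrust::device_ptr<int> t_ex_ind(_ex_ind);
    thrust::device_ptr<int> t_ex_cnt(_ex_cnt);
    thrust::device_ptr<int> t_ex_off(_ex_off);

    // Group indices by bin: keys become {0,1,2,2,2}; values become
    // {1, 3, 0, 2, 4} (order within a bin is not guaranteed by sort_by_key)
    thrust::sort_by_key(t_ex_bin, t_ex_bin + 5, t_ex_ind);

    // Packing offsets: exclusive scan of {1, 1, 3} -> {0, 1, 2}
    thrust::exclusive_scan(t_ex_cnt, t_ex_cnt + 3, t_ex_off);

    cudaFree(_ex_bin);
    cudaFree(_ex_ind);
    cudaFree(_ex_cnt);
    cudaFree(_ex_off);
  }
#endif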
/* NOTE
   * cuda-memcheck occasionally produces a "bulk_kernel_by_value: an illegal
   * memory address was encountered" error on a (thrust) call to
* cudaDeviceSynchronize. This doesn't seem to be reliably reproducible
* (occurs on any of the several thrust calls in this function). This does
* not seem to affect the results in any way, but should be further
* investigated. See bug id 008.
*/
/* Init execution config -- thread over east/west faces */
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate memory */
// These are realloc'd every time
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(cudaMalloc(&_offset_e, bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_w, bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_i<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
s1b = bins.Gcc.jnb;
s2b = s1b * bins.Gcc.knb;
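    // (strides for an ordering with i varying slowest; one i-plane holds
    // bins.Gcc.s2b_i = jnb*knb contiguous entries of _bin_count)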
// East
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ie plane
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // no parts to send
nparts_send[EAST] = 0;
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West
offset = GFX_LOC(bins.Gcc._is, 0, 0, s1b, s2b);
if (dom[rank].w != MPI_PROC_NULL) {
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
nparts_send[WEST] = 0;
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
nparts_recv[WEST] = 0; // init
nparts_recv[EAST] = 0;
mpi_send_nparts_i();
  /* Allocate memory for sending and receiving particles */
// NOTE: If no particles need to be sent/received in a given direction, this
// allocates a memory location with size zero which returns a null device
// pointer. If this is passed to MPI_Win_create(base, ...) as the base in
// CUDA 9.0, it causes MPI to hang. This was not an issue in CUDA 7.5
//
// The fix involves fooling MPI by allocating a very small amount of dummy
// information if no particles are to be sent. This gives the location a valid
  // memory pointer, rather than a null pointer. The MPI communication still knows
  // that the allocated window size and info to be sent are zero, and nothing is
// unpacked because that is wrapped in an if-statement already. This doesn't
  // affect most cases, where particles are communicated in every direction at
  // every time step; this will only affect extremely dilute cases.
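  // e.g., nparts_send[EAST] == 0 gives send_alloc_e = 0*(0 > 0) + (0 == 0) = 1,
  // a one-element dummy allocation that is never packed or unpacked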
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
checkCudaErrors(cudaMalloc(&_send_parts_e, send_alloc_e * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_send_parts_w, send_alloc_w * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_e, recv_alloc_e * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_w, recv_alloc_w * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[EAST] > 0) {
pack_parts_e<<<bin_num_inb, bin_dim_inb>>>(_send_parts_e, _parts, _offset_e,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_e, 0., send_alloc_e * sizeof(part_struct));
}
if (nparts_send[WEST] > 0) {
pack_parts_w<<<bin_num_inb, bin_dim_inb>>>(_send_parts_w, _parts, _offset_w,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_w, 0., send_alloc_w * sizeof(part_struct));
}
cudaDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_i();
/* Find number of particles currently in the EAST/WEST ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// East
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
if (dom[rank].e != MPI_PROC_NULL) {
nparts_ghost[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_ghost[EAST] = 0;
}
// West
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
if (dom[rank].w != MPI_PROC_NULL) {
nparts_ghost[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_ghost[WEST] = 0;
}
} else { // no parts
nparts_ghost[EAST] = 0;
nparts_ghost[WEST] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[EAST] + nparts_recv[WEST]
- nparts_ghost[EAST] - nparts_ghost[WEST];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(cudaMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of [_is, _ie] of _bin_count */
int *_offset_all;
checkCudaErrors(cudaMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_offset_all(_offset_all);
// Scan over bin_count[_is->_ie, j, k]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_i;
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_i,
t_bin_count + bins.Gcc.s2b_i + size,
t_offset_all + bins.Gcc.s2b_i);
/* copy bins of particles to tmp_parts */
copy_central_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
cudaFree(_offset_all);
} else { // no (old) parts
// Do not need to copy or prefix scan
}
/* Copy ghost particles received from WEST */
if (nparts_recv[WEST] > 0) {
t_nparts = nparts_recv[WEST] * (nparts_recv[WEST] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[WEST] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[WEST] / (real) t_nparts);
dim3 dim_nparts_w(t_nparts);
dim3 num_nparts_w(b_nparts);
offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST];
copy_ghost_bin_parts<<<num_nparts_w, dim_nparts_w>>>(_tmp_parts, _recv_parts_w,
nparts_recv[WEST], offset, WEST, _DOM);
} else { // nparts_recv[WEST] <= 0
// Do nothing
}
/* Copy ghost particles received from EAST */
if (nparts_recv[EAST] > 0) {
t_nparts = nparts_recv[EAST] * (nparts_recv[EAST] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[EAST] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[EAST] / (real) t_nparts);
dim3 dim_nparts_e(t_nparts);
dim3 num_nparts_e(b_nparts);
offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST]
+ nparts_recv[WEST];
copy_ghost_bin_parts<<<num_nparts_e, dim_nparts_e>>>(_tmp_parts, _recv_parts_e,
nparts_recv[EAST], offset, EAST, _DOM);
  } else { // nparts_recv[EAST] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[EAST] + nparts_recv[WEST];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[WEST] - nparts_ghost[EAST];
// correct_periodic_boundaries_i<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
// }
// Free memory
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_e);
cudaFree(_offset_w);
cudaFree(_send_parts_e);
cudaFree(_send_parts_w);
cudaFree(_recv_parts_e);
cudaFree(_recv_parts_w);
cudaFree(_tmp_parts);
}
extern "C"
void cuda_transfer_parts_j(void)
{
  // Steps are the same as in cuda_transfer_parts_i, except we index with 'j'
// varying the slowest
/* Init execution config */
// thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b, s2b; // custom strides
int offset;
/* Allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(cudaMalloc(&_offset_n, bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_s, bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_j<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_j<<<bin_num_jnb, bin_dim_jnb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
s1b = bins.Gcc.knb;
s2b = s1b * bins.Gcc.inb;
// North
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
if (dom[rank].n != MPI_PROC_NULL) {
// _bin_count is indexed with j varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _je plane
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[NORTH] = 0;
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// South
offset = GFY_LOC(0, bins.Gcc._js, 0, s1b, s2b);
if (dom[rank].s != MPI_PROC_NULL) {
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
nparts_send[SOUTH] = 0;
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Send number of parts to north/south */
nparts_recv[SOUTH] = 0; // init
nparts_recv[NORTH] = 0;
mpi_send_nparts_j();
  /* Allocate memory for sending and receiving particles */
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(cudaMalloc(&_send_parts_n, send_alloc_n * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_send_parts_s, send_alloc_s * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_n, recv_alloc_n * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_s, recv_alloc_s * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[NORTH] > 0) {
pack_parts_n<<<bin_num_jnb, bin_dim_jnb>>>(_send_parts_n, _parts, _offset_n,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_n, 0., send_alloc_n * sizeof(part_struct));
}
if (nparts_send[SOUTH] > 0) {
pack_parts_s<<<bin_num_jnb, bin_dim_jnb>>>(_send_parts_s, _parts, _offset_s,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_s, 0., send_alloc_s * sizeof(part_struct));
}
cudaDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_j();
/* Find number of particles currently in the NORTH/SOUTH ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// North
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
if (dom[rank].n != MPI_PROC_NULL) {
nparts_ghost[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_ghost[NORTH] = 0;
}
// South
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
if (dom[rank].s != MPI_PROC_NULL) {
nparts_ghost[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_ghost[SOUTH] = 0;
}
} else { // no parts
nparts_ghost[NORTH] = 0;
nparts_ghost[SOUTH] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[NORTH] + nparts_recv[SOUTH]
- nparts_ghost[NORTH] - nparts_ghost[SOUTH];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(cudaMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of ALL of _bin_count */
int *_offset_all;
checkCudaErrors(cudaMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
// Scan over bin_count[i, _js->_je, k]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_j;
thrust::device_ptr<int> t_offset_all(_offset_all);
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_j,
t_bin_count + bins.Gcc.s2b_j + size,
t_offset_all + bins.Gcc.s2b_j);
/* copy bins of particles to tmp_parts */
copy_central_bin_parts_j<<<bin_num_jnb, bin_dim_jnb>>>(_tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
cudaFree(_offset_all);
} else { // no (old) parts
// Do nothing
}
  /* Copy ghost particles received from SOUTH */
if (nparts_recv[SOUTH] > 0) {
t_nparts = nparts_recv[SOUTH] * (nparts_recv[SOUTH] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[SOUTH] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[SOUTH] / (real) t_nparts);
dim3 dim_nparts_s(t_nparts);
dim3 num_nparts_s(b_nparts);
offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH];
copy_ghost_bin_parts<<<num_nparts_s, dim_nparts_s>>>(_tmp_parts, _recv_parts_s,
nparts_recv[SOUTH], offset, SOUTH, _DOM);
} else { // nparts_recv[SOUTH] <= 0
// Do nothing
}
/* Copy ghost particles received from NORTH */
if (nparts_recv[NORTH] > 0) {
t_nparts = nparts_recv[NORTH] * (nparts_recv[NORTH] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[NORTH] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[NORTH] / (real) t_nparts);
dim3 dim_nparts_n(t_nparts);
dim3 num_nparts_n(b_nparts);
offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH]
+ nparts_recv[SOUTH];
copy_ghost_bin_parts<<<num_nparts_n, dim_nparts_n>>>(_tmp_parts, _recv_parts_n,
nparts_recv[NORTH], offset, NORTH, _DOM);
} else { // nparts_recv[NORTH] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[NORTH] + nparts_recv[SOUTH];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[SOUTH] - nparts_ghost[NORTH];
// correct_periodic_boundaries_j<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
// }
// Free memory
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_n);
cudaFree(_offset_s);
cudaFree(_send_parts_n);
cudaFree(_send_parts_s);
cudaFree(_recv_parts_n);
cudaFree(_recv_parts_s);
cudaFree(_tmp_parts);
}
extern "C"
void cuda_transfer_parts_k(void)
{
  // Steps are the same as in cuda_transfer_parts_i, except we index with 'k'
// varying the slowest
/* Init execution config */
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.inb;
int s2b = s1b * bins.Gcc.jnb;
int offset;
/* Allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(cudaMalloc(&_offset_t, bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_b, bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
//_part_bin = thrust::raw_pointer_cast(t_part_bin);
//_part_ind = thrust::raw_pointer_cast(t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send, and packing offsets */
// Top
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest -- can do a reduction
// directly from _bin_count, given the offset of the start of the _ke plane
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[TOP] = 0;
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// Bottom
offset = GFZ_LOC(0, 0, bins.Gcc._ks, s1b, s2b);
if (dom[rank].b != MPI_PROC_NULL) {
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
nparts_send[BOTTOM] = 0;
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Send number of parts to top/bottom */
nparts_recv[TOP] = 0; // init
nparts_recv[BOTTOM] = 0;
mpi_send_nparts_k();
  /* Allocate memory for sending and receiving particles */
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(cudaMalloc(&_send_parts_t, send_alloc_t * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_send_parts_b, send_alloc_b * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_t, recv_alloc_t * sizeof(part_struct)));
checkCudaErrors(cudaMalloc(&_recv_parts_b, recv_alloc_b * sizeof(part_struct)));
/* Pack particles into _send_parts */
if (nparts_send[TOP] > 0) {
pack_parts_t<<<bin_num_knb, bin_dim_knb>>>(_send_parts_t, _parts, _offset_t,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_t, 0., send_alloc_t * sizeof(part_struct));
}
if (nparts_send[BOTTOM] > 0) {
pack_parts_b<<<bin_num_knb, bin_dim_knb>>>(_send_parts_b, _parts, _offset_b,
_bin_start, _bin_count, _part_ind);
} else { // fill dummy data
//cudaMemset(_send_parts_b, 0., send_alloc_b * sizeof(part_struct));
}
cudaDeviceSynchronize(); // To ensure packing is complete before sending
/* Communicate particles with MPI */
mpi_send_parts_k();
/* Find number of particles currently in the TOP/BOTTOM ghost bins */
int nparts_ghost[6];
if (nparts > 0) {
// TOP
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
if (dom[rank].t != MPI_PROC_NULL) {
nparts_ghost[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_ghost[TOP] = 0;
}
// BOTTOM
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
if (dom[rank].b != MPI_PROC_NULL) {
nparts_ghost[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_ghost[BOTTOM] = 0;
}
} else { // no parts
nparts_ghost[TOP] = 0;
nparts_ghost[BOTTOM] = 0;
}
/* Calculate new number of particles */
int nparts_old = nparts;
nparts += nparts_recv[TOP] + nparts_recv[BOTTOM]
- nparts_ghost[TOP] - nparts_ghost[BOTTOM];
/* allocate temporary part struct */
part_struct *_tmp_parts;
checkCudaErrors(cudaMalloc(&_tmp_parts, nparts * sizeof(part_struct)));
if (nparts_old > 0) {
/* parallel prefix scan of ALL of _bin_count */
int *_offset_all;
checkCudaErrors(cudaMalloc(&_offset_all, bins.Gcc.s3b * sizeof(int)));
// Scan over bin_count[i, m, _ks->_ke]
int size = bins.Gcc.s3b - 2*bins.Gcc.s2b_k;
thrust::device_ptr<int> t_offset_all(_offset_all);
thrust::exclusive_scan(t_bin_count + bins.Gcc.s2b_k,
t_bin_count + bins.Gcc.s2b_k + size,
t_offset_all + bins.Gcc.s2b_k);
/* copy bins of particles to tmp_parts */
copy_central_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_tmp_parts, _parts,
_bin_start, _bin_count, _part_ind, _offset_all);
cudaFree(_offset_all);
} else { // no (old) parts
// Do nothing
}
  /* Copy ghost particles received from BOTTOM */
if (nparts_recv[BOTTOM] > 0) {
t_nparts = nparts_recv[BOTTOM] * (nparts_recv[BOTTOM] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[BOTTOM] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[BOTTOM] / (real) t_nparts);
dim3 dim_nparts_b(t_nparts);
dim3 num_nparts_b(b_nparts);
offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP];
copy_ghost_bin_parts<<<num_nparts_b, dim_nparts_b>>>(_tmp_parts, _recv_parts_b,
nparts_recv[BOTTOM], offset, BOTTOM, _DOM);
} else { // nparts_recv[BOTTOM] <= 0
// Do nothing
}
/* Copy ghost particles received from TOP */
if (nparts_recv[TOP] > 0) {
t_nparts = nparts_recv[TOP] * (nparts_recv[TOP] < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts_recv[TOP] >= MAX_THREADS_1D);
b_nparts = (int) ceil((real) nparts_recv[TOP] / (real) t_nparts);
dim3 dim_nparts_t(t_nparts);
dim3 num_nparts_t(b_nparts);
offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP]
+ nparts_recv[BOTTOM];
copy_ghost_bin_parts<<<num_nparts_t, dim_nparts_t>>>(_tmp_parts, _recv_parts_t,
nparts_recv[TOP], offset, TOP, _DOM);
} else { // nparts_recv[TOP] <= 0
// Do nothing
}
/* Swap pointers to _parts and _tmp_parts */
part_struct *tmp = _parts;
_parts = _tmp_parts;
_tmp_parts = tmp;
// /* Correct ghost particle position for periodic boundaries */
// int nparts_added = nparts_recv[TOP] + nparts_recv[BOTTOM];
// if (nparts_added > 0) {
// t_nparts = nparts_added * (nparts_added < MAX_THREADS_1D)
// + MAX_THREADS_1D * (nparts_added >= MAX_THREADS_1D);
// b_nparts = (int) ceil((real) nparts_added / (real) t_nparts);
//
// dim3 dim_nparts_a(t_nparts);
// dim3 num_nparts_a(b_nparts);
//
// offset = nparts_old - nparts_ghost[BOTTOM] - nparts_ghost[TOP];
// correct_periodic_boundaries_k<<<num_nparts_a, dim_nparts_a>>>(_parts,
// offset, nparts_added, _bc, _DOM);
//
// }
// Free memory
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_t);
cudaFree(_offset_b);
cudaFree(_send_parts_t);
cudaFree(_send_parts_b);
cudaFree(_recv_parts_t);
cudaFree(_recv_parts_b);
cudaFree(_tmp_parts);
}
extern "C"
void cuda_move_parts()
{
//printf("N%d >> Moving parts (nparts %d)\n", rank, nparts);
// Thread over nparts
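  // (t_nparts = min(nparts, MAX_THREADS_1D) threads per block; b_nparts blocks
  //  then cover all nparts particles.)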
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
if (nparts > 0) {
real eps = 0.01; // compact support parameter
if (nparts == 1) {
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
move_parts_a<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
} else if (nparts > 1) {
/* Initialize forces to zero */
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
/* Allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
/* Reset memory */
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
//count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
// _bin_count);
/* Deal with particle-particle collisions */
collision_parts<<<num_nparts, dim_nparts>>>(_parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
/* Calculate wall collision forces */
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free _part_bin, _part_ind (re-malloc'd in comm functions) */
cudaFree(_part_ind);
cudaFree(_part_bin);
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
/*** Update velocities and accelerations ***/
move_parts_a<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
/* Re-alloc memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind2(_part_ind);
thrust::device_ptr<int> t_part_bin2(_part_bin);
/* Reset memory */
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
//checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
//thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin2, t_part_bin2 + nparts, t_part_ind2);
/* Find start and ending index of each bin */
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Initialize forces to zero */
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
/* Deal with particle-particle collisions */
collision_parts<<<num_nparts, dim_nparts>>>(_parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
/* Calculate wall collision forces */
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free memory */
cudaFree(_part_ind);
cudaFree(_part_bin);
} // end if (nparts > 1)
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
/* Move particles */
move_parts_b<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
} // end if (nparts > 0)
}
extern "C"
void cuda_move_parts_sub()
{
//printf("N%d >> Moving parts (sub-Lamb's iteration) (nparts %d)\n", rank, nparts);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
real eps = 0.01; // compact support parameter
if (nparts == 0) {
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
} else if (nparts == 1) {
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
move_parts_a<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
} else if (nparts > 1) {
/* Initialize forces to zero */
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
/* Allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
/* Reset memory */
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
//count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
// _bin_count);
/* Deal with particle-particle collisions */
collision_parts<<<num_nparts, dim_nparts>>>(_parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
/* Calculate wall collision forces */
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free _part_bin, _part_ind (re-malloc'd in comm functions) */
checkCudaErrors(cudaFree(_part_ind));
checkCudaErrors(cudaFree(_part_bin));
/* Communicate forces */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k(); // uses bin_fill_k
/*** Update velocities and accelerations ***/
move_parts_a<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
/* Re-alloc memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind2(_part_ind);
thrust::device_ptr<int> t_part_bin2(_part_bin);
/* Reset memory */
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
//checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
//thrust::device_ptr<int> t_bin_count(_bin_count);
/* Bin particles */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
thrust::sort_by_key(t_part_bin2, t_part_bin2 + nparts, t_part_ind2);
/* Find start and ending index of each bin */
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Initialize forces to zero */
collision_init<<<num_nparts, dim_nparts>>>(_parts, nparts);
/* Deal with particle-particle collisions */
collision_parts<<<num_nparts, dim_nparts>>>(_parts, nparts,
eps, mu, rho_f, nu, _bc, _bin_start, _bin_end, _part_bin,
_part_ind, interaction_length_ratio, dt);
/* Calculate spring forces on particles */
spring_parts<<<num_nparts, dim_nparts>>>(_parts, nparts, _DOM);
/* Calculate wall collision forces */
collision_walls<<<num_nparts, dim_nparts>>>(_parts, nparts, _bc, eps, mu,
rho_f, nu, interaction_length_ratio, dt, _DOM);
/* Free memory */
checkCudaErrors(cudaFree(_part_ind));
checkCudaErrors(cudaFree(_part_bin));
} // end if (nparts > 1)
}
extern "C"
void cuda_update_part_velocity()
{
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Communicate forces to prevent MPI hang */
cuda_update_part_forces_i();
cuda_update_part_forces_j();
cuda_update_part_forces_k();
if (nparts > 0) {
move_parts_a<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
}
}
extern "C"
void cuda_update_part_position()
{
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
if (nparts > 0) {
move_parts_b<<<num_nparts, dim_nparts>>>(_parts, nparts, dt, g, gradP,
rho_f);
}
}
extern "C"
void cuda_build_cages(void)
{
/* Reset flag_{u,v,w} to fluid */
reset_flag_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_flag_u);
reset_flag_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_flag_v);
reset_flag_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_flag_w);
/* Reset phase, phase_shell to fluid */
if (NPARTS > 0) {
reset_phases<<<blocks.Gcc.num_knb, blocks.Gcc.dim_knb>>>(_phase, _phase_shell);
/* Init exec configuration */
int tx = 0.5*MAX_THREADS_DIM;
int ty = 0.5*MAX_THREADS_DIM;
int tz = 0.5*MAX_THREADS_DIM;
real itx = 1./tx;
real ity = 1./ty;
real itz = 1./tz;
int cage_dim[3];
int *_cage_dim;
checkCudaErrors(cudaMalloc(&_cage_dim, 3 * sizeof(int)));
/* build phase */
for (int n = 0; n < nparts; n++) {
// Set up cage extents
// _parts is different than parts, so we need to do this device-side
// and copy back to get exec config
cage_setup<<<1,1>>>(_parts, n, _cage_dim);
cudaMemcpy(cage_dim, _cage_dim, 3 * sizeof(int), cudaMemcpyDeviceToHost);
int bx = (int) ceil((real) cage_dim[0] * itx);
int by = (int) ceil((real) cage_dim[1] * ity);
int bz = (int) ceil((real) cage_dim[2] * itz);
dim3 dimb_3(tx, ty, tz);
dim3 numb_3(bx, by, bz);
if (bx > 0 && by > 0 && bz > 0) {
build_phase<<<numb_3, dimb_3>>>(_parts, n, _cage_dim, _phase,
_phase_shell, _DOM, _bc);
}
}
/* build phase_shell (needs phase to exist) */
for (int n = 0; n < nparts; n++) {
// Set up cage extents
// _parts is different than parts, so we need to do this device-side
// and copy back to get exec config
cage_setup<<<1,1>>>(_parts, n, _cage_dim);
cudaMemcpy(cage_dim, _cage_dim, 3 * sizeof(int), cudaMemcpyDeviceToHost);
int bx = (int) ceil((real) cage_dim[0] * itx);
int by = (int) ceil((real) cage_dim[1] * ity);
int bz = (int) ceil((real) cage_dim[2] * itz);
dim3 dimb_3(tx, ty, tz);
dim3 numb_3(bx, by, bz);
if (bx > 0 && by > 0 && bz > 0) {
build_phase_shell<<<numb_3, dimb_3>>>(_parts, n, _cage_dim, _phase,
_phase_shell, _DOM, _bc);
}
}
cudaFree(_cage_dim);
//phase_shell_x<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(_parts, _phase, _phase_shell);
//phase_shell_y<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(_parts, _phase, _phase_shell);
//phase_shell_z<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_parts, _phase, _phase_shell);
/* Build flags from phase, phase_shell */
// Need phase shell at ghost cells, but not flag
cage_flag_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_flag_u, _phase, _phase_shell);
cage_flag_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_flag_v, _phase, _phase_shell);
cage_flag_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_flag_w, _phase, _phase_shell);
}
/* Flag external boundaries
* * Only for non-periodic conditions
* * Only if subdomain is on domain boundary
*/
// i direction
if (bc.pW != PERIODIC && bc.pE != PERIODIC) {
if (dom[rank].I == DOM.Is) {
flag_external_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_flag_u,
dom[rank].Gfx._is);
}
if (dom[rank].I == DOM.Ie) {
flag_external_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_flag_u,
dom[rank].Gfx._ie);
}
}
// j direction
if (bc.pS != PERIODIC && bc.pN != PERIODIC) {
if (dom[rank].J == DOM.Js) {
flag_external_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_flag_v,
dom[rank].Gfy._js);
}
if (dom[rank].J == DOM.Je) {
flag_external_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_flag_v,
dom[rank].Gfy._je);
}
}
// k direction
if (bc.pB != PERIODIC && bc.pT != PERIODIC) {
if (dom[rank].K == DOM.Ks) {
flag_external_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_flag_w,
dom[rank].Gfz._ks);
}
if (dom[rank].K == DOM.Ke) {
flag_external_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_flag_w,
dom[rank].Gfz._ke);
}
}
/* Fill in flag_{u,v,w} ghost cells for periodic boundary conditions -- only
     necessary with particles because of the cage */
// Do this exactly like we do ghost cell exchanges -- since dom[rank].e will
// be MPI_PROC_NULL if need be, we don't need to worry about exchanging over
// periodic boundaries
}
extern "C"
void cuda_part_BC(void)
{
//printf("N%d >> Applying particle boundary conditions to u...\n", rank);
// u
part_BC_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_u, _phase, _flag_u,
_parts, nu, nparts);
// v
part_BC_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_v, _phase, _flag_v,
_parts, nu, nparts);
// w
part_BC_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_w, _phase, _flag_w,
_parts, nu, nparts);
}
extern "C"
void cuda_part_BC_star(void)
{
// u
part_BC_u<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_u_star, _phase,
_flag_u, _parts, nu, nparts);
// v
part_BC_v<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_v_star, _phase,
_flag_v, _parts, nu, nparts);
// w
part_BC_w<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_w_star, _phase,
_flag_w, _parts, nu, nparts);
}
extern "C"
void cuda_part_BC_p(void)
{
part_BC_p<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_p0, _rhs_p, _phase,
_phase_shell, _parts, mu, nu, dt, dt0, gradP, rho_f, nparts, s_beta, s_ref, g);
}
extern "C"
void cuda_part_p_fill(void)
{
part_BC_p_fill<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_p, _phase, _parts,
mu, nu, rho_f, gradP, nparts, s_beta, s_ref, g);
}
extern "C"
void cuda_parts_internal(void)
{
if (nparts > 0) {
internal_u<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, _parts, _flag_u,
_phase, nparts);
internal_v<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, _parts, _flag_v,
_phase, nparts);
internal_w<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, _parts, _flag_w,
_phase, nparts);
}
}
extern "C"
void cuda_update_part_forces_i(void)
{
/* Outline of communication
* The following need to be communicated before move_parts_{a,b}
* * kFx, kFy, kFz -- subdom + ghost, but same
* * iFx, iFy, iFz -- subdom
* * iLx, iLy, iLz -- subdom
* * iSt, St -- subdom
* This communication is similar to the communication of partial sums during
* the Lebedev quadrature (see cuda_physalis.cu:cuda_partial_sum_i)
* 1) All particles in the outer computational bin plane need to be sent,
* for example the (j,k) planes at _bins.Gcc.{_is, _ie}.
* 2) Bin the particles using i indexing to find _bin_{start,end,count}
* 3) Reduce _bin_count at _is, _ie to find nparts_send_{e,w}
* 4) Communicate nparts_send_{e,w} with appropriate subdom to find
* nparts_recv_{e,w}
* 5) Excl. prefix scan bin_count over _is, _ie to find destination index for
* packed particle data
* 6) Allocate send and recv array
   * 7) Pack send array using destination offsets
* 8) Communicate send->recv
* 9) Excl. prefix over _isb, _ieb to find unpacking indices
* 10) Unpack
* 11) Repeat for j, k
*/
/* Initialize execution config */
// Thread over east/west faces
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.jnb; // custom strides
int s2b = s1b * bins.Gcc.knb;
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_e;
int *_offset_w;
checkCudaErrors(cudaMalloc(&_offset_e, bins.Gcc.s2b_i * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_w, bins.Gcc.s2b_i * sizeof(int)));
thrust::device_ptr<int> t_offset_e(_offset_e);
thrust::device_ptr<int> t_offset_w(_offset_w);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_i<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
// East: _ie, _ieb
if (dom[rank].e != MPI_PROC_NULL) {
// _bin_count is indexed with i varying slowest
// Do reduction over bin_count, given correct starting offset of _ie plane
offset = GFX_LOC(bins.Gcc._ie, 0, 0, s1b, s2b);
nparts_send[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
/* Determine packing offsets with an excl prefix scan */
if (nparts_send[EAST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Also determine number of parts to recv */
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
nparts_recv[EAST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else { // no parts to send or recv
nparts_send[EAST] = 0;
nparts_recv[EAST] = 0;
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West: _is, _isb
if (dom[rank].w != MPI_PROC_NULL) {
// nparts_send
offset = GFX_LOC(bins.Gcc._is, 0, 0, s1b, s2b);
nparts_send[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
// send offsets
if (nparts_send[WEST] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
// nparts_recv
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
nparts_recv[WEST] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i,
0., thrust::plus<int>());
} else {
nparts_send[WEST] = 0;
nparts_recv[WEST] = 0;
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[EAST] = 0;
nparts_send[WEST] = 0;
nparts_recv[EAST] = 0;
nparts_recv[WEST] = 0;
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Send number of parts to east/west */
// origin target
// nparts_send[WEST] -> nparts_recv[EAST]
// nparts_recv[WEST] <- nparts_send[EAST]
//nparts_recv[WEST] = 0; // init
//nparts_recv[EAST] = 0;
//mpi_send_nparts_i();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_e[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
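  //  e.g. the iFx partial sum of part_id = 2 is stored at _force_send_e[3 + 9*2]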
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_e = nparts_send[EAST]*(nparts_send[EAST] > 0) + (nparts_send[EAST] == 0);
int send_alloc_w = nparts_send[WEST]*(nparts_send[WEST] > 0) + (nparts_send[WEST] == 0);
int recv_alloc_e = nparts_recv[EAST]*(nparts_recv[EAST] > 0) + (nparts_recv[EAST] == 0);
int recv_alloc_w = nparts_recv[WEST]*(nparts_recv[WEST] > 0) + (nparts_recv[WEST] == 0);
checkCudaErrors(cudaMalloc(&_force_send_e, send_alloc_e*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_send_w, send_alloc_w*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_e, recv_alloc_e*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_w, recv_alloc_w*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[EAST] > 0) {
pack_forces_e<<<bin_num_inb, bin_dim_inb>>>(_force_send_e, _offset_e,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//cudaMemset(_force_send_e, 0., send_alloc_e * n_send * sizeof(real));
}
if (nparts_send[WEST] > 0) {
pack_forces_w<<<bin_num_inb, bin_dim_inb>>>(_force_send_w, _offset_w,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//cudaMemset(_force_send_w, 0., send_alloc_w * n_send * sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_i();
/* Find offsets in ghost bins */
if (nparts > 0) {
// East: _ieb
if (dom[rank].e != MPI_PROC_NULL) {
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[EAST] > 0) {
offset = GFX_LOC(bins.Gcc._ieb, 0, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_e);
} else {
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
}
// West: _isb plane
if (dom[rank].w != MPI_PROC_NULL) {
if (nparts_recv[WEST] > 0) {
offset = GFX_LOC(bins.Gcc._isb, 0, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_i, t_offset_w);
} else {
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
} else {
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[EAST] > 0) {
unpack_forces_e<<<bin_num_inb, bin_dim_inb>>>(_force_recv_e, _offset_e,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[WEST] > 0) {
unpack_forces_w<<<bin_num_inb, bin_dim_inb>>>(_force_recv_w, _offset_w,
_bin_start, _bin_count, _part_ind, _parts);
}
    cudaDeviceSynchronize(); // ensure unpacking is complete
} else { // nparts <= 0
cudaMemset(_offset_e, 0., bins.Gcc.s2b_i * sizeof(int));
cudaMemset(_offset_w, 0., bins.Gcc.s2b_i * sizeof(int));
}
/* Free */
cudaFree(_force_send_e);
cudaFree(_force_send_w);
cudaFree(_force_recv_e);
cudaFree(_force_recv_w);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_e);
cudaFree(_offset_w);
}
extern "C"
void cuda_update_part_forces_j(void)
{
//printf("N%d >> Updating particle forces in j... (nparts %d)\n", rank, nparts);
/* Communication follows same pattern as cuda_update_part_forces_i */
/* Initialize execution config */
// Thread over north/south faces
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
dim3 bin_num_jnb(bz, bx);
dim3 bin_dim_jnb(tz, tx);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.knb;
int s2b = s1b * bins.Gcc.inb;
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_n;
int *_offset_s;
checkCudaErrors(cudaMalloc(&_offset_n, bins.Gcc.s2b_j * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_s, bins.Gcc.s2b_j * sizeof(int)));
thrust::device_ptr<int> t_offset_n(_offset_n);
thrust::device_ptr<int> t_offset_s(_offset_s);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_j<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_j<<<bin_num_jnb, bin_dim_jnb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
// north: _je
if (dom[rank].n != MPI_PROC_NULL) {
// _bin_count is indexed with j varying slowest
// nparts_send
offset = GFY_LOC(0, bins.Gcc._je, 0, s1b, s2b);
nparts_send[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
// send offsets
if (nparts_send[NORTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// nparts_recv
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
nparts_recv[NORTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else { // no parts to send
nparts_send[NORTH] = 0;
nparts_recv[NORTH] = 0;
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// SOUTH: _js planes
if (dom[rank].s != MPI_PROC_NULL) {
// nparts_send
offset = GFY_LOC(0, bins.Gcc._js, 0, s1b, s2b);
nparts_send[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[SOUTH] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
// nparts_recv
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
nparts_recv[SOUTH] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j,
0., thrust::plus<int>());
} else {
nparts_send[SOUTH] = 0;
nparts_recv[SOUTH] = 0;
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[NORTH] = 0;
nparts_send[SOUTH] = 0;
nparts_recv[NORTH] = 0;
nparts_recv[SOUTH] = 0;
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Send number of parts to NORTH/SOUTH */
// origin target
// nparts_send[SOUTH] -> nparts_recv[NORTH]
// nparts_recv[SOUTH] <- nparts_send[NORTH]
//nparts_recv[SOUTH] = 0; // init
//nparts_recv[NORTH] = 0;
//mpi_send_nparts_j();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_n[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_n = nparts_send[NORTH]*(nparts_send[NORTH] > 0) + (nparts_send[NORTH] == 0);
int send_alloc_s = nparts_send[SOUTH]*(nparts_send[SOUTH] > 0) + (nparts_send[SOUTH] == 0);
int recv_alloc_n = nparts_recv[NORTH]*(nparts_recv[NORTH] > 0) + (nparts_recv[NORTH] == 0);
int recv_alloc_s = nparts_recv[SOUTH]*(nparts_recv[SOUTH] > 0) + (nparts_recv[SOUTH] == 0);
checkCudaErrors(cudaMalloc(&_force_send_n, send_alloc_n*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_send_s, send_alloc_s*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_n, recv_alloc_n*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_s, recv_alloc_s*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[NORTH] > 0) {
pack_forces_n<<<bin_num_jnb, bin_dim_jnb>>>(_force_send_n, _offset_n,
_bin_start, _bin_count, _part_ind, _parts);
} else {
cudaMemset(_force_send_n, 0., send_alloc_n*n_send*sizeof(real));
}
if (nparts_send[SOUTH] > 0) {
pack_forces_s<<<bin_num_jnb, bin_dim_jnb>>>(_force_send_s, _offset_s,
_bin_start, _bin_count, _part_ind, _parts);
} else {
cudaMemset(_force_send_s, 0., send_alloc_s*n_send*sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_j();
/* Find offsets in ghost bins */
if (nparts > 0) {
// NORTH: _jeb
if (dom[rank].n != MPI_PROC_NULL) {
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[NORTH] > 0) {
offset = GFY_LOC(0, bins.Gcc._jeb, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_n);
} else {
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else { // no parts to send
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
}
// SOUTH: _jsb plane
if (dom[rank].s != MPI_PROC_NULL) {
if (nparts_recv[SOUTH] > 0) {
offset = GFY_LOC(0, bins.Gcc._jsb, 0, s1b, s2b);
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_j, t_offset_s);
} else {
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
} else {
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[NORTH] > 0) {
unpack_forces_n<<<bin_num_jnb, bin_dim_jnb>>>(_force_recv_n, _offset_n,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[SOUTH] > 0) {
unpack_forces_s<<<bin_num_jnb, bin_dim_jnb>>>(_force_recv_s, _offset_s,
_bin_start, _bin_count, _part_ind, _parts);
}
    cudaDeviceSynchronize(); // ensure unpacking is complete
} else { // nparts <= 0
cudaMemset(_offset_n, 0., bins.Gcc.s2b_j * sizeof(int));
cudaMemset(_offset_s, 0., bins.Gcc.s2b_j * sizeof(int));
}
/* Free */
cudaFree(_force_send_n);
cudaFree(_force_send_s);
cudaFree(_force_recv_n);
cudaFree(_force_recv_s);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_n);
cudaFree(_offset_s);
}
extern "C"
void cuda_update_part_forces_k(void)
{
//printf("N%d >> Updating particle forces in k... (nparts %d)\n", rank, nparts);
/* Communication follows same pattern as cuda_update_part_forces_i */
/* Initialize execution config */
// thread over top/bottom faces
int tx = bins.Gcc.inb * (bins.Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.inb >= MAX_THREADS_DIM);
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int bx = (int) ceil((real) bins.Gcc.inb / (real) tx);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
dim3 bin_num_knb(bx, by);
dim3 bin_dim_knb(tx, ty);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Declare things we might need */
int s1b = bins.Gcc.inb;
int s2b = s1b * bins.Gcc.jnb;
int offset;
/* Allocate */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
int *_offset_t;
int *_offset_b;
checkCudaErrors(cudaMalloc(&_offset_t, bins.Gcc.s2b_k * sizeof(int)));
checkCudaErrors(cudaMalloc(&_offset_b, bins.Gcc.s2b_k * sizeof(int)));
thrust::device_ptr<int> t_offset_t(_offset_t);
thrust::device_ptr<int> t_offset_b(_offset_b);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
// If we have parts...
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_k<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_k<<<bin_num_knb, bin_dim_knb>>>(_bin_start, _bin_end,
_bin_count);
/* Find number of particles to send and packing offsets */
// TOP: _ke
if (dom[rank].t != MPI_PROC_NULL) {
// _bin_count is indexed with k varying slowest
// nparts_send
offset = GFZ_LOC(0, 0, bins.Gcc._ke, s1b, s2b);
nparts_send[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// nparts_recv
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
nparts_recv[TOP] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else { // no parts to send
nparts_send[TOP] = 0;
nparts_recv[TOP] = 0;
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// BOTTOM: _ks planes
if (dom[rank].b != MPI_PROC_NULL) {
// nparts_send
offset = GFZ_LOC(0, 0, bins.Gcc._ks, s1b, s2b);
nparts_send[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
// sending offsets
if (nparts_send[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
// nparts_recv
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
nparts_recv[BOTTOM] = thrust::reduce(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k,
0., thrust::plus<int>());
} else {
nparts_send[BOTTOM] = 0;
nparts_recv[BOTTOM] = 0;
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
nparts_send[TOP] = 0;
nparts_send[BOTTOM] = 0;
nparts_recv[TOP] = 0;
nparts_recv[BOTTOM] = 0;
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Send number of parts to TOP/BOTTOM */
// origin target
// nparts_send[BOTTOM] -> nparts_recv[TOP]
// nparts_recv[BOTTOM] <- nparts_send[TOP]
//nparts_recv[BOTTOM] = 0; // init
//nparts_recv[TOP] = 0;
//mpi_send_nparts_k();
/* Allocate memory for send and recv forces */
int n_send = 9;
// * kFx, kFy, kFz
// * iFx, iFy, iFz
// * iLx, iLy, iLz
// Indexing is, for example:
// _force_send_t[force + 9*part_id]
// where
// part_id = [0, nparts) and force = [0, 9)
// 0: kFx 1: kFy 2: kFz
// 3: iFx 4: iFy 5: iFz
// 6: iLx 7: iLy 8: iLz
// See accompanying note at the same location in cuda_transfer_parts_i
int send_alloc_t = nparts_send[TOP]*(nparts_send[TOP] > 0) + (nparts_send[TOP] == 0);
int send_alloc_b = nparts_send[BOTTOM]*(nparts_send[BOTTOM] > 0) + (nparts_send[BOTTOM] == 0);
int recv_alloc_t = nparts_recv[TOP]*(nparts_recv[TOP] > 0) + (nparts_recv[TOP] == 0);
int recv_alloc_b = nparts_recv[BOTTOM]*(nparts_recv[BOTTOM] > 0) + (nparts_recv[BOTTOM] == 0);
checkCudaErrors(cudaMalloc(&_force_send_t, send_alloc_t*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_send_b, send_alloc_b*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_t, recv_alloc_t*n_send*sizeof(real)));
checkCudaErrors(cudaMalloc(&_force_recv_b, recv_alloc_b*n_send*sizeof(real)));
/* Pack partial forces */
if (nparts_send[TOP] > 0) {
pack_forces_t<<<bin_num_knb, bin_dim_knb>>>(_force_send_t, _offset_t,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//cudaMemset(_force_send_t, 0., send_alloc_t * n_send * sizeof(real));
}
if (nparts_send[BOTTOM] > 0) {
pack_forces_b<<<bin_num_knb, bin_dim_knb>>>(_force_send_b, _offset_b,
_bin_start, _bin_count, _part_ind, _parts);
} else { // fill dummy data
//cudaMemset(_force_send_b, 0., send_alloc_b * n_send * sizeof(real));
}
cudaDeviceSynchronize(); // ensure packing is complete
/* Communicate forces with MPI */
mpi_send_forces_k();
if (nparts > 0) {
/* Find offsets in ghost bins */
// TOP: _keb
if (dom[rank].t != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._keb, s1b, s2b);
/* Determine packing offsets with an excl prefix scan */
if (nparts_recv[TOP] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_t);
} else {
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else { // no parts to send
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
}
// BOTTOM: _ksb plane
if (dom[rank].b != MPI_PROC_NULL) {
offset = GFZ_LOC(0, 0, bins.Gcc._ksb, s1b, s2b);
if (nparts_recv[BOTTOM] > 0) {
thrust::exclusive_scan(t_bin_count + offset,
t_bin_count + offset + bins.Gcc.s2b_k, t_offset_b);
} else {
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
} else {
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Unpack and complete forces */
if (nparts_recv[TOP] > 0) {
unpack_forces_t<<<bin_num_knb, bin_dim_knb>>>(_force_recv_t, _offset_t,
_bin_start, _bin_count, _part_ind, _parts);
}
if (nparts_recv[BOTTOM] > 0) {
unpack_forces_b<<<bin_num_knb, bin_dim_knb>>>(_force_recv_b, _offset_b,
_bin_start, _bin_count, _part_ind, _parts);
}
    cudaDeviceSynchronize(); // ensure unpacking is complete
} else {
cudaMemset(_offset_t, 0., bins.Gcc.s2b_k * sizeof(int));
cudaMemset(_offset_b, 0., bins.Gcc.s2b_k * sizeof(int));
}
/* Free */
cudaFree(_force_send_t);
cudaFree(_force_send_b);
cudaFree(_force_recv_t);
cudaFree(_force_recv_b);
cudaFree(_part_ind);
cudaFree(_part_bin);
cudaFree(_offset_t);
cudaFree(_offset_b);
}
|
de839144d6a2ab20d5875a3a7f7e2b7840bd1e4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.05
// Launch configuration: a fixed block size with enough blocks to cover the
// whole image (a single block of imageW*imageH threads only works up to 32x32).
#define NUM_THREADS 256
#define NUM_BLOCKS ((imageW*imageH + NUM_THREADS - 1) / NUM_THREADS)
#define cudaCheckError() { \
hipError_t e=hipGetLastError(); \
if(e!=hipSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
//GPU code
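// Each thread handles one pixel of the flattened row-major image: convolROWS
// applies the 1D filter along each row, convolCOL along each column, so the
// two passes together form the same separable 2D convolution computed by the
// CPU reference routines further down.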
__global__ void convolROWS(float* d_Output_GPU,
float* d_Input,
float* d_Filter,
int imageH,
int imageW,
int filterR)
{
int k, i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imageW * imageH) return; // guard threads past the last pixel
//row conv
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = i%imageW + k;
if (d >= 0 && d < imageW) {
sum += d_Input[i + k] * d_Filter[filterR - k];
}
d_Output_GPU[i] = sum;
}
}
__global__ void convolCOL(float* d_Output_GPU,
float* d_Input,
float* d_Filter,
int imageH,
int imageW,
int filterR)
{
int k, i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imageW * imageH) return; // guard threads past the last pixel
//col conv
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = i/imageW + k;
if (d >= 0 && d < imageH) {
sum += d_Input[d*imageW + i%imageW] * d_Filter[filterR - k];
}
d_Output_GPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU,
*d_Input,
*d_Filter,
*d_Output_GPU,
*d_Buffer;
int imageW;
int imageH;
unsigned int i;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); //new malloc for result from device
if( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL ){
printf("Malloc allocation problem on host, exiting...\n");
return(1);
}
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
printf("CPU computation...\n");
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
// Compare the GPU and CPU results; if even one result exceeds the accuracy
// we have defined, then we have an error and may terminate the program
hipMalloc((void**)&d_Input, imageW * imageH * sizeof(float));
hipMalloc((void**)&d_Filter, FILTER_LENGTH * sizeof(float));
hipMalloc((void**)&d_Output_GPU, imageW * imageH * sizeof(float));
hipMalloc((void**)&d_Buffer, imageW * imageH * sizeof(float));
if (d_Input == NULL || d_Filter == NULL || d_Output_GPU == NULL || d_Buffer == NULL) {
printf("Malloc allocation problem on device, exiting.. \n");
return(1);
}
hipMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), hipMemcpyHostToDevice);
cudaCheckError(); //check for errors in hipMemcpy
hipLaunchKernelGGL(( convolROWS), dim3(NUM_BLOCKS) , dim3(NUM_THREADS), 0, 0, d_Buffer, d_Input, d_Filter, imageH, imageW, filter_radius);
//wait 1st kernel to finish
hipDeviceSynchronize();
//check for errors
cudaCheckError();
hipLaunchKernelGGL(( convolCOL), dim3(NUM_BLOCKS) , dim3(NUM_THREADS), 0, 0, d_Output_GPU, d_Buffer, d_Filter, imageH, imageW, filter_radius);
//wait to finish
hipDeviceSynchronize();
//check for errors
cudaCheckError();
//copy output from device to host
hipMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(float), hipMemcpyDeviceToHost);
//compare here
for (i = 0; i < imageW * imageH; i++) {
if(ABS(h_OutputGPU[i] - h_OutputCPU[i]) > accuracy){
printf("The difference between the %dnth element is larger than accuracy. \n CPU: %g GPU %g differece: %.15g \nNow exiting..\n", i,h_OutputCPU[i] ,h_OutputGPU[i], ABS(h_OutputGPU[i] - h_OutputCPU[i]) );
break;
}
}
// free all the allocated memory
free(h_OutputGPU);
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
hipFree(d_Input);
hipFree(d_Filter);
hipFree(d_Output_GPU);
hipFree(d_Buffer);
// Do a device reset just in case... (remove this comment once the CUDA part is implemented)
hipDeviceReset();
return 0;
}
| de839144d6a2ab20d5875a3a7f7e2b7840bd1e4c.cu | #include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.05
// Launch configuration: a fixed block size with enough blocks to cover the
// whole image (a single block of imageW*imageH threads only works up to 32x32).
#define NUM_THREADS 256
#define NUM_BLOCKS ((imageW*imageH + NUM_THREADS - 1) / NUM_THREADS)
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}
//GPU code
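// Each thread handles one pixel of the flattened row-major image: convolROWS
// applies the 1D filter along each row, convolCOL along each column, so the
// two passes together form the same separable 2D convolution computed by the
// CPU reference routines further down.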
__global__ void convolROWS(float* d_Output_GPU,
float* d_Input,
float* d_Filter,
int imageH,
int imageW,
int filterR)
{
int k, i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imageW * imageH) return; // guard threads past the last pixel
//row conv
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = i%imageW + k;
if (d >= 0 && d < imageW) {
sum += d_Input[i + k] * d_Filter[filterR - k];
}
d_Output_GPU[i] = sum;
}
}
__global__ void convolCOL(float* d_Output_GPU,
float* d_Input,
float* d_Filter,
int imageH,
int imageW,
int filterR)
{
int k, i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= imageW * imageH) return; // guard threads past the last pixel
//col conv
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = i/imageW + k;
if (d >= 0 && d < imageH) {
sum += d_Input[d*imageW + i%imageW] * d_Filter[filterR - k];
}
d_Output_GPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU,
*d_Input,
*d_Filter,
*d_Output_GPU,
*d_Buffer;
int imageW;
int imageH;
unsigned int i;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); //new malloc for result from device
if( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL ){
printf("Malloc allocation problem on host, exiting...\n");
return(1);
}
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
}
printf("CPU computation...\n");
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
// Compare the GPU and CPU results; if even one result exceeds the accuracy
// we have defined, then we have an error and may terminate the program
cudaMalloc((void**)&d_Input, imageW * imageH * sizeof(float));
cudaMalloc((void**)&d_Filter, FILTER_LENGTH * sizeof(float));
cudaMalloc((void**)&d_Output_GPU, imageW * imageH * sizeof(float));
cudaMalloc((void**)&d_Buffer, imageW * imageH * sizeof(float));
if (d_Input == NULL || d_Filter == NULL || d_Output_GPU == NULL || d_Buffer == NULL) {
printf("Malloc allocation problem on device, exiting.. \n");
return(1);
}
cudaMemcpy(d_Input, h_Input, imageW * imageH * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH * sizeof(float), cudaMemcpyHostToDevice);
cudaCheckError(); //check for errors in cudaMemcpy
convolROWS<<<NUM_BLOCKS , NUM_THREADS>>>(d_Buffer, d_Input, d_Filter, imageH, imageW, filter_radius);
//wait 1st kernel to finish
cudaDeviceSynchronize();
//check for errors
cudaCheckError();
convolCOL<<<NUM_BLOCKS , NUM_THREADS>>>(d_Output_GPU, d_Buffer, d_Filter, imageH, imageW, filter_radius);
//wait to finish
cudaDeviceSynchronize();
//check for errors
cudaCheckError();
//copy output from device to host
cudaMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(float), cudaMemcpyDeviceToHost);
//compare here
for (i = 0; i < imageW * imageH; i++) {
if(ABS(h_OutputGPU[i] - h_OutputCPU[i]) > accuracy){
printf("The difference between the %dnth element is larger than accuracy. \n CPU: %g GPU %g differece: %.15g \nNow exiting..\n", i,h_OutputCPU[i] ,h_OutputGPU[i], ABS(h_OutputGPU[i] - h_OutputCPU[i]) );
break;
}
}
// free all the allocated memory
free(h_OutputGPU);
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaFree(d_Output_GPU);
cudaFree(d_Buffer);
// Do a device reset just in case... (remove this comment once the CUDA part is implemented)
cudaDeviceReset();
return 0;
}
|
dd5a7fdd85886161fd591d13acfda095e91d70f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements multi-threaded heterogeneous computing workloads with the new CPU callbacks for CUDA streams and events introduced with CUDA 5.0.
* Together with the thread safety of the CUDA API implementing heterogeneous workloads that float between CPU threads and GPUs has become simple and efficient.
*
* The workloads in the sample follow the form CPU preprocess -> GPU process -> CPU postprocess.
* Each CPU processing step is handled by its own dedicated thread. GPU workloads are sent to all available GPUs in the system.
*
*/
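// Lifecycle of one workload as wired up below: a launch() CPU thread queues
// H2D copy -> incKernel -> D2H copy in the workload's private stream and
// registers myStreamCallback; once those stream operations drain, the callback
// spawns a postprocess() CPU thread, which verifies the results, releases the
// resources and signals thread_barrier.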
// System includes
#include <stdio.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "multithreading.h"
const int N_workloads = 8;
const int N_elements_per_workload = 100000;
CUTBarrier thread_barrier;
void CUDART_CB myStreamCallback(hipStream_t stream, hipError_t status, void *data);
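// One instance per workload: its host/device buffers, dedicated stream and
// verification flag, handed from launch() to myStreamCallback() to postprocess().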
struct heterogeneous_workload
{
int id;
int cudaDeviceID;
int *h_data;
int *d_data;
hipStream_t stream;
bool success;
};
__global__
void incKernel(int *data, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
data[i]++;
}
CUT_THREADPROC launch(void *void_arg)
{
heterogeneous_workload *workload = (heterogeneous_workload *) void_arg;
// Select GPU for this CPU thread
checkCudaErrors(hipSetDevice(workload->cudaDeviceID));
// Allocate Resources
checkCudaErrors(hipStreamCreate(&workload->stream));
checkCudaErrors(hipMalloc(&workload->d_data, N_elements_per_workload * sizeof(int)));
checkCudaErrors(hipHostMalloc(&workload->h_data, N_elements_per_workload * sizeof(int), hipHostMallocPortable));
// CPU thread generates data
for (int i=0; i < N_elements_per_workload; ++i)
{
workload->h_data[i] = workload->id + i;
}
// Schedule work for GPU in CUDA stream without blocking the CPU thread
// Note: Dedicated streams enable concurrent execution of workloads on the GPU
dim3 block(512);
dim3 grid((N_elements_per_workload + block.x-1) / block.x);
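    // the grid size is rounded up so every element gets a thread; incKernel guards with (i < N)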
checkCudaErrors(hipMemcpyAsync(workload->d_data, workload->h_data, N_elements_per_workload * sizeof(int), hipMemcpyHostToDevice, workload->stream));
hipLaunchKernelGGL(( incKernel), dim3(grid), dim3(block),0,workload->stream, workload->d_data, N_elements_per_workload);
checkCudaErrors(hipMemcpyAsync(workload->h_data, workload->d_data, N_elements_per_workload * sizeof(int), hipMemcpyDeviceToHost, workload->stream));
// New in CUDA 5.0: Add a CPU callback which is called once all currently pending operations in the CUDA stream have finished
checkCudaErrors(hipStreamAddCallback(workload->stream, myStreamCallback, workload, 0));
CUT_THREADEND;
// CPU thread end of life, GPU continues to process data...
}
CUT_THREADPROC postprocess(void *void_arg)
{
heterogeneous_workload *workload = (heterogeneous_workload *) void_arg;
// ... GPU is done with processing, continue on new CPU thread...
// Select GPU for this CPU thread
checkCudaErrors(hipSetDevice(workload->cudaDeviceID));
// CPU thread consumes results from GPU
workload->success = true;
for (int i=0; i< N_workloads; ++i)
{
workload->success &= workload->h_data[i] == i + workload->id + 1;
}
// Free Resources
checkCudaErrors(hipFree(workload->d_data));
checkCudaErrors(hipHostFree(workload->h_data));
checkCudaErrors(hipStreamDestroy(workload->stream));
    // Signal the end of the heterogeneous workload to main thread
cutIncrementBarrier(&thread_barrier);
CUT_THREADEND;
}
void CUDART_CB myStreamCallback(hipStream_t stream, hipError_t status, void *data)
{
// Check status of GPU after stream operations are done
checkCudaErrors(status);
// Spawn new CPU worker thread and continue processing on the CPU
cutStartThread(postprocess, data);
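    // Note: CUDA API calls are not permitted inside a stream callback, which is one reason the
    // postprocessing work is handed off to a fresh CPU thread here rather than done in-place.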
}
int main(int argc, char **argv)
{
int N_gpus, max_gpus = 0;
int gpuInfo[32]; // assume a maximum of 32 GPUs in a system configuration
printf("Starting simpleCallback\n");
checkCudaErrors(hipGetDeviceCount(&N_gpus));
printf("Found %d CUDA capable GPUs\n", N_gpus);
if (N_gpus > 32)
{
printf("simpleCallback only supports 32 GPU(s)\n");
}
for (int devid=0; devid < N_gpus; devid++)
{
int SMversion;
hipDeviceProp_t deviceProp;
hipSetDevice(devid);
hipGetDeviceProperties(&deviceProp, devid);
        SMversion = (deviceProp.major << 4) + deviceProp.minor;
printf("GPU[%d] %s supports SM %d.%d", devid, deviceProp.name, deviceProp.major, deviceProp.minor);
printf(", %s GPU Callback Functions\n", (SMversion >= 0x11) ? "capable" : "NOT capable");
if (SMversion >= 0x11)
{
gpuInfo[max_gpus++] = devid;
}
}
printf("%d GPUs available to run Callback Functions\n", max_gpus);
heterogeneous_workload *workloads;
    workloads = (heterogeneous_workload *) malloc(N_workloads * sizeof(heterogeneous_workload));
thread_barrier = cutCreateBarrier(N_workloads);
    // Main thread spawns a CPU worker thread for each heterogeneous workload
printf("Starting %d heterogeneous computing workloads\n", N_workloads);
for (int i=0; i< N_workloads; ++i)
{
workloads[i].id = i;
workloads[i].cudaDeviceID = gpuInfo[i % max_gpus]; // i % N_gpus;
cutStartThread(launch, &workloads[i]);
}
// Sleep until all workloads have finished
cutWaitForBarrier(&thread_barrier);
printf("Total of %d workloads finished:\n", N_workloads);
bool success = true;
for (int i=0; i< N_workloads; ++i)
{
success &= workloads[i].success;
}
printf("%s\n", success ? "Success" : "Failure");
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
free(workloads);
exit(success ? EXIT_SUCCESS : EXIT_FAILURE);
}
| dd5a7fdd85886161fd591d13acfda095e91d70f9.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements multi-threaded heterogeneous computing workloads with the new CPU callbacks for CUDA streams and events introduced with CUDA 5.0.
* Together with the thread safety of the CUDA API implementing heterogeneous workloads that float between CPU threads and GPUs has become simple and efficient.
*
* The workloads in the sample follow the form CPU preprocess -> GPU process -> CPU postprocess.
* Each CPU processing step is handled by its own dedicated thread. GPU workloads are sent to all available GPUs in the system.
*
*/
// System includes
#include <stdio.h>
// helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include "multithreading.h"
const int N_workloads = 8;
const int N_elements_per_workload = 100000;
CUTBarrier thread_barrier;
void CUDART_CB myStreamCallback(cudaStream_t event, cudaError_t status, void *data);
struct heterogeneous_workload
{
int id;
int cudaDeviceID;
int *h_data;
int *d_data;
cudaStream_t stream;
bool success;
};
__global__
void incKernel(int *data, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
data[i]++;
}
CUT_THREADPROC launch(void *void_arg)
{
heterogeneous_workload *workload = (heterogeneous_workload *) void_arg;
// Select GPU for this CPU thread
checkCudaErrors(cudaSetDevice(workload->cudaDeviceID));
// Allocate Resources
checkCudaErrors(cudaStreamCreate(&workload->stream));
checkCudaErrors(cudaMalloc(&workload->d_data, N_elements_per_workload * sizeof(int)));
checkCudaErrors(cudaHostAlloc(&workload->h_data, N_elements_per_workload * sizeof(int), cudaHostAllocPortable));
// CPU thread generates data
for (int i=0; i < N_elements_per_workload; ++i)
{
workload->h_data[i] = workload->id + i;
}
// Schedule work for GPU in CUDA stream without blocking the CPU thread
// Note: Dedicated streams enable concurrent execution of workloads on the GPU
dim3 block(512);
dim3 grid((N_elements_per_workload + block.x-1) / block.x);
checkCudaErrors(cudaMemcpyAsync(workload->d_data, workload->h_data, N_elements_per_workload * sizeof(int), cudaMemcpyHostToDevice, workload->stream));
incKernel<<<grid, block,0,workload->stream>>>(workload->d_data, N_elements_per_workload);
checkCudaErrors(cudaMemcpyAsync(workload->h_data, workload->d_data, N_elements_per_workload * sizeof(int), cudaMemcpyDeviceToHost, workload->stream));
// New in CUDA 5.0: Add a CPU callback which is called once all currently pending operations in the CUDA stream have finished
checkCudaErrors(cudaStreamAddCallback(workload->stream, myStreamCallback, workload, 0));
CUT_THREADEND;
// CPU thread end of life, GPU continues to process data...
}
CUT_THREADPROC postprocess(void *void_arg)
{
heterogeneous_workload *workload = (heterogeneous_workload *) void_arg;
// ... GPU is done with processing, continue on new CPU thread...
// Select GPU for this CPU thread
checkCudaErrors(cudaSetDevice(workload->cudaDeviceID));
// CPU thread consumes results from GPU
workload->success = true;
for (int i=0; i< N_workloads; ++i)
{
workload->success &= workload->h_data[i] == i + workload->id + 1;
}
// Free Resources
checkCudaErrors(cudaFree(workload->d_data));
checkCudaErrors(cudaFreeHost(workload->h_data));
checkCudaErrors(cudaStreamDestroy(workload->stream));
    // Signal the end of the heterogeneous workload to main thread
cutIncrementBarrier(&thread_barrier);
CUT_THREADEND;
}
void CUDART_CB myStreamCallback(cudaStream_t stream, cudaError_t status, void *data)
{
// Check status of GPU after stream operations are done
checkCudaErrors(status);
// Spawn new CPU worker thread and continue processing on the CPU
cutStartThread(postprocess, data);
}
int main(int argc, char **argv)
{
int N_gpus, max_gpus = 0;
int gpuInfo[32]; // assume a maximum of 32 GPUs in a system configuration
printf("Starting simpleCallback\n");
checkCudaErrors(cudaGetDeviceCount(&N_gpus));
printf("Found %d CUDA capable GPUs\n", N_gpus);
if (N_gpus > 32)
{
printf("simpleCallback only supports 32 GPU(s)\n");
}
for (int devid=0; devid < N_gpus; devid++)
{
int SMversion;
cudaDeviceProp deviceProp;
cudaSetDevice(devid);
cudaGetDeviceProperties(&deviceProp, devid);
        SMversion = (deviceProp.major << 4) + deviceProp.minor;
printf("GPU[%d] %s supports SM %d.%d", devid, deviceProp.name, deviceProp.major, deviceProp.minor);
printf(", %s GPU Callback Functions\n", (SMversion >= 0x11) ? "capable" : "NOT capable");
if (SMversion >= 0x11)
{
gpuInfo[max_gpus++] = devid;
}
}
printf("%d GPUs available to run Callback Functions\n", max_gpus);
heterogeneous_workload *workloads;
    workloads = (heterogeneous_workload *) malloc(N_workloads * sizeof(heterogeneous_workload));
thread_barrier = cutCreateBarrier(N_workloads);
    // Main thread spawns a CPU worker thread for each heterogeneous workload
printf("Starting %d heterogeneous computing workloads\n", N_workloads);
for (int i=0; i< N_workloads; ++i)
{
workloads[i].id = i;
workloads[i].cudaDeviceID = gpuInfo[i % max_gpus]; // i % N_gpus;
cutStartThread(launch, &workloads[i]);
}
// Sleep until all workloads have finished
cutWaitForBarrier(&thread_barrier);
printf("Total of %d workloads finished:\n", N_workloads);
bool success = true;
for (int i=0; i< N_workloads; ++i)
{
success &= workloads[i].success;
}
printf("%s\n", success ? "Success" : "Failure");
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
free(workloads);
exit(success ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
83ef915dcbf2807d0a97da38eb6f47e96492ca12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/upsample_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void UpsampleForward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* bottom_data,
const Dtype* bottom_mask, Dtype* top_data) {
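  // Scatter each bottom element into the top blob: 'offset' is the start of the matching
  // (n, c) plane in the output, and bottom_mask gives the flattened position inside that
  // plane (typically the argmax indices recorded by a preceding pooling layer).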
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
top_data[offset + upsample_idx] = bottom_data[index];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
int bottom_count = bottom[0]->count();
hipLaunchKernelGGL(( UpsampleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void UpsampleBackward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* top_diff,
const Dtype* bottom_mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
bottom_diff[index] = top_diff[offset + upsample_idx];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
hipLaunchKernelGGL(( UpsampleBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
| 83ef915dcbf2807d0a97da38eb6f47e96492ca12.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/upsample_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void UpsampleForward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* bottom_data,
const Dtype* bottom_mask, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
top_data[offset + upsample_idx] = bottom_data[index];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
caffe_gpu_set(top[0]->count(), Dtype(0), top_data);
int bottom_count = bottom[0]->count();
UpsampleForward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), bottom_data, bottom_mask, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void UpsampleBackward(const int nthreads, int in_w, int in_h,
int out_w, int out_h, const Dtype* top_diff,
const Dtype* bottom_mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int offset = index / (in_w * in_h) * out_w * out_h;
int upsample_idx = static_cast<int>(bottom_mask[index]);
bottom_diff[index] = top_diff[offset + upsample_idx];
}
}
template <typename Dtype>
void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_mask = bottom[1]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_count = bottom[0]->count();
caffe_gpu_set(bottom_count, Dtype(0.), bottom_diff);
UpsampleBackward<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, bottom[0]->width(), bottom[0]->height(),
top[0]->width(), top[0]->height(), top_diff, bottom_mask, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer);
} // namespace caffe
|
be088f799b2639805397fd88976bfb57da3191a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#foreach( $degs in $degrees )
// P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / 1 + |b_0||X| + |b_1||X|^2 + ... + |b_i||X|^{i+1}
#set( $degs_a = $degs[0] )
#set( $degs_b = $degs[1] )
#set( $coefs_a = $degs_a )
#set( $coefs_b = $degs_b - 1 )
#set( $a_counts = $coefs_a + 1 )
#set( $b_counts = $coefs_b + 1 )
#set( $max_x = $degs[2] )
template <typename scalar_t>
__global__ void pau_cuda_forward_A_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) {
#foreach( $idx in [0..$coefs_a] )
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t ab_$idx = abs(b[$idx]);
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x){
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
#foreach( $idx in [1..$degs_b] )
scalar_t axp$idx = abs(xp$idx);
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx * xp$idx
#end
;
scalar_t Q = scalar_t(1.0)
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
+ ab_$idx * axp$value
#end
;
result[index] = P / Q;
}
}
at::Tensor pau_cuda_forward_A_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){
auto result = at::empty_like(x);
const auto x_size = x.numel();
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (x_size + blockSize - 1) / blockSize;
AT_DISPATCH_FLOATING_TYPES(x.type(), "pau_cuda_forward_A_$degs[0]_$degs[1]", ([&] {
    hipLaunchKernelGGL((pau_cuda_forward_A_kernel_$degs[0]_$degs[1]<scalar_t>)
, dim3(numBlocks), dim3(blockSize), 0, 0,
x.data<scalar_t>(),
n.data<scalar_t>(),
d.data<scalar_t>(),
result.data<scalar_t>(),
x_size);
}));
return result;
}
//P(X) = a_0 + a_1*X + a_2*X^2 ...
//Q(X) = 1 + |b_0||X| + |b_1||X|^2 + |b_2||X|^3
//R(X) = a_1 + 2*a_2*X + 3*a_3*X ...
//S(X) = sign(X) * ( |b_0| + 2|b_1||X| + 3|b_2||X|^2 ...)
//dF/dx = (-P(X)/Q(X)^2)*S(X) + R(X)/Q(X)
//dF/da_i = x^i/Q(X), i \in {0,$degs[0]}
//dF/db_i = (-P(X)/Q(X)^2) * sign(b_i) * |X^{i+1}| , i \in {0,$degs[1]}
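// Gradient accumulation in the kernel below happens in three stages: each thread keeps private
// d_a*/d_b* sums, those are atomically added into the block-wide shared arrays sda/sdb, and
// thread 0 of every block finally adds the block totals into the global d_a/d_b buffers.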
template <typename scalar_t>
__global__ void pau_cuda_backward_A_kernel_$degs[0]_$degs[1](
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ x,
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ d_x,
double* __restrict__ d_a,
double* __restrict__ d_b,
size_t x_size) {
__shared__ double sda[$a_counts];
__shared__ double sdb[$b_counts];
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
sda[$idx] = 0;
#end
#foreach( $idx in [0..$coefs_b] )
sdb[$idx] = 0;
#end
}
__syncthreads();
#foreach( $idx in [0..$coefs_a] )
scalar_t d_a$idx = 0;
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t d_b$idx = 0;
scalar_t b_$idx = b[$idx];
scalar_t ab_$idx = abs(b_$idx);
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x)
{
scalar_t xp1 = x[index];
scalar_t axp1 = abs(xp1);
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
scalar_t axp$idx = abs(xp$idx);
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx*xp$idx
#end
;
scalar_t Q = scalar_t(1.0)
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
+ ab_$idx * axp$value
#end
;
scalar_t R = a_1
#foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * a_$idx * xp$value
#end
;
scalar_t S = copysign( scalar_t(1.0), xp1 ) * (ab_0
#foreach( $idx in [1..$coefs_b] )#set( $value = $idx + 1 )
+ scalar_t($value.0) * ab_$idx * axp$idx
#end
);
scalar_t mpq2 = -P/(Q*Q);
scalar_t grad_o = grad_output[index];
scalar_t d_i_x = (R/Q + S*mpq2);
d_x[index] = d_i_x * grad_o;
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), b_$idx ) * axp$value;
d_b$idx += d_i_b$idx * grad_o;
#end
scalar_t d_i_a0 = scalar_t(1.0)/Q;
d_a0 += d_i_a0 * grad_o;
#foreach( $idx in [1..$coefs_a] )
scalar_t d_i_a$idx = xp$idx/Q;
d_a$idx += d_i_a$idx * grad_o;
#end
}
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&sda[$idx], d_a$idx);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&sdb[$idx], d_b$idx);
#end
__syncthreads();
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&d_a[$idx], sda[$idx]);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&d_b[$idx], sdb[$idx]);
#end
}
}
std::vector<torch::Tensor> pau_cuda_backward_A_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){
const auto x_size = x.numel();
auto d_x = at::empty_like(x);
auto d_n = at::zeros_like(n).toType(at::kDouble);
auto d_d = at::zeros_like(d).toType(at::kDouble);
int blockSize = THREADS_PER_BLOCK;
AT_DISPATCH_FLOATING_TYPES(x.type(), "pau_cuda_backward_A_$degs[0]_$degs[1]", ([&] {
    hipLaunchKernelGGL((pau_cuda_backward_A_kernel_$degs[0]_$degs[1]<scalar_t>)
, dim3(16), dim3(blockSize), 0, 0,
grad_output.data<scalar_t>(),
x.data<scalar_t>(),
n.data<scalar_t>(),
d.data<scalar_t>(),
d_x.data<scalar_t>(),
d_n.data<double>(),
d_d.data<double>(),
x_size);
}));
return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)};
}
#end | be088f799b2639805397fd88976bfb57da3191a1.cu | #foreach( $degs in $degrees )
// P(X)/Q(X) = a_0 + a_1*X + a_2*X^2 + ... + a_n*X^n / 1 + |b_0||X| + |b_1||X|^2 + ... + |b_i||X|^{i+1}
#set( $degs_a = $degs[0] )
#set( $degs_b = $degs[1] )
#set( $coefs_a = $degs_a )
#set( $coefs_b = $degs_b - 1 )
#set( $a_counts = $coefs_a + 1 )
#set( $b_counts = $coefs_b + 1 )
#set( $max_x = $degs[2] )
template <typename scalar_t>
__global__ void pau_cuda_forward_A_kernel_$degs[0]_$degs[1]( const scalar_t* __restrict__ x, const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b, scalar_t* __restrict__ result, size_t x_size) {
#foreach( $idx in [0..$coefs_a] )
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t ab_$idx = abs(b[$idx]);
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x){
scalar_t xp1 = x[index];
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
#end
#foreach( $idx in [1..$degs_b] )
scalar_t axp$idx = abs(xp$idx);
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx * xp$idx
#end
;
scalar_t Q = scalar_t(1.0)
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
+ ab_$idx * axp$value
#end
;
result[index] = P / Q;
}
}
at::Tensor pau_cuda_forward_A_$degs[0]_$degs[1](torch::Tensor x, torch::Tensor n, torch::Tensor d){
auto result = at::empty_like(x);
const auto x_size = x.numel();
int blockSize = THREADS_PER_BLOCK;
int numBlocks = (x_size + blockSize - 1) / blockSize;
AT_DISPATCH_FLOATING_TYPES(x.type(), "pau_cuda_forward_A_$degs[0]_$degs[1]", ([&] {
pau_cuda_forward_A_kernel_$degs[0]_$degs[1]<scalar_t>
<<<numBlocks, blockSize>>>(
x.data<scalar_t>(),
n.data<scalar_t>(),
d.data<scalar_t>(),
result.data<scalar_t>(),
x_size);
}));
return result;
}
//P(X) = a_0 + a_1*X + a_2*X^2 ...
//Q(X) = 1 + |b_0||X| + |b_1||X|^2 + |b_2||X|^3
//R(X) = a_1 + 2*a_2*X + 3*a_3*X ...
//S(X) = sign(X) * ( |b_0| + 2|b_1||X| + 3|b_2||X|^2 ...)
//dF/dx = (-P(X)/Q(X)^2)*S(X) + R(X)/Q(X)
//dF/da_i = x^i/Q(X), i \in {0,$degs[0]}
//dF/db_i = (-P(X)/Q(X)^2) * sign(b_i) * |X^{i+1}| , i \in {0,$degs[1]}
template <typename scalar_t>
__global__ void pau_cuda_backward_A_kernel_$degs[0]_$degs[1](
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ x,
const scalar_t* __restrict__ a,
const scalar_t* __restrict__ b,
scalar_t* __restrict__ d_x,
double* __restrict__ d_a,
double* __restrict__ d_b,
size_t x_size) {
__shared__ double sda[$a_counts];
__shared__ double sdb[$b_counts];
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
sda[$idx] = 0;
#end
#foreach( $idx in [0..$coefs_b] )
sdb[$idx] = 0;
#end
}
__syncthreads();
#foreach( $idx in [0..$coefs_a] )
scalar_t d_a$idx = 0;
scalar_t a_$idx = a[$idx];
#end
#foreach( $idx in [0..$coefs_b] )
scalar_t d_b$idx = 0;
scalar_t b_$idx = b[$idx];
scalar_t ab_$idx = abs(b_$idx);
#end
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < x_size;
index += blockDim.x * gridDim.x)
{
scalar_t xp1 = x[index];
scalar_t axp1 = abs(xp1);
#foreach( $idx in [2..$max_x] )#set( $value = $idx - 1 )
scalar_t xp$idx = xp$value * xp1;
scalar_t axp$idx = abs(xp$idx);
#end
scalar_t P = a_0
#foreach( $idx in [1..$coefs_a] )
+ a_$idx*xp$idx
#end
;
scalar_t Q = scalar_t(1.0)
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
+ ab_$idx * axp$value
#end
;
scalar_t R = a_1
#foreach( $idx in [2..$coefs_a] )#set( $value = $idx - 1 )
+ scalar_t($idx.0) * a_$idx * xp$value
#end
;
scalar_t S = copysign( scalar_t(1.0), xp1 ) * (ab_0
#foreach( $idx in [1..$coefs_b] )#set( $value = $idx + 1 )
+ scalar_t($value.0) * ab_$idx * axp$idx
#end
);
scalar_t mpq2 = -P/(Q*Q);
scalar_t grad_o = grad_output[index];
scalar_t d_i_x = (R/Q + S*mpq2);
d_x[index] = d_i_x * grad_o;
#foreach( $idx in [0..$coefs_b] )#set( $value = $idx + 1 )
scalar_t d_i_b$idx = mpq2 * copysign( scalar_t(1.0), b_$idx ) * axp$value;
d_b$idx += d_i_b$idx * grad_o;
#end
scalar_t d_i_a0 = scalar_t(1.0)/Q;
d_a0 += d_i_a0 * grad_o;
#foreach( $idx in [1..$coefs_a] )
scalar_t d_i_a$idx = xp$idx/Q;
d_a$idx += d_i_a$idx * grad_o;
#end
}
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&sda[$idx], d_a$idx);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&sdb[$idx], d_b$idx);
#end
__syncthreads();
if( threadIdx.x == 0){
#foreach( $idx in [0..$coefs_a] )
atomicAdd(&d_a[$idx], sda[$idx]);
#end
#foreach( $idx in [0..$coefs_b] )
atomicAdd(&d_b[$idx], sdb[$idx]);
#end
}
}
std::vector<torch::Tensor> pau_cuda_backward_A_$degs[0]_$degs[1](torch::Tensor grad_output, torch::Tensor x, torch::Tensor n, torch::Tensor d){
const auto x_size = x.numel();
auto d_x = at::empty_like(x);
auto d_n = at::zeros_like(n).toType(at::kDouble);
auto d_d = at::zeros_like(d).toType(at::kDouble);
int blockSize = THREADS_PER_BLOCK;
AT_DISPATCH_FLOATING_TYPES(x.type(), "pau_cuda_backward_A_$degs[0]_$degs[1]", ([&] {
pau_cuda_backward_A_kernel_$degs[0]_$degs[1]<scalar_t>
<<<16, blockSize>>>(
grad_output.data<scalar_t>(),
x.data<scalar_t>(),
n.data<scalar_t>(),
d.data<scalar_t>(),
d_x.data<scalar_t>(),
d_n.data<double>(),
d_d.data<double>(),
x_size);
}));
return {d_x, d_n.toType(at::kFloat), d_d.toType(at::kFloat)};
}
#end |
2640ccaf91bbfb38deafde8b1dcc584556590658.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%writefile wavecuda1.cu
#include <cstdlib>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void work(int width, unsigned char* pic) {
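    // each thread renders one complete frame: threadIdx.x selects the frame slot in 'pic' and also
    // offsets the cosine phase, so successive threads produce successive animation steps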
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (120.0f + 127.0f *
cos(d/10.0f - threadIdx.x/7.0f) /
(d/50.0f + 1.0f));
pic[threadIdx.x * width * width + row * width + col] = (unsigned char) color;
}
}
}
int main(int argc, char** argv) {
if (argc != 3) {
        printf("ERROR: usage %s frame_width num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
        printf("ERROR: frame_width must be at least 100\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
        printf("ERROR: num_frames must be at least 1\n");
exit(-1);
}
printf("CUDA computing %d frames of %d by %d picture\n", frames, width, width);
// allocate picture array
unsigned char* pic;
hipMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start time
timeval start, end;
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( work), dim3(1), dim3(frames), 0, 0, width, pic);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
    // stop the timer
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("CUDA compute time: %.4f s\n", runtime);
hipFree(pic);
return 0;
} | 2640ccaf91bbfb38deafde8b1dcc584556590658.cu | %%writefile wavecuda1.cu
#include <cstdlib>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#include "wave.h"
__global__
void work(int width, unsigned char* pic) {
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (120.0f + 127.0f *
cos(d/10.0f - threadIdx.x/7.0f) /
(d/50.0f + 1.0f));
pic[threadIdx.x * width * width + row * width + col] = (unsigned char) color;
}
}
}
int main(int argc, char** argv) {
if (argc != 3) {
        printf("ERROR: usage %s frame_width num_frames\n", argv[0]);
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
        printf("ERROR: frame_width must be at least 100\n");
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
        printf("ERROR: num_frames must be at least 1\n");
exit(-1);
}
printf("CUDA computing %d frames of %d by %d picture\n", frames, width, width);
// allocate picture array
unsigned char* pic;
cudaMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start time
timeval start, end;
gettimeofday(&start, NULL);
work<<<1, frames>>>(width, pic);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
    // stop the timer
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("CUDA compute time: %.4f s\n", runtime);
cudaFree(pic);
return 0;
} |
800a229bb57b828867a19e9b25fb6fc6fb6f4d22.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "im2col_pad_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
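// The benchmark below sweeps every (block shape, matrix size) pair: it launches the kernel once
// and synchronizes, fires 10 warm-up launches, then times 1000 back-to-back launches and prints
// the elapsed microseconds (the timed region itself does not synchronize with the device).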
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *im = NULL;
hipMalloc(&im, XSIZE*YSIZE);
int channels = 1;
int height = YSIZE;
int width = XSIZE;
int ksize = XSIZE*YSIZE;
int stride = 2;
float *data_col = NULL;
hipMalloc(&data_col, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((im2col_pad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, im, channels, height, width, ksize, stride, data_col);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((im2col_pad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, im, channels, height, width, ksize, stride, data_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((im2col_pad_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, im, channels, height, width, ksize, stride, data_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 800a229bb57b828867a19e9b25fb6fc6fb6f4d22.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "im2col_pad_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *im = NULL;
cudaMalloc(&im, XSIZE*YSIZE);
int channels = 1;
int height = YSIZE;
int width = XSIZE;
int ksize = XSIZE*YSIZE;
int stride = 2;
float *data_col = NULL;
cudaMalloc(&data_col, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
im2col_pad_kernel<<<gridBlock,threadBlock>>>(im,channels,height,width,ksize,stride,data_col);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
im2col_pad_kernel<<<gridBlock,threadBlock>>>(im,channels,height,width,ksize,stride,data_col);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
im2col_pad_kernel<<<gridBlock,threadBlock>>>(im,channels,height,width,ksize,stride,data_col);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9d0dae73fa57b2a4fb0b965d4479a0d59ed32b5e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*  Constructor  *|
\*-------------------------*/
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "Rippling_Cuda")
{
assert(w == h); // specific rippling
// Inputs
this->dt = dt;
// Tools
this->t = 0; // protected dans Animable
}
Rippling::~Rippling()
{
    // nothing
}
/*-------------------------*\
 |*  Method  *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
 * Note: domaineMath is not used because the view is not zoomable
*/
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
    //Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // optional, for debug only, remove for release
    // TODO launch the kernel with <<<dg,db>>>
    // the kernel is imported above (line 19)
hipLaunchKernelGGL(( rippling), dim3(dg),dim3(db), 0, 0, ptrDevPixels, w, h, t);
    //Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // optional, for debug only, remove for release
}
/**
* Override
* Call periodicly by the API
*/
void Rippling::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 9d0dae73fa57b2a4fb0b965d4479a0d59ed32b5e.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "Rippling.h"
#include <assert.h>
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void rippling(uchar4* ptrDevPixels,uint w, uint h,float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*  Constructor  *|
\*-------------------------*/
Rippling::Rippling(const Grid& grid, uint w, uint h, float dt) :
Animable_I<uchar4>(grid, w, h, "Rippling_Cuda")
{
assert(w == h); // specific rippling
// Inputs
this->dt = dt;
// Tools
this->t = 0; // protected dans Animable
}
Rippling::~Rippling()
{
    // nothing
}
/*-------------------------*\
 |*  Method  *|
\*-------------------------*/
/**
* Override
* Call periodicly by the API
*
 * Note: domaineMath is not used because the view is not zoomable
*/
void Rippling::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
    //Device::lastCudaError("rippling rgba uchar4 (before kernel)"); // optional, for debug only, remove for release
    // TODO launch the kernel with <<<dg,db>>>
    // the kernel is imported above (line 19)
rippling<<<dg,db>>>(ptrDevPixels, w, h, t);
    //Device::lastCudaError("rippling rgba uchar4 (after kernel)"); // optional, for debug only, remove for release
}
/**
* Override
* Call periodicly by the API
*/
void Rippling::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0f159e6f74210ff55f65c6aebc257a56f64c8e52.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cmath>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "CycleTimer.h"
using namespace std;
// this is dependent on the time tiling and grid size of one thread block
// we first finish a non-time tiling version
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
#define HALO_LEFT 1
#define HALO_RIGHT 1
#define HALO_TOP 1
#define HALO_BOTTOM 1
void printCudaInfo();
extern float toBW(int bytes, float sec);
struct GlobalConstants {
int nx;
int ny;
int Mt;
int nts;
int ictype;
float G;
float R;
float delta;
float k;
float c_infm;
float Dl;
float d0;
float W0;
float lT;
float lamd;
float tau0;
float c_infty;
float R_tilde;
float Dl_tilde;
float lT_tilde;
float eps;
float alpha0;
float dx;
float dt;
float asp_ratio;
float lxd;
float lx;
float lyd;
float eta;
float U0;
// parameters that are not in the input file
float hi;
float cosa;
float sina;
float sqrt2;
float a_s;
float epsilon;
float a_12;
};
__constant__ GlobalConstants cP;
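// Simulation parameters are kept in constant memory; the host side (not shown in this file) is
// expected to fill cP once before launching the kernels below, e.g. with hipMemcpyToSymbol.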
// Device codes
// boundary condition
// only use this function to access the boundary points,
// other functions return at the boundary
// TODO: clarify what exactly this function does; we could probably merge it into the main kernel
__global__ void
set_BC(float* ps, float* ph, float* U, float* dpsi, int fnx, int fny){
// find the location of boundary:
int index = blockIdx.x * blockDim.x + threadIdx.x;
// z=0, lx
if (index<fnx) {
int b_in = index+2*fnx;
int t_out = index+(fny-1)*fnx;
int t_in = index+(fny-3)*fnx;
ps[index] = ps[b_in];
ph[index] = ph[b_in];
U[index] = U[b_in];
dpsi[index] = dpsi[b_in];
ps[t_out] = ps[t_in];
ph[t_out] = ph[t_in];
U[t_out] = U[t_in];
dpsi[t_out] = dpsi[t_in];
}
if (index<fny){
int l_out = index*fnx;
int l_in = index*fnx + 2;
int r_out = index*fnx + fnx -1;
int r_in = index*fnx + fnx -3;
ps[l_out] = ps[l_in];
ph[l_out] = ph[l_in];
U[l_out] = U[l_in];
dpsi[l_out] = dpsi[l_in];
ps[r_out] = ps[r_in];
ph[r_out] = ph[r_in];
U[r_out] = U[r_in];
dpsi[r_out] = dpsi[r_in];
}
}
// initialization
__global__ void
initialize(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new
, float* x, float* y, int fnx, int fny){
int C = blockIdx.x * blockDim.x + threadIdx.x;
// obtain i and j(2D position)
int j=C/fnx;
int i=C-j*fnx;
  // at initialization, you need to account for the C/F memory layout
  // with F (column-major) layout, the 1D array has periodicity of nx
// all the variables should be functions of x and y
// size (nx+2)*(ny+2), x:nx, y:ny
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
float xc = x[i];
float yc = y[j];
int cent = fnx/2;
ps_old[C] = 5.625f - sqrtf( (xc-x[cent])*(xc-x[cent]) + yc*yc )/cP.W0 ;
//if (C<1000){printf("ps %f\n",ps_old[C]);}
ps_new[C] = ps_old[C];
U_old[C] = cP.U0;
U_new[C] = cP.U0;
ph_old[C] = tanhf(ps_old[C]/cP.sqrt2);
ph_new[C] = tanhf(ps_new[C]/cP.sqrt2);
// if (C<1000){printf("phi %f\n",ph_old[C]);}
}
}
// anisotropy functions
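// atheta evaluates the fourfold surface-energy anisotropy a(n) = a_s*(1 + epsilon*(nx^4 + nz^4)/|n|^4)
// on the gradient direction rotated by the misorientation angle (via cP.cosa / cP.sina);
// aptheta is the matching angular-derivative term that enters the anisotropic fluxes below.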
__device__ float
atheta(float ux, float uz){
float ux2 = cP.cosa*ux + cP.sina*uz;
ux2 = ux2*ux2;
float uz2 = -cP.sina*ux + cP.cosa*uz;
uz2 = uz2*uz2;
float MAG_sq = (ux2 + uz2);
float MAG_sq2= MAG_sq*MAG_sq;
if (MAG_sq > cP.eps){
return cP.a_s*( 1.0f + cP.epsilon*(ux2*ux2 + uz2*uz2) / MAG_sq2);}
else {return 1.0f;}
}
__device__ float
aptheta(float ux, float uz){
float uxr = cP.cosa*ux + cP.sina*uz;
float ux2 = uxr*uxr;
float uzr = -cP.sina*ux + cP.cosa*uz;
float uz2 = uzr*uzr;
float MAG_sq = (ux2 + uz2);
float MAG_sq2= MAG_sq*MAG_sq;
if (MAG_sq > cP.eps){
return -cP.a_12*uxr*uzr*(ux2 - uz2) / MAG_sq2;}
else {return 0.0f;}
}
// psi & phi equation: two dimensions
__global__ void
rhs_psi_shared_mem(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
    // each CUDA thread block computes one tile of the grid (32*32)
    // the plain global-memory accesses are not contiguous, so they cannot reach peak bandwidth (no memory coalescing)
    // add a shared-memory version to store the neighbours' data: ps and ph
    // declare shared memory for time tiling
    // we have an extra (nx+2)(ny+2)-sized region available to load
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
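    // each thread block stages a full BLOCK_DIM_X x BLOCK_DIM_Y tile (interior points plus a
    // one-cell halo on every side) in shared memory; only the real_block_x * real_block_y
    // interior threads write results, the halo threads merely load neighbour data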
    // load (32+2)*(32+2) data from memory
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
    // each thread --> one element of the enlarged (haloed) block
    int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
    // obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
    // add 1 because the blocks index the inner region; subtract halo_left because this block carries a halo region
int data_addr_x = block_addr_x + 1 - halo_left + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
// int j=C/fnx;
// int i=C-j*fnx;
int j = data_addr_y;
int i = data_addr_x;
    // load the old field values from global memory into shared memory
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
__syncthreads();
    if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check data 1: %f\n", ps[C]);
// }
// if (local_id == 0) printf("check data %f", ps_shared[local_id]);
// compute based on the shared memory, skip if we are at the boundary
int place = local_id_y * BLOCK_DIM_X + local_id_x;
    // if the points are at the boundary, return
    // two levels of return: global and local region
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// // preload data
// float ps_shared_c = ps_shared[place];
// float ps_shared_r = ps_shared[R];
// float ps_shared_l = ps_shared[L];
// float ps_shared_top = ps_shared[T];
// float ps_shared_top_l = ps_shared[T-1];
// float ps_shared_top_r = ps_shared[T+1];
// float ps_shared_b_r = ps_shared[B-1];
// float ps_shared_b_l = ps_shared[B+1];
// float ps_shared_b = ps_shared[B];
// float ph_shared_c = ph_shared[place];
// float ph_shared_r = ph_shared[R];
// float ph_shared_l = ph_shared[L];
// float ph_shared_top = ph_shared[T];
// float ph_shared_top_l = ph_shared[T-1];
// float ph_shared_top_r = ph_shared[T+1];
// float ph_shared_b_r = ph_shared[B-1];
// float ph_shared_b_l = ph_shared[B+1];
// float ph_shared_b = ph_shared[B];
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi[C] = rhs_psi / tau_psi;
ps_new[C] = ps_shared[place] + cP.dt * dpsi[C];
ph_new[C] = tanhf(ps_new[C]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
// __syncthreads();
}
// psi & phi equation: two dimensions
// merge set BC func into this func
__global__ void
rhs_psi_shared_mem_BC(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
    // each CUDA thread block computes one tile of the grid (32*32)
    // the plain global-memory accesses are not contiguous, so they cannot reach peak bandwidth (no memory coalescing)
    // add a shared-memory version to store the neighbours' data: ps and ph
    // declare shared memory for time tiling
    // we have an extra (nx+2)(ny+2)-sized region available to load
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
    // load (32+2)*(32+2) data from memory
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// need locate new shared_mem for updating U if we also merge U into this
__shared__ float ps_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
    // each thread --> one element of the enlarged (haloed) block
    int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
    // obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
    // add 1 because the blocks index the inner region; subtract halo_left because this block carries a halo region
int data_addr_x = block_addr_x + 1 - halo_left + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
// load data into shared mem for old values
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
ps_shared_new[local_id] = ps[C];
ph_shared_new[local_id] = ph[C];
U_shared_new[local_id] = U[C];
dpsi_shared_new[local_id] = dpsi[C];
__syncthreads();
// return if the id exceeds the true region
    if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check data 1: %f\n", ps[C]);
// }
// if (local_id == 0) printf("check data %f", ps_shared[local_id]);
// compute based on the shared memory, skip if we are at the boundary
int place = local_id_y * BLOCK_DIM_X + local_id_x;
    // if the points are at the boundary, return
    // two levels of return: global and local region
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi_shared_new[place] = rhs_psi / tau_psi;
ps_shared_new[place] = ps_shared[place] + cP.dt * dpsi_shared_new[place];
ph_shared_new[place] = tanhf(ps_shared_new[place]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
__syncthreads();
  // write back
  // need to write back ps, ph, dpsi; U is not updated here, so no write-back is needed for it
  // core data can be written back safely,
  // but the BC data needs to be handled carefully
  // write the core data back
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
}
}
__syncthreads();
  // update BC
  // need to update BC of ps, ph, dpsi and U
  // bottom line
if ((j == 0) && (i < fnx)){
// if(i == 0){
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0) && (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
// cut corners
// ps_shared_new[place] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place + 2*BLOCK_DIM_X];
ps_new[C] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_new[C] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi[C] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U[C] = U_shared[place + 2*BLOCK_DIM_X];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
// printf("we are here");
// if(i == fnx - 1){
// // printf("we are here");
// // right top point
// ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
ps_shared_new[place] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place - 2*BLOCK_DIM_X];
// cut corners
ps_new[C] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_new[C] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi[C] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U[C] = U_shared[place - 2*BLOCK_DIM_X];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
// if(j == fny - 1){
// // printf("we are here");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place + 2];
ph_shared_new[place] = ph_shared_new[place + 2];
dpsi_shared_new[place] = dpsi_shared_new[place + 2];
U_shared[place] = U_shared[place + 2];
// cut corners
ps_new[C] = ps_shared_new[place + 2];
ph_new[C] = ph_shared_new[place + 2];
dpsi[C] = dpsi_shared_new[place + 2];
U[C] = U_shared[place + 2];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// if(j == 0){
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place - 2];
ph_shared_new[place] = ph_shared_new[place - 2];
dpsi_shared_new[place] = dpsi_shared_new[place - 2];
U_shared[place] = U_shared[place - 2];
// cut corners
ps_new[C] = ps_shared_new[place - 2];
ph_new[C] = ph_shared_new[place - 2];
dpsi[C] = dpsi_shared_new[place - 2];
U[C] = U_shared[place - 2];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // at last we update corners
// if ((j == 0)){
// if(i == 0){
// // printf("we are at LB");
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((j == fny - 1)){
// // printf("we are here");
// if(i == fnx - 1){
// // printf("we are at RT");
// // right top point
// // ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// ps_new[C] = ps_shared_new[place - 2- 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2- 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2- 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((i == fnx - 1)){
// if(j == 0){
// // printf("we are at RB");
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((i == 0)){
// if(j == fny - 1){
// // printf("we are at LT");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
}
// psi & phi equation: two dimensions
// merge the set_BC function into this kernel
__global__ void
rhs_psi_U_shared_mem_merge(float* ps, float* ph, float* U, float* ps_new, float* ph_new, float* U_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
  // each CUDA thread block computes one tile of the grid (BLOCK_DIM_X x BLOCK_DIM_Y)
  // memory access is from global memory and is not contiguous, so it cannot reach the max bandwidth (poor memory coalescing)
  // add a shared-memory version to store the neighbours' data: ps and ph
  // declare shared memory for time tiling
  // we have an extra (nx+2)(ny+2) region of space to load
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x_p = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y_p = BLOCK_DIM_Y - halo_top - halo_bottom;
  // real compute width/height: U is updated from the freshly computed phi and dpsi, so the tile shrinks by a second halo layer
int real_block_x = BLOCK_DIM_X - 2*halo_left - 2*halo_right;
int real_block_y = BLOCK_DIM_Y - 2*halo_top - 2*halo_bottom;
// load old data
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// write data into new array and update at last
__shared__ float ps_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
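  // Rough shared-memory footprint (a sketch, assuming BLOCK_DIM_X = BLOCK_DIM_Y = 16 as
  // defined for this file): 8 arrays * 16*16 floats * 4 B = 8 KB per block, well under the
  // typical 48 KB per-block limit, so occupancy is not limited by shared memory here.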
// each thread --> one data in one enlarged block
  int local_id = threadIdx.x; // local id -> save to shared mem
  int local_id_x = local_id % BLOCK_DIM_X;
  int local_id_y = local_id / BLOCK_DIM_X;
  // obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
  // add 1 since the blocks tile the inner region; subtract halo_left*2 since this block carries two halo layers
int data_addr_x = block_addr_x + 1 - halo_left*2 + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom*2 + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
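  // Worked example of the index mapping (a sketch, assuming BLOCK_DIM_X = 16 with unit halos,
  // so real_block_x = 12): for block_id_x = 1, block_addr_x = 12 and
  // data_addr_x = 12 + 1 - 2 + local_id_x, i.e. threads local_id_x = 2..13 compute
  // global columns 13..24, continuing exactly where block 0 (columns 1..12) stopped;
  // local_id_x = 0,1 and 14,15 only hold halo copies.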
// initialize data into shared mem
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
ps_shared_new[local_id] = ps[C];
ph_shared_new[local_id] = ph[C];
U_shared_new[local_id] = U[C];
dpsi_shared_new[local_id] = dpsi[C];
__syncthreads();
// return if the id exceeds the true region
  if ((i > fnx - 1) || (j > fny - 1)) {return;}
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// compute based on the shared memory, skip if we are at the boundary
  // step 1: update phi, psi and dpsi
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-halo_right) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-halo_top)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi_shared_new[place] = rhs_psi / tau_psi;
ps_shared_new[place] = ps_shared[place] + cP.dt * dpsi_shared_new[place];
ph_shared_new[place] = tanhf(ps_shared_new[place]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
__syncthreads();
  // need to update BC of ps, ph, dpsi and U
  // bottom line
if ((j == 0) && (i < fnx)){
// if(i == 0){
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0) && (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
// cut corners
ps_shared_new[place] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place + 2*BLOCK_DIM_X];
// ps_new[C] = ps_shared_new[place + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2*BLOCK_DIM_X];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
// printf("we are here");
// if(i == fnx - 1){
// // printf("we are here");
// // right top point
// ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
ps_shared_new[place] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place - 2*BLOCK_DIM_X];
// cut corners
// ps_new[C] = ps_shared_new[place - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2*BLOCK_DIM_X];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
// if(j == fny - 1){
// // printf("we are here");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
// printf("hey man\n");
ps_shared_new[place] = ps_shared_new[place + 2];
ph_shared_new[place] = ph_shared_new[place + 2];
dpsi_shared_new[place] = dpsi_shared_new[place + 2];
U_shared[place] = U_shared[place + 2];
// ps_new[C] = ps_shared_new[place + 2];
// ph_new[C] = ph_shared_new[place + 2];
// dpsi[C] = dpsi_shared_new[place + 2];
// U[C] = U_shared[place + 2];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// if(j == 0){
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place - 2];
ph_shared_new[place] = ph_shared_new[place - 2];
dpsi_shared_new[place] = dpsi_shared_new[place - 2];
U_shared[place] = U_shared[place - 2];
// cut corners
// ps_new[C] = ps_shared_new[place - 2];
// ph_new[C] = ph_shared_new[place - 2];
// dpsi[C] = dpsi_shared_new[place - 2];
// U[C] = U_shared[place - 2];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// update U
// if the points are at boundary, return
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// only update the inner res
if ((local_id_x>halo_left*2-1)&& (local_id_x<BLOCK_DIM_X-halo_right*2) && (local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2)) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared_new[place] + ph_shared_new[R] + ph_shared_new[T] + ph_shared_new[T+1] ) * 0.25f;
float phipjm=( ph_shared_new[place] + ph_shared_new[R] + ph_shared_new[B] + ph_shared_new[B+1] ) * 0.25f;
float phimjp=( ph_shared_new[place] + ph_shared_new[L] + ph_shared_new[T-1] + ph_shared_new[T] ) * 0.25f;
float phimjm=( ph_shared_new[place] + ph_shared_new[L] + ph_shared_new[B-1] + ph_shared_new[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared_new[place]*ph_shared_new[place])*dpsi_shared_new[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared_new[R]-ph_shared_new[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared_new[R]*ph_shared_new[R])*dpsi_shared_new[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared_new[place]-ph_shared_new[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared_new[L]*ph_shared_new[L])*dpsi_shared_new[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared_new[T]-ph_shared_new[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared_new[T]*ph_shared_new[T])*dpsi_shared_new[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared_new[place]-ph_shared_new[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared_new[B]*ph_shared_new[B])*dpsi_shared_new[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared_new[place];
U_shared_new[place] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
}
}
  // at last, write back
  // need to write back ps, ph, dpsi and U
  // core data can be written back safely,
  // but the BC data needs to be handled carefully
  // write the core data back
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>halo_left*2-1)&& (local_id_x<BLOCK_DIM_X-halo_right*2) && (local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2)) {
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared_new[place];
}
}
__syncthreads();
  // need to update BC
  // directly writing back the BC data from shared_mem_new is fine,
  // since we already updated the BC above
// bottom line
if ((j == 0) && (i < fnx)){
if (((local_id_x>halo_left*2-1) && (local_id_x<BLOCK_DIM_X-halo_right*2))||(i == 0) || (i == fnx - 1)){
// printf("hey man\n");
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
if (((local_id_x>halo_left*2-1) && (local_id_x<BLOCK_DIM_X-halo_right*2))||(i == 0) || (i == fnx - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
if (((local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2))||(j == 0) || (j == fny - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// printf("hey man\n");
if (((local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2))||(j == 0) || (j == fny - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
}
// U equation
// shared-mem version, but each thread processes a real update point, so extra loads are needed for the halo-region data
__global__ void
rhs_U_shared_mem_ex(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny, int num_block_x, int num_block_y){
// we have halo region
// __shared__ float ps_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float ph_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float U_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
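  // Shared-tile size sketch (assuming BLOCK_DIM_X = BLOCK_DIM_Y = 16 and unit halos):
  // each array holds (16+2)*(16+2) = 324 floats, so the three arrays take
  // 3 * 324 * 4 B ~= 3.8 KB of shared memory per block.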
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X;
int real_block_y = BLOCK_DIM_Y;
// each thread --> one data in one enlarged block
  int local_id = threadIdx.x; // local id -> save to shared mem
  int local_id_x = local_id % BLOCK_DIM_X + halo_left;
  int local_id_y = local_id / BLOCK_DIM_X + halo_bottom;
  // obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
  // add 1 since the blocks tile the inner region; subtract halo_left since this block carries a halo layer
int data_addr_x = block_addr_x + 1 + local_id_x - halo_left;
int data_addr_y = block_addr_y + 1 + local_id_y - halo_bottom;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
// update data into shared mem
int place = local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x;
ph_shared[place] = ph[C];
U_shared[place] = U[C];
dpsi_shared[place] = dpsi[C];
__syncthreads();
// fetch the BC datapoint
if (local_id_x < 2){
ph_shared[place - halo_left] = ph[C-halo_left]; //left vertical line
U_shared[place - halo_left] = U[C-halo_left];
dpsi_shared[place - halo_left] = dpsi[C-halo_left];
ph_shared[place + real_block_x + halo_left - 1] = ph[C+ real_block_x + halo_left - 1]; //right vertical line
U_shared[place + real_block_x + halo_left - 1] = U[C+ real_block_x + halo_left - 1];
dpsi_shared[place + real_block_x + halo_left - 1] = dpsi[C+ real_block_x + halo_left - 1];
}
if (local_id_y < 2){
ph_shared[place - (real_block_x+halo_left+halo_right)] = ph[C-fnx]; //bottom horizontal line
U_shared[place - (real_block_x+halo_left+halo_right)] = U[C-fnx];
dpsi_shared[place - (real_block_x+halo_left+halo_right)] = dpsi[C-fnx];
ph_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = ph[C+ fnx*(real_block_y + halo_top - 1)]; //top horizontal line
U_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = U[C+ fnx*(real_block_y + halo_top - 1)];
dpsi_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = dpsi[C+ fnx*(real_block_y + halo_top - 1)];
}
// corners
if ((local_id_x < 2) && (local_id_y < 2)){
// update four corners
ph_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = ph[i - 1 + (j-1) *fnx];
U_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = U[i - 1 + (j-1) *fnx];
dpsi_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = dpsi[i - 1 + (j-1) *fnx];
ph_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = ph[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
U_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = U[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
dpsi_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = dpsi[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
ph_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = ph[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
U_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = U[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
dpsi_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = dpsi[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
ph_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = ph[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
U_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = U[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
dpsi_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = dpsi[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
}
__syncthreads();
// if (C==137){
// printf("check pre-loaded data\n");
// printf("local_id_x: %d, local_id_y: %d\n", local_id_x, local_id_y);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph[C], U[C], dpsi[C]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[6], U_shared[6], dpsi_shared[6]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[0], U_shared[0], dpsi_shared[0]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[real_block_x+halo_left+halo_right-1], U_shared[real_block_x+halo_left+halo_right-1], dpsi_shared[real_block_x+halo_left+halo_right-1]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)], U_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)], dpsi_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1], U_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1], dpsi_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1]);
// }
// if the points are at boundary, return
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X+halo_left+halo_right;
int B=place-(BLOCK_DIM_X+halo_left+halo_right);
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==137){
// printf("detailed check of neighbours 3\n");
// // print("place: %d\n", B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", U_shared[R], U_shared[L], U_shared[T], U_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", dpsi_shared[R], dpsi_shared[L], dpsi_shared[T], dpsi_shared[B]);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared[place]*ph_shared[place])*dpsi_shared[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared[R]*ph_shared[R])*dpsi_shared[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared[L]*ph_shared[L])*dpsi_shared[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared[T]*ph_shared[T])*dpsi_shared[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared[B]*ph_shared[B])*dpsi_shared[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
// if (C==137){
// printf("detailed check of neighbours 4\n");
// printf("UR: %f ; UL:%f ; UT: %f ; UB: %f \n", UR, UL, UT, UB);
// }
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared[place];
U_new[C] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
// if (C==137){
// printf("detailed check of neighbours 3\n");
// printf("U: %f \n", U_new[C]);
// }
}
}
// U equation
__global__ void
rhs_U_shared_mem(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny, int num_block_x, int num_block_y){
// __shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
// each thread --> one data in one enlarged block
  int local_id = threadIdx.x; // local id -> save to shared mem
  int local_id_x = local_id % BLOCK_DIM_X;
  int local_id_y = local_id / BLOCK_DIM_X;
  // obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
  // add 1 since the blocks tile the inner region; subtract halo_left since this block carries a halo layer
int data_addr_x = block_addr_x + 1 + local_id_x - halo_left;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
// int j=C/fnx;
// int i=C-j*fnx;
int j = data_addr_y;
int i = data_addr_x;
// update data
// ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
__syncthreads();
  if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check pre-loaded data\n");
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph[C], U[C], dpsi[C]);
// }
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// if the points are at boundary, return
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared[place]*ph_shared[place])*dpsi_shared[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared[R]*ph_shared[R])*dpsi_shared[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared[L]*ph_shared[L])*dpsi_shared[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared[T]*ph_shared[T])*dpsi_shared[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared[B]*ph_shared[B])*dpsi_shared[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared[place];
U_new[C] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
}
}
}
// U equation
__global__ void
rhs_U_ori(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny ){
int C = blockIdx.x * blockDim.x + threadIdx.x;
int j=C/fnx;
int i=C-j*fnx;
// if the points are at boundary, return
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// find the indices of the 8 neighbors for center
int R=C+1;
int L=C-1;
int T=C+fnx;
int B=C-fnx;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx,nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
  // these ph's are defined on cell centers
float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f;
float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f;
float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f;
float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f;
float jat = 0.5f*(1.0f+(1.0f-k)*U[C])*(1.0f-ph[C]*ph[C])*dpsi[C];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph[R]-ph[C];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U[R])*(1.0f-ph[R]*ph[R])*dpsi[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[R])*(U[R]-U[C]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph[C]-ph[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U[L])*(1.0f-ph[L]*ph[L])*dpsi[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[L])*(U[C]-U[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph[T]-ph[C];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U[T])*(1.0f-ph[T]*ph[T])*dpsi[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[T])*(U[T]-U[C]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph[C]-ph[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U[B])*(1.0f-ph[B]*ph[B])*dpsi[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[B])*(U[C]-U[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph[C];
U_new[C] = U[C] + cP.dt * ( rhs_U / tau_U );
}
}
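// A minimal host-side launch sketch for rhs_U_ori (illustrative only, not called anywhere
// in this file): one thread per grid point, with the same 128-thread blocks used in setup().
// The pointer arguments are assumed to be device buffers of size fnx*fny.
static void launch_rhs_U_ori_example(float* U, float* U_new, float* ph, float* dpsi,
                                     int fnx, int fny){
  int blocksize_2d = 128;
  int num_block_2d = (fnx*fny + blocksize_2d - 1) / blocksize_2d;
  hipLaunchKernelGGL(( rhs_U_ori), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, U, U_new, ph, dpsi, fnx, fny);
}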
// Host code for PF computing
void setup(GlobalConstants params, int fnx, int fny, float* x, float* y, float* phi, float* psi,float* U){
  // we should have already passed all the data structures in by this time
  // move the data onto the device
printCudaInfo();
float* x_device;// = NULL;
float* y_device;// = NULL;
// store two for swap behavior
float* psi_old;// = NULL;
float* psi_new;// = NULL;
float* U_old;// = NULL;
float* U_new;// = NULL;
float* phi_old;// = NULL;
float* phi_new;// = NULL;
float* dpsi;// = NULL;
// allocate x, y, phi, psi, U related params
int length = fnx*fny;
hipMalloc((void **)&x_device, sizeof(float) * fnx);
hipMalloc((void **)&y_device, sizeof(float) * fny);
hipMalloc((void **)&phi_old, sizeof(float) * length);
hipMalloc((void **)&psi_old, sizeof(float) * length);
hipMalloc((void **)&U_old, sizeof(float) * length);
hipMalloc((void **)&phi_new, sizeof(float) * length);
hipMalloc((void **)&psi_new, sizeof(float) * length);
hipMalloc((void **)&U_new, sizeof(float) * length);
hipMalloc((void **)&dpsi, sizeof(float) * length);
float * psi_check = new float[length];
// set initial params
hipMemcpy(x_device, x, sizeof(float) * fnx, hipMemcpyHostToDevice);
hipMemcpy(y_device, y, sizeof(float) * fny, hipMemcpyHostToDevice);
hipMemcpy(psi_old, psi, sizeof(float) * length, hipMemcpyHostToDevice);
hipMemcpy(phi_old, phi, sizeof(float) * length, hipMemcpyHostToDevice);
hipMemcpy(U_old, U, sizeof(float) * length, hipMemcpyHostToDevice);
// pass all the read-only params into global constant
hipMemcpyToSymbol(cP, ¶ms, sizeof(GlobalConstants));
int blocksize_1d = 128;
  int blocksize_2d = 128; // reducing the block size seems to make it a little faster, but around 128 is okay.
int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d;
int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d;
printf("nx: %d and ny: %d\n", fnx, fny);
printf("block size %d, # blocks %d\n", blocksize_2d, num_block_2d);
hipLaunchKernelGGL(( initialize), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, psi_old, phi_old, U_old, psi_new, phi_new, U_new, x_device, y_device, fnx, fny);
hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_new, dpsi, fnx, fny);
hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_old, dpsi, fnx, fny);
hipDeviceSynchronize();
double startTime = CycleTimer::currentSeconds();
  // change the 2d block size since we don't want to include the halo region
int real_per_block_x = BLOCK_DIM_X - HALO_LEFT - HALO_RIGHT;
int real_per_block_y = BLOCK_DIM_Y - HALO_TOP - HALO_BOTTOM;
int num_block_x = (fnx - 2 + real_per_block_x - 1) / real_per_block_x;
int num_block_y = (fny - 2 + real_per_block_y - 1) / real_per_block_y;
printf("block_x: %d and block_y: %d\n", real_per_block_x, real_per_block_y);
printf("block_x: %d and block_y: %d\n", num_block_x, num_block_y);
  int num_block_2d_s = num_block_x * num_block_y; // each block covers a (BLOCK_DIM_X-2) x (BLOCK_DIM_Y-2) true compute tile within the (fnx-2) x (fny-2) interior
  int blocksize_2d_s = BLOCK_DIM_X * BLOCK_DIM_Y; // the full BLOCK_DIM_X*BLOCK_DIM_Y tile has to be staged into shared memory
int real_per_block_x_merge = BLOCK_DIM_X - HALO_LEFT*2 - HALO_RIGHT*2;
int real_per_block_y_merge = BLOCK_DIM_Y - HALO_TOP*2 - HALO_BOTTOM*2;
int num_block_x_merge = (fnx - 2 + real_per_block_x_merge - 1) / real_per_block_x_merge;
int num_block_y_merge = (fny - 2 + real_per_block_y_merge - 1) / real_per_block_y_merge;
printf("real_per_block_x_merge: %d and real_per_block_y_merge: %d\n", real_per_block_x_merge, real_per_block_y_merge);
printf("num_block_x_merge: %d and num_block_y_merge: %d\n", num_block_x, num_block_y_merge);
  int num_block_2d_s_merge = num_block_x_merge * num_block_y_merge; // each block covers a (BLOCK_DIM_X-4) x (BLOCK_DIM_Y-4) true compute tile within the (fnx-2) x (fny-2) interior
  int blocksize_2d_s_merge = BLOCK_DIM_X * BLOCK_DIM_Y; // the full BLOCK_DIM_X*BLOCK_DIM_Y tile has to be staged into shared memory
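  // Worked sizing example (a sketch, assuming BLOCK_DIM_X = BLOCK_DIM_Y = 16 and a
  // hypothetical 514 x 514 grid, i.e. fnx = fny = 514): real_per_block_x_merge = 16 - 4 = 12,
  // so num_block_x_merge = ceil(512 / 12) = 43 and the merged kernel launches
  // 43 * 43 = 1849 blocks of 256 threads each.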
// use for no halo region
// int real_per_block_x2 = BLOCK_DIM_X;
// int real_per_block_y2 = BLOCK_DIM_Y;
// int num_block_x2 = (fnx - 2 + real_per_block_x2 - 1) / real_per_block_x2;
// int num_block_y2 = (fny - 2 + real_per_block_y2 - 1) / real_per_block_y2;
// printf("block_x: %d and block_y: %d\n", real_per_block_x2, real_per_block_y2);
// printf("block_x: %d and block_y: %d\n", num_block_x2, num_block_y2);
// int num_block_2d_s2 = num_block_x2 * num_block_y2; //each one take one block with (32-2)+ (32-2) ture block within (fnx-2), (fny-2)
// int blocksize_2d_s2 = BLOCK_DIM_X * BLOCK_DIM_Y; // 32*32: as we have to write 32*32 data region into shared memory
for (int kt=0; kt<params.Mt/2; kt++){
// printf("time step %d\n",kt);
// rhs_psi_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x, num_block_y);
// rhs_psi_shared_mem_BC<<< num_block_2d_s, blocksize_2d_s >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x, num_block_y);
// hipDeviceSynchronize();
// hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_old, dpsi, fnx, fny);
// hipDeviceSynchronize();
// hipMemcpy(psi_check, psi_new, sizeof(float) * length, hipMemcpyDeviceToHost);
// printf("check data at 0+20: %f\n", psi_check[0+20]);
// printf("check data at 130+fnx*2: %f\n", psi_check[130+fnx*2]);
// printf("check data at 67334-fnx*2: %f\n", psi_check[67334-fnx*2]);
// printf("check data at 67464-20: %f\n", psi_check[67464-20]);
// printf("\n");
// printf("Iter %d\n", kt);
// printf("check data at 0: %f\n", psi_check[0]);
// printf("check data at 0+2: %f\n", psi_check[0+2]);
// printf("check data at 0+2fnx*2: %f\n", psi_check[0+2*fnx]);
// printf("check data at 130: %f\n", psi_check[130]);
// printf("check data at 130-2: %f\n", psi_check[130-2]);
// printf("check data at 130+2fnx*2: %f\n", psi_check[130+2*fnx]);
// printf("check data at 67334: %f\n", psi_check[67334]);
// printf("check data at 67334+2: %f\n", psi_check[67334+2]);
// printf("check data at 67334-2fnx*2: %f\n", psi_check[67334-2*fnx]);
// printf("check data at 67464: %f\n", psi_check[67464]);
// printf("check data at 67464-2: %f\n", psi_check[67464-2]);
// printf("check data at 67464-2fnx*2: %f\n", psi_check[67464-2*fnx]);
// printf("\n");
// rhs_U_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(U_old, U_new, phi_new, dpsi, fnx, fny, num_block_x, num_block_y);
// rhs_U_shared_mem_ex<<< num_block_2d_s2, blocksize_2d_s2 >>>(U_old, U_new, phi_new, dpsi, fnx, fny, num_block_x2, num_block_y2);
// hipDeviceSynchronize();
hipLaunchKernelGGL(( rhs_psi_U_shared_mem_merge), dim3(num_block_2d_s_merge), dim3(blocksize_2d_s_merge) , 0, 0, psi_old, phi_old, U_old, psi_new, phi_new, U_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x_merge, num_block_y_merge);
hipLaunchKernelGGL(( rhs_psi_U_shared_mem_merge), dim3(num_block_2d_s_merge), dim3(blocksize_2d_s_merge) , 0, 0, psi_new, phi_new, U_new, psi_old, phi_old, U_old, y_device, dpsi, fnx, fny, 2*kt, num_block_x_merge, num_block_y_merge);
// rhs_psi_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1, num_block_x, num_block_y);
// // rhs_psi_shared_mem_BC<<< num_block_2d_s, blocksize_2d_s >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1, num_block_x, num_block_y);
// // hipDeviceSynchronize();
// hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_new, dpsi, fnx, fny);
// // hipDeviceSynchronize();
// rhs_U_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(U_new, U_old, phi_old, dpsi, fnx, fny, num_block_x, num_block_y);
// rhs_U_shared_mem_ex<<< num_block_2d_s2, blocksize_2d_s2 >>>(U_new, U_old, phi_old, dpsi, fnx, fny, num_block_x2, num_block_y2);
// hipDeviceSynchronize();
}
hipDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
printf("time for %d iterations: %f s\n", params.Mt, endTime-startTime);
hipMemcpy(psi, psi_old, length * sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(phi, phi_old, length * sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(U, U_old, length * sizeof(float),hipMemcpyDeviceToHost);
hipFree(x_device); hipFree(y_device);
hipFree(psi_old); hipFree(psi_new);
hipFree(phi_old); hipFree(phi_new);
hipFree(U_old); hipFree(U_new);
hipFree(dpsi);
}
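// Illustrative sketch only (hypothetical helper, not wired into the driver above):
// after the kernel launches in the loop one could surface launch/runtime errors
// with the standard HIP runtime API before copying results back to the host.
static inline void check_hip_err_sketch(const char* where){
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        printf("HIP error after %s: %s\n", where, hipGetErrorString(err));
}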
/*
void time_marching(GlobalConstants params, int fnx, int fny){
// initialize or load
int blocksize_1d = 256;
int blocksize_2d = 512;
int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d;
int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d;
hipLaunchKernelGGL(( initialize), dim3(num_block_2d), dim3(blocksize_2d) , 0, 0, ps_old, ph_old, U_old, ps_new, ph_new, U_new, x_device, y_device, fnx, fny);
for (int kt=0; kt<params.Mt/2; kt++){
rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt );
hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_new, phi_new, U_old, dpsi, fnx, fny);
rhs_U<<< num_block_2d, blocksize_2d >>>(U_old, U_new, phi_new, dpsi);
rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 );
hipLaunchKernelGGL(( set_BC), dim3(num_block_1d), dim3(blocksize_1d) , 0, 0, psi_old, phi_old, U_new, dpsi, fnx, fny);
rhs_U<<< num_block_2d, blocksize_2d >>>(U_new, U_old, phi_old, dpsi);
}
}*/
void printCudaInfo()
{
// for fun, just print out some stats on the machine
int deviceCount = 0;
hipError_t err = hipGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
hipDeviceProp_t deviceProps;
hipGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
| 0f159e6f74210ff55f65c6aebc257a56f64c8e52.cu | #include <stdio.h>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include <curand.h>
#include <curand_kernel.h>
#include "CycleTimer.h"
using namespace std;
// this is dependent on the time tiling and grid size of one thread block
// we first finish a non-time tiling version
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
#define HALO_LEFT 1
#define HALO_RIGHT 1
#define HALO_TOP 1
#define HALO_BOTTOM 1
void printCudaInfo();
extern float toBW(int bytes, float sec);
struct GlobalConstants {
int nx;
int ny;
int Mt;
int nts;
int ictype;
float G;
float R;
float delta;
float k;
float c_infm;
float Dl;
float d0;
float W0;
float lT;
float lamd;
float tau0;
float c_infty;
float R_tilde;
float Dl_tilde;
float lT_tilde;
float eps;
float alpha0;
float dx;
float dt;
float asp_ratio;
float lxd;
float lx;
float lyd;
float eta;
float U0;
// parameters that are not in the input file
float hi;
float cosa;
float sina;
float sqrt2;
float a_s;
float epsilon;
float a_12;
};
__constant__ GlobalConstants cP;
// Device codes
// boundary condition
// only use this function to access the boundary points,
// other functions return at the boundary
// TODO: what is this function actually doing? we can definitely merge this into the kernel, right?
__global__ void
set_BC(float* ps, float* ph, float* U, float* dpsi, int fnx, int fny){
// find the location of boundary:
int index = blockIdx.x * blockDim.x + threadIdx.x;
// z=0, lx
if (index<fnx) {
int b_in = index+2*fnx;
int t_out = index+(fny-1)*fnx;
int t_in = index+(fny-3)*fnx;
ps[index] = ps[b_in];
ph[index] = ph[b_in];
U[index] = U[b_in];
dpsi[index] = dpsi[b_in];
ps[t_out] = ps[t_in];
ph[t_out] = ph[t_in];
U[t_out] = U[t_in];
dpsi[t_out] = dpsi[t_in];
}
if (index<fny){
int l_out = index*fnx;
int l_in = index*fnx + 2;
int r_out = index*fnx + fnx -1;
int r_in = index*fnx + fnx -3;
ps[l_out] = ps[l_in];
ph[l_out] = ph[l_in];
U[l_out] = U[l_in];
dpsi[l_out] = dpsi[l_in];
ps[r_out] = ps[r_in];
ph[r_out] = ph[r_in];
U[r_out] = U[r_in];
dpsi[r_out] = dpsi[r_in];
}
}
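// Illustrative sketch (hypothetical helper, not part of the original driver): set_BC
// walks the x- and y-boundaries with the same 1-D thread index, so a grid covering
// max(fnx, fny) threads is enough to refresh all four ghost lines.
static void launch_set_BC_sketch(float* ps, float* ph, float* U, float* dpsi, int fnx, int fny){
    int blocksize_1d = 128;
    int n = (fnx > fny) ? fnx : fny;
    int num_block_1d = (n + blocksize_1d - 1) / blocksize_1d;
    set_BC<<< num_block_1d, blocksize_1d >>>(ps, ph, U, dpsi, fnx, fny);
}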
// initialization
__global__ void
initialize(float* ps_old, float* ph_old, float* U_old, float* ps_new, float* ph_new, float* U_new
, float* x, float* y, int fnx, int fny){
int C = blockIdx.x * blockDim.x + threadIdx.x;
// obtain i and j(2D position)
int j=C/fnx;
int i=C-j*fnx;
// when initializing, you need to consider C/F layout
// if F layout, the 1D array has periodicity of nx
// all the variables should be functions of x and y
// size (nx+2)*(ny+2), x:nx, y:ny
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
float xc = x[i];
float yc = y[j];
int cent = fnx/2;
ps_old[C] = 5.625f - sqrtf( (xc-x[cent])*(xc-x[cent]) + yc*yc )/cP.W0 ;
//if (C<1000){printf("ps %f\n",ps_old[C]);}
ps_new[C] = ps_old[C];
U_old[C] = cP.U0;
U_new[C] = cP.U0;
ph_old[C] = tanhf(ps_old[C]/cP.sqrt2);
ph_new[C] = tanhf(ps_new[C]/cP.sqrt2);
// if (C<1000){printf("phi %f\n",ph_old[C]);}
}
}
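// in short: psi starts as a signed distance (in units of W0) from a circular seed of
// radius 5.625*W0 centered at (x[fnx/2], y=0), phi = tanh(psi/sqrt(2)) is the usual
// hyperbolic-tangent interface profile, and the supersaturation U starts at the
// far-field value U0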
// anisotropy functions
__device__ float
atheta(float ux, float uz){
float ux2 = cP.cosa*ux + cP.sina*uz;
ux2 = ux2*ux2;
float uz2 = -cP.sina*ux + cP.cosa*uz;
uz2 = uz2*uz2;
float MAG_sq = (ux2 + uz2);
float MAG_sq2= MAG_sq*MAG_sq;
if (MAG_sq > cP.eps){
return cP.a_s*( 1.0f + cP.epsilon*(ux2*ux2 + uz2*uz2) / MAG_sq2);}
else {return 1.0f;}
}
__device__ float
aptheta(float ux, float uz){
float uxr = cP.cosa*ux + cP.sina*uz;
float ux2 = uxr*uxr;
float uzr = -cP.sina*ux + cP.cosa*uz;
float uz2 = uzr*uzr;
float MAG_sq = (ux2 + uz2);
float MAG_sq2= MAG_sq*MAG_sq;
if (MAG_sq > cP.eps){
return -cP.a_12*uxr*uzr*(ux2 - uz2) / MAG_sq2;}
else {return 0.0f;}
}
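// atheta/aptheta evaluate the fourfold surface-energy anisotropy and its derivative
// with respect to the interface angle, using the normal components rotated by alpha0:
// a_s*(1 + epsilon*(ux^4 + uz^4)/|u|^4) reduces to a(theta) = 1 + delta*cos(4*theta)
// under the usual convention a_s = 1 - 3*delta, epsilon = 4*delta/(1 - 3*delta), and
// aptheta then matches a'(theta) when a_12 = 4*a_s*epsilon (the constants themselves
// are presumed to be set on the host, which is not shown here)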
// psi & phi equation: two dimensions
__global__ void
rhs_psi_shared_mem(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
// each CUDA thread block computes one tile (32*32)
// memory access is from global and also not contiguous, so it cannot reach the max bandwidth (no memory coalescing)
// add a shared memory version to store the neighbours' data: ps and ph
// declare shared memory for time tiling
// we have extra (nx+2)*(ny+2) space to load
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
// load (32+2)*(32+2) data from mem
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// each thread --> one data in one enlarged block
int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
// obtain the block id in shrinked region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
// add 1 as we count blocks in the inner region; - halo_left as we have a halo region in this block
int data_addr_x = block_addr_x + 1 - halo_left + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
// int j=C/fnx;
// int i=C-j*fnx;
int j = data_addr_y;
int i = data_addr_x;
// update data
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
__syncthreads();
if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check data 1: %f\n", ps[C]);
// }
// if (local_id == 0) printf("check data %f", ps_shared[local_id]);
// compute based on the shared memory, skip if we are at the boundary
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// if the points are at the boundary, return
// two levels of return: global and local region
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// // preload data
// float ps_shared_c = ps_shared[place];
// float ps_shared_r = ps_shared[R];
// float ps_shared_l = ps_shared[L];
// float ps_shared_top = ps_shared[T];
// float ps_shared_top_l = ps_shared[T-1];
// float ps_shared_top_r = ps_shared[T+1];
// float ps_shared_b_r = ps_shared[B-1];
// float ps_shared_b_l = ps_shared[B+1];
// float ps_shared_b = ps_shared[B];
// float ph_shared_c = ph_shared[place];
// float ph_shared_r = ph_shared[R];
// float ph_shared_l = ph_shared[L];
// float ph_shared_top = ph_shared[T];
// float ph_shared_top_l = ph_shared[T-1];
// float ph_shared_top_r = ph_shared[T+1];
// float ph_shared_b_r = ph_shared[B-1];
// float ph_shared_b_l = ph_shared[B+1];
// float ph_shared_b = ph_shared[B];
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi[C] = rhs_psi / tau_psi;
ps_new[C] = ps_shared[place] + cP.dt * dpsi[C];
ph_new[C] = tanhf(ps_new[C]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
// __syncthreads();
}
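// Illustrative launch-shape sketch (hypothetical helper; the real grid sizes are set
// up on the host elsewhere): with a one-cell halo on every side, each block commits
// only (BLOCK_DIM_X-2)*(BLOCK_DIM_Y-2) interior points, so the 1-D grid passed to
// rhs_psi_shared_mem must tile the (fnx-2)*(fny-2) interior with that effective size,
// e.g. num_blocks = num_blocks_single_halo_sketch(fnx, BLOCK_DIM_X) * num_blocks_single_halo_sketch(fny, BLOCK_DIM_Y).
static inline int num_blocks_single_halo_sketch(int fn, int block_dim){
    int real = block_dim - 2;           // interior points one block actually updates along this axis
    return (fn - 2 + real - 1) / real;  // ceil((fn-2)/real) over the interior grid
}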
// psi & phi equation: two dimensions
// merge set BC func into this func
__global__ void
rhs_psi_shared_mem_BC(float* ps, float* ph, float* U, float* ps_new, float* ph_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
// each CUDA thread block computes one tile (32*32)
// memory access is from global and also not contiguous, so it cannot reach the max bandwidth (no memory coalescing)
// add a shared memory version to store the neighbours' data: ps and ph
// declare shared memory for time tiling
// we have extra (nx+2)*(ny+2) space to load
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
// load (32+2)*(32+2) data from mem
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// need locate new shared_mem for updating U if we also merge U into this
__shared__ float ps_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// each thread --> one data in one enlarged block
int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
// obtain the block id in shrinked region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
// add 1 as we count blocks in the inner region; - halo_left as we have a halo region in this block
int data_addr_x = block_addr_x + 1 - halo_left + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
// load data into shared mem for old values
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
ps_shared_new[local_id] = ps[C];
ph_shared_new[local_id] = ph[C];
U_shared_new[local_id] = U[C];
dpsi_shared_new[local_id] = dpsi[C];
__syncthreads();
// return if the id exceeds the true region
if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check data 1: %f\n", ps[C]);
// }
// if (local_id == 0) printf("check data %f", ps_shared[local_id]);
// compute based on the shared memory, skip if we are at the boundary
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// if the points are at the boundary, return
// two levels of return: global and local region
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi_shared_new[place] = rhs_psi / tau_psi;
ps_shared_new[place] = ps_shared[place] + cP.dt * dpsi_shared_new[place];
ph_shared_new[place] = tanhf(ps_shared_new[place]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
__syncthreads();
// write back
// need to write back ps, ph, dpsi;
// U is not updated here, so we don't need to write it back
// core data can be saved back safely
// but BC data need to be very careful
// write the core data back
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
}
}
__syncthreads();
// // update BC
// // need update BC of ps ph dpsi and U
// // bottom line
if ((j == 0) && (i < fnx)){
// if(i == 0){
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0) && (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
// cut corners
// ps_shared_new[place] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place + 2*BLOCK_DIM_X];
ps_new[C] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_new[C] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi[C] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U[C] = U_shared[place + 2*BLOCK_DIM_X];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
// printf("we are here");
// if(i == fnx - 1){
// // printf("we are here");
// // right top point
// ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
ps_shared_new[place] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place - 2*BLOCK_DIM_X];
// cut corners
ps_new[C] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_new[C] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi[C] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U[C] = U_shared[place - 2*BLOCK_DIM_X];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
// if(j == fny - 1){
// // printf("we are here");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place + 2];
ph_shared_new[place] = ph_shared_new[place + 2];
dpsi_shared_new[place] = dpsi_shared_new[place + 2];
U_shared[place] = U_shared[place + 2];
// cut corners
ps_new[C] = ps_shared_new[place + 2];
ph_new[C] = ph_shared_new[place + 2];
dpsi[C] = dpsi_shared_new[place + 2];
U[C] = U_shared[place + 2];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// if(j == 0){
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place - 2];
ph_shared_new[place] = ph_shared_new[place - 2];
dpsi_shared_new[place] = dpsi_shared_new[place - 2];
U_shared[place] = U_shared[place - 2];
// cut corners
ps_new[C] = ps_shared_new[place - 2];
ph_new[C] = ph_shared_new[place - 2];
dpsi[C] = dpsi_shared_new[place - 2];
U[C] = U_shared[place - 2];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // at last we update corners
// if ((j == 0)){
// if(i == 0){
// // printf("we are at LB");
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((j == fny - 1)){
// // printf("we are here");
// if(i == fnx - 1){
// // printf("we are at RT");
// // right top point
// // ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// // U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// ps_new[C] = ps_shared_new[place - 2- 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2- 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2- 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((i == fnx - 1)){
// if(j == 0){
// // printf("we are at RB");
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
// if ((i == 0)){
// if(j == fny - 1){
// // printf("we are at LT");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
// }
}
// psi & phi equation: two dimensions
// merge set BC func into this func
__global__ void
rhs_psi_U_shared_mem_merge(float* ps, float* ph, float* U, float* ps_new, float* ph_new, float* U_new, \
float* y, float* dpsi, int fnx, int fny, int nt, int num_block_x, int num_block_y){
// each CUDA thread block computes one tile (32*32)
// memory access is from global and also not contiguous, so it cannot reach the max bandwidth (no memory coalescing)
// add a shared memory version to store the neighbours' data: ps and ph
// declare shared memory for time tiling
// we have extra (nx+2)*(ny+2) space to load
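// overall flow of this merged kernel: load a BLOCK_DIM_X x BLOCK_DIM_Y tile, advance
// psi/phi/dpsi on the one-cell-shrunk interior, refresh the tile-local copies of the
// domain boundary, then advance U on the two-cell-shrunk interior (U needs the fresh
// phi/dpsi of its neighbours), and finally write the results and boundary lines back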
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x_p = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y_p = BLOCK_DIM_Y - halo_top - halo_bottom;
// the real block width/height shrinks further because U is updated from the freshly computed phi and dpsi
int real_block_x = BLOCK_DIM_X - 2*halo_left - 2*halo_right;
int real_block_y = BLOCK_DIM_Y - 2*halo_top - 2*halo_bottom;
// load old data
__shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// write data into new array and update at last
__shared__ float ps_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared_new[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
// each thread --> one data in one enlarged block
int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
// obtain the block id in shrinked region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
// add 1 as we count blocks in the inner region; - halo_left*2 as we have two halo layers in this block
int data_addr_x = block_addr_x + 1 - halo_left*2 + local_id_x;
int data_addr_y = block_addr_y + 1 - halo_bottom*2 + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
// initialize data into shared mem
ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
ps_shared_new[local_id] = ps[C];
ph_shared_new[local_id] = ph[C];
U_shared_new[local_id] = U[C];
dpsi_shared_new[local_id] = dpsi[C];
__syncthreads();
// return if the id exceeds the true region
if ((i > fnx - 1) || (j > fny - 1)) {return;}
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// compute based on the shared memory, skip if we are at the boundary
// step1: udpate phi, psi and dpsi
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-halo_right) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-halo_top)) {
// find the indices of the 8 neighbors for center
// if (C==1000){printf("find");}
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
// if (C==1001){
// printf("detailed check of neighbours\n");
// printf("R: %d ; L:%d ; T: %d ; B: %d \n", R, L, T, B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T], ps_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", ps_shared[R], ps_shared[L], ps_shared[T+1], ps_shared[B]);
// }
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ps's are defined on cell centers
float psipjp=( ps_shared[place] + ps_shared[R] + ps_shared[T] + ps_shared[T+1] ) * 0.25f;
float psipjm=( ps_shared[place] + ps_shared[R] + ps_shared[B] + ps_shared[B+1] ) * 0.25f;
float psimjp=( ps_shared[place] + ps_shared[L] + ps_shared[T-1] + ps_shared[T] ) * 0.25f;
float psimjm=( ps_shared[place] + ps_shared[L] + ps_shared[B-1] + ps_shared[B] ) * 0.25f;
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 2\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", psipjp, psipjm, psimjp, psimjm);
// }
// ============================
// right edge flux
// ============================
float psx = ps_shared[R]-ps_shared[place];
float psz = psipjp - psipjm;
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float A = atheta( phx,phz);
float Ap = aptheta(phx,phz);
float JR = A * ( A*psx - Ap*psz );
// ============================
// left edge flux
// ============================
psx = ps_shared[place]-ps_shared[L];
psz = psimjp - psimjm;
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JL = A * ( A*psx - Ap*psz );
// ============================
// top edge flux
// ============================
psx = psipjp - psimjp;
psz = ps_shared[T]-ps_shared[place];
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JT = A * ( A*psz + Ap*psx );
// ============================
// bottom edge flux
// ============================
psx = psipjm - psimjm;
psz = ps_shared[place]-ps_shared[B];
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
A = atheta( phx,phz);
Ap = aptheta(phx,phz);
float JB = A * ( A*psz + Ap*psx );
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", JR, JL, JT, JB);
// }
/*# =============================================================
#
# 2. EXTRA TERM: sqrt2 * atheta**2 * phi * |grad psi|^2
#
# =============================================================
# d(phi)/dx d(psi)/dx d(phi)/dz d(psi)/dz at nodes (i,j)*/
float phxn = ( ph_shared[R] - ph_shared[L] ) * 0.5f;
float phzn = ( ph_shared[T] - ph_shared[B] ) * 0.5f;
float psxn = ( ps_shared[R] - ps_shared[L] ) * 0.5f;
float pszn = ( ps_shared[T] - ps_shared[B] ) * 0.5f;
float A2 = atheta(phxn,phzn);
A2 = A2*A2;
float gradps2 = (psxn)*(psxn) + (pszn)*(pszn);
float extra = -cP.sqrt2 * A2 * ph_shared[place] * gradps2;
/*# =============================================================
#
# 3. double well (transformed): sqrt2 * phi + nonlinear terms
#
# =============================================================*/
float Up = (y[j]/cP.W0 - cP.R_tilde * (nt*cP.dt) )/cP.lT_tilde;
float rhs_psi = ((JR-JL) + (JT-JB) + extra) * cP.hi*cP.hi + \
cP.sqrt2*ph_shared[place] - cP.lamd*(1.0f-ph_shared[place]*ph_shared[place])*cP.sqrt2*(U_shared[place] + Up);
/*# =============================================================
#
# 4. dpsi/dt term
#
# =============================================================*/
float tp = (1.0f-(1.0f-cP.k)*Up);
float tau_psi;
if (tp >= cP.k){tau_psi = tp*A2;}
else {tau_psi = cP.k*A2;}
dpsi_shared_new[place] = rhs_psi / tau_psi;
ps_shared_new[place] = ps_shared[place] + cP.dt * dpsi_shared_new[place];
ph_shared_new[place] = tanhf(ps_shared_new[place]/cP.sqrt2);
// if (C==1000){printf("%f ",ph_new[C]);}
// if (C == 1000) printf("check data %f\n", ps_shared[local_id]);
// if (C == 137) {
// printf("check data ps: %f and ph: %f and dpsi: %f and U: %f\n", ps_new[C], ph_new[C], dpsi[C], U[C]);
// // printf("block id %d ; local_id_x %d; local_id_y %d\n", block_id, local_id_x, local_id_y);
// // printf("block id %d ; data_addr_x %d; data_addr_y %d\n", block_id, data_addr_x, data_addr_y);
// }
}
}
__syncthreads();
// // need update BC of ps ph dpsi and U
// // bottom line
if ((j == 0) && (i < fnx)){
// if(i == 0){
// // left bottom point
// ps_new[C] = ps_shared_new[place + 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0) && (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
// cut corners
ps_shared_new[place] = ps_shared_new[place + 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place + 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place + 2*BLOCK_DIM_X];
// ps_new[C] = ps_shared_new[place + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2*BLOCK_DIM_X];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
// printf("we are here");
// if(i == fnx - 1){
// // printf("we are here");
// // right top point
// ps_new[C] = ps_shared_new[place - 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 - 2*BLOCK_DIM_X];
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2 - 2*fnx]);
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C - 2]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if (((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1))||(i == 0) || (i == fnx - 1)){
ps_shared_new[place] = ps_shared_new[place - 2*BLOCK_DIM_X];
ph_shared_new[place] = ph_shared_new[place - 2*BLOCK_DIM_X];
dpsi_shared_new[place] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
U_shared[place] = U_shared[place - 2*BLOCK_DIM_X];
// cut corners
// ps_new[C] = ps_shared_new[place - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2*BLOCK_DIM_X];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
// if(j == fny - 1){
// // printf("we are here");
// // left top point
// ps_new[C] = ps_shared_new[place + 2 - 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place + 2 - 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place + 2 - 2*BLOCK_DIM_X];
// U[C] = U_shared[place + 2 - 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
// printf("hey man\n");
ps_shared_new[place] = ps_shared_new[place + 2];
ph_shared_new[place] = ph_shared_new[place + 2];
dpsi_shared_new[place] = dpsi_shared_new[place + 2];
U_shared[place] = U_shared[place + 2];
// ps_new[C] = ps_shared_new[place + 2];
// ph_new[C] = ph_shared_new[place + 2];
// dpsi[C] = dpsi_shared_new[place + 2];
// U[C] = U_shared[place + 2];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// if(j == 0){
// // right bottom point
// ps_new[C] = ps_shared_new[place - 2 + 2*BLOCK_DIM_X];
// ph_new[C] = ph_shared_new[place - 2 + 2*BLOCK_DIM_X];
// dpsi[C] = dpsi_shared_new[place - 2 + 2*BLOCK_DIM_X];
// U[C] = U_shared[place - 2 + 2*BLOCK_DIM_X];
// // printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// // printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// // printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// // printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
if ((local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)||(j == 0) || (j == fny - 1)){
ps_shared_new[place] = ps_shared_new[place - 2];
ph_shared_new[place] = ph_shared_new[place - 2];
dpsi_shared_new[place] = dpsi_shared_new[place - 2];
U_shared[place] = U_shared[place - 2];
// cut corners
// ps_new[C] = ps_shared_new[place - 2];
// ph_new[C] = ph_shared_new[place - 2];
// dpsi[C] = dpsi_shared_new[place - 2];
// U[C] = U_shared[place - 2];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// update U
// if the points are at boundary, return
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// only update the inner region
if ((local_id_x>halo_left*2-1)&& (local_id_x<BLOCK_DIM_X-halo_right*2) && (local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2)) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared_new[place] + ph_shared_new[R] + ph_shared_new[T] + ph_shared_new[T+1] ) * 0.25f;
float phipjm=( ph_shared_new[place] + ph_shared_new[R] + ph_shared_new[B] + ph_shared_new[B+1] ) * 0.25f;
float phimjp=( ph_shared_new[place] + ph_shared_new[L] + ph_shared_new[T-1] + ph_shared_new[T] ) * 0.25f;
float phimjm=( ph_shared_new[place] + ph_shared_new[L] + ph_shared_new[B-1] + ph_shared_new[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared_new[place]*ph_shared_new[place])*dpsi_shared_new[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared_new[R]-ph_shared_new[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared_new[R]*ph_shared_new[R])*dpsi_shared_new[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared_new[place]-ph_shared_new[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared_new[L]*ph_shared_new[L])*dpsi_shared_new[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared_new[T]-ph_shared_new[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared_new[T]*ph_shared_new[T])*dpsi_shared_new[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared_new[place]-ph_shared_new[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared_new[B]*ph_shared_new[B])*dpsi_shared_new[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared_new[place] - ph_shared_new[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared_new[place];
U_shared_new[place] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
}
}
// at last write back
// need to write back ps, ph, dpsi and U
// core data can be saved back safely
// but BC data need to be very careful
// write the core data back
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>halo_left*2-1)&& (local_id_x<BLOCK_DIM_X-halo_right*2) && (local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2)) {
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared_new[place];
}
}
__syncthreads();
// need to update the BC
// directly writing back the BC data from shared_mem_new is ok
// as we already updated the BC above
// bottom line
if ((j == 0) && (i < fnx)){
if (((local_id_x>halo_left*2-1) && (local_id_x<BLOCK_DIM_X-halo_right*2))||(i == 0) || (i == fnx - 1)){
// printf("hey man\n");
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// if (i == 130){
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
// }
}
}
// up line
if ((j == fny - 1)&& (i < fnx)){
if (((local_id_x>halo_left*2-1) && (local_id_x<BLOCK_DIM_X-halo_right*2))||(i == 0) || (i == fnx - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// printf("we update up line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
// // left line
if ((i == 0) && (j < fny)){
if (((local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2))||(j == 0) || (j == fny - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
}
}
// right line
if ((i == fnx - 1) && (j < fny)){
// printf("hey man\n");
if (((local_id_y>halo_bottom*2-1) && (local_id_y<BLOCK_DIM_Y-halo_top*2))||(j == 0) || (j == fny - 1)){
ps_new[C] = ps_shared_new[place];
ph_new[C] = ph_shared_new[place];
dpsi[C] = dpsi_shared_new[place];
U_new[C] = U_shared[place];
// printf("we update right line at %d\n", C);
// printf("check global addr(%d) data ps at local id: %d is %f\n", C, place, ps_new[C]);
// printf("check global addr(%d) data ph at local id: %d is %f\n", C, place, ph_new[C]);
// printf("check global addr(%d) data U at local id: %d is %f\n", C, place, U[C]);
// printf("check global addr(%d) data dpsi at local id: %d is %f\n", C, place, dpsi[C]);
}
}
__syncthreads();
}
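// Illustrative sketch (hypothetical helper; the actual *_merge launch parameters are
// computed on the host before the time loop): because U consumes the freshly updated
// phi/dpsi of its neighbours, this kernel carries a two-cell halo per side, so each
// block only commits (BLOCK_DIM_X-4)*(BLOCK_DIM_Y-4) interior points and the grid
// must be sized accordingly, e.g. num_block_x_merge = num_blocks_double_halo_sketch(fnx, BLOCK_DIM_X).
static inline int num_blocks_double_halo_sketch(int fn, int block_dim){
    int real = block_dim - 4;           // minus two halo layers on each side pair
    return (fn - 2 + real - 1) / real;  // ceil((fn-2)/real) over the interior grid
}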
// U equation
// shared mem version, but each thread processes a real update point, so we need extra loads for the halo region data
__global__ void
rhs_U_shared_mem_ex(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny, int num_block_x, int num_block_y){
// we have halo region
// __shared__ float ps_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float ph_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float U_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y+HALO_TOP+HALO_BOTTOM)*(BLOCK_DIM_X+HALO_LEFT+HALO_RIGHT)];
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X;
int real_block_y = BLOCK_DIM_Y;
// each thread --> one data in one enlarged block
int local_id = threadIdx.x; //local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X + halo_left;
int local_id_y = local_id / BLOCK_DIM_X + halo_bottom;
// obtain the block id in shrinked region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
// add 1 as we count blocks in the inner region; - halo_left as we have a halo region in this block
int data_addr_x = block_addr_x + 1 + local_id_x - halo_left;
int data_addr_y = block_addr_y + 1 + local_id_y - halo_bottom;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
int j = data_addr_y;
int i = data_addr_x;
// update data into shared mem
int place = local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x;
ph_shared[place] = ph[C];
U_shared[place] = U[C];
dpsi_shared[place] = dpsi[C];
__syncthreads();
// fetch the BC datapoint
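// (the first thread column of the tile pulls in both the left and right halo columns,
// the first thread row pulls in the bottom and top halo rows, and the single thread
// with local ids (1,1) fills the four corner cells)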
if (local_id_x < 2){
ph_shared[place - halo_left] = ph[C-halo_left]; //left vertical line
U_shared[place - halo_left] = U[C-halo_left];
dpsi_shared[place - halo_left] = dpsi[C-halo_left];
ph_shared[place + real_block_x + halo_left - 1] = ph[C+ real_block_x + halo_left - 1]; //right vertical line
U_shared[place + real_block_x + halo_left - 1] = U[C+ real_block_x + halo_left - 1];
dpsi_shared[place + real_block_x + halo_left - 1] = dpsi[C+ real_block_x + halo_left - 1];
}
if (local_id_y < 2){
ph_shared[place - (real_block_x+halo_left+halo_right)] = ph[C-fnx]; //bottom horizontal line
U_shared[place - (real_block_x+halo_left+halo_right)] = U[C-fnx];
dpsi_shared[place - (real_block_x+halo_left+halo_right)] = dpsi[C-fnx];
ph_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = ph[C+ fnx*(real_block_y + halo_top - 1)]; //top horizontal line
U_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = U[C+ fnx*(real_block_y + halo_top - 1)];
dpsi_shared[place + (real_block_x+halo_left+halo_right)*(real_block_y + halo_top - 1)] = dpsi[C+ fnx*(real_block_y + halo_top - 1)];
}
// corners
if ((local_id_x < 2) && (local_id_y < 2)){
// update four corners
ph_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = ph[i - 1 + (j-1) *fnx];
U_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = U[i - 1 + (j-1) *fnx];
dpsi_shared[(local_id_y - 1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = dpsi[i - 1 + (j-1) *fnx];
ph_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = ph[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
U_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = U[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
dpsi_shared[local_id_y * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = dpsi[i - 1 + (j-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
ph_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = ph[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
U_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = U[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
dpsi_shared[(local_id_y + BLOCK_DIM_Y+halo_top-1) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 1] = dpsi[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx];
ph_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = ph[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
U_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = U[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
dpsi_shared[(local_id_y + BLOCK_DIM_Y+halo_top) * (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT) + local_id_x - 2] = dpsi[i - 1 + (j+BLOCK_DIM_Y+halo_top-1) *fnx + (BLOCK_DIM_X+HALO_RIGHT+HALO_LEFT - 1)];
}
__syncthreads();
// if (C==137){
// printf("check pre-loaded data\n");
// printf("local_id_x: %d, local_id_y: %d\n", local_id_x, local_id_y);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph[C], U[C], dpsi[C]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[6], U_shared[6], dpsi_shared[6]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[0], U_shared[0], dpsi_shared[0]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[real_block_x+halo_left+halo_right-1], U_shared[real_block_x+halo_left+halo_right-1], dpsi_shared[real_block_x+halo_left+halo_right-1]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)], U_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)], dpsi_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom)]);
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1], U_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1], dpsi_shared[(real_block_x+halo_left+halo_right)*(real_block_y+halo_bottom+halo_top) - 1]);
// }
// boundary points are skipped; only interior points are updated
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X+halo_left+halo_right;
int B=place-(BLOCK_DIM_X+halo_left+halo_right);
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==137){
// printf("detailed check of neighbours 3\n");
// // print("place: %d\n", B);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", U_shared[R], U_shared[L], U_shared[T], U_shared[B]);
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", dpsi_shared[R], dpsi_shared[L], dpsi_shared[T], dpsi_shared[B]);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared[place]*ph_shared[place])*dpsi_shared[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared[R]*ph_shared[R])*dpsi_shared[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared[L]*ph_shared[L])*dpsi_shared[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared[T]*ph_shared[T])*dpsi_shared[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared[B]*ph_shared[B])*dpsi_shared[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
// if (C==137){
// printf("detailed check of neighbours 4\n");
// printf("UR: %f ; UL:%f ; UT: %f ; UB: %f \n", UR, UL, UT, UB);
// }
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared[place];
U_new[C] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
// if (C==137){
// printf("detailed check of neighbours 3\n");
// printf("U: %f \n", U_new[C]);
// }
}
}
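// ---------------------------------------------------------------------------
// Illustrative sketch (editor addition, not part of the original kernels): the
// shared-memory tiles above are indexed row-major over a halo-padded block of
// width BLOCK_DIM_X + HALO_LEFT + HALO_RIGHT. A hypothetical helper capturing
// that convention (local_id_x/local_id_y already include the halo offsets):
__device__ __forceinline__ int halo_tile_index(int local_id_x, int local_id_y){
  return local_id_y * (BLOCK_DIM_X + HALO_RIGHT + HALO_LEFT) + local_id_x;
}
// ---------------------------------------------------------------------------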
// U equation
__global__ void
rhs_U_shared_mem(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny, int num_block_x, int num_block_y){
// __shared__ float ps_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float ph_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float U_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
__shared__ float dpsi_shared[(BLOCK_DIM_Y)*(BLOCK_DIM_X)];
int halo_left = 1;
int halo_right = 1;
int halo_top = 1;
int halo_bottom = 1;
int real_block_x = BLOCK_DIM_X - halo_left - halo_right;
int real_block_y = BLOCK_DIM_Y - halo_top - halo_bottom;
// each thread --> one data point in one enlarged block
int local_id = threadIdx.x; // local id -> save to shared mem
int local_id_x = local_id % BLOCK_DIM_X;
int local_id_y = local_id / BLOCK_DIM_X;
// obtain the block id in the shrunken (interior) region
int block_id = blockIdx.x; // 0~num_block_x*num_block_y
int block_id_x = block_id % num_block_x;
int block_id_y = block_id / num_block_x;
// this is the addr in inner region without considering the BC
int block_addr_x = block_id_x * real_block_x;
int block_addr_y = block_id_y * real_block_y;
// find the addr of data in global memory
// add 1 as we count blocks over the inner region; subtract halo_left as this block includes a halo region
int data_addr_x = block_addr_x + 1 + local_id_x - halo_left;
int data_addr_y = block_addr_y + 1 - halo_bottom + local_id_y;
int C= data_addr_y * fnx + data_addr_x; // place in global memory
// int j=C/fnx;
// int i=C-j*fnx;
int j = data_addr_y;
int i = data_addr_x;
// update data
// ps_shared[local_id] = ps[C];
ph_shared[local_id] = ph[C];
U_shared[local_id] = U[C];
dpsi_shared[local_id] = dpsi[C];
__syncthreads();
if ((i > fnx - 1) || (j > fny - 1)) {return;}
// if (C==1001){
// printf("check pre-loaded data\n");
// printf("ph: %f ; u:%f ; dpsi: %f\n", ph[C], U[C], dpsi[C]);
// }
int place = local_id_y * BLOCK_DIM_X + local_id_x;
// boundary points are skipped; only interior points are updated
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
if ((local_id_x>0)&& (local_id_x<BLOCK_DIM_X-1) && (local_id_y>0) && (local_id_y<BLOCK_DIM_Y-1)) {
// find the indices of the 8 neighbors for center
int R=place+1;
int L=place-1;
int T=place+BLOCK_DIM_X;
int B=place-BLOCK_DIM_X;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx, nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph_shared[place] + ph_shared[R] + ph_shared[T] + ph_shared[T+1] ) * 0.25f;
float phipjm=( ph_shared[place] + ph_shared[R] + ph_shared[B] + ph_shared[B+1] ) * 0.25f;
float phimjp=( ph_shared[place] + ph_shared[L] + ph_shared[T-1] + ph_shared[T] ) * 0.25f;
float phimjm=( ph_shared[place] + ph_shared[L] + ph_shared[B-1] + ph_shared[B] ) * 0.25f;
// if (C==1001){
// printf("detailed check of neighbours 3\n");
// printf("R: %f ; L:%f ; T: %f ; B: %f \n", phipjp, phipjm, phimjp, phimjm);
// }
float jat = 0.5f*(1.0f+(1.0f-k)*U_shared[place])*(1.0f-ph_shared[place]*ph_shared[place])*dpsi_shared[place];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph_shared[R]-ph_shared[place];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U_shared[R])*(1.0f-ph_shared[R]*ph_shared[R])*dpsi_shared[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[R])*(U_shared[R]-U_shared[place]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph_shared[place]-ph_shared[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U_shared[L])*(1.0f-ph_shared[L]*ph_shared[L])*dpsi_shared[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[L])*(U_shared[place]-U_shared[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph_shared[T]-ph_shared[place];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U_shared[T])*(1.0f-ph_shared[T]*ph_shared[T])*dpsi_shared[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[T])*(U_shared[T]-U_shared[place]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph_shared[place]-ph_shared[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U_shared[B])*(1.0f-ph_shared[B]*ph_shared[B])*dpsi_shared[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph_shared[place] - ph_shared[B])*(U_shared[place]-U_shared[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph_shared[place];
U_new[C] = U_shared[place] + cP.dt * ( rhs_U / tau_U );
}
}
}
// U equation
__global__ void
rhs_U_ori(float* U, float* U_new, float* ph, float* dpsi, int fnx, int fny ){
int C = blockIdx.x * blockDim.x + threadIdx.x;
int j=C/fnx;
int i=C-j*fnx;
// boundary points are skipped; only interior points are updated
if ( (i>0) && (i<fnx-1) && (j>0) && (j<fny-1) ) {
// find the indices of the 8 neighbors for center
int R=C+1;
int L=C-1;
int T=C+fnx;
int B=C-fnx;
float hi = cP.hi;
float Dl_tilde = cP.Dl_tilde;
float k = cP.k;
float nx,nz;
float eps = cP.eps;
// =============================================================
// 1. ANISOTROPIC DIFFUSION
// =============================================================
// these ph's are defined on cell centers
float phipjp=( ph[C] + ph[R] + ph[T] + ph[T+1] ) * 0.25f;
float phipjm=( ph[C] + ph[R] + ph[B] + ph[B+1] ) * 0.25f;
float phimjp=( ph[C] + ph[L] + ph[T-1] + ph[T] ) * 0.25f;
float phimjm=( ph[C] + ph[L] + ph[B-1] + ph[B] ) * 0.25f;
float jat = 0.5f*(1.0f+(1.0f-k)*U[C])*(1.0f-ph[C]*ph[C])*dpsi[C];
/*# ============================
# right edge flux (i+1/2, j)
# ============================*/
float phx = ph[R]-ph[C];
float phz = phipjp - phipjm;
float phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_ip = 0.5f*(1.0f+(1.0f-k)*U[R])*(1.0f-ph[R]*ph[R])*dpsi[R];
float UR = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[R])*(U[R]-U[C]) + 0.5f*(jat + jat_ip)*nx;
/* ============================
# left edge flux (i-1/2, j)
# ============================*/
phx = ph[C]-ph[L];
phz = phimjp - phimjm;
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nx = phx / sqrtf(phn2);}
else {nx = 0.0f;}
float jat_im = 0.5f*(1.0f+(1.0f-k)*U[L])*(1.0f-ph[L]*ph[L])*dpsi[L];
float UL = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[L])*(U[C]-U[L]) + 0.5f*(jat + jat_im)*nx;
/*# ============================
# top edge flux (i, j+1/2)
# ============================*/
phx = phipjp - phimjp;
phz = ph[T]-ph[C];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jp = 0.5f*(1.0f+(1.0f-k)*U[T])*(1.0f-ph[T]*ph[T])*dpsi[T];
float UT = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[T])*(U[T]-U[C]) + 0.5f*(jat + jat_jp)*nz;
/*# ============================
# bottom edge flux (i, j-1/2)
# ============================*/
phx = phipjm - phimjm;
phz = ph[C]-ph[B];
phn2 = phx*phx + phz*phz;
if (phn2 > eps) {nz = phz / sqrtf(phn2);}
else {nz = 0.0f;}
float jat_jm = 0.5f*(1.0f+(1.0f-k)*U[B])*(1.0f-ph[B]*ph[B])*dpsi[B];
float UB = hi*Dl_tilde*0.5f*(2.0f - ph[C] - ph[B])*(U[C]-U[B]) + 0.5f*(jat + jat_jm)*nz;
float rhs_U = ( (UR-UL) + (UT-UB) ) * hi + cP.sqrt2 * jat;
float tau_U = (1.0f+cP.k) - (1.0f-cP.k)*ph[C];
U_new[C] = U[C] + cP.dt * ( rhs_U / tau_U );
}
}
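// Illustrative sketch (editor addition): the eps-guarded unit-normal component repeated in
// every edge-flux block above could be factored into a helper such as:
__device__ __forceinline__ float eps_guarded_normal(float comp, float phn2, float eps){
  return (phn2 > eps) ? comp / sqrtf(phn2) : 0.0f;
}
// e.g. nx = eps_guarded_normal(phx, phn2, eps); nz = eps_guarded_normal(phz, phn2, eps);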
// Host codes for PF computing
void setup(GlobalConstants params, int fnx, int fny, float* x, float* y, float* phi, float* psi,float* U){
// all the data structures should have been passed in by this time
// move the data onto the device
printCudaInfo();
float* x_device;// = NULL;
float* y_device;// = NULL;
// store two for swap behavior
float* psi_old;// = NULL;
float* psi_new;// = NULL;
float* U_old;// = NULL;
float* U_new;// = NULL;
float* phi_old;// = NULL;
float* phi_new;// = NULL;
float* dpsi;// = NULL;
// allocate x, y, phi, psi, U related params
int length = fnx*fny;
cudaMalloc((void **)&x_device, sizeof(float) * fnx);
cudaMalloc((void **)&y_device, sizeof(float) * fny);
cudaMalloc((void **)&phi_old, sizeof(float) * length);
cudaMalloc((void **)&psi_old, sizeof(float) * length);
cudaMalloc((void **)&U_old, sizeof(float) * length);
cudaMalloc((void **)&phi_new, sizeof(float) * length);
cudaMalloc((void **)&psi_new, sizeof(float) * length);
cudaMalloc((void **)&U_new, sizeof(float) * length);
cudaMalloc((void **)&dpsi, sizeof(float) * length);
float * psi_check = new float[length];
// set initial params
cudaMemcpy(x_device, x, sizeof(float) * fnx, cudaMemcpyHostToDevice);
cudaMemcpy(y_device, y, sizeof(float) * fny, cudaMemcpyHostToDevice);
cudaMemcpy(psi_old, psi, sizeof(float) * length, cudaMemcpyHostToDevice);
cudaMemcpy(phi_old, phi, sizeof(float) * length, cudaMemcpyHostToDevice);
cudaMemcpy(U_old, U, sizeof(float) * length, cudaMemcpyHostToDevice);
// pass all the read-only params into global constant
cudaMemcpyToSymbol(cP, ¶ms, sizeof(GlobalConstants));
int blocksize_1d = 128;
int blocksize_2d = 128; // reducing the block size seems to make it a little faster; around 128 is okay.
int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d;
int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d;
printf("nx: %d and ny: %d\n", fnx, fny);
printf("block size %d, # blocks %d\n", blocksize_2d, num_block_2d);
initialize<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, U_new, x_device, y_device, fnx, fny);
set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_new, dpsi, fnx, fny);
set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_old, dpsi, fnx, fny);
cudaDeviceSynchronize();
double startTime = CycleTimer::currentSeconds();
// shrink the 2D block because we don't want to include the halo region
int real_per_block_x = BLOCK_DIM_X - HALO_LEFT - HALO_RIGHT;
int real_per_block_y = BLOCK_DIM_Y - HALO_TOP - HALO_BOTTOM;
int num_block_x = (fnx - 2 + real_per_block_x - 1) / real_per_block_x;
int num_block_y = (fny - 2 + real_per_block_y - 1) / real_per_block_y;
printf("block_x: %d and block_y: %d\n", real_per_block_x, real_per_block_y);
printf("block_x: %d and block_y: %d\n", num_block_x, num_block_y);
int num_block_2d_s = num_block_x * num_block_y; // each block covers a (32-2) x (32-2) true (interior) tile within (fnx-2), (fny-2)
int blocksize_2d_s = BLOCK_DIM_X * BLOCK_DIM_Y; // 32*32: we have to stage a 32*32 data region in shared memory
int real_per_block_x_merge = BLOCK_DIM_X - HALO_LEFT*2 - HALO_RIGHT*2;
int real_per_block_y_merge = BLOCK_DIM_Y - HALO_TOP*2 - HALO_BOTTOM*2;
int num_block_x_merge = (fnx - 2 + real_per_block_x_merge - 1) / real_per_block_x_merge;
int num_block_y_merge = (fny - 2 + real_per_block_y_merge - 1) / real_per_block_y_merge;
printf("real_per_block_x_merge: %d and real_per_block_y_merge: %d\n", real_per_block_x_merge, real_per_block_y_merge);
printf("num_block_x_merge: %d and num_block_y_merge: %d\n", num_block_x, num_block_y_merge);
int num_block_2d_s_merge = num_block_x_merge * num_block_y_merge; // each block covers its true (interior) tile within (fnx-2), (fny-2)
int blocksize_2d_s_merge = BLOCK_DIM_X * BLOCK_DIM_Y; // 32*32: we have to stage a 32*32 data region in shared memory
// use for no halo region
// int real_per_block_x2 = BLOCK_DIM_X;
// int real_per_block_y2 = BLOCK_DIM_Y;
// int num_block_x2 = (fnx - 2 + real_per_block_x2 - 1) / real_per_block_x2;
// int num_block_y2 = (fny - 2 + real_per_block_y2 - 1) / real_per_block_y2;
// printf("block_x: %d and block_y: %d\n", real_per_block_x2, real_per_block_y2);
// printf("block_x: %d and block_y: %d\n", num_block_x2, num_block_y2);
// int num_block_2d_s2 = num_block_x2 * num_block_y2; //each one take one block with (32-2)+ (32-2) ture block within (fnx-2), (fny-2)
// int blocksize_2d_s2 = BLOCK_DIM_X * BLOCK_DIM_Y; // 32*32: as we have to write 32*32 data region into shared memory
for (int kt=0; kt<params.Mt/2; kt++){
// printf("time step %d\n",kt);
// rhs_psi_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x, num_block_y);
// rhs_psi_shared_mem_BC<<< num_block_2d_s, blocksize_2d_s >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x, num_block_y);
// cudaDeviceSynchronize();
// set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_old, dpsi, fnx, fny);
// cudaDeviceSynchronize();
// cudaMemcpy(psi_check, psi_new, sizeof(float) * length, cudaMemcpyDeviceToHost);
// printf("check data at 0+20: %f\n", psi_check[0+20]);
// printf("check data at 130+fnx*2: %f\n", psi_check[130+fnx*2]);
// printf("check data at 67334-fnx*2: %f\n", psi_check[67334-fnx*2]);
// printf("check data at 67464-20: %f\n", psi_check[67464-20]);
// printf("\n");
// printf("Iter %d\n", kt);
// printf("check data at 0: %f\n", psi_check[0]);
// printf("check data at 0+2: %f\n", psi_check[0+2]);
// printf("check data at 0+2fnx*2: %f\n", psi_check[0+2*fnx]);
// printf("check data at 130: %f\n", psi_check[130]);
// printf("check data at 130-2: %f\n", psi_check[130-2]);
// printf("check data at 130+2fnx*2: %f\n", psi_check[130+2*fnx]);
// printf("check data at 67334: %f\n", psi_check[67334]);
// printf("check data at 67334+2: %f\n", psi_check[67334+2]);
// printf("check data at 67334-2fnx*2: %f\n", psi_check[67334-2*fnx]);
// printf("check data at 67464: %f\n", psi_check[67464]);
// printf("check data at 67464-2: %f\n", psi_check[67464-2]);
// printf("check data at 67464-2fnx*2: %f\n", psi_check[67464-2*fnx]);
// printf("\n");
// rhs_U_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(U_old, U_new, phi_new, dpsi, fnx, fny, num_block_x, num_block_y);
// rhs_U_shared_mem_ex<<< num_block_2d_s2, blocksize_2d_s2 >>>(U_old, U_new, phi_new, dpsi, fnx, fny, num_block_x2, num_block_y2);
// cudaDeviceSynchronize();
rhs_psi_U_shared_mem_merge<<< num_block_2d_s_merge, blocksize_2d_s_merge >>>(psi_old, phi_old, U_old, psi_new, phi_new, U_new, y_device, dpsi, fnx, fny, 2*kt, num_block_x_merge, num_block_y_merge);
rhs_psi_U_shared_mem_merge<<< num_block_2d_s_merge, blocksize_2d_s_merge >>>(psi_new, phi_new, U_new, psi_old, phi_old, U_old, y_device, dpsi, fnx, fny, 2*kt, num_block_x_merge, num_block_y_merge);
// rhs_psi_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1, num_block_x, num_block_y);
// // rhs_psi_shared_mem_BC<<< num_block_2d_s, blocksize_2d_s >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1, num_block_x, num_block_y);
// // cudaDeviceSynchronize();
// set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_new, dpsi, fnx, fny);
// // cudaDeviceSynchronize();
// rhs_U_shared_mem<<< num_block_2d_s, blocksize_2d_s >>>(U_new, U_old, phi_old, dpsi, fnx, fny, num_block_x, num_block_y);
// rhs_U_shared_mem_ex<<< num_block_2d_s2, blocksize_2d_s2 >>>(U_new, U_old, phi_old, dpsi, fnx, fny, num_block_x2, num_block_y2);
// cudaDeviceSynchronize();
}
cudaDeviceSynchronize();
double endTime = CycleTimer::currentSeconds();
printf("time for %d iterations: %f s\n", params.Mt, endTime-startTime);
cudaMemcpy(psi, psi_old, length * sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(phi, phi_old, length * sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(U, U_old, length * sizeof(float),cudaMemcpyDeviceToHost);
cudaFree(x_device); cudaFree(y_device);
cudaFree(psi_old); cudaFree(psi_new);
cudaFree(phi_old); cudaFree(phi_new);
cudaFree(U_old); cudaFree(U_new);
cudaFree(dpsi);
}
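// Illustrative sketch (editor addition): the block counts above are ceiling divisions of the
// interior size (fnx-2, fny-2) by the tile size that remains after subtracting the halo.
// A hypothetical helper making that arithmetic explicit:
static inline int num_tiles(int interior_size, int tile_size){
  return (interior_size + tile_size - 1) / tile_size; // ceil(interior_size / tile_size)
}
// e.g. num_block_x_merge == num_tiles(fnx - 2, BLOCK_DIM_X - 2*HALO_LEFT - 2*HALO_RIGHT)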
/*
void time_marching(GlobalConstants params, int fnx, int fny){
// initialize or load
int blocksize_1d = 256;
int blocksize_2d = 512;
int num_block_2d = (fnx*fny+blocksize_2d-1)/blocksize_2d;
int num_block_1d = (fnx+fny+blocksize_1d-1)/blocksize_1d;
initialize<<< num_block_2d, blocksize_2d >>>(ps_old, ph_old, U_old, ps_new, ph_new, U_new, x_device, y_device, fnx, fny);
for (int kt=0; kt<params.Mt/2; kt++){
rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_old, phi_old, U_old, psi_new, phi_new, y_device, dpsi, fnx, fny, 2*kt );
set_BC<<< num_block_1d, blocksize_1d >>>(psi_new, phi_new, U_old, dpsi, fnx, fny);
rhs_U<<< num_block_2d, blocksize_2d >>>(U_old, U_new, phi_new, dpsi);
rhs_psi<<< num_block_2d, blocksize_2d >>>(psi_new, phi_new, U_new, psi_old, phi_old, y_device, dpsi, fnx, fny, 2*kt+1 );
set_BC<<< num_block_1d, blocksize_1d >>>(psi_old, phi_old, U_new, dpsi, fnx, fny);
rhs_U<<< num_block_2d, blocksize_2d >>>(U_new, U_old, phi_old, dpsi);
}
}*/
void printCudaInfo()
{
// for fun, just print out some stats on the machine
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
printf("---------------------------------------------------------\n");
printf("Found %d CUDA devices\n", deviceCount);
for (int i=0; i<deviceCount; i++)
{
cudaDeviceProp deviceProps;
cudaGetDeviceProperties(&deviceProps, i);
printf("Device %d: %s\n", i, deviceProps.name);
printf(" SMs: %d\n", deviceProps.multiProcessorCount);
printf(" Global mem: %.0f MB\n",
static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024));
printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor);
}
printf("---------------------------------------------------------\n");
}
|
ee3c6a8ef6227aa21d41c62d6518725e7ad4748a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void initBore_select_gpu( float *values, const float *center,
const float *x0,
const float *Hl,
const float *ul,
const float *vl,
const float *Hr,
const float *ur,
const float *vr) {
values[0] = center[0] < *x0 ? *Hl : *Hr;
values[1] = center[0] < *x0 ? *ul : *ur;
values[2] = center[0] < *x0 ? *vl : *vr;
}
// CUDA kernel function
__global__ void op_cuda_initBore_select(
float *arg0,
const float *__restrict arg1,
const float *arg2,
const float *arg3,
const float *arg4,
const float *arg5,
const float *arg6,
const float *arg7,
const float *arg8,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initBore_select_gpu(arg0+n*4,
arg1+n*2,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
arg8);
}
}
//host stub function
void op_par_loop_initBore_select(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8){
float*arg2h = (float *)arg2.data;
float*arg3h = (float *)arg3.data;
float*arg4h = (float *)arg4.data;
float*arg5h = (float *)arg5.data;
float*arg6h = (float *)arg6.data;
float*arg7h = (float *)arg7.data;
float*arg8h = (float *)arg8.data;
int nargs = 9;
op_arg args[9];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(14);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[14].name = name;
OP_kernels[14].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initBore_select");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d] = arg4h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg5.data = OP_consts_h + consts_bytes;
arg5.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg5.data)[d] = arg5h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg6.data = OP_consts_h + consts_bytes;
arg6.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg6.data)[d] = arg6h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg7.data = OP_consts_h + consts_bytes;
arg7.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg7.data)[d] = arg7h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg8.data = OP_consts_h + consts_bytes;
arg8.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg8.data)[d] = arg8h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_14
int nthread = OP_BLOCK_SIZE_14;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_initBore_select), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
(float *) arg5.data_d,
(float *) arg6.data_d,
(float *) arg7.data_d,
(float *) arg8.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[14].time += wall_t2 - wall_t1;
OP_kernels[14].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[14].transfer += (float)set->size * arg1.size;
}
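// Illustrative sketch (editor addition): the consts_bytes bookkeeping above places each global
// constant at a ROUND_UP-aligned offset inside OP_consts_h/OP_consts_d before a single
// mvConstArraysToDevice() transfer. A hypothetical helper expressing one step of that packing:
static inline int next_const_offset(int offset, int dim){
  return offset + ROUND_UP(dim * sizeof(float)); // next aligned slot in the staging buffer
}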
| ee3c6a8ef6227aa21d41c62d6518725e7ad4748a.cu | //
// auto-generated by op2.py
//
//user function
__device__ void initBore_select_gpu( float *values, const float *center,
const float *x0,
const float *Hl,
const float *ul,
const float *vl,
const float *Hr,
const float *ur,
const float *vr) {
values[0] = center[0] < *x0 ? *Hl : *Hr;
values[1] = center[0] < *x0 ? *ul : *ur;
values[2] = center[0] < *x0 ? *vl : *vr;
}
// CUDA kernel function
__global__ void op_cuda_initBore_select(
float *arg0,
const float *__restrict arg1,
const float *arg2,
const float *arg3,
const float *arg4,
const float *arg5,
const float *arg6,
const float *arg7,
const float *arg8,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
initBore_select_gpu(arg0+n*4,
arg1+n*2,
arg2,
arg3,
arg4,
arg5,
arg6,
arg7,
arg8);
}
}
//host stub function
void op_par_loop_initBore_select(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8){
float*arg2h = (float *)arg2.data;
float*arg3h = (float *)arg3.data;
float*arg4h = (float *)arg4.data;
float*arg5h = (float *)arg5.data;
float*arg6h = (float *)arg6.data;
float*arg7h = (float *)arg7.data;
float*arg8h = (float *)arg8.data;
int nargs = 9;
op_arg args[9];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(14);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[14].name = name;
OP_kernels[14].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: initBore_select");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//transfer constants to GPU
int consts_bytes = 0;
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
consts_bytes += ROUND_UP(1*sizeof(float));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OP_consts_h + consts_bytes;
arg2.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg2.data)[d] = arg2h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg3.data = OP_consts_h + consts_bytes;
arg3.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg3.data)[d] = arg3h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg4.data = OP_consts_h + consts_bytes;
arg4.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d] = arg4h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg5.data = OP_consts_h + consts_bytes;
arg5.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg5.data)[d] = arg5h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg6.data = OP_consts_h + consts_bytes;
arg6.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg6.data)[d] = arg6h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg7.data = OP_consts_h + consts_bytes;
arg7.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg7.data)[d] = arg7h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
arg8.data = OP_consts_h + consts_bytes;
arg8.data_d = OP_consts_d + consts_bytes;
for ( int d=0; d<1; d++ ){
((float *)arg8.data)[d] = arg8h[d];
}
consts_bytes += ROUND_UP(1*sizeof(float));
mvConstArraysToDevice(consts_bytes);
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_14
int nthread = OP_BLOCK_SIZE_14;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
op_cuda_initBore_select<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
(float *) arg5.data_d,
(float *) arg6.data_d,
(float *) arg7.data_d,
(float *) arg8.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[14].time += wall_t2 - wall_t1;
OP_kernels[14].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[14].transfer += (float)set->size * arg1.size;
}
|
9fac7191835ca998401736ed3e3b55e0aeee9280.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------------
CUDA C extension for Python
This extension module provides auxiliary functionality for list-mode data
processing, generating look-up tables for image reconstruction.
author: Pawel Markiewicz
Copyrights: 2018
----------------------------------------------------------------------*/
#define PY_SSIZE_T_CLEAN
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION
#include "auxmath.h"
#include "def.h"
#include "norm.h"
#include "scanner_0.h"
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>
//=== START PYTHON INIT ===
//--- Available functions
static PyObject *mmr_norm(PyObject *self, PyObject *args);
static PyObject *mmr_span11LUT(PyObject *self, PyObject *args);
static PyObject *mmr_pgaps(PyObject *self, PyObject *args);
static PyObject *mmr_rgaps(PyObject *self, PyObject *args);
static PyObject *aux_varon(PyObject *self, PyObject *args);
//---
//> Module Method Table
static PyMethodDef mmr_auxe_methods[] = {
{"norm", mmr_norm, METH_VARARGS,
"Create 3D normalisation sinograms from provided normalisation components."},
{"s1s11", mmr_span11LUT, METH_VARARGS, "Create span-1 to span-11 look up table."},
{"pgaps", mmr_pgaps, METH_VARARGS,
"Create span-11 Siemens compatible sinograms by inserting gaps into the GPU-optimised "
"sinograms in span-11."},
{"rgaps", mmr_rgaps, METH_VARARGS,
"Create span-11 GPU-optimised sinograms by removing the gaps in Siemens-compatible sinograms "
"in span-11"},
{"varon", aux_varon, METH_VARARGS, "Calculate variance online for the provided vector."},
{NULL, NULL, 0, NULL} // Sentinel
};
//> Module Definition Structure
static struct PyModuleDef mmr_auxe_module = {
PyModuleDef_HEAD_INIT,
//> name of module
"mmr_auxe",
//> module documentation, may be NULL
"Initialisation and basic processing routines for the Siemens Biograph mMR.",
//> the module keeps state in global variables.
-1,
mmr_auxe_methods};
//> Initialization function
PyMODINIT_FUNC PyInit_mmr_auxe(void) {
Py_Initialize();
//> load NumPy functionality
import_array();
return PyModule_Create(&mmr_auxe_module);
}
//=== END PYTHON INIT ===
//==============================================================================
//==============================================================================
// N O R M A L I S A T I O N (component based)
//------------------------------------------------------------------------------
static PyObject *mmr_norm(PyObject *self, PyObject *args) {
// Structure of constants
Cnst Cnt;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// structure of norm C arrays (defined in norm.h).
NormCmp normc;
// structure of axial LUTs in C arrays (defined in norm.h).
axialLUT axLUT;
// Output norm sino
PyObject *o_sino = NULL;
// normalisation component dictionary.
PyObject *o_norm_cmp;
// axial LUT dictionary; contains LUTs: li2rno, li2sn, li2nos.
PyObject *o_axLUT;
// 2D sino index LUT (dead bins are out).
PyObject *o_aw2ali = NULL;
// singles buckets for dead time correction
PyObject *o_bckts = NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOO", &o_sino, &o_norm_cmp, &o_bckts, &o_axLUT, &o_aw2ali,
&o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as numpy arrays. */
// norm components:
PyObject *pd_geo = PyDict_GetItemString(o_norm_cmp, "geo");
PyObject *pd_cinf = PyDict_GetItemString(o_norm_cmp, "cinf");
PyObject *pd_ceff = PyDict_GetItemString(o_norm_cmp, "ceff");
PyObject *pd_axe1 = PyDict_GetItemString(o_norm_cmp, "axe1");
PyObject *pd_dtp = PyDict_GetItemString(o_norm_cmp, "dtp");
PyObject *pd_dtnp = PyDict_GetItemString(o_norm_cmp, "dtnp");
PyObject *pd_dtc = PyDict_GetItemString(o_norm_cmp, "dtc");
PyObject *pd_axe2 = PyDict_GetItemString(o_norm_cmp, "axe2");
PyObject *pd_axf1 = PyDict_GetItemString(o_norm_cmp, "axf1");
// axial LUTs:
PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject *pd_sn1sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11");
PyObject *pd_sn1rno = PyDict_GetItemString(o_axLUT, "sn1_rno");
PyObject *pd_sn1sn11no = PyDict_GetItemString(o_axLUT, "sn1_sn11no");
PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_span);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// get the output sino
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
//-- get the arrays from the dictionaries
// norm components
PyArrayObject *p_geo = NULL;
p_geo = (PyArrayObject *)PyArray_FROM_OTF(pd_geo, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_cinf = NULL;
p_cinf = (PyArrayObject *)PyArray_FROM_OTF(pd_cinf, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_ceff = NULL;
p_ceff = (PyArrayObject *)PyArray_FROM_OTF(pd_ceff, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axe1 = NULL;
p_axe1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtp = NULL;
p_dtp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtnp = NULL;
p_dtnp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtnp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtc = NULL;
p_dtc = (PyArrayObject *)PyArray_FROM_OTF(pd_dtc, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axe2 = NULL;
p_axe2 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe2, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axf1 = NULL;
p_axf1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axf1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
// then axLUTs
PyArrayObject *p_li2rno = NULL;
p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_li2sn = NULL;
p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_li2nos = NULL;
p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1sn11 = NULL;
p_sn1sn11 = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11, NPY_INT16, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1rno = NULL;
p_sn1rno = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1rno, NPY_INT16, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1sn11no = NULL;
p_sn1sn11no = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11no, NPY_INT8, NPY_ARRAY_IN_ARRAY);
// 2D sino index LUT:
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(o_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
// singles buckets:
PyArrayObject *p_bckts = NULL;
p_bckts = (PyArrayObject *)PyArray_FROM_OTF(o_bckts, NPY_INT32, NPY_ARRAY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_geo == NULL || p_cinf == NULL || p_ceff == NULL || p_axe1 == NULL || p_dtp == NULL ||
p_dtnp == NULL || p_dtc == NULL || p_axe2 == NULL || p_axf1 == NULL || p_li2rno == NULL ||
p_li2sn == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_sn1sn11 == NULL ||
p_sn1rno == NULL || p_sn1sn11no == NULL || p_sino == NULL) {
Py_XDECREF(p_geo);
Py_XDECREF(p_cinf);
Py_XDECREF(p_ceff);
Py_XDECREF(p_axe1);
Py_XDECREF(p_dtp);
Py_XDECREF(p_dtnp);
Py_XDECREF(p_dtc);
Py_XDECREF(p_axe2);
Py_XDECREF(p_axf1);
// axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_sn1sn11);
Py_XDECREF(p_sn1rno);
Py_XDECREF(p_sn1sn11no);
// 2D sino LUT
Py_XDECREF(p_aw2ali);
// singles buckets
Py_XDECREF(p_bckts);
// output sino
PyArray_DiscardWritebackIfCopy(p_sino);
Py_XDECREF(p_sino);
return NULL;
}
//-- get the pointers to the data as C-types
// norm components
normc.geo = (float *)PyArray_DATA(p_geo);
normc.cinf = (float *)PyArray_DATA(p_cinf);
normc.ceff = (float *)PyArray_DATA(p_ceff);
normc.axe1 = (float *)PyArray_DATA(p_axe1);
normc.dtp = (float *)PyArray_DATA(p_dtp);
normc.dtnp = (float *)PyArray_DATA(p_dtnp);
normc.dtc = (float *)PyArray_DATA(p_dtc);
normc.axe2 = (float *)PyArray_DATA(p_axe2);
normc.axf1 = (float *)PyArray_DATA(p_axf1);
// axLUTs
axLUT.li2rno = (int *)PyArray_DATA(p_li2rno);
axLUT.li2sn = (int *)PyArray_DATA(p_li2sn);
axLUT.li2nos = (int *)PyArray_DATA(p_li2nos);
axLUT.sn1_sn11 = (short *)PyArray_DATA(p_sn1sn11);
axLUT.sn1_rno = (short *)PyArray_DATA(p_sn1rno);
axLUT.sn1_sn11no = (char *)PyArray_DATA(p_sn1sn11no);
// 2D sino index LUT
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
// singles buckets
int *bckts = (int *)PyArray_DATA(p_bckts);
//--- Array size
int Naw = (int)PyArray_DIM(p_aw2ali, 0);
if (AW != Naw)
printf("\ne> number of active bins is inconsitent !!! <<------------------<<<<<\n");
// output sino
float *sino = (float *)PyArray_DATA(p_sino);
// norm components
normc.ngeo[0] = (int)PyArray_DIM(p_geo, 0);
normc.ngeo[1] = (int)PyArray_DIM(p_geo, 1);
normc.ncinf[0] = (int)PyArray_DIM(p_cinf, 0);
normc.ncinf[1] = (int)PyArray_DIM(p_cinf, 1);
normc.nceff[0] = (int)PyArray_DIM(p_ceff, 0);
normc.nceff[1] = (int)PyArray_DIM(p_ceff, 1);
normc.naxe = (int)PyArray_DIM(p_axe1, 0);
normc.nrdt = (int)PyArray_DIM(p_dtp, 0);
normc.ncdt = (int)PyArray_DIM(p_dtc, 0);
// axial LUTs:
axLUT.Nli2rno[0] = (int)PyArray_DIM(p_li2rno, 0);
axLUT.Nli2rno[1] = (int)PyArray_DIM(p_li2rno, 1);
axLUT.Nli2sn[0] = (int)PyArray_DIM(p_li2sn, 0);
axLUT.Nli2sn[1] = (int)PyArray_DIM(p_li2sn, 1);
axLUT.Nli2nos = (int)PyArray_DIM(p_li2nos, 0);
// sets the device on which to calculate
HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
//<><><><><><><><><><> Call the CUDA stuff now
norm_from_components(sino, normc, axLUT, aw2ali, bckts, Cnt);
//<><><><><><><><><><>
//-- Clear up
// norm components
Py_DECREF(p_geo);
Py_DECREF(p_cinf);
Py_DECREF(p_ceff);
Py_DECREF(p_axe1);
Py_DECREF(p_dtp);
Py_DECREF(p_dtnp);
Py_DECREF(p_dtc);
Py_DECREF(p_axe2);
// axLUT
Py_DECREF(p_li2rno);
Py_DECREF(p_li2sn);
Py_DECREF(p_li2nos);
// 2D sino index LUT
Py_DECREF(p_aw2ali);
// singles buckets
Py_DECREF(p_bckts);
// output sino
PyArray_ResolveWritebackIfCopy(p_sino);
Py_DECREF(p_sino);
Py_INCREF(Py_None);
return Py_None;
}
//====================================================================================================
static PyObject *mmr_pgaps(PyObject *self, PyObject *args) {
// output sino
PyObject *o_sino;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject *o_txLUT;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// GPU input sino in span-11
PyObject *o_sng;
// Structure of constants
Cnst Cnt;
int sino_no;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOi", &o_sino, &o_sng, &o_txLUT, &o_mmrcnst, &sino_no))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... */
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES");
Cnt.A = (int)PyLong_AsLong(pd_A);
PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS");
Cnt.W = (int)PyLong_AsLong(pd_W);
PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_SPN);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
// GPU 2D linear sino index into Siemens sino index LUT
PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
// GPU input sino and the above 2D LUT
PyArrayObject *p_sng = NULL;
p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
// output sino
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
if (p_sng == NULL || p_aw2ali == NULL || p_sino == NULL) {
Py_XDECREF(p_aw2ali);
Py_XDECREF(p_sng);
PyArray_DiscardWritebackIfCopy(p_sino);
Py_XDECREF(p_sino);
return NULL;
}
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
float *sng = (float *)PyArray_DATA(p_sng);
// output sino
float *sino = (float *)PyArray_DATA(p_sino);
// sets the device on which to calculate
HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Run the conversion to sinos with gaps
put_gaps(sino, sng, aw2ali, sino_no, Cnt);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
Py_DECREF(p_aw2ali);
Py_DECREF(p_sng);
PyArray_ResolveWritebackIfCopy(p_sino);
Py_DECREF(p_sino);
Py_INCREF(Py_None);
return Py_None;
}
//====================================================================================================
static PyObject *mmr_rgaps(PyObject *self, PyObject *args) {
// output sino with gaps removed
PyObject *o_sng;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject *o_txLUT;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// input sino to be reformatted with gaps removed
PyObject *o_sino;
// Structure of constants
Cnst Cnt;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOO", &o_sng, &o_sino, &o_txLUT, &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... PyLong_AsLong*/
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1");
Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1);
PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES");
Cnt.A = (int)PyLong_AsLong(pd_A);
PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS");
Cnt.W = (int)PyLong_AsLong(pd_W);
PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_SPN);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// GPU 2D linear sino index into Siemens sino index LUT
PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
// input sino and the above 2D LUT
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
// number of sinograms taken from the shape of the sino (can be any number, especially when
// using a reduced ring number)
int snno = (int)PyArray_DIM(p_sino, 0);
// output sino
PyArrayObject *p_sng = NULL;
p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
if (p_sino == NULL || p_aw2ali == NULL || p_sng == NULL) {
Py_XDECREF(p_aw2ali);
Py_XDECREF(p_sino);
PyArray_DiscardWritebackIfCopy(p_sng);
Py_XDECREF(p_sng);
return NULL;
}
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
float *sino = (float *)PyArray_DATA(p_sino);
float *sng = (float *)PyArray_DATA(p_sng);
// sets the device on which to calculate
HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Run the conversion to GPU sinos
remove_gaps(sng, sino, snno, aw2ali, Cnt);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
Py_DECREF(p_aw2ali);
Py_DECREF(p_sino);
PyArray_ResolveWritebackIfCopy(p_sng);
Py_DECREF(p_sng);
Py_INCREF(Py_None);
return Py_None;
}
void free_capsule(PyObject *capsule) {
void *data = PyCapsule_GetPointer(capsule, NULL);
free(data);
}
//====================================================================================================
static PyObject *mmr_span11LUT(PyObject *self, PyObject *args) {
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// Structure of constants
Cnst Cnt;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "O", &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... */
PyObject *pd_Naw = PyDict_GetItemString(o_mmrcnst, "Naw");
Cnt.aw = (int)PyLong_AsLong(pd_Naw);
PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1");
Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1);
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG");
Cnt.NRNG = (int)PyLong_AsLong(pd_NRNG);
span11LUT span11 = span1_span11(Cnt);
npy_intp dims[2];
dims[0] = Cnt.NSN1;
PyArrayObject *s1s11_out =
(PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT16, span11.li2s11);
PyObject *capsule = PyCapsule_New(span11.li2s11, NULL, free_capsule);
PyArray_SetBaseObject(s1s11_out, capsule);
dims[0] = Cnt.NSN11;
PyArrayObject *s1nos_out =
(PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT8, span11.NSinos);
capsule = PyCapsule_New(span11.NSinos, NULL, free_capsule);
PyArray_SetBaseObject(s1nos_out, capsule);
PyObject *o_out = PyTuple_New(2);
PyTuple_SetItem(o_out, 0, PyArray_Return(s1s11_out));
PyTuple_SetItem(o_out, 1, PyArray_Return(s1nos_out));
return o_out;
}
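// Illustrative sketch (editor addition): the PyCapsule + PyArray_SetBaseObject pattern above hands
// ownership of the C buffer to the returned NumPy array, so free() runs only when the array is
// garbage collected. A hypothetical generic wrapper (error handling omitted):
static PyObject *wrap_owned_buffer(void *data, npy_intp n, int typenum) {
  PyObject *arr = PyArray_SimpleNewFromData(1, &n, typenum, data);
  PyObject *cap = PyCapsule_New(data, NULL, free_capsule);
  PyArray_SetBaseObject((PyArrayObject *)arr, cap); // steals the reference to cap
  return arr;
}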
//====================================================================================================
static PyObject *aux_varon(PyObject *self, PyObject *args) {
// M1 (mean) vector
PyObject *o_m1;
// M2 (variance) vector
PyObject *o_m2;
// input of instance data X
PyObject *o_x;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// Structure of constants
Cnst Cnt;
// realisation number
int b;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOiO", &o_m1, &o_m2, &o_x, &b, &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// input sino and the above 2D LUT
PyArrayObject *p_m1 = NULL;
p_m1 = (PyArrayObject *)PyArray_FROM_OTF(o_m1, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
PyArrayObject *p_m2 = NULL;
p_m2 = (PyArrayObject *)PyArray_FROM_OTF(o_m2, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
PyArrayObject *p_x = NULL;
p_x = (PyArrayObject *)PyArray_FROM_OTF(o_x, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
if (p_m1 == NULL || p_m2 == NULL || p_x == NULL) {
PyArray_DiscardWritebackIfCopy(p_m1);
PyArray_DiscardWritebackIfCopy(p_m2);
Py_XDECREF(p_m1);
Py_XDECREF(p_m2);
Py_XDECREF(p_x);
return NULL;
}
float *m1 = (float *)PyArray_DATA(p_m1);
float *m2 = (float *)PyArray_DATA(p_m2);
float *x = (float *)PyArray_DATA(p_x);
int ndim = PyArray_NDIM(p_x);
size_t nele = 1;
for (int i = 0; i < ndim; i++) { nele *= PyArray_DIM(p_x, i); }
printf("i> number of elements in data array: %lu\n", nele);
// sets the device on which to calculate
HANDLE_ERROR(hipSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Update variance online (M1, M2) using data instance X
var_online(m1, m2, x, b, nele);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
PyArray_ResolveWritebackIfCopy(p_m1);
PyArray_ResolveWritebackIfCopy(p_m2);
Py_DECREF(p_m1);
Py_DECREF(p_m2);
Py_DECREF(p_x);
Py_INCREF(Py_None);
return Py_None;
}
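// Illustrative sketch (editor addition, not the actual var_online kernel): a standard online
// (Welford-style) per-element update of the running mean m1 and variance m2 for the b-th
// realisation x looks like the following (assuming b counts realisations from 1; the device
// implementation may differ):
static inline void varon_update_sketch(float *m1, float *m2, float x, int b) {
  float d = x - *m1;
  *m1 += d / (float)b;
  *m2 = ((float)(b - 1) * (*m2) + d * (x - *m1)) / (float)b; // biased running variance
}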
| 9fac7191835ca998401736ed3e3b55e0aeee9280.cu | /*----------------------------------------------------------------------
CUDA C extension for Python
This extension module provides auxiliary functionality for list-mode data
processing, generating look-up tables for image reconstruction.
author: Pawel Markiewicz
Copyrights: 2018
----------------------------------------------------------------------*/
#define PY_SSIZE_T_CLEAN
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION
#include "auxmath.h"
#include "def.h"
#include "norm.h"
#include "scanner_0.h"
#include <Python.h>
#include <numpy/arrayobject.h>
#include <stdlib.h>
//=== START PYTHON INIT ===
//--- Available functions
static PyObject *mmr_norm(PyObject *self, PyObject *args);
static PyObject *mmr_span11LUT(PyObject *self, PyObject *args);
static PyObject *mmr_pgaps(PyObject *self, PyObject *args);
static PyObject *mmr_rgaps(PyObject *self, PyObject *args);
static PyObject *aux_varon(PyObject *self, PyObject *args);
//---
//> Module Method Table
static PyMethodDef mmr_auxe_methods[] = {
{"norm", mmr_norm, METH_VARARGS,
"Create 3D normalisation sinograms from provided normalisation components."},
{"s1s11", mmr_span11LUT, METH_VARARGS, "Create span-1 to span-11 look up table."},
{"pgaps", mmr_pgaps, METH_VARARGS,
"Create span-11 Siemens compatible sinograms by inserting gaps into the GPU-optimised "
"sinograms in span-11."},
{"rgaps", mmr_rgaps, METH_VARARGS,
"Create span-11 GPU-optimised sinograms by removing the gaps in Siemens-compatible sinograms "
"in span-11"},
{"varon", aux_varon, METH_VARARGS, "Calculate variance online for the provided vector."},
{NULL, NULL, 0, NULL} // Sentinel
};
//> Module Definition Structure
static struct PyModuleDef mmr_auxe_module = {
PyModuleDef_HEAD_INIT,
//> name of module
"mmr_auxe",
//> module documentation, may be NULL
"Initialisation and basic processing routines for the Siemens Biograph mMR.",
//> the module keeps state in global variables.
-1,
mmr_auxe_methods};
//> Initialization function
PyMODINIT_FUNC PyInit_mmr_auxe(void) {
Py_Initialize();
//> load NumPy functionality
import_array();
return PyModule_Create(&mmr_auxe_module);
}
//=== END PYTHON INIT ===
//==============================================================================
//==============================================================================
// N O R M A L I S A T I O N (component based)
//------------------------------------------------------------------------------
static PyObject *mmr_norm(PyObject *self, PyObject *args) {
// Structure of constants
Cnst Cnt;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// structure of norm C arrays (defined in norm.h).
NormCmp normc;
// structure of axial LUTs in C arrays (defined in norm.h).
axialLUT axLUT;
// Output norm sino
PyObject *o_sino = NULL;
// normalisation component dictionary.
PyObject *o_norm_cmp;
// axial LUT dictionary; contains LUTs: li2rno, li2sn, li2nos.
PyObject *o_axLUT;
// 2D sino index LUT (dead bins are out).
PyObject *o_aw2ali = NULL;
// singles buckets for dead time correction
PyObject *o_bckts = NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOOO", &o_sino, &o_norm_cmp, &o_bckts, &o_axLUT, &o_aw2ali,
&o_mmrcnst))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as numpy arrays. */
// norm components:
PyObject *pd_geo = PyDict_GetItemString(o_norm_cmp, "geo");
PyObject *pd_cinf = PyDict_GetItemString(o_norm_cmp, "cinf");
PyObject *pd_ceff = PyDict_GetItemString(o_norm_cmp, "ceff");
PyObject *pd_axe1 = PyDict_GetItemString(o_norm_cmp, "axe1");
PyObject *pd_dtp = PyDict_GetItemString(o_norm_cmp, "dtp");
PyObject *pd_dtnp = PyDict_GetItemString(o_norm_cmp, "dtnp");
PyObject *pd_dtc = PyDict_GetItemString(o_norm_cmp, "dtc");
PyObject *pd_axe2 = PyDict_GetItemString(o_norm_cmp, "axe2");
PyObject *pd_axf1 = PyDict_GetItemString(o_norm_cmp, "axf1");
// axial LUTs:
PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno");
PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn");
PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos");
PyObject *pd_sn1sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11");
PyObject *pd_sn1rno = PyDict_GetItemString(o_axLUT, "sn1_rno");
PyObject *pd_sn1sn11no = PyDict_GetItemString(o_axLUT, "sn1_sn11no");
PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_span);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// get the output sino
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
//-- get the arrays from the dictionaries
// norm components
PyArrayObject *p_geo = NULL;
p_geo = (PyArrayObject *)PyArray_FROM_OTF(pd_geo, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_cinf = NULL;
p_cinf = (PyArrayObject *)PyArray_FROM_OTF(pd_cinf, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_ceff = NULL;
p_ceff = (PyArrayObject *)PyArray_FROM_OTF(pd_ceff, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axe1 = NULL;
p_axe1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtp = NULL;
p_dtp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtnp = NULL;
p_dtnp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtnp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_dtc = NULL;
p_dtc = (PyArrayObject *)PyArray_FROM_OTF(pd_dtc, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axe2 = NULL;
p_axe2 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe2, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_axf1 = NULL;
p_axf1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axf1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
// then axLUTs
PyArrayObject *p_li2rno = NULL;
p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_li2sn = NULL;
p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_li2nos = NULL;
p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1sn11 = NULL;
p_sn1sn11 = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11, NPY_INT16, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1rno = NULL;
p_sn1rno = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1rno, NPY_INT16, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_sn1sn11no = NULL;
p_sn1sn11no = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11no, NPY_INT8, NPY_ARRAY_IN_ARRAY);
// 2D sino index LUT:
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(o_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  // singles buckets:
PyArrayObject *p_bckts = NULL;
p_bckts = (PyArrayObject *)PyArray_FROM_OTF(o_bckts, NPY_INT32, NPY_ARRAY_IN_ARRAY);
//--
/* If that didn't work, throw an exception. */
if (p_geo == NULL || p_cinf == NULL || p_ceff == NULL || p_axe1 == NULL || p_dtp == NULL ||
p_dtnp == NULL || p_dtc == NULL || p_axe2 == NULL || p_axf1 == NULL || p_li2rno == NULL ||
p_li2sn == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_sn1sn11 == NULL ||
p_sn1rno == NULL || p_sn1sn11no == NULL || p_sino == NULL) {
Py_XDECREF(p_geo);
Py_XDECREF(p_cinf);
Py_XDECREF(p_ceff);
Py_XDECREF(p_axe1);
Py_XDECREF(p_dtp);
Py_XDECREF(p_dtnp);
Py_XDECREF(p_dtc);
Py_XDECREF(p_axe2);
Py_XDECREF(p_axf1);
// axLUTs
Py_XDECREF(p_li2rno);
Py_XDECREF(p_li2sn);
Py_XDECREF(p_li2nos);
Py_XDECREF(p_sn1sn11);
Py_XDECREF(p_sn1rno);
Py_XDECREF(p_sn1sn11no);
// 2D sino LUT
Py_XDECREF(p_aw2ali);
// singles buckets
Py_XDECREF(p_bckts);
// output sino
PyArray_DiscardWritebackIfCopy(p_sino);
Py_XDECREF(p_sino);
return NULL;
}
//-- get the pointers to the data as C-types
// norm components
normc.geo = (float *)PyArray_DATA(p_geo);
normc.cinf = (float *)PyArray_DATA(p_cinf);
normc.ceff = (float *)PyArray_DATA(p_ceff);
normc.axe1 = (float *)PyArray_DATA(p_axe1);
normc.dtp = (float *)PyArray_DATA(p_dtp);
normc.dtnp = (float *)PyArray_DATA(p_dtnp);
normc.dtc = (float *)PyArray_DATA(p_dtc);
normc.axe2 = (float *)PyArray_DATA(p_axe2);
normc.axf1 = (float *)PyArray_DATA(p_axf1);
// axLUTs
axLUT.li2rno = (int *)PyArray_DATA(p_li2rno);
axLUT.li2sn = (int *)PyArray_DATA(p_li2sn);
axLUT.li2nos = (int *)PyArray_DATA(p_li2nos);
axLUT.sn1_sn11 = (short *)PyArray_DATA(p_sn1sn11);
axLUT.sn1_rno = (short *)PyArray_DATA(p_sn1rno);
axLUT.sn1_sn11no = (char *)PyArray_DATA(p_sn1sn11no);
// 2D sino index LUT
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
  // singles buckets
int *bckts = (int *)PyArray_DATA(p_bckts);
//--- Array size
int Naw = (int)PyArray_DIM(p_aw2ali, 0);
if (AW != Naw)
printf("\ne> number of active bins is inconsitent !!! <<------------------<<<<<\n");
// output sino
float *sino = (float *)PyArray_DATA(p_sino);
// norm components
normc.ngeo[0] = (int)PyArray_DIM(p_geo, 0);
normc.ngeo[1] = (int)PyArray_DIM(p_geo, 1);
normc.ncinf[0] = (int)PyArray_DIM(p_cinf, 0);
normc.ncinf[1] = (int)PyArray_DIM(p_cinf, 1);
normc.nceff[0] = (int)PyArray_DIM(p_ceff, 0);
normc.nceff[1] = (int)PyArray_DIM(p_ceff, 1);
normc.naxe = (int)PyArray_DIM(p_axe1, 0);
normc.nrdt = (int)PyArray_DIM(p_dtp, 0);
normc.ncdt = (int)PyArray_DIM(p_dtc, 0);
// axial LUTs:
axLUT.Nli2rno[0] = (int)PyArray_DIM(p_li2rno, 0);
axLUT.Nli2rno[1] = (int)PyArray_DIM(p_li2rno, 1);
axLUT.Nli2sn[0] = (int)PyArray_DIM(p_li2sn, 0);
axLUT.Nli2sn[1] = (int)PyArray_DIM(p_li2sn, 1);
axLUT.Nli2nos = (int)PyArray_DIM(p_li2nos, 0);
// sets the device on which to calculate
HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
//<><><><><><><><><><> Call the CUDA stuff now
norm_from_components(sino, normc, axLUT, aw2ali, bckts, Cnt);
//<><><><><><><><><><>
//-- Clear up
// norm components
Py_DECREF(p_geo);
Py_DECREF(p_cinf);
Py_DECREF(p_ceff);
Py_DECREF(p_axe1);
Py_DECREF(p_dtp);
Py_DECREF(p_dtnp);
Py_DECREF(p_dtc);
  Py_DECREF(p_axe2);
  Py_DECREF(p_axf1);
// axLUT
Py_DECREF(p_li2rno);
Py_DECREF(p_li2sn);
  Py_DECREF(p_li2nos);
  Py_DECREF(p_sn1sn11);
  Py_DECREF(p_sn1rno);
  Py_DECREF(p_sn1sn11no);
// 2D sino index LUT
Py_DECREF(p_aw2ali);
// singles buckets
Py_DECREF(p_bckts);
// output sino
PyArray_ResolveWritebackIfCopy(p_sino);
Py_DECREF(p_sino);
Py_INCREF(Py_None);
return Py_None;
}
//====================================================================================================
static PyObject *mmr_pgaps(PyObject *self, PyObject *args) {
// output sino
PyObject *o_sino;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject *o_txLUT;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// GPU input sino in span-11
PyObject *o_sng;
// Structure of constants
Cnst Cnt;
int sino_no;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOOi", &o_sino, &o_sng, &o_txLUT, &o_mmrcnst, &sino_no))
return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... */
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES");
Cnt.A = (int)PyLong_AsLong(pd_A);
PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS");
Cnt.W = (int)PyLong_AsLong(pd_W);
PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_SPN);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT");
PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END");
Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt);
Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend);
// GPU 2D linear sino index into Siemens sino index LUT
PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
// GPU input sino and the above 2D LUT
PyArrayObject *p_sng = NULL;
p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
// output sino
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
if (p_sng == NULL || p_aw2ali == NULL || p_sino == NULL) {
Py_XDECREF(p_aw2ali);
Py_XDECREF(p_sng);
PyArray_DiscardWritebackIfCopy(p_sino);
Py_XDECREF(p_sino);
    return NULL;
  }
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
float *sng = (float *)PyArray_DATA(p_sng);
// output sino
float *sino = (float *)PyArray_DATA(p_sino);
// sets the device on which to calculate
HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Run the conversion to sinos with gaps
put_gaps(sino, sng, aw2ali, sino_no, Cnt);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
Py_DECREF(p_aw2ali);
Py_DECREF(p_sng);
PyArray_ResolveWritebackIfCopy(p_sino);
Py_DECREF(p_sino);
Py_INCREF(Py_None);
return Py_None;
}
//====================================================================================================
static PyObject *mmr_rgaps(PyObject *self, PyObject *args) {
// output sino with gaps removed
PyObject *o_sng;
// transaxial LUT dictionary (e.g., 2D sino where dead bins are out).
PyObject *o_txLUT;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
  // input sino to be reformatted with gaps removed
PyObject *o_sino;
// Structure of constants
Cnst Cnt;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOO", &o_sng, &o_sino, &o_txLUT, &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... PyLong_AsLong*/
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1");
Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1);
PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES");
Cnt.A = (int)PyLong_AsLong(pd_A);
PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS");
Cnt.W = (int)PyLong_AsLong(pd_W);
PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN");
Cnt.SPN = (int)PyLong_AsLong(pd_SPN);
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// GPU 2D linear sino index into Siemens sino index LUT
PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali");
// input sino and the above 2D LUT
PyArrayObject *p_sino = NULL;
p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
PyArrayObject *p_aw2ali = NULL;
p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY);
  // number of sinograms, taken from the shape of the sino (can be any number, especially when
  // using a reduced ring number)
int snno = (int)PyArray_DIM(p_sino, 0);
// output sino
PyArrayObject *p_sng = NULL;
p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
  if (p_sino == NULL || p_aw2ali == NULL || p_sng == NULL) {
Py_XDECREF(p_aw2ali);
Py_XDECREF(p_sino);
PyArray_DiscardWritebackIfCopy(p_sng);
Py_XDECREF(p_sng);
    return NULL;
  }
int *aw2ali = (int *)PyArray_DATA(p_aw2ali);
float *sino = (float *)PyArray_DATA(p_sino);
float *sng = (float *)PyArray_DATA(p_sng);
// sets the device on which to calculate
HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Run the conversion to GPU sinos
remove_gaps(sng, sino, snno, aw2ali, Cnt);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
Py_DECREF(p_aw2ali);
Py_DECREF(p_sino);
PyArray_ResolveWritebackIfCopy(p_sng);
Py_DECREF(p_sng);
Py_INCREF(Py_None);
return Py_None;
}
void free_capsule(PyObject *capsule) {
void *data = PyCapsule_GetPointer(capsule, NULL);
free(data);
}
//====================================================================================================
static PyObject *mmr_span11LUT(PyObject *self, PyObject *args) {
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// Structure of constants
Cnst Cnt;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "O", &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Interpret the input objects as... */
PyObject *pd_Naw = PyDict_GetItemString(o_mmrcnst, "Naw");
Cnt.aw = (int)PyLong_AsLong(pd_Naw);
PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1");
Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1);
PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11");
Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11);
PyObject *pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG");
Cnt.NRNG = (int)PyLong_AsLong(pd_NRNG);
span11LUT span11 = span1_span11(Cnt);
npy_intp dims[2];
dims[0] = Cnt.NSN1;
PyArrayObject *s1s11_out =
(PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT16, span11.li2s11);
PyObject *capsule = PyCapsule_New(span11.li2s11, NULL, free_capsule);
PyArray_SetBaseObject(s1s11_out, capsule);
dims[0] = Cnt.NSN11;
PyArrayObject *s1nos_out =
(PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT8, span11.NSinos);
capsule = PyCapsule_New(span11.NSinos, NULL, free_capsule);
PyArray_SetBaseObject(s1nos_out, capsule);
PyObject *o_out = PyTuple_New(2);
PyTuple_SetItem(o_out, 0, PyArray_Return(s1s11_out));
PyTuple_SetItem(o_out, 1, PyArray_Return(s1nos_out));
return o_out;
}
//====================================================================================================
static PyObject *aux_varon(PyObject *self, PyObject *args) {
// M1 (mean) vector
PyObject *o_m1;
// M2 (variance) vector
PyObject *o_m2;
// input of instance data X
PyObject *o_x;
// Dictionary of scanner constants
PyObject *o_mmrcnst;
// Structure of constants
Cnst Cnt;
// realisation number
int b;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/* Parse the input tuple */
if (!PyArg_ParseTuple(args, "OOOiO", &o_m1, &o_m2, &o_x, &b, &o_mmrcnst)) return NULL;
//^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG");
Cnt.LOG = (char)PyLong_AsLong(pd_log);
PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID");
Cnt.DEVID = (char)PyLong_AsLong(pd_devid);
// input sino and the above 2D LUT
PyArrayObject *p_m1 = NULL;
p_m1 = (PyArrayObject *)PyArray_FROM_OTF(o_m1, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
PyArrayObject *p_m2 = NULL;
p_m2 = (PyArrayObject *)PyArray_FROM_OTF(o_m2, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2);
PyArrayObject *p_x = NULL;
p_x = (PyArrayObject *)PyArray_FROM_OTF(o_x, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY);
if (p_m1 == NULL || p_m2 == NULL || p_x == NULL) {
PyArray_DiscardWritebackIfCopy(p_m1);
PyArray_DiscardWritebackIfCopy(p_m2);
Py_XDECREF(p_m1);
Py_XDECREF(p_m2);
Py_XDECREF(p_x);
    return NULL;
  }
float *m1 = (float *)PyArray_DATA(p_m1);
float *m2 = (float *)PyArray_DATA(p_m2);
float *x = (float *)PyArray_DATA(p_x);
int ndim = PyArray_NDIM(p_x);
size_t nele = 1;
for (int i = 0; i < ndim; i++) { nele *= PyArray_DIM(p_x, i); }
printf("i> number of elements in data array: %lu\n", nele);
// sets the device on which to calculate
HANDLE_ERROR(cudaSetDevice(Cnt.DEVID));
//<><><><><><><><><><><><><><><><><><><><><><>
// Update variance online (M1, M2) using data instance X
var_online(m1, m2, x, b, nele);
//<><><><><><><><><><><><><><><><><><><><><><>
// Clean up
PyArray_ResolveWritebackIfCopy(p_m1);
PyArray_ResolveWritebackIfCopy(p_m2);
Py_DECREF(p_m1);
Py_DECREF(p_m2);
Py_DECREF(p_x);
Py_INCREF(Py_None);
return Py_None;
}
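//------------------------------------------------------------------------------
// NOTE (illustrative sketch, not part of the original module): the exact update
// performed on the GPU by var_online() is defined elsewhere; aux_varon() above
// only marshals M1, M2 and the data instance X to it, with b the realisation
// index. A common choice for such an online mean/variance update is the Welford
// recurrence, sketched below for the host purely to clarify the intent; the
// function name and the assumption that var_online() follows this scheme are
// hypothetical.
static void varon_host_sketch(float *m1, float *m2, const float *x, int b, size_t nele) {
  for (size_t i = 0; i < nele; i++) {
    float delta = x[i] - m1[i];      // deviation from the running mean
    m1[i] += delta / (float)b;       // update running mean M1
    m2[i] += delta * (x[i] - m1[i]); // update running sum of squared deviations M2
  }
  // after B realisations the sample variance would be m2 / (B - 1)
}
//------------------------------------------------------------------------------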
|
7197f96454186adf76e62c99db95794493d6c2ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmgeelltmv.cu normal z -> d, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dmgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
    num_vecs    magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
                column indices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
hipLaunchKernelGGL(( dmgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0,
m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
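//------------------------------------------------------------------------------
// NOTE (illustrative sketch, not part of MAGMA): dmgeelltmv_kernel above reads
// the ELL arrays as d_val[num_rows*n + row] and d_colind[num_rows*n + row],
// i.e. the num_rows x num_cols_per_row ELL slab is stored column-major. A
// minimal host routine that packs a row-major dense matrix into this layout,
// zero-padding short rows, could look as follows; the function name is
// hypothetical and serves only to document the indexing convention.
static void pack_dense_to_ell_sketch(int num_rows, int num_cols, int num_cols_per_row,
                                     const double *dense, // row-major, num_rows x num_cols
                                     double *val, magma_index_t *colind) {
    for (int row = 0; row < num_rows; row++) {
        int n = 0;
        for (int col = 0; col < num_cols && n < num_cols_per_row; col++) {
            double a = dense[row * num_cols + col];
            if (a != 0.0) {
                val[num_rows * n + row] = a; // same indexing as the kernel
                colind[num_rows * n + row] = col;
                n++;
            }
        }
        for (; n < num_cols_per_row; n++) { // pad the remainder with explicit zeros
            val[num_rows * n + row] = 0.0;
            colind[num_rows * n + row] = 0;
        }
    }
}
//------------------------------------------------------------------------------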
| 7197f96454186adf76e62c99db95794493d6c2ff.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zmgeelltmv.cu normal z -> d, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
__global__ void
dmgeelltmv_kernel( int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * d_x[col + i * num_cols ];
}
}
for( int i=0; i<num_vecs; i++ )
d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * d_y [ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
    num_vecs    magma_int_t
number of vectors
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
                column indices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
dmgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>>
( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
|
790cf98904553a0413a09c858bece4e9203d473b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void set_carr(float br, float bi, float * c, int N)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx>=N) return;
int idc=idx*2;
c[idc]=br;c[idc+1]=bi;
} | 790cf98904553a0413a09c858bece4e9203d473b.cu | #include "includes.h"
__global__ void set_carr(float br, float bi, float * c, int N)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx>=N) return;
int idc=idx*2;
c[idc]=br;c[idc+1]=bi;
} |
c2884c67b204c0c6ecaa88553550c5845c333bf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TanHBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
| c2884c67b204c0c6ecaa88553550c5845c333bf6.cu | // TanH neuron activation function layer.
// Adapted from ReLU layer code written by Yangqing Jia
#include <vector>
#include "caffe/layers/tanh_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void TanHForward(const int n, const Dtype* in, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = tanh(in[index]);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TanHBackward(const int n, const Dtype* in_diff,
const Dtype* out_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
Dtype tanhx = out_data[index];
out_diff[index] = in_diff[index] * (1 - tanhx * tanhx);
}
}
template <typename Dtype>
void TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TanHLayer);
} // namespace caffe
|
2b4ecd98016e649e8892ef3897585d01e5136b39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// The CUDA kernel is modified from SplitGelu plugin of TensorRT 8.5.
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/diffusion/bias_split_gelu_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T, int32_t HHS, int32_t TPB>
__global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) {
int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x;
int32_t index_output = blockIdx.x * HHS + threadIdx.x;
int32_t index_bias = threadIdx.x;
#pragma unroll
for (int32_t i = 0; i < HHS / TPB; ++i) {
auto value_left = (float)(input[index_input] + bias[index_bias]);
auto value_right = (float)(input[index_input + HHS] + bias[index_bias + HHS]);
// Gelu is applied to right side only: Gelu(x) = x * 0.5 * (erf(x / sqrt(2)) + 1.0)
float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f);
float result = value_left * gelu_right;
output[index_output] = static_cast<T>(result);
index_input += TPB;
index_output += TPB;
index_bias += TPB;
}
return;
}
template <typename T>
void LaunchBiasSplitGeluKernel(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size,
T const* input, T const* bias, T* output) {
constexpr int32_t TPB = 256; // thread per block
switch (half_hidden_size) {
case 1280:
      hipLaunchKernelGGL((biasSplitGeluKernel<T, 1280, TPB>), dim3(grid_size), dim3(TPB), 0, stream, input, bias, output);
break;
case 2560:
      hipLaunchKernelGGL((biasSplitGeluKernel<T, 2560, TPB>), dim3(grid_size), dim3(TPB), 0, stream, input, bias, output);
break;
case 5120:
      hipLaunchKernelGGL((biasSplitGeluKernel<T, 5120, TPB>), dim3(grid_size), dim3(TPB), 0, stream, input, bias, output);
break;
default:
ORT_NOT_IMPLEMENTED("Not implemented");
}
}
template __global__ void biasSplitGeluKernel<float, 1280, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<float, 2560, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<float, 5120, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<half, 1280, 256>(half const*, half const*, half*);
template __global__ void biasSplitGeluKernel<half, 2560, 256>(half const*, half const*, half*);
template __global__ void biasSplitGeluKernel<half, 5120, 256>(half const*, half const*, half*);
template void LaunchBiasSplitGeluKernel<float>(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size,
float const* input, float const* bias, float* output);
template void LaunchBiasSplitGeluKernel<half>(hipStream_t stream, int32_t grid_size, int32_t half_hidden_size,
half const* input, half const* bias, half* output);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 2b4ecd98016e649e8892ef3897585d01e5136b39.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// The CUDA kernel is modified from SplitGelu plugin of TensorRT 8.5.
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "contrib_ops/cuda/diffusion/bias_split_gelu_impl.h"
using namespace onnxruntime::cuda;
namespace onnxruntime {
namespace contrib {
namespace cuda {
template <typename T, int32_t HHS, int32_t TPB>
__global__ void biasSplitGeluKernel(T const* input, T const* bias, T* output) {
int32_t index_input = blockIdx.x * HHS * 2 + threadIdx.x;
int32_t index_output = blockIdx.x * HHS + threadIdx.x;
int32_t index_bias = threadIdx.x;
#pragma unroll
for (int32_t i = 0; i < HHS / TPB; ++i) {
auto value_left = (float)(input[index_input] + bias[index_bias]);
auto value_right = (float)(input[index_input + HHS] + bias[index_bias + HHS]);
// Gelu is applied to right side only: Gelu(x) = x * 0.5 * (erf(x / sqrt(2)) + 1.0)
float gelu_right = value_right * 0.5f * (erff(value_right / 1.41421356237f) + 1.0f);
float result = value_left * gelu_right;
output[index_output] = static_cast<T>(result);
index_input += TPB;
index_output += TPB;
index_bias += TPB;
}
return;
}
template <typename T>
void LaunchBiasSplitGeluKernel(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size,
T const* input, T const* bias, T* output) {
constexpr int32_t TPB = 256; // thread per block
switch (half_hidden_size) {
case 1280:
(biasSplitGeluKernel<T, 1280, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output);
break;
case 2560:
(biasSplitGeluKernel<T, 2560, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output);
break;
case 5120:
(biasSplitGeluKernel<T, 5120, TPB>)<<<grid_size, TPB, 0, stream>>>(input, bias, output);
break;
default:
ORT_NOT_IMPLEMENTED("Not implemented");
}
}
template __global__ void biasSplitGeluKernel<float, 1280, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<float, 2560, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<float, 5120, 256>(float const*, float const*, float*);
template __global__ void biasSplitGeluKernel<half, 1280, 256>(half const*, half const*, half*);
template __global__ void biasSplitGeluKernel<half, 2560, 256>(half const*, half const*, half*);
template __global__ void biasSplitGeluKernel<half, 5120, 256>(half const*, half const*, half*);
template void LaunchBiasSplitGeluKernel<float>(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size,
float const* input, float const* bias, float* output);
template void LaunchBiasSplitGeluKernel<half>(cudaStream_t stream, int32_t grid_size, int32_t half_hidden_size,
half const* input, half const* bias, half* output);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
5d443c7771856a09ae6730e7b4510db904fb92c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../utils/hpc.h"
#define BLKDIM 1024
__global__ void kernel_layer(matrix_t in, model_t mdl, matrix_t out, int* layer)
{
// shared memory to exploit data reuse of the input
__shared__ float temp_i[BLKDIM+R];
// index and batch
const unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int batch = blockIdx.y;
// current layer weights and biases
const float** w = (const float**) mdl->weights_list[*layer]->data;
const float* b = (const float*) mdl->bias_list[*layer]->data;
// current output size
const unsigned max_n = mdl->bias_list[*layer]->len;
if (index >= max_n)
return;
// fill shared mem handling the case that input is not a multiple of block size
temp_i[threadIdx.x] = in->data[batch][index];
if (threadIdx.x < R) {
int missing = max_n - index;
if (missing < BLKDIM)
temp_i[2*threadIdx.x+missing] = in->data[batch][threadIdx.x+max_n];
else
temp_i[threadIdx.x+blockDim.x] = in->data[batch][index+blockDim.x];
}
__syncthreads();
// compute neuron output
float sum = b[index];
for (unsigned int k=0; k<R; k++) {
sum += w[index][k] * temp_i[threadIdx.x+k];
}
// apply activation if isn't the last layer (the model is a regressor)
out->data[batch][index] = (*layer != mdl->num_layer-1) ? ACTIVATION(sum) : sum;
}
__host__ matrix_t cuda_forward_mlp(matrix_t input_batch, model_t model)
{
// alloc and copy input and model to device memory
matrix_t d_in = h2d_matrix(input_batch);
model_t d_mdl = h2d_model(model);
// create buffer matrix as big as the input (create on host, move on device, free on host)
matrix_t h_buff = new_matrix_pinned(input_batch->m, input_batch->n, ZERO);
matrix_t d_buff = h2d_matrix(h_buff);
free_matrix_pinned(h_buff);
// 2D grid, x is for number of features, y is for batch elements
dim3 grid((input_batch->n + BLKDIM-1)/BLKDIM, input_batch->m);
dim3 block(BLKDIM);
// Alloc on device current layer index
int *d_layer; cudaSafeCall(hipMalloc((void**)&d_layer, sizeof(int)));
double tstart = hpc_gettime();
// layer loop
for (int layer=0; layer<model->num_layer; layer++) {
// update current layer index on device
cudaSafeCall(hipMemcpy(d_layer, &layer, sizeof(int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_layer), dim3(grid), dim3(block), 0, 0, d_in, d_mdl, d_buff, d_layer);
hipDeviceSynchronize();
cudaCheckError();
// swap input and output buffer pointers
if (layer < model->num_layer-1) {
matrix_t swap;
swap = d_in;
d_in = d_buff;
d_buff = swap;
}
}
double tstop = hpc_gettime();
printf("Kernel time elapsed = %f s\n", tstop-tstart);
cudaSafeCall(hipFree(d_layer));
// copy the result on host memory
matrix_t h_out = d2h_matrix(d_buff);
// truncate to the output dimension (it was as big as the input)
h_out->n = model->bias_list[model->num_layer-1]->len;
device_free_matrix(d_in);
device_free_model(d_mdl);
device_free_matrix(d_buff);
return h_out;
}
/********************************** VECTOR MEMORY UTILITIES **********************************/
__host__ vector_t new_vector_pinned(unsigned int length, init_t init_type)
{
vector_t vec;
cudaSafeCall(hipHostMalloc(&vec, sizeof(vector_s)));
vec->len = length;
cudaSafeCall(hipHostMalloc(&vec->data, length * sizeof(float)));
for (unsigned int i=0; i<length; i++) {
switch (init_type)
{
case RAND_UNIFORM: // [-2, +2)
vec->data[i] = 4 * ((float) rand() / (float) RAND_MAX - 0.5);
break;
default:
vec->data[i] = 0;
break;
}
}
return vec;
}
__host__ void free_vector_pinned(vector_t vector)
{
cudaSafeCall(hipHostFree(vector->data));
cudaSafeCall(hipHostFree(vector));
}
__host__ vector_t h2d_vector(vector_t vec)
{
// alloc the struct
vector_t d_vec;
cudaSafeCall(hipMalloc((void**)&d_vec, sizeof(vector_s)));
cudaSafeCall(hipMemcpy(&(d_vec->len), &(vec->len), sizeof(int), hipMemcpyHostToDevice));
// alloc the data, then update the vec->data pointer with that one on the device memory
float* d_data;
cudaSafeCall(hipMalloc((void**)&d_data, vec->len*sizeof(float)));
cudaSafeCall(hipMemcpy(d_data, vec->data, vec->len*sizeof(float), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(&d_vec->data, &d_data, sizeof(float*), hipMemcpyHostToDevice));
return d_vec;
}
__host__ vector_t d2h_vector(vector_t d_vec)
{
// copy on host the struct
vector_t vec;
cudaSafeCall(hipHostMalloc(&vec, sizeof(vector_s)));
cudaSafeCall(hipMemcpy(vec, d_vec, sizeof(vector_s), hipMemcpyDeviceToHost));
// copy on host the data
float* d_data;
cudaSafeCall(hipHostMalloc(&d_data, vec->len*sizeof(float)));
cudaSafeCall(hipMemcpy(d_data, vec->data, vec->len*sizeof(float), hipMemcpyDeviceToHost));
vec->data = d_data;
return vec;
}
__host__ void device_free_vector(vector_t d_vec)
{
float* d_data;
cudaSafeCall(hipMemcpy(&d_data, &d_vec->data, sizeof(float*), hipMemcpyDeviceToHost));
cudaSafeCall(hipFree(d_data));
cudaSafeCall(hipFree(d_vec));
}
/********************************** MATRIX MEMORY UTILITIES **********************************/
__host__ matrix_t new_matrix_pinned(unsigned int m, unsigned int n, init_t init_type)
{
matrix_t mat;
cudaSafeCall(hipHostMalloc(&mat, sizeof(vector_s)));
mat->m = m; mat->n = n;
// data block (contiguous)
float* blk;
cudaSafeCall(hipHostMalloc(&blk, m * n * sizeof(float)));
cudaSafeCall(hipHostMalloc(&mat->data, m * sizeof(float*)));
    // update the elements of data with the addresses of the data block,
    // preserving the double indexing of the array
for (unsigned int i=0; i<m; i++) {
mat->data[i] = &(blk[i*n]);
for (unsigned int j=0; j<n; j++) {
switch (init_type)
{
case RAND_UNIFORM: // [-2, +2)
mat->data[i][j] = 4 * ((float) rand() / (float) RAND_MAX - 0.5);
break;
default:
mat->data[i][j] = 0;
break;
}
}
}
return mat;
}
__host__ void free_matrix_pinned(matrix_t matrix)
{
cudaSafeCall(hipHostFree(matrix->data[0])); //free blk
cudaSafeCall(hipHostFree(matrix->data));
cudaSafeCall(hipHostFree(matrix));
}
__host__ matrix_t h2d_matrix(matrix_t mat)
{
// copy on device the struct
matrix_t d_mat;
cudaSafeCall(hipMalloc((void**)&d_mat, sizeof(matrix_s)));
cudaSafeCall(hipMemcpy(&(d_mat->m), &(mat->m), sizeof(unsigned int), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(&(d_mat->n), &(mat->n), sizeof(unsigned int), hipMemcpyHostToDevice));
// copy on device the data (d_blk)
float** d_data;
float* d_blk;
cudaSafeCall(hipMalloc((void**)&d_data, mat->m*sizeof(float*)));
cudaSafeCall(hipMalloc((void**)&d_blk, mat->m*mat->n*sizeof(float)));
cudaSafeCall(hipMemcpy(d_blk, mat->data[0], mat->m*mat->n*sizeof(float), hipMemcpyHostToDevice));
    // update the d_data indices with the ones of d_blk in device memory, so as to preserve the double index access
float* addr[mat->m];
for (unsigned int i=0; i<mat->m; i++) {
addr[i] = &d_blk[i*mat->n];
}
cudaSafeCall(hipMemcpy(d_data, addr, mat->m*sizeof(float*), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(&d_mat->data, &d_data, sizeof(float**), hipMemcpyHostToDevice));
return d_mat;
}
__host__ matrix_t d2h_matrix(matrix_t d_mat)
{
// copy struct to host
matrix_t mat;
cudaSafeCall(hipHostMalloc(&mat, sizeof(matrix_s)));
cudaSafeCall(hipMemcpy(mat, d_mat, sizeof(matrix_s), hipMemcpyDeviceToHost));
// copy data to host (data[0] will contain the memory block address)
float** data;
cudaSafeCall(hipHostMalloc(&data, mat->m*sizeof(float*)));
cudaSafeCall(hipMemcpy(data, mat->data, mat->m*sizeof(float*), hipMemcpyDeviceToHost));
// copy the memory block to host
float* blk;
cudaSafeCall(hipHostMalloc(&blk, mat->m*mat->n*sizeof(float)));
cudaSafeCall(hipMemcpy(blk, data[0], mat->m*mat->n*sizeof(float), hipMemcpyDeviceToHost));
    // update data indices with the host copy of blk
for (unsigned int i=0; i<mat->m; i++) {
data[i] = &(blk[i*mat->n]);
}
mat->data = data;
return mat;
}
__host__ void device_free_matrix(matrix_t d_mat)
{
float** d_data;
cudaSafeCall(hipMemcpy(&d_data, &d_mat->data, sizeof(float**), hipMemcpyDeviceToHost));
float *d_blk;
cudaSafeCall(hipMemcpy(&d_blk, d_data, sizeof(float*), hipMemcpyDeviceToHost));
cudaSafeCall(hipFree(d_blk));
cudaSafeCall(hipFree(d_data));
cudaSafeCall(hipFree(d_mat));
}
/********************************** MODEL MEMORY UTILITIES **********************************/
__host__ model_t new_model_pinned(unsigned int inputs, unsigned int num_layer, init_t init_type) {
model_t obj;
cudaSafeCall(hipHostMalloc(&obj, sizeof(model_s)));
obj->num_layer = num_layer;
cudaSafeCall(hipHostMalloc(&obj->weights_list, num_layer * sizeof(matrix_t)));
cudaSafeCall(hipHostMalloc(&obj->bias_list, num_layer * sizeof(vector_t)));
unsigned int last = inputs;
for (unsigned int i=0; i<num_layer; i++) {
obj->weights_list[i] = new_matrix_pinned(last-(R-1), R, init_type);
obj->bias_list[i] = new_vector_pinned(last-(R-1), init_type);
last -= R-1;
}
return obj;
}
__host__ void free_model_pinned(model_t model)
{
for (unsigned int i=0; i<model->num_layer; i++) {
free_matrix_pinned(model->weights_list[i]);
free_vector_pinned(model->bias_list[i]);
}
cudaSafeCall(hipHostFree(model->weights_list));
cudaSafeCall(hipHostFree(model->bias_list));
cudaSafeCall(hipHostFree(model));
}
__host__ model_t h2d_model(model_t mdl)
{
// copy struct to device
model_t d_mdl;
cudaSafeCall(hipMalloc((void**)&d_mdl, sizeof(model_s)));
cudaSafeCall(hipMemcpy(&(d_mdl->num_layer), &(mdl->num_layer), sizeof(unsigned int), hipMemcpyHostToDevice));
// alloc the pointer lists
matrix_t* d_weights;
vector_t* d_biases;
cudaSafeCall(hipMalloc((void**)&d_weights, mdl->num_layer*sizeof(matrix_t*)));
cudaSafeCall(hipMalloc((void**)&d_biases, mdl->num_layer*sizeof(vector_t*)));
// copy all weights and biases and update the lists with device pointers
for (unsigned int i=0; i<mdl->num_layer; i++) {
matrix_t d_m = h2d_matrix(mdl->weights_list[i]);
vector_t d_v = h2d_vector(mdl->bias_list[i]);
cudaSafeCall(hipMemcpy(&d_weights[i], &d_m, sizeof(matrix_t), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(&d_biases[i], &d_v, sizeof(vector_t), hipMemcpyHostToDevice));
}
cudaSafeCall(hipMemcpy(&d_mdl->weights_list, &d_weights, sizeof(matrix_t*), hipMemcpyHostToDevice));
cudaSafeCall(hipMemcpy(&d_mdl->bias_list, &d_biases, sizeof(vector_t*), hipMemcpyHostToDevice));
return d_mdl;
}
__host__ void device_free_model(model_t d_mdl)
{
    // copy to host the pointer lists, then free each element
matrix_t* d_weights;
vector_t* d_biases;
unsigned int num_layer;
cudaSafeCall(hipMemcpy(&num_layer, &d_mdl->num_layer, sizeof(unsigned int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&d_weights, &d_mdl->weights_list, sizeof(matrix_t*), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&d_biases, &d_mdl->bias_list, sizeof(vector_t*), hipMemcpyDeviceToHost));
for (unsigned int i=0; i<num_layer; i++) {
matrix_t d_w;
vector_t d_b;
cudaSafeCall(hipMemcpy(&d_w, &d_weights[i], sizeof(matrix_t), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&d_b, &d_biases[i], sizeof(vector_t), hipMemcpyDeviceToHost));
device_free_matrix(d_w);
device_free_vector(d_b);
}
cudaSafeCall(hipFree(d_weights));
cudaSafeCall(hipFree(d_biases));
cudaSafeCall(hipFree(d_mdl));
}
__host__ void test_device_mem_leak()
{
const unsigned int cycles = 1000;
vector_t vec = new_vector_pinned(1000, RAND_UNIFORM);
matrix_t mat = new_matrix_pinned(1000, 1000, RAND_UNIFORM);
model_t mdl = new_model_pinned(100, 20, RAND_UNIFORM);
printf("Device memory leak stress test: running %d cycles...\n", cycles);
for (unsigned int i=0; i<cycles; i++) {
vector_t d_vec = h2d_vector(vec);
vector_t h_vec = d2h_vector(d_vec);
assert_equal_vector(vec, h_vec);
free_vector_pinned(h_vec);
device_free_vector(d_vec);
matrix_t d_mat = h2d_matrix(mat);
matrix_t h_mat = d2h_matrix(d_mat);
assert_equal_matrix(mat, h_mat);
free_matrix_pinned(h_mat);
device_free_matrix(d_mat);
model_t d_mdl = h2d_model(mdl);
device_free_model(d_mdl);
printf("%d ", i); fflush(stdout);
}
free_vector_pinned(vec);
free_matrix_pinned(mat);
free_model_pinned(mdl);
printf("\n");
} | 5d443c7771856a09ae6730e7b4510db904fb92c4.cu | #include "cuda.h"
#include "../utils/hpc.h"
#define BLKDIM 1024
__global__ void kernel_layer(matrix_t in, model_t mdl, matrix_t out, int* layer)
{
// shared memory to exploit data reuse of the input
__shared__ float temp_i[BLKDIM+R];
// index and batch
const unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int batch = blockIdx.y;
// current layer weights and biases
const float** w = (const float**) mdl->weights_list[*layer]->data;
const float* b = (const float*) mdl->bias_list[*layer]->data;
// current output size
const unsigned max_n = mdl->bias_list[*layer]->len;
if (index >= max_n)
return;
// fill shared mem handling the case that input is not a multiple of block size
temp_i[threadIdx.x] = in->data[batch][index];
if (threadIdx.x < R) {
int missing = max_n - index;
if (missing < BLKDIM)
temp_i[2*threadIdx.x+missing] = in->data[batch][threadIdx.x+max_n];
else
temp_i[threadIdx.x+blockDim.x] = in->data[batch][index+blockDim.x];
}
__syncthreads();
// compute neuron output
float sum = b[index];
for (unsigned int k=0; k<R; k++) {
sum += w[index][k] * temp_i[threadIdx.x+k];
}
// apply activation if isn't the last layer (the model is a regressor)
out->data[batch][index] = (*layer != mdl->num_layer-1) ? ACTIVATION(sum) : sum;
}
__host__ matrix_t cuda_forward_mlp(matrix_t input_batch, model_t model)
{
// alloc and copy input and model to device memory
matrix_t d_in = h2d_matrix(input_batch);
model_t d_mdl = h2d_model(model);
// create buffer matrix as big as the input (create on host, move on device, free on host)
matrix_t h_buff = new_matrix_pinned(input_batch->m, input_batch->n, ZERO);
matrix_t d_buff = h2d_matrix(h_buff);
free_matrix_pinned(h_buff);
// 2D grid, x is for number of features, y is for batch elements
dim3 grid((input_batch->n + BLKDIM-1)/BLKDIM, input_batch->m);
dim3 block(BLKDIM);
// Alloc on device current layer index
int *d_layer; cudaSafeCall(cudaMalloc((void**)&d_layer, sizeof(int)));
double tstart = hpc_gettime();
// layer loop
for (int layer=0; layer<model->num_layer; layer++) {
// update current layer index on device
cudaSafeCall(cudaMemcpy(d_layer, &layer, sizeof(int), cudaMemcpyHostToDevice));
kernel_layer<<<grid, block>>>(d_in, d_mdl, d_buff, d_layer);
cudaDeviceSynchronize();
cudaCheckError();
// swap input and output buffer pointers
if (layer < model->num_layer-1) {
matrix_t swap;
swap = d_in;
d_in = d_buff;
d_buff = swap;
}
}
double tstop = hpc_gettime();
printf("Kernel time elapsed = %f s\n", tstop-tstart);
cudaSafeCall(cudaFree(d_layer));
// copy the result on host memory
matrix_t h_out = d2h_matrix(d_buff);
// truncate to the output dimension (it was as big as the input)
h_out->n = model->bias_list[model->num_layer-1]->len;
device_free_matrix(d_in);
device_free_model(d_mdl);
device_free_matrix(d_buff);
return h_out;
}
/********************************** VECTOR MEMORY UTILITIES **********************************/
__host__ vector_t new_vector_pinned(unsigned int length, init_t init_type)
{
vector_t vec;
cudaSafeCall(cudaMallocHost(&vec, sizeof(vector_s)));
vec->len = length;
cudaSafeCall(cudaMallocHost(&vec->data, length * sizeof(float)));
for (unsigned int i=0; i<length; i++) {
switch (init_type)
{
case RAND_UNIFORM: // [-2, +2)
vec->data[i] = 4 * ((float) rand() / (float) RAND_MAX - 0.5);
break;
default:
vec->data[i] = 0;
break;
}
}
return vec;
}
__host__ void free_vector_pinned(vector_t vector)
{
cudaSafeCall(cudaFreeHost(vector->data));
cudaSafeCall(cudaFreeHost(vector));
}
__host__ vector_t h2d_vector(vector_t vec)
{
// alloc the struct
vector_t d_vec;
cudaSafeCall(cudaMalloc((void**)&d_vec, sizeof(vector_s)));
cudaSafeCall(cudaMemcpy(&(d_vec->len), &(vec->len), sizeof(int), cudaMemcpyHostToDevice));
// alloc the data, then update the vec->data pointer with that one on the device memory
float* d_data;
cudaSafeCall(cudaMalloc((void**)&d_data, vec->len*sizeof(float)));
cudaSafeCall(cudaMemcpy(d_data, vec->data, vec->len*sizeof(float), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(&d_vec->data, &d_data, sizeof(float*), cudaMemcpyHostToDevice));
return d_vec;
}
__host__ vector_t d2h_vector(vector_t d_vec)
{
// copy on host the struct
vector_t vec;
cudaSafeCall(cudaMallocHost(&vec, sizeof(vector_s)));
cudaSafeCall(cudaMemcpy(vec, d_vec, sizeof(vector_s), cudaMemcpyDeviceToHost));
// copy on host the data
float* d_data;
cudaSafeCall(cudaMallocHost(&d_data, vec->len*sizeof(float)));
cudaSafeCall(cudaMemcpy(d_data, vec->data, vec->len*sizeof(float), cudaMemcpyDeviceToHost));
vec->data = d_data;
return vec;
}
__host__ void device_free_vector(vector_t d_vec)
{
float* d_data;
cudaSafeCall(cudaMemcpy(&d_data, &d_vec->data, sizeof(float*), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaFree(d_data));
cudaSafeCall(cudaFree(d_vec));
}
/********************************** MATRIX MEMORY UTILITIES **********************************/
__host__ matrix_t new_matrix_pinned(unsigned int m, unsigned int n, init_t init_type)
{
matrix_t mat;
cudaSafeCall(cudaMallocHost(&mat, sizeof(vector_s)));
mat->m = m; mat->n = n;
// data block (contiguous)
float* blk;
cudaSafeCall(cudaMallocHost(&blk, m * n * sizeof(float)));
cudaSafeCall(cudaMallocHost(&mat->data, m * sizeof(float*)));
    // update the elements of data with the addresses of the data block,
    // preserving the double indexing of the array
for (unsigned int i=0; i<m; i++) {
mat->data[i] = &(blk[i*n]);
for (unsigned int j=0; j<n; j++) {
switch (init_type)
{
case RAND_UNIFORM: // [-2, +2)
mat->data[i][j] = 4 * ((float) rand() / (float) RAND_MAX - 0.5);
break;
default:
mat->data[i][j] = 0;
break;
}
}
}
return mat;
}
__host__ void free_matrix_pinned(matrix_t matrix)
{
cudaSafeCall(cudaFreeHost(matrix->data[0])); //free blk
cudaSafeCall(cudaFreeHost(matrix->data));
cudaSafeCall(cudaFreeHost(matrix));
}
__host__ matrix_t h2d_matrix(matrix_t mat)
{
// copy on device the struct
matrix_t d_mat;
cudaSafeCall(cudaMalloc((void**)&d_mat, sizeof(matrix_s)));
cudaSafeCall(cudaMemcpy(&(d_mat->m), &(mat->m), sizeof(unsigned int), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(&(d_mat->n), &(mat->n), sizeof(unsigned int), cudaMemcpyHostToDevice));
// copy on device the data (d_blk)
float** d_data;
float* d_blk;
cudaSafeCall(cudaMalloc((void**)&d_data, mat->m*sizeof(float*)));
cudaSafeCall(cudaMalloc((void**)&d_blk, mat->m*mat->n*sizeof(float)));
cudaSafeCall(cudaMemcpy(d_blk, mat->data[0], mat->m*mat->n*sizeof(float), cudaMemcpyHostToDevice));
    // update the d_data indices with the ones of d_blk in device memory, so as to preserve the double index access
float* addr[mat->m];
for (unsigned int i=0; i<mat->m; i++) {
addr[i] = &d_blk[i*mat->n];
}
cudaSafeCall(cudaMemcpy(d_data, addr, mat->m*sizeof(float*), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(&d_mat->data, &d_data, sizeof(float**), cudaMemcpyHostToDevice));
return d_mat;
}
__host__ matrix_t d2h_matrix(matrix_t d_mat)
{
// copy struct to host
matrix_t mat;
cudaSafeCall(cudaMallocHost(&mat, sizeof(matrix_s)));
cudaSafeCall(cudaMemcpy(mat, d_mat, sizeof(matrix_s), cudaMemcpyDeviceToHost));
// copy data to host (data[0] will contain the memory block address)
float** data;
cudaSafeCall(cudaMallocHost(&data, mat->m*sizeof(float*)));
cudaSafeCall(cudaMemcpy(data, mat->data, mat->m*sizeof(float*), cudaMemcpyDeviceToHost));
// copy the memory block to host
float* blk;
cudaSafeCall(cudaMallocHost(&blk, mat->m*mat->n*sizeof(float)));
cudaSafeCall(cudaMemcpy(blk, data[0], mat->m*mat->n*sizeof(float), cudaMemcpyDeviceToHost));
    // update data indices with the host copy of blk
for (unsigned int i=0; i<mat->m; i++) {
data[i] = &(blk[i*mat->n]);
}
mat->data = data;
return mat;
}
__host__ void device_free_matrix(matrix_t d_mat)
{
float** d_data;
cudaSafeCall(cudaMemcpy(&d_data, &d_mat->data, sizeof(float**), cudaMemcpyDeviceToHost));
float *d_blk;
cudaSafeCall(cudaMemcpy(&d_blk, d_data, sizeof(float*), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaFree(d_blk));
cudaSafeCall(cudaFree(d_data));
cudaSafeCall(cudaFree(d_mat));
}
/********************************** MODEL MEMORY UTILITIES **********************************/
__host__ model_t new_model_pinned(unsigned int inputs, unsigned int num_layer, init_t init_type) {
model_t obj;
cudaSafeCall(cudaMallocHost(&obj, sizeof(model_s)));
obj->num_layer = num_layer;
cudaSafeCall(cudaMallocHost(&obj->weights_list, num_layer * sizeof(matrix_t)));
cudaSafeCall(cudaMallocHost(&obj->bias_list, num_layer * sizeof(vector_t)));
unsigned int last = inputs;
for (unsigned int i=0; i<num_layer; i++) {
obj->weights_list[i] = new_matrix_pinned(last-(R-1), R, init_type);
obj->bias_list[i] = new_vector_pinned(last-(R-1), init_type);
last -= R-1;
}
return obj;
}
__host__ void free_model_pinned(model_t model)
{
for (unsigned int i=0; i<model->num_layer; i++) {
free_matrix_pinned(model->weights_list[i]);
free_vector_pinned(model->bias_list[i]);
}
cudaSafeCall(cudaFreeHost(model->weights_list));
cudaSafeCall(cudaFreeHost(model->bias_list));
cudaSafeCall(cudaFreeHost(model));
}
__host__ model_t h2d_model(model_t mdl)
{
// copy struct to device
model_t d_mdl;
cudaSafeCall(cudaMalloc((void**)&d_mdl, sizeof(model_s)));
cudaSafeCall(cudaMemcpy(&(d_mdl->num_layer), &(mdl->num_layer), sizeof(unsigned int), cudaMemcpyHostToDevice));
// alloc the pointer lists
matrix_t* d_weights;
vector_t* d_biases;
cudaSafeCall(cudaMalloc((void**)&d_weights, mdl->num_layer*sizeof(matrix_t*)));
cudaSafeCall(cudaMalloc((void**)&d_biases, mdl->num_layer*sizeof(vector_t*)));
// copy all weights and biases and update the lists with device pointers
for (unsigned int i=0; i<mdl->num_layer; i++) {
matrix_t d_m = h2d_matrix(mdl->weights_list[i]);
vector_t d_v = h2d_vector(mdl->bias_list[i]);
cudaSafeCall(cudaMemcpy(&d_weights[i], &d_m, sizeof(matrix_t), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(&d_biases[i], &d_v, sizeof(vector_t), cudaMemcpyHostToDevice));
}
cudaSafeCall(cudaMemcpy(&d_mdl->weights_list, &d_weights, sizeof(matrix_t*), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(&d_mdl->bias_list, &d_biases, sizeof(vector_t*), cudaMemcpyHostToDevice));
return d_mdl;
}
__host__ void device_free_model(model_t d_mdl)
{
    // copy to host the pointer lists, then free each element
matrix_t* d_weights;
vector_t* d_biases;
unsigned int num_layer;
cudaSafeCall(cudaMemcpy(&num_layer, &d_mdl->num_layer, sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&d_weights, &d_mdl->weights_list, sizeof(matrix_t*), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&d_biases, &d_mdl->bias_list, sizeof(vector_t*), cudaMemcpyDeviceToHost));
for (unsigned int i=0; i<num_layer; i++) {
matrix_t d_w;
vector_t d_b;
cudaSafeCall(cudaMemcpy(&d_w, &d_weights[i], sizeof(matrix_t), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&d_b, &d_biases[i], sizeof(vector_t), cudaMemcpyDeviceToHost));
device_free_matrix(d_w);
device_free_vector(d_b);
}
cudaSafeCall(cudaFree(d_weights));
cudaSafeCall(cudaFree(d_biases));
cudaSafeCall(cudaFree(d_mdl));
}
__host__ void test_device_mem_leak()
{
const unsigned int cycles = 1000;
vector_t vec = new_vector_pinned(1000, RAND_UNIFORM);
matrix_t mat = new_matrix_pinned(1000, 1000, RAND_UNIFORM);
model_t mdl = new_model_pinned(100, 20, RAND_UNIFORM);
printf("Device memory leak stress test: running %d cycles...\n", cycles);
for (unsigned int i=0; i<cycles; i++) {
vector_t d_vec = h2d_vector(vec);
vector_t h_vec = d2h_vector(d_vec);
assert_equal_vector(vec, h_vec);
free_vector_pinned(h_vec);
device_free_vector(d_vec);
matrix_t d_mat = h2d_matrix(mat);
matrix_t h_mat = d2h_matrix(d_mat);
assert_equal_matrix(mat, h_mat);
free_matrix_pinned(h_mat);
device_free_matrix(d_mat);
model_t d_mdl = h2d_model(mdl);
device_free_model(d_mdl);
printf("%d ", i); fflush(stdout);
}
free_vector_pinned(vec);
free_matrix_pinned(mat);
free_model_pinned(mdl);
printf("\n");
} |
3317986a0c3f5e6d06b724a0f06b3c37ddc061e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gMaxPoolingForward(float* out, int outRows, int outCols, float* in, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
} | 3317986a0c3f5e6d06b724a0f06b3c37ddc061e6.cu | #include "includes.h"
__global__ void gMaxPoolingForward(float* out, int outRows, int outCols, float* in, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
} |
1202102e4e87c693a8b7f6640c3b36bc380b343c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* 1D DWT for Haar wavelet and signals with a length which is a power of 2.
* The code reduces bank conflicts and non-coalesced reads / writes as
* appropriate but does not fully remove them because the computational
 * overhead to achieve this would outweigh the benefit (see inline comments
* for more details).
* Large signals are subdivided into sub-signals with 512 elements and the
* wavelet transform for these is computed with one block over 10 decomposition
* levels. The resulting signal consisting of the approximation coefficients at
* level X is then processed in a subsequent step on the device. This requires
 * interblock synchronization which is only possible on host side.
* Detail coefficients which have been computed are not further referenced
* during the decomposition so that they can be stored directly in their final
* position in global memory. The transform and its storing scheme preserve
* locality in the coefficients so that these writes are coalesced.
* Approximation coefficients are stored in shared memory because they are
* needed to compute the subsequent decomposition step. The top most
* approximation coefficient for a sub-signal processed by one block is stored
* in a special global memory location to simplify the processing after the
* interblock synchronization.
 * Most books on wavelets explain the Haar wavelet decomposition. A good freely
* available resource is the Wavelet primer by Stollnitz et al.
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet1.pdf
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet2.pdf
 * The basis of all wavelet transforms is to decompose a signal into
* approximation (a) and detail (d) coefficients where the detail tends to be
* small or zero which allows / simplifies compression. The following "graphs"
* demonstrate the transform for a signal
* of length eight. The index always describes the decomposition level where
* a coefficient arises. The input signal is interpreted as approximation signal
* at level 0. The coefficients computed on the device are stored in the same
 * scheme as in the example. This data structure is particularly well suited for
 * compression and also preserves the hierarchical structure of the decomposition.
-------------------------------------------------
| a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 |
-------------------------------------------------
-------------------------------------------------
| a_1 | a_1 | a_1 | a_1 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
-------------------------------------------------
| a_2 | a_2 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
-------------------------------------------------
| a_3 | d_3 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
* Host code.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <prof.cu>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
#include <cutil_inline.h>
// constants which are used in host and device code
#define INV_SQRT_2 0.70710678118654752440f;
const unsigned int LOG_NUM_BANKS = 4;
const unsigned int NUM_BANKS = 16;
////////////////////////////////////////////////////////////////////////////////
// includes, kernels
#include <dwtHaar1D_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
CUTBoolean getLevels( unsigned int len, unsigned int* levels);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
GpuProfiling::initProf();
// run test
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Perform the wavelet decomposition
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
char* s_fname;
char* r_fname;
char* r_gold_fname;
const char usage[] =
{
"\nUsage:\n"
" dwtHaar1D --signal=<signal_file> --result=<result_file> --gold=<gold_file>\n\n"
" <signal_file> Input file containing the signal\n"
" <result_file> Output file storing the result of the wavelet decomposition\n"
" <gold_file> Input file containing the reference result of the wavelet decomposition\n"
"\nExample:\n"
" bin\\win32\\release\\dwtHaar1D\n"
" --signal=projects\\dwtHaar1D\\data\\signal.dat\n"
" --result=projects\\dwtHaar1D\\data\\regression.dat\n"
" --gold=projects\\dwtHaar1D\\data\\regression.gold.dat\n"
};
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
hipSetDevice( cutGetMaxGflopsDeviceId() );
// file names, either specified as cmd line args or use default
if( argc == 4)
{
if ((cutGetCmdLineArgumentstr(argc, (const char**)argv, "signal", &s_fname) != CUTTrue) ||
(cutGetCmdLineArgumentstr(argc, (const char**)argv, "result", &r_fname) != CUTTrue) ||
(cutGetCmdLineArgumentstr(argc, (const char**)argv, "gold", &r_gold_fname) != CUTTrue))
{
fprintf(stderr, "Invalid input syntax.\n%s", usage);
hipDeviceReset();
exit(1);
}
}
else
{
s_fname = cutFindFilePath("signal.dat", argv[0]);
r_fname = cutFindFilePath("regression.dat", argv[0]);
r_gold_fname = cutFindFilePath("regression.gold.dat", argv[0]);
}
// read in signal
unsigned int slength = 0;
float* signal = NULL;
if (s_fname == 0)
{
fprintf(stderr, "Cannot find the file containing the signal.\n%s", usage);
hipDeviceReset();
exit(1);
}
if (cutReadFilef( s_fname, &signal, &slength) == CUTTrue) {
printf("Reading signal from %s\n", s_fname);
} else {
hipDeviceReset();
exit(1);
}
// get the number of decompositions necessary to perform a full decomposition
unsigned int dlevels_complete = 0;
if( CUTTrue != getLevels( slength, &dlevels_complete))
{
// error message
fprintf( stderr, "Signal length not supported.\n");
// cleanup and abort
cutFree( signal);
return;
}
// device in data
float* d_idata = NULL;
// device out data
float* d_odata = NULL;
// device approx_final data
float* approx_final = NULL;
// The very final approximation coefficient has to be written to the output
// data, all others are reused as input data in the next global step and
// therefore have to be written to the input data again.
// The following flag indicates where to copy approx_final data
// - 0 is input, 1 is output
int approx_is_input;
// allocate device mem
const unsigned int smem_size = sizeof(float) * slength;
cutilSafeCall( hipMalloc( (void**) &d_idata, smem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, smem_size));
cutilSafeCall( hipMalloc( (void**) &approx_final, smem_size));
// copy input data to device
cutilSafeCall( hipMemcpy( d_idata, signal, smem_size,
hipMemcpyHostToDevice) );
// clear result memory
float* tmp = (float*) malloc( smem_size);
for( unsigned int i = 0; i < slength; ++i)
{
tmp[i] = 0.0;
}
cutilSafeCall( hipMemcpy( d_odata, tmp, smem_size,
hipMemcpyHostToDevice) );
free( tmp);
// total number of threads
// in the first decomposition step always one thread computes the average and
// detail signal for one pair of adjacent values
unsigned int num_threads_total_left = slength / 2;
// decomposition levels performed in the current / next step
unsigned int dlevels_step = dlevels_complete;
// 1D signal so the arrangement of elements is also 1D
dim3 block_size;
dim3 grid_size;
// number of decomposition levels left after one iteration on the device
unsigned int dlevels_left = dlevels_complete;
    // if less than or equal to 1k elements, the data can be processed in one block;
    // this avoids the Wait-For-Idle (WFI) on the host side, which is necessary if the
    // computation is split across multiple SMs when there is enough input data
if( dlevels_complete <= 10)
{
// decomposition can be performed at once
block_size.x = num_threads_total_left;
approx_is_input = 0;
}
else
{
// 512 threads per block
grid_size.x = (num_threads_total_left / 512);
block_size.x = 512;
// 512 threads corresponds to 10 decomposition steps
dlevels_step = 10;
dlevels_left -= 10;
approx_is_input = 1;
}
// do until full decomposition is accomplished
while( 0 != num_threads_total_left)
{
// double the number of threads as bytes
unsigned int mem_shared = (2 * block_size.x) * sizeof( float);
// extra memory requirements to avoid bank conflicts
mem_shared += ((2 * block_size.x) / NUM_BANKS) * sizeof( float);
// run kernel
GpuProfiling::prepareProfiling( grid_size, block_size, mem_shared );
hipLaunchKernelGGL(( dwtHaar1D), dim3(grid_size), dim3(block_size), mem_shared , 0, d_idata, d_odata,
approx_final,
dlevels_step,
num_threads_total_left,
block_size.x );
GpuProfiling::addResults("dwtHaar1D");
// Copy approx_final to appropriate location
if (approx_is_input)
{
cutilSafeCall (hipMemcpy (d_idata, approx_final, grid_size.x * 4,
hipMemcpyDeviceToDevice) );
}
else
{
cutilSafeCall (hipMemcpy (d_odata, approx_final, grid_size.x * 4,
hipMemcpyDeviceToDevice) );
}
// update level variables
if( dlevels_left < 10)
{
// approx_final = d_odata;
approx_is_input = 0;
}
// more global steps necessary
dlevels_step = (dlevels_left > 10) ? dlevels_left - 10 : dlevels_left;
dlevels_left -= 10;
// after each step only half the threads are used any longer
// therefore after 10 steps 2^10 less threads
num_threads_total_left = num_threads_total_left >> 10;
// update block and grid size
        grid_size.x = (num_threads_total_left / 512)
                      + ((0 != (num_threads_total_left % 512)) ? 1 : 0);
if( grid_size.x <= 1)
{
block_size.x = num_threads_total_left;
}
}
// get the result back from the server
// allocate mem for the result
float* odata = (float*) malloc( smem_size);
cutilSafeCall( hipMemcpy( odata, d_odata, smem_size,
hipMemcpyDeviceToHost));
// post processing
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
if (r_fname == 0) {
fprintf(stderr, "Cannot write the output file storing the result of the wavelet decomposition.\n%s", usage);
hipDeviceReset();
exit(1);
}
if (cutWriteFilef( r_fname, odata, slength, 0.001f, false) == CUTTrue)
printf("Writing result to %s\n", r_fname);
else {
hipDeviceReset();
exit(1);
}
}
else
{
// load the reference solution
unsigned int len_reference = 0;
float* reference = NULL;
if (r_gold_fname == 0)
{
fprintf(stderr, "Cannot read the file containing the reference result of the wavelet decomposition.\n%s", usage);
hipDeviceReset();
exit(1);
}
if (cutReadFilef( r_gold_fname, &reference, &len_reference) == CUTTrue)
printf("Reading reference result from %s\n", r_gold_fname);
else {
hipDeviceReset();
exit(1);
}
cutilCondition( slength == len_reference);
// compare the computed solution and the reference
CUTBoolean res = cutComparefe( reference, odata, slength, 0.001f);
printf( "%s\n", (1 == res) ? "PASSED" : "FAILED");
GpuProfiling::printResults();
cutFree( reference);
}
// free allocated host and device memory
cutilSafeCall(hipFree(d_odata));
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(approx_final));
cutFree( signal);
free( odata);
cutFree( s_fname);
cutFree( r_fname);
cutFree( r_gold_fname);
hipDeviceReset();
exit(0);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Get number of decomposition levels to perform a full decomposition
//! Also check if the input signal size is suitable
//! @return CUTTrue if the number of decomposition levels could be determined
//! and the signal length is supported by the implementation,
//! otherwise CUTFalse
//! @param len length of input signal
//! @param levels number of decomposition levels necessary to perform a full
//! decomposition
////////////////////////////////////////////////////////////////////////////////
CUTBoolean
getLevels( unsigned int len, unsigned int* levels)
{
CUTBoolean retval = CUTFalse;
// currently signals up to a length of 2^20 supported
for( unsigned int i = 0; i < 20; ++i)
{
if( len == (1 << i))
{
*levels = i;
retval = CUTTrue;
break;
}
}
return retval;
}
| 1202102e4e87c693a8b7f6640c3b36bc380b343c.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/*
* 1D DWT for Haar wavelet and signals with a length which is a power of 2.
* The code reduces bank conflicts and non-coalesced reads / writes as
* appropriate but does not fully remove them because the computational
 * overhead to achieve this would outweigh the benefit (see inline comments
* for more details).
* Large signals are subdivided into sub-signals with 512 elements and the
* wavelet transform for these is computed with one block over 10 decomposition
* levels. The resulting signal consisting of the approximation coefficients at
* level X is then processed in a subsequent step on the device. This requires
 * interblock synchronization which is only possible on host side.
* Detail coefficients which have been computed are not further referenced
* during the decomposition so that they can be stored directly in their final
* position in global memory. The transform and its storing scheme preserve
* locality in the coefficients so that these writes are coalesced.
* Approximation coefficients are stored in shared memory because they are
* needed to compute the subsequent decomposition step. The top most
* approximation coefficient for a sub-signal processed by one block is stored
* in a special global memory location to simplify the processing after the
* interblock synchronization.
 * Most books on wavelets explain the Haar wavelet decomposition. A good freely
* available resource is the Wavelet primer by Stollnitz et al.
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet1.pdf
* http://grail.cs.washington.edu/projects/wavelets/article/wavelet2.pdf
 * The basis of all wavelet transforms is to decompose a signal into
* approximation (a) and detail (d) coefficients where the detail tends to be
* small or zero which allows / simplifies compression. The following "graphs"
* demonstrate the transform for a signal
* of length eight. The index always describes the decomposition level where
* a coefficient arises. The input signal is interpreted as approximation signal
* at level 0. The coefficients computed on the device are stored in the same
 * scheme as in the example. This data structure is particularly well suited for
 * compression and also preserves the hierarchical structure of the decomposition.
-------------------------------------------------
| a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 | a_0 |
-------------------------------------------------
-------------------------------------------------
| a_1 | a_1 | a_1 | a_1 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
-------------------------------------------------
| a_2 | a_2 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
-------------------------------------------------
| a_3 | d_3 | d_2 | d_2 | d_1 | d_1 | d_1 | d_1 |
-------------------------------------------------
* Host code.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <prof.cu>
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
//#include <cutil_inline.h>
#include <cutil_inline.h>
// constants which are used in host and device code
#define INV_SQRT_2 0.70710678118654752440f;
const unsigned int LOG_NUM_BANKS = 4;
const unsigned int NUM_BANKS = 16;
////////////////////////////////////////////////////////////////////////////////
// includes, kernels
#include <dwtHaar1D_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
CUTBoolean getLevels( unsigned int len, unsigned int* levels);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
GpuProfiling::initProf();
// run test
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Perform the wavelet decomposition
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
char* s_fname;
char* r_fname;
char* r_gold_fname;
const char usage[] =
{
"\nUsage:\n"
" dwtHaar1D --signal=<signal_file> --result=<result_file> --gold=<gold_file>\n\n"
" <signal_file> Input file containing the signal\n"
" <result_file> Output file storing the result of the wavelet decomposition\n"
" <gold_file> Input file containing the reference result of the wavelet decomposition\n"
"\nExample:\n"
" bin\\win32\\release\\dwtHaar1D\n"
" --signal=projects\\dwtHaar1D\\data\\signal.dat\n"
" --result=projects\\dwtHaar1D\\data\\regression.dat\n"
" --gold=projects\\dwtHaar1D\\data\\regression.gold.dat\n"
};
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") )
cutilDeviceInit(argc, argv);
else
cudaSetDevice( cutGetMaxGflopsDeviceId() );
// file names, either specified as cmd line args or use default
if( argc == 4)
{
if ((cutGetCmdLineArgumentstr(argc, (const char**)argv, "signal", &s_fname) != CUTTrue) ||
(cutGetCmdLineArgumentstr(argc, (const char**)argv, "result", &r_fname) != CUTTrue) ||
(cutGetCmdLineArgumentstr(argc, (const char**)argv, "gold", &r_gold_fname) != CUTTrue))
{
fprintf(stderr, "Invalid input syntax.\n%s", usage);
cudaThreadExit();
exit(1);
}
}
else
{
s_fname = cutFindFilePath("signal.dat", argv[0]);
r_fname = cutFindFilePath("regression.dat", argv[0]);
r_gold_fname = cutFindFilePath("regression.gold.dat", argv[0]);
}
// read in signal
unsigned int slength = 0;
float* signal = NULL;
if (s_fname == 0)
{
fprintf(stderr, "Cannot find the file containing the signal.\n%s", usage);
cudaThreadExit();
exit(1);
}
if (cutReadFilef( s_fname, &signal, &slength) == CUTTrue) {
printf("Reading signal from %s\n", s_fname);
} else {
cudaThreadExit();
exit(1);
}
// get the number of decompositions necessary to perform a full decomposition
unsigned int dlevels_complete = 0;
if( CUTTrue != getLevels( slength, &dlevels_complete))
{
// error message
fprintf( stderr, "Signal length not supported.\n");
// cleanup and abort
cutFree( signal);
return;
}
// device in data
float* d_idata = NULL;
// device out data
float* d_odata = NULL;
// device approx_final data
float* approx_final = NULL;
// The very final approximation coefficient has to be written to the output
// data, all others are reused as input data in the next global step and
// therefore have to be written to the input data again.
// The following flag indicates where to copy approx_final data
// - 0 is input, 1 is output
int approx_is_input;
// allocate device mem
const unsigned int smem_size = sizeof(float) * slength;
cutilSafeCall( cudaMalloc( (void**) &d_idata, smem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, smem_size));
cutilSafeCall( cudaMalloc( (void**) &approx_final, smem_size));
// copy input data to device
cutilSafeCall( cudaMemcpy( d_idata, signal, smem_size,
cudaMemcpyHostToDevice) );
// clear result memory
float* tmp = (float*) malloc( smem_size);
for( unsigned int i = 0; i < slength; ++i)
{
tmp[i] = 0.0;
}
cutilSafeCall( cudaMemcpy( d_odata, tmp, smem_size,
cudaMemcpyHostToDevice) );
free( tmp);
// total number of threads
// in the first decomposition step always one thread computes the average and
// detail signal for one pair of adjacent values
unsigned int num_threads_total_left = slength / 2;
// decomposition levels performed in the current / next step
unsigned int dlevels_step = dlevels_complete;
// 1D signal so the arrangement of elements is also 1D
dim3 block_size;
dim3 grid_size;
// number of decomposition levels left after one iteration on the device
unsigned int dlevels_left = dlevels_complete;
    // if less than or equal to 1k elements, the data can be processed in one block;
    // this avoids the Wait-For-Idle (WFI) on the host side, which is necessary if the
    // computation is split across multiple SMs when there is enough input data
if( dlevels_complete <= 10)
{
// decomposition can be performed at once
block_size.x = num_threads_total_left;
approx_is_input = 0;
}
else
{
// 512 threads per block
grid_size.x = (num_threads_total_left / 512);
block_size.x = 512;
// 512 threads corresponds to 10 decomposition steps
dlevels_step = 10;
dlevels_left -= 10;
approx_is_input = 1;
}
// do until full decomposition is accomplished
while( 0 != num_threads_total_left)
{
// double the number of threads as bytes
unsigned int mem_shared = (2 * block_size.x) * sizeof( float);
// extra memory requirements to avoid bank conflicts
mem_shared += ((2 * block_size.x) / NUM_BANKS) * sizeof( float);
// run kernel
GpuProfiling::prepareProfiling( grid_size, block_size, mem_shared );
dwtHaar1D<<<grid_size, block_size, mem_shared >>>( d_idata, d_odata,
approx_final,
dlevels_step,
num_threads_total_left,
block_size.x );
GpuProfiling::addResults("dwtHaar1D");
// Copy approx_final to appropriate location
if (approx_is_input)
{
cutilSafeCall (cudaMemcpy (d_idata, approx_final, grid_size.x * 4,
cudaMemcpyDeviceToDevice) );
}
else
{
cutilSafeCall (cudaMemcpy (d_odata, approx_final, grid_size.x * 4,
cudaMemcpyDeviceToDevice) );
}
// update level variables
if( dlevels_left < 10)
{
// approx_final = d_odata;
approx_is_input = 0;
}
// more global steps necessary
dlevels_step = (dlevels_left > 10) ? dlevels_left - 10 : dlevels_left;
dlevels_left -= 10;
// after each step only half the threads are used any longer
// therefore after 10 steps 2^10 less threads
num_threads_total_left = num_threads_total_left >> 10;
// update block and grid size
    grid_size.x = (num_threads_total_left / 512)
                  + ((0 != (num_threads_total_left % 512)) ? 1 : 0);
if( grid_size.x <= 1)
{
block_size.x = num_threads_total_left;
}
}
// get the result back from the server
// allocate mem for the result
float* odata = (float*) malloc( smem_size);
cutilSafeCall( cudaMemcpy( odata, d_odata, smem_size,
cudaMemcpyDeviceToHost));
// post processing
if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression"))
{
// write file for regression test
if (r_fname == 0) {
fprintf(stderr, "Cannot write the output file storing the result of the wavelet decomposition.\n%s", usage);
cudaThreadExit();
exit(1);
}
if (cutWriteFilef( r_fname, odata, slength, 0.001f, false) == CUTTrue)
printf("Writing result to %s\n", r_fname);
else {
cudaThreadExit();
exit(1);
}
}
else
{
// load the reference solution
unsigned int len_reference = 0;
float* reference = NULL;
if (r_gold_fname == 0)
{
fprintf(stderr, "Cannot read the file containing the reference result of the wavelet decomposition.\n%s", usage);
cudaThreadExit();
exit(1);
}
if (cutReadFilef( r_gold_fname, &reference, &len_reference) == CUTTrue)
printf("Reading reference result from %s\n", r_gold_fname);
else {
cudaThreadExit();
exit(1);
}
cutilCondition( slength == len_reference);
// compare the computed solution and the reference
CUTBoolean res = cutComparefe( reference, odata, slength, 0.001f);
printf( "%s\n", (1 == res) ? "PASSED" : "FAILED");
GpuProfiling::printResults();
cutFree( reference);
}
// free allocated host and device memory
cutilSafeCall(cudaFree(d_odata));
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(approx_final));
cutFree( signal);
free( odata);
cutFree( s_fname);
cutFree( r_fname);
cutFree( r_gold_fname);
cudaThreadExit();
exit(0);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Get number of decomposition levels to perform a full decomposition
//! Also check if the input signal size is suitable
//! @return CUTTrue if the number of decomposition levels could be determined
//! and the signal length is supported by the implementation,
//! otherwise CUTFalse
//! @param len length of input signal
//! @param levels number of decomposition levels necessary to perform a full
//! decomposition
////////////////////////////////////////////////////////////////////////////////
CUTBoolean
getLevels( unsigned int len, unsigned int* levels)
{
CUTBoolean retval = CUTFalse;
// currently signals up to a length of 2^20 supported
for( unsigned int i = 0; i < 20; ++i)
{
if( len == (1 << i))
{
*levels = i;
retval = CUTTrue;
break;
}
}
return retval;
}
|
bc2c82e7e0c2eb52a96afaf0169d2b631701a798.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main(int argc, char **argv){
hipDeviceProp_t dP;
int rc = hipGetDeviceProperties(&dP, 0);
if(rc != hipSuccess) {
hipError_t error = hipGetLastError();
printf("CUDA error: %s", hipGetErrorString(error));
return rc; /* Failure */
}
printf("%d%d", dP.major, dP.minor);
return 0;
}
| bc2c82e7e0c2eb52a96afaf0169d2b631701a798.cu | #include <stdio.h>
int main(int argc, char **argv){
cudaDeviceProp dP;
int rc = cudaGetDeviceProperties(&dP, 0);
if(rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure */
}
printf("%d%d", dP.major, dP.minor);
return 0;
}
|
99d1248fd4cd4b53b06182c40ae6e1ed079dd847.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <typename T>
__global__
void calculate_cornerness_cuda_kernel(T* gx_integral,T* gy_integral,T* gxy_integral,T* cornerness_out,float k_param,int heightImage,int widthImage,int kernel_size)
{
unsigned int x=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = x * widthImage + y;
if(y>kernel_size/2+1&&y<widthImage-kernel_size/2&&x>kernel_size/2+1&&x<heightImage-kernel_size/2){
T gxD=gx_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gxC=gx_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gxB=gx_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gxA=gx_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gx=gxA+gxD-gxB-gxC;
T gyD=gy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gyC=gy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gyB=gy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gyA=gy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gy=gyA+gyD-gyB-gyC;
T gxyD=gxy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gxyC=gxy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gxyB=gxy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gxyA=gxy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gxy=gxyA+gxyD-gxyB-gxyC;
T det=sum_gx*sum_gy-(sum_gxy*sum_gxy);
T trace=sum_gx+sum_gy;
cornerness_out[index]=det-k_param*(trace*trace);
if (cornerness_out[index] < 1 )
cornerness_out[index] = 0;
}
else{cornerness_out[index]=0;}
}
void calculate_cornerness_cuda( float * gx_integral, float * gy_integral, float * gxy_integral, float * cornerness_out,float k,int mask_size,int heightImage,int widthImage,int cuda_threads ,hipStream_t stream)
{
dim3 block( cuda_threads,cuda_threads, 1);
dim3 grid( heightImage/ block.x,widthImage / block.y, 1);
hipLaunchKernelGGL(( calculate_cornerness_cuda_kernel), dim3(grid),dim3(block),0,stream, gx_integral, gy_integral, gxy_integral, cornerness_out, k, heightImage, widthImage, mask_size);
}
| 99d1248fd4cd4b53b06182c40ae6e1ed079dd847.cu | template <typename T>
__global__
void calculate_cornerness_cuda_kernel(T* gx_integral,T* gy_integral,T* gxy_integral,T* cornerness_out,float k_param,int heightImage,int widthImage,int kernel_size)
{
unsigned int x=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int y=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int index = x * widthImage + y;
if(y>kernel_size/2+1&&y<widthImage-kernel_size/2&&x>kernel_size/2+1&&x<heightImage-kernel_size/2){
T gxD=gx_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gxC=gx_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gxB=gx_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gxA=gx_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gx=gxA+gxD-gxB-gxC;
T gyD=gy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gyC=gy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gyB=gy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gyA=gy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gy=gyA+gyD-gyB-gyC;
T gxyD=gxy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
T gxyC=gxy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
T gxyB=gxy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
T gxyA=gxy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
T sum_gxy=gxyA+gxyD-gxyB-gxyC;
T det=sum_gx*sum_gy-(sum_gxy*sum_gxy);
T trace=sum_gx+sum_gy;
cornerness_out[index]=det-k_param*(trace*trace);
if (cornerness_out[index] < 1 )
cornerness_out[index] = 0;
}
else{cornerness_out[index]=0;}
}
void calculate_cornerness_cuda( float * gx_integral, float * gy_integral, float * gxy_integral, float * cornerness_out,float k,int mask_size,int heightImage,int widthImage,int cuda_threads ,cudaStream_t stream)
{
dim3 block( cuda_threads,cuda_threads, 1);
dim3 grid( heightImage/ block.x,widthImage / block.y, 1);
calculate_cornerness_cuda_kernel<<<grid,block,0,stream>>>( gx_integral, gy_integral, gxy_integral, cornerness_out, k, heightImage, widthImage, mask_size);
}
|
cd884b617df542a2da5b62bcb114e02cbcd40c1a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file compare.cu
 * @brief Implementation of CUDA array comparison
* @author HIKARU KONDO
* @date 2021/09/10
*/
#include "transpose.cuh"
#include <stdio.h>
#include "hip/hip_runtime.h"
#define BLOCKDIM 256
/**
* TODO Doc
**/
template<typename T>
__global__ void transpose_kernel(T *x, T *y, int size, const int *index_array) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size) { return; }
int transpose_idx = index_array[idx];
y[transpose_idx] = x[idx];
}
template<typename T>
void transpose(T *x, T *y, int size, const int *index_array) {
dim3 blockDim(BLOCKDIM);
dim3 gridDim((size + blockDim.x - 1) / blockDim.x);
int *_index_array;
hipMalloc(&_index_array, size * sizeof(int));
hipMemcpy(_index_array, index_array, size * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( transpose_kernel) , dim3(gridDim), dim3(blockDim) , 0, 0, x, y, size, _index_array);
hipFree(_index_array);
}
void floatTranspose(float *x, float *y, int size, const int *index_array) {
transpose(x, y, size, index_array);
}
void doubleTranspose(double *x, double *y, int size, const int *index_array) {
transpose(x, y, size, index_array);
}
| cd884b617df542a2da5b62bcb114e02cbcd40c1a.cu | /**
* @file compare.cu
 * @brief Implementation of CUDA array comparison
* @author HIKARU KONDO
* @date 2021/09/10
*/
#include "transpose.cuh"
#include <stdio.h>
#include "cuda.h"
#define BLOCKDIM 256
/**
* TODO Doc
**/
template<typename T>
__global__ void transpose_kernel(T *x, T *y, int size, const int *index_array) {
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= size) { return; }
int transpose_idx = index_array[idx];
y[transpose_idx] = x[idx];
}
template<typename T>
void transpose(T *x, T *y, int size, const int *index_array) {
dim3 blockDim(BLOCKDIM);
dim3 gridDim((size + blockDim.x - 1) / blockDim.x);
int *_index_array;
cudaMalloc(&_index_array, size * sizeof(int));
cudaMemcpy(_index_array, index_array, size * sizeof(int), cudaMemcpyHostToDevice);
transpose_kernel <<< gridDim, blockDim >>> (x, y, size, _index_array);
cudaFree(_index_array);
}
void floatTranspose(float *x, float *y, int size, const int *index_array) {
transpose(x, y, size, index_array);
}
void doubleTranspose(double *x, double *y, int size, const int *index_array) {
transpose(x, y, size, index_array);
}
|
45e0cccc645a85b634af9ae575ddee7b471c9199.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron
template hipError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| 45e0cccc645a85b634af9ae575ddee7b471c9199.cu | // Copyright (c) 2009-2016 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeSpheropolyhedron
template cudaError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args,
const typename ShapeSpheropolyhedron ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|