hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
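Each row pairs a hipify-generated HIP source file with the CUDA source it was produced from. The sketch below shows one possible in-memory representation of a single row; the struct name and the idea of holding a row this way are illustrative assumptions, and only the field names and length ranges come from the header above.

#include <string>

// Hypothetical record layout for one row of this dataset.
struct HipifyPair {
  std::string hip_filename;   // 5-84 characters
  std::string hip_content;    // 79 characters to 9.69M characters
  std::string cuda_filename;  // 4-83 characters
  std::string cuda_content;   // 19 characters to 9.69M characters
};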
d5718290a096e7f0ed809cd85d37aac06f65ddf8.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include "../../../src/common/device_helpers.cuh"
#include <xgboost/host_device_vector.h>
namespace xgboost {
namespace common {
namespace {
void SetDeviceForTest(int device) {
int n_devices;
dh::safe_cuda(hipGetDeviceCount(&n_devices));
device %= n_devices;
dh::safe_cuda(hipSetDevice(device));
}
} // namespace
struct HostDeviceVectorSetDeviceHandler {
template <typename Functor>
explicit HostDeviceVectorSetDeviceHandler(Functor f) {
SetCudaSetDeviceHandler(f);
}
~HostDeviceVectorSetDeviceHandler() {
SetCudaSetDeviceHandler(nullptr);
}
};
void InitHostDeviceVector(size_t n, int device, HostDeviceVector<int> *v) {
// create the vector
v->SetDevice(device);
v->Resize(n);
ASSERT_EQ(v->Size(), n);
ASSERT_EQ(v->DeviceIdx(), device);
// ensure that the device has read-write access
ASSERT_TRUE(v->DeviceCanRead());
ASSERT_TRUE(v->DeviceCanWrite());
// ensure that the host has no access
ASSERT_FALSE(v->HostCanRead());
ASSERT_FALSE(v->HostCanWrite());
// fill in the data on the host
std::vector<int>& data_h = v->HostVector();
// ensure that the host has full access, while the device has none
ASSERT_TRUE(v->HostCanRead());
ASSERT_TRUE(v->HostCanWrite());
ASSERT_FALSE(v->DeviceCanRead());
ASSERT_FALSE(v->DeviceCanWrite());
ASSERT_EQ(data_h.size(), n);
std::copy_n(thrust::make_counting_iterator(0), n, data_h.begin());
}
void PlusOne(HostDeviceVector<int> *v) {
int device = v->DeviceIdx();
SetDeviceForTest(device);
thrust::transform(dh::tcbegin(*v), dh::tcend(*v), dh::tbegin(*v),
[=]__device__(unsigned int a){ return a + 1; });
ASSERT_TRUE(v->DeviceCanWrite());
}
void CheckDevice(HostDeviceVector<int>* v,
size_t size,
unsigned int first,
GPUAccess access) {
ASSERT_EQ(v->Size(), size);
SetDeviceForTest(v->DeviceIdx());
ASSERT_TRUE(thrust::equal(dh::tcbegin(*v), dh::tcend(*v),
thrust::make_counting_iterator(first)));
ASSERT_TRUE(v->DeviceCanRead());
// ensure that the device has at most the access specified by access
ASSERT_EQ(v->DeviceCanWrite(), access == GPUAccess::kWrite);
ASSERT_EQ(v->HostCanRead(), access == GPUAccess::kRead);
ASSERT_FALSE(v->HostCanWrite());
ASSERT_TRUE(thrust::equal(dh::tbegin(*v), dh::tend(*v),
thrust::make_counting_iterator(first)));
ASSERT_TRUE(v->DeviceCanRead());
ASSERT_TRUE(v->DeviceCanWrite());
ASSERT_FALSE(v->HostCanRead());
ASSERT_FALSE(v->HostCanWrite());
}
void CheckHost(HostDeviceVector<int> *v, GPUAccess access) {
const std::vector<int>& data_h = access == GPUAccess::kNone ?
v->HostVector() : v->ConstHostVector();
for (size_t i = 0; i < v->Size(); ++i) {
ASSERT_EQ(data_h.at(i), i + 1);
}
ASSERT_TRUE(v->HostCanRead());
ASSERT_EQ(v->HostCanWrite(), access == GPUAccess::kNone);
ASSERT_EQ(v->DeviceCanRead(), access == GPUAccess::kRead);
// the devices should have no write access
ASSERT_FALSE(v->DeviceCanWrite());
}
void TestHostDeviceVector(size_t n, int device) {
HostDeviceVectorSetDeviceHandler hdvec_dev_hndlr(SetDevice);
HostDeviceVector<int> v;
InitHostDeviceVector(n, device, &v);
CheckDevice(&v, n, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, n, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kNone);
}
TEST(HostDeviceVector, Basic) {
size_t n = 1001;
int device = 0;
TestHostDeviceVector(n, device);
}
TEST(HostDeviceVector, Copy) {
size_t n = 1001;
int device = 0;
HostDeviceVectorSetDeviceHandler hdvec_dev_hndlr(SetDevice);
HostDeviceVector<int> v;
{
// a separate scope to ensure that v1 is gone before further checks
HostDeviceVector<int> v1;
InitHostDeviceVector(n, device, &v1);
v.Resize(v1.Size());
v.Copy(v1);
}
CheckDevice(&v, n, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, n, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kNone);
}
TEST(HostDeviceVector, SetDevice) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto device = 0;
vec.SetDevice(device);
ASSERT_EQ(vec.Size(), h_vec.size());
auto span = vec.DeviceSpan(); // sync to device
vec.SetDevice(-1); // pull back to cpu.
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_EQ(vec.DeviceIdx(), -1);
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
TEST(HostDeviceVector, Span) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
vec.SetDevice(0);
auto span = vec.DeviceSpan();
ASSERT_EQ(vec.Size(), span.size());
ASSERT_EQ(vec.DevicePointer(), span.data());
auto const_span = vec.ConstDeviceSpan();
ASSERT_EQ(vec.Size(), const_span.size());
ASSERT_EQ(vec.ConstDevicePointer(), const_span.data());
auto h_span = vec.ConstHostSpan();
ASSERT_TRUE(vec.HostCanRead());
ASSERT_FALSE(vec.HostCanWrite());
ASSERT_EQ(h_span.size(), vec.Size());
ASSERT_EQ(h_span.data(), vec.ConstHostPointer());
h_span = vec.HostSpan();
ASSERT_TRUE(vec.HostCanWrite());
}
TEST(HostDeviceVector, Empty) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
HostDeviceVector<float> another { std::move(vec) };
ASSERT_FALSE(another.Empty());
ASSERT_TRUE(vec.Empty());
}
} // namespace common
} // namespace xgboost
| d5718290a096e7f0ed809cd85d37aac06f65ddf8.cu | /*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/equal.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include "../../../src/common/device_helpers.cuh"
#include <xgboost/host_device_vector.h>
namespace xgboost {
namespace common {
namespace {
void SetDeviceForTest(int device) {
int n_devices;
dh::safe_cuda(cudaGetDeviceCount(&n_devices));
device %= n_devices;
dh::safe_cuda(cudaSetDevice(device));
}
} // namespace
struct HostDeviceVectorSetDeviceHandler {
template <typename Functor>
explicit HostDeviceVectorSetDeviceHandler(Functor f) {
SetCudaSetDeviceHandler(f);
}
~HostDeviceVectorSetDeviceHandler() {
SetCudaSetDeviceHandler(nullptr);
}
};
void InitHostDeviceVector(size_t n, int device, HostDeviceVector<int> *v) {
// create the vector
v->SetDevice(device);
v->Resize(n);
ASSERT_EQ(v->Size(), n);
ASSERT_EQ(v->DeviceIdx(), device);
// ensure that the device has read-write access
ASSERT_TRUE(v->DeviceCanRead());
ASSERT_TRUE(v->DeviceCanWrite());
// ensure that the host has no access
ASSERT_FALSE(v->HostCanRead());
ASSERT_FALSE(v->HostCanWrite());
// fill in the data on the host
std::vector<int>& data_h = v->HostVector();
// ensure that the host has full access, while the device has none
ASSERT_TRUE(v->HostCanRead());
ASSERT_TRUE(v->HostCanWrite());
ASSERT_FALSE(v->DeviceCanRead());
ASSERT_FALSE(v->DeviceCanWrite());
ASSERT_EQ(data_h.size(), n);
std::copy_n(thrust::make_counting_iterator(0), n, data_h.begin());
}
void PlusOne(HostDeviceVector<int> *v) {
int device = v->DeviceIdx();
SetDeviceForTest(device);
thrust::transform(dh::tcbegin(*v), dh::tcend(*v), dh::tbegin(*v),
[=]__device__(unsigned int a){ return a + 1; });
ASSERT_TRUE(v->DeviceCanWrite());
}
void CheckDevice(HostDeviceVector<int>* v,
size_t size,
unsigned int first,
GPUAccess access) {
ASSERT_EQ(v->Size(), size);
SetDeviceForTest(v->DeviceIdx());
ASSERT_TRUE(thrust::equal(dh::tcbegin(*v), dh::tcend(*v),
thrust::make_counting_iterator(first)));
ASSERT_TRUE(v->DeviceCanRead());
// ensure that the device has at most the access specified by access
ASSERT_EQ(v->DeviceCanWrite(), access == GPUAccess::kWrite);
ASSERT_EQ(v->HostCanRead(), access == GPUAccess::kRead);
ASSERT_FALSE(v->HostCanWrite());
ASSERT_TRUE(thrust::equal(dh::tbegin(*v), dh::tend(*v),
thrust::make_counting_iterator(first)));
ASSERT_TRUE(v->DeviceCanRead());
ASSERT_TRUE(v->DeviceCanWrite());
ASSERT_FALSE(v->HostCanRead());
ASSERT_FALSE(v->HostCanWrite());
}
void CheckHost(HostDeviceVector<int> *v, GPUAccess access) {
const std::vector<int>& data_h = access == GPUAccess::kNone ?
v->HostVector() : v->ConstHostVector();
for (size_t i = 0; i < v->Size(); ++i) {
ASSERT_EQ(data_h.at(i), i + 1);
}
ASSERT_TRUE(v->HostCanRead());
ASSERT_EQ(v->HostCanWrite(), access == GPUAccess::kNone);
ASSERT_EQ(v->DeviceCanRead(), access == GPUAccess::kRead);
// the devices should have no write access
ASSERT_FALSE(v->DeviceCanWrite());
}
void TestHostDeviceVector(size_t n, int device) {
HostDeviceVectorSetDeviceHandler hdvec_dev_hndlr(SetDevice);
HostDeviceVector<int> v;
InitHostDeviceVector(n, device, &v);
CheckDevice(&v, n, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, n, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kNone);
}
TEST(HostDeviceVector, Basic) {
size_t n = 1001;
int device = 0;
TestHostDeviceVector(n, device);
}
TEST(HostDeviceVector, Copy) {
size_t n = 1001;
int device = 0;
HostDeviceVectorSetDeviceHandler hdvec_dev_hndlr(SetDevice);
HostDeviceVector<int> v;
{
// a separate scope to ensure that v1 is gone before further checks
HostDeviceVector<int> v1;
InitHostDeviceVector(n, device, &v1);
v.Resize(v1.Size());
v.Copy(v1);
}
CheckDevice(&v, n, 0, GPUAccess::kRead);
PlusOne(&v);
CheckDevice(&v, n, 1, GPUAccess::kWrite);
CheckHost(&v, GPUAccess::kRead);
CheckHost(&v, GPUAccess::kNone);
}
TEST(HostDeviceVector, SetDevice) {
std::vector<int> h_vec (2345);
for (size_t i = 0; i < h_vec.size(); ++i) {
h_vec[i] = i;
}
HostDeviceVector<int> vec (h_vec);
auto device = 0;
vec.SetDevice(device);
ASSERT_EQ(vec.Size(), h_vec.size());
auto span = vec.DeviceSpan(); // sync to device
vec.SetDevice(-1); // pull back to cpu.
ASSERT_EQ(vec.Size(), h_vec.size());
ASSERT_EQ(vec.DeviceIdx(), -1);
auto h_vec_1 = vec.HostVector();
ASSERT_TRUE(std::equal(h_vec_1.cbegin(), h_vec_1.cend(), h_vec.cbegin()));
}
TEST(HostDeviceVector, Span) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
vec.SetDevice(0);
auto span = vec.DeviceSpan();
ASSERT_EQ(vec.Size(), span.size());
ASSERT_EQ(vec.DevicePointer(), span.data());
auto const_span = vec.ConstDeviceSpan();
ASSERT_EQ(vec.Size(), const_span.size());
ASSERT_EQ(vec.ConstDevicePointer(), const_span.data());
auto h_span = vec.ConstHostSpan();
ASSERT_TRUE(vec.HostCanRead());
ASSERT_FALSE(vec.HostCanWrite());
ASSERT_EQ(h_span.size(), vec.Size());
ASSERT_EQ(h_span.data(), vec.ConstHostPointer());
h_span = vec.HostSpan();
ASSERT_TRUE(vec.HostCanWrite());
}
TEST(HostDeviceVector, Empty) {
HostDeviceVector<float> vec {1.0f, 2.0f, 3.0f, 4.0f};
HostDeviceVector<float> another { std::move(vec) };
ASSERT_FALSE(another.Empty());
ASSERT_TRUE(vec.Empty());
}
} // namespace common
} // namespace xgboost
|
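The assertions in the pair above encode HostDeviceVector's access rules: a mutable host accessor grants the host read/write and revokes device access, a const device accessor leaves the host readable, and a mutable device accessor revokes host access entirely. The sketch below condenses that sequence using only calls that appear in the test; the function itself is illustrative and not part of the dataset.

#include <xgboost/host_device_vector.h>

// Illustrative walk through the access transitions exercised by the test above.
void AccessPattern(xgboost::HostDeviceVector<int>* v, int device) {
  v->SetDevice(device);
  v->Resize(16);                  // resize on device: device R/W, host none
  auto& h = v->HostVector();      // mutable host access: host R/W, device none
  h[0] = 42;
  auto c = v->ConstDeviceSpan();  // const device access: device R, host keeps R
  (void)c;
  auto d = v->DeviceSpan();       // mutable device access: device R/W, host none
  (void)d;
}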
71406ab2ac5489c0f471f670d11868e5fef251f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N * 100)
__global__ void kernel(int *a, int *b, int *c) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
// compute an average of three values in a and
// three values in b
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
// then compute the average of the two averages
c[idx] = (as + bs) / 2;
}
}
int main(int argc, char **argv) {
hipDeviceProp_t prop;
int which_device;
CHECK_CUDA_ERROR(hipGetDevice(&which_device));
CHECK_CUDA_ERROR(hipGetDeviceProperties(&prop, which_device));
// device overlap is a feature that allows a CUDA C kernel to execute while
// simultaneously performing a copy between device and host memory.
if (!prop.deviceOverlap) {
printf("Device will not handle overlaps, so no speed up from streams\n");
return 0;
}
hipEvent_t start;
hipEvent_t stop;
float elapsed_time;
hipStream_t stream0;
hipStream_t stream1;
int *host_a;
int *host_b;
int *host_c;
// device memory for stream0
int *dev_a0;
int *dev_b0;
int *dev_c0;
// device memory for stream1
int *dev_a1;
int *dev_b1;
int *dev_c1;
// start the timers
CHECK_CUDA_ERROR(hipEventCreate(&start));
CHECK_CUDA_ERROR(hipEventCreate(&stop));
// initialize the stream
CHECK_CUDA_ERROR(hipStreamCreate(&stream0));
CHECK_CUDA_ERROR(hipStreamCreate(&stream1));
// allocate the memory on the GPU
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_a0, N * sizeof(int)));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_b0, N * sizeof(int)));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_c0, N * sizeof(int)));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_a1, N * sizeof(int)));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_b1, N * sizeof(int)));
CHECK_CUDA_ERROR(hipMalloc((void**)&dev_c1, N * sizeof(int)));
// allocate page-locked host memory, used for streaming
CHECK_CUDA_ERROR(hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault));
CHECK_CUDA_ERROR(hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault));
CHECK_CUDA_ERROR(hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault));
for (int i = 0; i < FULL_DATA_SIZE; ++i) {
host_a[i] = rand();
host_b[i] = rand();
}
CHECK_CUDA_ERROR(hipEventRecord(start, 0));
// now loop over full data, in bite-sized chunks
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// copy the locked memory to the device, async
CHECK_CUDA_ERROR(hipMemcpyAsync(dev_a0, host_a + i,
N * sizeof(int),
hipMemcpyHostToDevice,
stream0));
CHECK_CUDA_ERROR(hipMemcpyAsync(dev_b0, host_b + i,
N * sizeof(int),
hipMemcpyHostToDevice,
stream0));
// <<<grid dims, block dims, dynamic shared memory size, stream ID>>>
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream0, dev_a0, dev_b0, dev_c0);
// copy the data from device to locked memory
CHECK_CUDA_ERROR(hipMemcpyAsync(host_c + i, dev_c0,
N * sizeof(int),
hipMemcpyDeviceToHost,
stream0));
// copy the locked memory to the device, async
CHECK_CUDA_ERROR(hipMemcpyAsync(dev_a1, host_a + i + N,
N * sizeof(int),
hipMemcpyHostToDevice,
stream1));
CHECK_CUDA_ERROR(hipMemcpyAsync(dev_b1, host_b + i + N,
N * sizeof(int),
hipMemcpyHostToDevice,
stream1));
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream1, dev_a1, dev_b1, dev_c1);
// copy the data from device to locked memory
CHECK_CUDA_ERROR(hipMemcpyAsync(host_c + i + N, dev_c1,
N * sizeof(int),
hipMemcpyDeviceToHost,
stream1));
}
// wait for both streams to finish their queued work
CHECK_CUDA_ERROR(hipStreamSynchronize(stream0));
CHECK_CUDA_ERROR(hipStreamSynchronize(stream1));
CHECK_CUDA_ERROR(hipEventRecord(stop, 0));
CHECK_CUDA_ERROR(hipEventSynchronize(stop));
CHECK_CUDA_ERROR(hipEventElapsedTime(&elapsed_time,
start, stop));
printf("Time taken: %3.1f ms\n", elapsed_time);
// cleanup the streams and memory
CHECK_CUDA_ERROR(hipHostFree(host_a));
CHECK_CUDA_ERROR(hipHostFree(host_b));
CHECK_CUDA_ERROR(hipHostFree(host_c));
CHECK_CUDA_ERROR(hipFree(dev_a0));
CHECK_CUDA_ERROR(hipFree(dev_b0));
CHECK_CUDA_ERROR(hipFree(dev_c0));
CHECK_CUDA_ERROR(hipFree(dev_a1));
CHECK_CUDA_ERROR(hipFree(dev_b1));
CHECK_CUDA_ERROR(hipFree(dev_c1));
CHECK_CUDA_ERROR(hipStreamDestroy(stream0));
CHECK_CUDA_ERROR(hipStreamDestroy(stream1));
return 0;
}
| 71406ab2ac5489c0f471f670d11868e5fef251f2.cu | /*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N * 100)
__global__ void kernel(int *a, int *b, int *c) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
// compute an average of three values in a and
// three values in b
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
// then compute the average of the two averages
c[idx] = (as + bs) / 2;
}
}
int main(int argc, char **argv) {
cudaDeviceProp prop;
int which_device;
CHECK_CUDA_ERROR(cudaGetDevice(&which_device));
CHECK_CUDA_ERROR(cudaGetDeviceProperties(&prop, which_device));
// device overlap is a feature that allows a CUDA C kernel to execute while
// simultaneously performing a copy between device and host memory.
if (!prop.deviceOverlap) {
printf("Device will not handle overlaps, so no speed up from streams\n");
return 0;
}
cudaEvent_t start;
cudaEvent_t stop;
float elapsed_time;
cudaStream_t stream0;
cudaStream_t stream1;
int *host_a;
int *host_b;
int *host_c;
// device memory for stream0
int *dev_a0;
int *dev_b0;
int *dev_c0;
// device memory for stream1
int *dev_a1;
int *dev_b1;
int *dev_c1;
// start the timers
CHECK_CUDA_ERROR(cudaEventCreate(&start));
CHECK_CUDA_ERROR(cudaEventCreate(&stop));
// initialize the stream
CHECK_CUDA_ERROR(cudaStreamCreate(&stream0));
CHECK_CUDA_ERROR(cudaStreamCreate(&stream1));
// allocate the memory on the GPU
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_a0, N * sizeof(int)));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_b0, N * sizeof(int)));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_c0, N * sizeof(int)));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_a1, N * sizeof(int)));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_b1, N * sizeof(int)));
CHECK_CUDA_ERROR(cudaMalloc((void**)&dev_c1, N * sizeof(int)));
// allocate page-locked host memory, used for streaming
CHECK_CUDA_ERROR(cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault));
CHECK_CUDA_ERROR(cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault));
CHECK_CUDA_ERROR(cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault));
for (int i = 0; i < FULL_DATA_SIZE; ++i) {
host_a[i] = rand();
host_b[i] = rand();
}
CHECK_CUDA_ERROR(cudaEventRecord(start, 0));
// now loop over full data, in bite-sized chunks
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// copy the locked memory to the device, async
CHECK_CUDA_ERROR(cudaMemcpyAsync(dev_a0, host_a + i,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream0));
CHECK_CUDA_ERROR(cudaMemcpyAsync(dev_b0, host_b + i,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream0));
// <<<grid dims, block dims, dynamic shared memory size, stream ID>>>
kernel<<<N / 256, 256, 0, stream0>>>(dev_a0, dev_b0, dev_c0);
// copy the data from device to locked memory
CHECK_CUDA_ERROR(cudaMemcpyAsync(host_c + i, dev_c0,
N * sizeof(int),
cudaMemcpyDeviceToHost,
stream0));
// copy the locked memory to the device, async
CHECK_CUDA_ERROR(cudaMemcpyAsync(dev_a1, host_a + i + N,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream1));
CHECK_CUDA_ERROR(cudaMemcpyAsync(dev_b1, host_b + i + N,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream1));
kernel<<<N / 256, 256, 0, stream1>>>(dev_a1, dev_b1, dev_c1);
// copy the data from device to locked memory
CHECK_CUDA_ERROR(cudaMemcpyAsync(host_c + i + N, dev_c1,
N * sizeof(int),
cudaMemcpyDeviceToHost,
stream1));
}
// wait for both streams to finish their queued work
CHECK_CUDA_ERROR(cudaStreamSynchronize(stream0));
CHECK_CUDA_ERROR(cudaStreamSynchronize(stream1));
CHECK_CUDA_ERROR(cudaEventRecord(stop, 0));
CHECK_CUDA_ERROR(cudaEventSynchronize(stop));
CHECK_CUDA_ERROR(cudaEventElapsedTime(&elapsed_time,
start, stop));
printf("Time taken: %3.1f ms\n", elapsed_time);
// cleanup the streams and memory
CHECK_CUDA_ERROR(cudaFreeHost(host_a));
CHECK_CUDA_ERROR(cudaFreeHost(host_b));
CHECK_CUDA_ERROR(cudaFreeHost(host_c));
CHECK_CUDA_ERROR(cudaFree(dev_a0));
CHECK_CUDA_ERROR(cudaFree(dev_b0));
CHECK_CUDA_ERROR(cudaFree(dev_c0));
CHECK_CUDA_ERROR(cudaFree(dev_a1));
CHECK_CUDA_ERROR(cudaFree(dev_b1));
CHECK_CUDA_ERROR(cudaFree(dev_c1));
CHECK_CUDA_ERROR(cudaStreamDestroy(stream0));
CHECK_CUDA_ERROR(cudaStreamDestroy(stream1));
return 0;
}
|
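The pair above also shows hipify's kernel-launch rewrite: CUDA's kernel<<<grid, block, sharedMem, stream>>>(args) becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMem, stream, args). The minimal HIP sketch below uses the same launch form with a stand-in kernel; none of it is taken from the record itself.

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= s;
}

int main() {
  const int n = 1 << 20;
  float *d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));
  hipStream_t stream;
  hipStreamCreate(&stream);
  // CUDA equivalent: scale<<<(n + 255) / 256, 256, 0, stream>>>(d_x, 2.0f, n);
  hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, stream,
                     d_x, 2.0f, n);
  hipStreamSynchronize(stream);
  hipStreamDestroy(stream);
  hipFree(d_x);
  return 0;
}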
cb9b261387bc771ad9364d70d7bf5defd292ed54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeaxpy.cu, normal z -> d, Mon Jun 25 18:24:23 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_d_matrix. It can handle both
dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces with the cuSPARSE library.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_d_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( dgeaxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_dcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_dmfree( Y, queue );
magma_dmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_dmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
| cb9b261387bc771ad9364d70d7bf5defd292ed54.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zgeaxpy.cu, normal z -> d, Mon Jun 25 18:24:23 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
dgeaxpy_kernel(
int num_rows,
int num_cols,
double alpha,
double * dx,
double beta,
double * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
int idx = row + j*num_rows;
dy[ idx ] = alpha * dx[ idx ] + beta * dy[ idx ];
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * X + beta * Y on the GPU.
The input format is magma_d_matrix. It can handle both
dense matrices (vector blocks) and CSR matrices. For the latter,
it interfaces with the cuSPARSE library.
Arguments
---------
@param[in]
alpha double
scalar multiplier.
@param[in]
X magma_d_matrix
input matrix X.
@param[in]
beta double
scalar multiplier.
@param[in,out]
Y magma_d_matrix*
input/output matrix Y.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C"
magma_int_t
magma_dgeaxpy(
double alpha,
magma_d_matrix X,
double beta,
magma_d_matrix *Y,
magma_queue_t queue )
{
int m = X.num_rows;
int n = X.num_cols;
magma_d_matrix C={Magma_CSR};
if( X.storage_type == Magma_DENSE && Y->storage_type == Magma_DENSE ){
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
dgeaxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, X.dval, beta, Y->dval );
} else if( X.storage_type == Magma_CSR && Y->storage_type == Magma_CSR ) {
magma_dcuspaxpy( &alpha, X, &beta, *Y, &C, queue );
magma_dmfree( Y, queue );
magma_dmtransfer( C, Y, Magma_DEV, Magma_DEV, queue );
magma_dmfree( &C, queue );
} else {
printf("%% error: matrix addition only supported for DENSE and CSR format.\n");
}
return MAGMA_SUCCESS;
}
|
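Given the documentation above, a call to magma_dgeaxpy might look like the sketch below. It assumes X and Y are magma_d_matrix objects already resident on the device with matching storage formats, and that the public magmasparse.h header declares the routine; both are assumptions, not something shown in the record.

#include "magmasparse.h"

// Illustrative wrapper: computes Y = 2*X + 1*Y for device-resident matrices.
magma_int_t GeaxpyExample(magma_d_matrix X, magma_d_matrix *Y,
                          magma_queue_t queue) {
  double alpha = 2.0;
  double beta  = 1.0;
  return magma_dgeaxpy(alpha, X, beta, Y, queue);
}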
f620f40d92db23f83063b21458c2d797494a2d3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/filler.hpp"
#include "caffe/layers/norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// divide a matrix by a vector (broadcast per row or per column)
template <typename Dtype>
__global__ void DivBsx(const int nthreads, const Dtype* A,
const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans,
Dtype* B) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = index % cols;
int r = (index / cols) % rows;
if (trans == CblasNoTrans) {
B[index] = A[index] / v[c];
} else {
B[index] = A[index] / v[r];
}
}
}
template <typename Dtype>
__global__ void MulBsx(const int nthreads, const Dtype* A,
const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans,
Dtype* B) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = index % cols;
int r = (index / cols) % rows;
if (trans == CblasNoTrans) {
B[index] = A[index] * v[c];
} else {
B[index] = A[index] * v[r];
}
}
}
template <typename Dtype>
void NormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* buffer_data = buffer_.mutable_gpu_data();
Dtype* norm_data;
if (across_spatial_) {
// need to index it
norm_data = norm_.mutable_cpu_data();
} else {
norm_data = norm_.mutable_gpu_data();
// add eps to avoid overflow
caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data);
}
const Dtype* scale;
if (channel_shared_) {
scale = this->blobs_[0]->cpu_data();
} else {
scale = this->blobs_[0]->gpu_data();
}
const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / num;
int spatial_dim = bottom[0]->height() * bottom[0]->width();
int channels = bottom[0]->channels();
for (int n = 0; n < num; ++n) {
caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data);
if (across_spatial_) {
Dtype normsqr;
caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr);
// add eps to avoid overflow
norm_data[n] = pow(normsqr+eps_, Dtype(0.5));
caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data,
top_data);
} else {
// compute norm
caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_channel_multiplier, Dtype(1),
norm_data);
caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data);
// scale the layer
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans,
top_data);
CUDA_POST_KERNEL_CHECK;
norm_data += spatial_dim;
}
// scale the output
if (channel_shared_) {
caffe_gpu_scal<Dtype>(dim, scale[0], top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, top_data, scale, channels, spatial_dim, CblasTrans,
top_data);
CUDA_POST_KERNEL_CHECK;
}
bottom_data += dim;
top_data += dim;
}
}
template <typename Dtype>
void NormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->mutable_gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* norm_data;
if (across_spatial_) {
// need to index it
norm_data = norm_.cpu_data();
} else {
norm_data = norm_.gpu_data();
}
const Dtype* scale;
if (channel_shared_) {
scale = this->blobs_[0]->cpu_data();
} else {
scale = this->blobs_[0]->gpu_data();
}
Dtype* buffer_data = buffer_.mutable_gpu_data();
Dtype* buffer_channel = buffer_channel_.mutable_gpu_data();
Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data();
const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data();
const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data();
int count = top[0]->count();
int num = top[0]->num();
int dim = count / num;
int spatial_dim = top[0]->height() * top[0]->width();
int channels = top[0]->channels();
// Propagate to param
if (this->param_propagate_down_[0]) {
if (channel_shared_) {
Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff();
Dtype a;
caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a);
scale_diff[0] += a / scale[0];
} else {
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
for (int n = 0; n < num; ++n) {
// compute a
caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_spatial_multiplier, Dtype(0),
buffer_channel);
// store a / scale[i] in buffer_data temporary
caffe_gpu_div<Dtype>(channels, buffer_channel, scale, buffer_channel);
caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff);
}
}
}
// Propagate to bottom
if (propagate_down[0]) {
for (int n = 0; n < num; ++n) {
if (across_spatial_) {
Dtype a;
caffe_gpu_dot<Dtype>(dim, bottom_data, top_diff, &a);
caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n],
bottom_data, bottom_diff);
caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff);
caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff,
bottom_diff);
} else {
// dot product between bottom_data and top_diff
caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data);
caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_channel_multiplier, Dtype(0),
buffer_spatial);
// scale bottom_diff
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, bottom_data, buffer_spatial, channels, spatial_dim,
CblasNoTrans, bottom_diff);
CUDA_POST_KERNEL_CHECK;
// divide by square of norm
caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, bottom_diff, buffer_spatial, channels, spatial_dim,
CblasNoTrans, bottom_diff);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff);
// divide by norm
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DivBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
norm_data += spatial_dim;
}
// scale the diff
if (channel_shared_) {
caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MulBsx<Dtype>) , dim3(CAFFE_GET_BLOCKS(dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
dim, bottom_diff, scale, channels, spatial_dim, CblasTrans,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
bottom_data += dim;
top_diff += dim;
bottom_diff += dim;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NormLayer);
} // namespace caffe
| f620f40d92db23f83063b21458c2d797494a2d3b.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/filler.hpp"
#include "caffe/layers/norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// divide a matrix by a vector (broadcast per row or per column)
template <typename Dtype>
__global__ void DivBsx(const int nthreads, const Dtype* A,
const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans,
Dtype* B) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = index % cols;
int r = (index / cols) % rows;
if (trans == CblasNoTrans) {
B[index] = A[index] / v[c];
} else {
B[index] = A[index] / v[r];
}
}
}
template <typename Dtype>
__global__ void MulBsx(const int nthreads, const Dtype* A,
const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans,
Dtype* B) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = index % cols;
int r = (index / cols) % rows;
if (trans == CblasNoTrans) {
B[index] = A[index] * v[c];
} else {
B[index] = A[index] * v[r];
}
}
}
template <typename Dtype>
void NormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* buffer_data = buffer_.mutable_gpu_data();
Dtype* norm_data;
if (across_spatial_) {
// need to index it
norm_data = norm_.mutable_cpu_data();
} else {
norm_data = norm_.mutable_gpu_data();
// add eps to avoid overflow
caffe_gpu_set<Dtype>(norm_.count(), Dtype(eps_), norm_data);
}
const Dtype* scale;
if (channel_shared_) {
scale = this->blobs_[0]->cpu_data();
} else {
scale = this->blobs_[0]->gpu_data();
}
const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / num;
int spatial_dim = bottom[0]->height() * bottom[0]->width();
int channels = bottom[0]->channels();
for (int n = 0; n < num; ++n) {
caffe_gpu_powx<Dtype>(dim, bottom_data, Dtype(2), buffer_data);
if (across_spatial_) {
Dtype normsqr;
caffe_gpu_asum<Dtype>(dim, buffer_data, &normsqr);
// add eps to avoid overflow
norm_data[n] = pow(normsqr+eps_, Dtype(0.5));
caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_data,
top_data);
} else {
// compute norm
caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_channel_multiplier, Dtype(1),
norm_data);
caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(0.5), norm_data);
// scale the layer
// NOLINT_NEXT_LINE(whitespace/operators)
DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans,
top_data);
CUDA_POST_KERNEL_CHECK;
norm_data += spatial_dim;
}
// scale the output
if (channel_shared_) {
caffe_gpu_scal<Dtype>(dim, scale[0], top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, top_data, scale, channels, spatial_dim, CblasTrans,
top_data);
CUDA_POST_KERNEL_CHECK;
}
bottom_data += dim;
top_data += dim;
}
}
template <typename Dtype>
void NormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->mutable_gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* norm_data;
if (across_spatial_) {
// need to index it
norm_data = norm_.cpu_data();
} else {
norm_data = norm_.gpu_data();
}
const Dtype* scale;
if (channel_shared_) {
scale = this->blobs_[0]->cpu_data();
} else {
scale = this->blobs_[0]->gpu_data();
}
Dtype* buffer_data = buffer_.mutable_gpu_data();
Dtype* buffer_channel = buffer_channel_.mutable_gpu_data();
Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data();
const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data();
const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data();
int count = top[0]->count();
int num = top[0]->num();
int dim = count / num;
int spatial_dim = top[0]->height() * top[0]->width();
int channels = top[0]->channels();
// Propagate to param
if (this->param_propagate_down_[0]) {
if (channel_shared_) {
Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff();
Dtype a;
caffe_gpu_dot<Dtype>(count, top_data, top_diff, &a);
scale_diff[0] += a / scale[0];
} else {
Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff();
for (int n = 0; n < num; ++n) {
// compute a
caffe_gpu_mul<Dtype>(dim, top_data+n*dim, top_diff+n*dim, buffer_data);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_spatial_multiplier, Dtype(0),
buffer_channel);
// store a / scale[i] in buffer_data temporary
caffe_gpu_div<Dtype>(channels, buffer_channel, scale, buffer_channel);
caffe_gpu_add<Dtype>(channels, buffer_channel, scale_diff, scale_diff);
}
}
}
// Propagate to bottom
if (propagate_down[0]) {
for (int n = 0; n < num; ++n) {
if (across_spatial_) {
Dtype a;
caffe_gpu_dot<Dtype>(dim, bottom_data, top_diff, &a);
caffe_gpu_scale<Dtype>(dim, a / norm_data[n] / norm_data[n],
bottom_data, bottom_diff);
caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff);
caffe_gpu_scale<Dtype>(dim, Dtype(1.0 / norm_data[n]), bottom_diff,
bottom_diff);
} else {
// dot product between bottom_data and top_diff
caffe_gpu_mul<Dtype>(dim, bottom_data, top_diff, buffer_data);
caffe_gpu_gemv<Dtype>(CblasTrans, channels, spatial_dim, Dtype(1),
buffer_data, sum_channel_multiplier, Dtype(0),
buffer_spatial);
// scale bottom_diff
// NOLINT_NEXT_LINE(whitespace/operators)
MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_data, buffer_spatial, channels, spatial_dim,
CblasNoTrans, bottom_diff);
CUDA_POST_KERNEL_CHECK;
// divide by square of norm
caffe_gpu_powx<Dtype>(spatial_dim, norm_data, Dtype(2), buffer_spatial);
// NOLINT_NEXT_LINE(whitespace/operators)
DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_diff, buffer_spatial, channels, spatial_dim,
CblasNoTrans, bottom_diff);
CUDA_POST_KERNEL_CHECK;
caffe_gpu_sub<Dtype>(dim, top_diff, bottom_diff, bottom_diff);
// divide by norm
// NOLINT_NEXT_LINE(whitespace/operators)
DivBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
norm_data += spatial_dim;
}
// scale the diff
if (channel_shared_) {
caffe_gpu_scal<Dtype>(dim, scale[0], bottom_diff);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
MulBsx<Dtype> <<<CAFFE_GET_BLOCKS(dim), CAFFE_CUDA_NUM_THREADS>>>(
dim, bottom_diff, scale, channels, spatial_dim, CblasTrans,
bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
bottom_data += dim;
top_diff += dim;
bottom_diff += dim;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NormLayer);
} // namespace caffe
|
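The DivBsx and MulBsx kernels above broadcast a vector over a rows x cols matrix, dividing (or multiplying) by a per-column entry when trans == CblasNoTrans and by a per-row entry otherwise. The host-side reference below restates what DivBsx computes; the function name and signature are hypothetical.

// For a row-major rows x cols matrix A and vector v:
//   no_trans == true  : B[r][c] = A[r][c] / v[c]   (v has cols entries)
//   no_trans == false : B[r][c] = A[r][c] / v[r]   (v has rows entries)
void DivBsxHostRef(const float* A, const float* v, int rows, int cols,
                   bool no_trans, float* B) {
  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) {
      B[r * cols + c] = A[r * cols + c] / (no_trans ? v[c] : v[r]);
    }
  }
}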
1289ea8e291ca6081a652252981a797bcaf8d677.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hiprand/hiprand.h>
#include <cassert>
#include <cmath>
#include <ctime>
#include <iostream>
#include <vector>
#include "addvec.h"
int matmul(const int M, const int N, const int K, const int iter) {
// pseudo-random number generator
hiprandGenerator_t prng;
hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT);
// seed setting
hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long)clock());
// measure GPU runtime using CUDA events.
float elapsed = 0;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// loop: generate random A and B and compute C for the given number of iterations.
for (int n = 1; n <= iter; n++) {
// Pre-calculate the size of our matrices
const size_t bytes_a = M * K * sizeof(float);
const size_t bytes_b = K * N * sizeof(float);
const size_t bytes_c = M * N * sizeof(float);
// Allocate device memory
float* d_a, * d_c;
float* d_b;
hipMalloc(&d_a, bytes_a);
hipMalloc(&d_b, bytes_b);
hipMalloc(&d_c, bytes_c);
// Filling matrix A with random numbers
hiprandGenerateUniform(prng, d_a, M * K);
// Filling matrix B with random numbers
hiprandGenerateUniform(prng, d_b, K * N);
// cuBLAS handle
hipblasHandle_t handle;
hipblasCreate(&handle);
// Scaling factors
float alpha = 1.0f;
float beta = 0.0f;
// matrix calculation using cublas sgemm.
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, &alpha, d_a, M, d_b, K, &beta, d_c, M);
// Free our memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("time in gpu : %.2f ms\ ", elapsed);
return 0;
}
int main() {
// Condition 1, but with a different matrix size and number of iterations.
const int M = 5000;
const int N = 5000;
const int K = 4000;
const int iter = 200;
matmul(M, N, K, iter);
std::cout << "Condition COMPLETED SUCCESSFULLY\n";
return 0;
} | 1289ea8e291ca6081a652252981a797bcaf8d677.cu | #include <cublas_v2.h>
#include <curand.h>
#include <cassert>
#include <cmath>
#include <ctime>
#include <iostream>
#include <vector>
#include "addvec.h"
int matmul(const int M, const int N, const int K, const int iter) {
// pseudo-random number generator
curandGenerator_t prng;
curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
// seed setting
curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long)clock());
// measure GPU runtime using CUDA events.
float elapsed = 0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// loop: generate random A and B and compute C for the given number of iterations.
for (int n = 1; n <= iter; n++) {
// Pre-calculate the size of our matrices
const size_t bytes_a = M * K * sizeof(float);
const size_t bytes_b = K * N * sizeof(float);
const size_t bytes_c = M * N * sizeof(float);
// Allocate device memory
float* d_a, * d_c;
float* d_b;
cudaMalloc(&d_a, bytes_a);
cudaMalloc(&d_b, bytes_b);
cudaMalloc(&d_c, bytes_c);
// Filling matrix A with random numbers
curandGenerateUniform(prng, d_a, M * K);
// Filling matrix B with random numbers
curandGenerateUniform(prng, d_b, K * N);
// cuBLAS handle
cublasHandle_t handle;
cublasCreate(&handle);
// Scaling factors
float alpha = 1.0f;
float beta = 0.0f;
// matrix calculation using cublas sgemm.
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, &alpha, d_a, M, d_b, K, &beta, d_c, M);
// Free our memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("time in gpu : %.2f ms\ ", elapsed);
return 0;
}
int main() {
// Condition -1 but with changing in matrix size and number of iterations.
const int M = 5000;
const int N = 5000;
const int K = 4000;
const int iter = 200;
matmul(M, N, K, iter);
std::cout << "Condition COMPLETED SUCCESSFULLY\n";
return 0;
} |
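The cublasSgemm call above follows the standard cuBLAS convention: matrices are column-major and the routine computes C = alpha * op(A) * op(B) + beta * C. The thin wrapper below spells out the argument meanings for the non-transposed case used in this record; the wrapper itself is illustrative.

#include <cublas_v2.h>

// C (MxN) = alpha * A (MxK) * B (KxN) + beta * C, all column-major, no transpose.
cublasStatus_t SgemmNN(cublasHandle_t handle, int M, int N, int K,
                       const float* d_a, const float* d_b, float* d_c,
                       float alpha, float beta) {
  return cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K,
                     &alpha, d_a, /*lda=*/M,
                     d_b, /*ldb=*/K,
                     &beta, d_c, /*ldc=*/M);
}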
32d67750a8c664e2050cf41b606d16902f8eea65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
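// Added explanatory note (not in the original file): `scan` is the gradient
// sum of the bins strictly left of the candidate split and `missing` is
// parent_sum minus the feature's total. The two candidate partitions are
// {scan + missing | rest} (missing goes left) and {scan | rest} (missing goes
// right); the function returns the larger gain minus parent_gain and records
// the chosen direction in missing_left_out.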
/*!
* \brief
*
* \tparam ReduceT BlockReduce Type.
* \tparam TempStorage Cub Shared memory
*
* \param begin
* \param end
* \param temp_storage Shared memory for intermediate result.
*/
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, hipcub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const EllpackDeviceAccessor& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
uint32_t gidx_begin = matrix.feature_segments[fidx]; // beginning bin
uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
hipcub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax());
__shared__ hipcub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const bst_feature_t> feature_set, // Selected features
DeviceNodeStats node,
xgboost::EllpackDeviceAccessor matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = hipcub::KeyValuePair<int, float>;
using BlockScanT =
hipcub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = hipcub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> host_node_sum_gradients;
dh::caching_device_vector<GradientPair> node_sum_gradients;
bst_uint n_rows;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned_memory;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(page,
n_rows,
batch_param,
param.subsample,
param.sampling_method));
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(host_node_sum_gradients.begin(), host_node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
n_rows = sample.sample_rows;
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
hist.Reset();
}
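  // Evaluate split candidates for a batch of nodes: one thread block per sampled
  // feature runs EvaluateSplitKernel, then a device-wide reduction over the
  // per-feature candidates picks the best split for each node.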
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
dh::TemporaryArray<DeviceSplitCandidate> d_result_all(nidxs.size());
dh::TemporaryArray<DeviceSplitCandidate> split_candidates_all(nidxs.size()*num_columns);
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
common::Span<bst_feature_t> d_sampled_features =
p_feature_set->DeviceSpan();
common::Span<bst_feature_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
common::Span<DeviceSplitCandidate> d_split_candidates(
split_candidates_all.data().get() + i * num_columns,
d_feature_set.size());
DeviceNodeStats node(host_node_sum_gradients[nidx], nidx, param);
common::Span<DeviceSplitCandidate> d_result(d_result_all.data().get() + i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(hipMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
hipMemcpyHostToDevice));
continue;
}
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]} (
EvaluateSplitKernel<kBlockThreads, GradientSumT>,
hist.GetNodeHistogram(nidx), d_feature_set, node, page->GetDeviceAccessor(device_id),
gpu_param, d_split_candidates, node_value_constraints[nidx],
dh::ToSpan(monotone_constraints));
// Reduce over features to find best feature
size_t cub_bytes = 0;
hipcub::DeviceReduce::Reduce(nullptr,
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
dh::TemporaryArray<char> cub_temp(cub_bytes);
hipcub::DeviceReduce::Reduce(reinterpret_cast<void*>(cub_temp.data().get()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(hipMemcpy(result_all.data(), d_result_all.data().get(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
hipMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
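  // Build the gradient histogram of a single node from the rows currently
  // assigned to it by the row partitioner.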
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
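  // A child's histogram equals the parent's histogram minus its sibling's, so
  // only one child per split needs a full histogram pass.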
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
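  // Route the rows of a freshly split node to its left or right child based on
  // the split feature value; missing values follow the node's default direction.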
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
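  // Add each leaf's weight (scaled by the learning rate) to the cached
  // predictions of the rows that ended up in that leaf, then copy the cache
  // back into out_preds_d.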
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
hipMemcpyAsync(node_sum_gradients.data().get(), host_node_sum_gradients.data(),
sizeof(GradientPair) * host_node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
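  // Materialise the chosen split: expand the tree node, record the children's
  // gradient sums and set up their value/interaction constraints.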
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess,
left_stats.GetHess(), right_stats.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
host_node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
host_node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
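  // Compute the root's gradient sum (all-reduced across workers), build and
  // all-reduce its histogram, set the root leaf weight and queue the first
  // split candidate.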
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer, int64_t num_columns) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
dh::safe_cuda(hipMemcpyAsync(node_sum_gradients.data().get(), &root_sum, sizeof(root_sum),
hipMemcpyHostToDevice));
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients.data().get()),
reinterpret_cast<float*>(node_sum_gradients.data().get()), 2);
reducer->Synchronize();
dh::safe_cuda(hipMemcpyAsync(host_node_sum_gradients.data(),
node_sum_gradients.data().get(), sizeof(GradientPair),
hipMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = host_node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, host_node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
host_node_sum_gradients.resize(param.MaxNodes());
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(hipSetDevice(device_));
maker->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
    // The passed-in args can be empty; if we simply purged the old maker without
    // preserving its parameters, we could no longer call Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 32d67750a8c664e2050cf41b606d16902f8eea65.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
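// A tree node queued for expansion: its id, depth, best split candidate and a
// timestamp used to break ties in the priority queue.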
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
// With constraints
template <typename GradientPairT>
XGBOOST_DEVICE float inline LossChangeMissing(
const GradientPairT& scan, const GradientPairT& missing, const GradientPairT& parent_sum,
const float& parent_gain, const GPUTrainingParam& param, int constraint,
const ValueConstraint& value_constraint,
bool& missing_left_out) { // NOLINT
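  // Compare the gain obtained by sending the missing-value bucket to the left
  // child against sending it to the right child, and report which side wins.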
float missing_left_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan + missing),
GradStats(parent_sum - (scan + missing)));
float missing_right_gain = value_constraint.CalcSplitGain(
param, constraint, GradStats(scan), GradStats(parent_sum - scan));
if (missing_left_gain >= missing_right_gain) {
missing_left_out = true;
return missing_left_gain - parent_gain;
} else {
missing_left_out = false;
return missing_right_gain - parent_gain;
}
}
/*!
 * \brief Sum the histogram bins of a single feature using a block-wide reduction.
 *
 * \tparam ReduceT      BlockReduce type.
 * \tparam TempStorageT Cub shared memory type.
 *
 * \param feature_histogram Histogram bins belonging to this feature.
 * \param temp_storage      Shared memory for the intermediate reduction result.
 */
template <int BLOCK_THREADS, typename ReduceT, typename TempStorageT, typename GradientSumT>
__device__ GradientSumT ReduceFeature(common::Span<const GradientSumT> feature_histogram,
TempStorageT* temp_storage) {
__shared__ cub::Uninitialized<GradientSumT> uninitialized_sum;
GradientSumT& shared_sum = uninitialized_sum.Alias();
GradientSumT local_sum = GradientSumT();
// For loop sums features into one block size
auto begin = feature_histogram.data();
auto end = begin + feature_histogram.size();
for (auto itr = begin; itr < end; itr += BLOCK_THREADS) {
bool thread_active = itr + threadIdx.x < end;
// Scan histogram
GradientSumT bin = thread_active ? *(itr + threadIdx.x) : GradientSumT();
local_sum += bin;
}
local_sum = ReduceT(temp_storage->sum_reduce).Reduce(local_sum, cub::Sum());
// Reduction result is stored in thread 0.
if (threadIdx.x == 0) {
shared_sum = local_sum;
}
__syncthreads();
return shared_sum;
}
/*! \brief Find the thread with best gain. */
template <int BLOCK_THREADS, typename ReduceT, typename ScanT,
typename MaxReduceT, typename TempStorageT, typename GradientSumT>
__device__ void EvaluateFeature(
int fidx, common::Span<const GradientSumT> node_histogram,
const EllpackDeviceAccessor& matrix,
DeviceSplitCandidate* best_split, // shared memory storing best split
const DeviceNodeStats& node, const GPUTrainingParam& param,
TempStorageT* temp_storage, // temp memory for cub operations
int constraint, // monotonic_constraints
const ValueConstraint& value_constraint) {
// Use pointer from cut to indicate begin and end of bins for each feature.
  uint32_t gidx_begin = matrix.feature_segments[fidx]; // beginning bin
uint32_t gidx_end = matrix.feature_segments[fidx + 1]; // end bin for i^th feature
// Sum histogram bins for current feature
GradientSumT const feature_sum = ReduceFeature<BLOCK_THREADS, ReduceT>(
node_histogram.subspan(gidx_begin, gidx_end - gidx_begin), temp_storage);
GradientSumT const parent_sum = GradientSumT(node.sum_gradients);
GradientSumT const missing = parent_sum - feature_sum;
float const null_gain = -std::numeric_limits<bst_float>::infinity();
SumCallbackOp<GradientSumT> prefix_op =
SumCallbackOp<GradientSumT>();
for (int scan_begin = gidx_begin; scan_begin < gidx_end;
scan_begin += BLOCK_THREADS) {
bool thread_active = (scan_begin + threadIdx.x) < gidx_end;
// Gradient value for current bin.
GradientSumT bin =
thread_active ? node_histogram[scan_begin + threadIdx.x] : GradientSumT();
ScanT(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op);
// Whether the gradient of missing values is put to the left side.
bool missing_left = true;
float gain = null_gain;
if (thread_active) {
gain = LossChangeMissing(bin, missing, parent_sum, node.root_gain, param,
constraint, value_constraint, missing_left);
}
__syncthreads();
// Find thread with best gain
cub::KeyValuePair<int, float> tuple(threadIdx.x, gain);
cub::KeyValuePair<int, float> best =
MaxReduceT(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax());
__shared__ cub::KeyValuePair<int, float> block_max;
if (threadIdx.x == 0) {
block_max = best;
}
__syncthreads();
// Best thread updates split
if (threadIdx.x == block_max.key) {
int split_gidx = (scan_begin + threadIdx.x) - 1;
float fvalue;
if (split_gidx < static_cast<int>(gidx_begin)) {
fvalue = matrix.min_fvalue[fidx];
} else {
fvalue = matrix.gidx_fvalue_map[split_gidx];
}
GradientSumT left = missing_left ? bin + missing : bin;
GradientSumT right = parent_sum - left;
best_split->Update(gain, missing_left ? kLeftDir : kRightDir, fvalue,
fidx, GradientPair(left), GradientPair(right), param);
}
__syncthreads();
}
}
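// One block per sampled feature: each block scans its feature's histogram bins,
// evaluates candidate split gains and writes out the best split it found.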
template <int BLOCK_THREADS, typename GradientSumT>
__global__ void EvaluateSplitKernel(
common::Span<const GradientSumT> node_histogram, // histogram for gradients
common::Span<const bst_feature_t> feature_set, // Selected features
DeviceNodeStats node,
xgboost::EllpackDeviceAccessor matrix,
GPUTrainingParam gpu_param,
common::Span<DeviceSplitCandidate> split_candidates, // resulting split
ValueConstraint value_constraint,
common::Span<int> d_monotonic_constraints) {
// KeyValuePair here used as threadIdx.x -> gain_value
using ArgMaxT = cub::KeyValuePair<int, float>;
using BlockScanT =
cub::BlockScan<GradientSumT, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS>;
using MaxReduceT = cub::BlockReduce<ArgMaxT, BLOCK_THREADS>;
using SumReduceT = cub::BlockReduce<GradientSumT, BLOCK_THREADS>;
union TempStorage {
typename BlockScanT::TempStorage scan;
typename MaxReduceT::TempStorage max_reduce;
typename SumReduceT::TempStorage sum_reduce;
};
// Aligned && shared storage for best_split
__shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split;
DeviceSplitCandidate& best_split = uninitialized_split.Alias();
__shared__ TempStorage temp_storage;
if (threadIdx.x == 0) {
best_split = DeviceSplitCandidate();
}
__syncthreads();
// One block for each feature. Features are sampled, so fidx != blockIdx.x
int fidx = feature_set[blockIdx.x];
int constraint = d_monotonic_constraints[fidx];
EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>(
fidx, node_histogram, matrix, &best_split, node, gpu_param, &temp_storage,
constraint, value_constraint);
__syncthreads();
if (threadIdx.x == 0) {
// Record best loss for each feature
split_candidates[blockIdx.x] = best_split;
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
        // the new slot is zeroed below ("Zero recycled memory")
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
/*! \brief Gradient pair for each row. */
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> host_node_sum_gradients;
dh::caching_device_vector<GradientPair> node_sum_gradients;
bst_uint n_rows;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
dh::PinnedMemory pinned_memory;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
n_rows(_n_rows),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(page,
n_rows,
batch_param,
param.subsample,
param.sampling_method));
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
void InitHistogram();
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(host_node_sum_gradients.begin(), host_node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
n_rows = sample.sample_rows;
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, n_rows));
hist.Reset();
}
std::vector<DeviceSplitCandidate> EvaluateSplits(
std::vector<int> nidxs, const RegTree& tree,
size_t num_columns) {
auto result_all = pinned_memory.GetSpan<DeviceSplitCandidate>(nidxs.size());
// Work out cub temporary memory requirement
GPUTrainingParam gpu_param(param);
DeviceSplitCandidateReduceOp op(gpu_param);
dh::TemporaryArray<DeviceSplitCandidate> d_result_all(nidxs.size());
dh::TemporaryArray<DeviceSplitCandidate> split_candidates_all(nidxs.size()*num_columns);
auto& streams = this->GetStreams(nidxs.size());
for (auto i = 0ull; i < nidxs.size(); i++) {
auto nidx = nidxs[i];
auto p_feature_set = column_sampler.GetFeatureSet(tree.GetDepth(nidx));
p_feature_set->SetDevice(device_id);
common::Span<bst_feature_t> d_sampled_features =
p_feature_set->DeviceSpan();
common::Span<bst_feature_t> d_feature_set =
interaction_constraints.Query(d_sampled_features, nidx);
common::Span<DeviceSplitCandidate> d_split_candidates(
split_candidates_all.data().get() + i * num_columns,
d_feature_set.size());
DeviceNodeStats node(host_node_sum_gradients[nidx], nidx, param);
common::Span<DeviceSplitCandidate> d_result(d_result_all.data().get() + i, 1);
if (d_feature_set.empty()) {
// Acting as a device side constructor for DeviceSplitCandidate.
// DeviceSplitCandidate::IsValid is false so that ApplySplit can reject this
// candidate.
auto worst_candidate = DeviceSplitCandidate();
dh::safe_cuda(cudaMemcpyAsync(d_result.data(), &worst_candidate,
sizeof(DeviceSplitCandidate),
cudaMemcpyHostToDevice));
continue;
}
// One block for each feature
uint32_t constexpr kBlockThreads = 256;
dh::LaunchKernel {uint32_t(d_feature_set.size()), kBlockThreads, 0, streams[i]} (
EvaluateSplitKernel<kBlockThreads, GradientSumT>,
hist.GetNodeHistogram(nidx), d_feature_set, node, page->GetDeviceAccessor(device_id),
gpu_param, d_split_candidates, node_value_constraints[nidx],
dh::ToSpan(monotone_constraints));
// Reduce over features to find best feature
size_t cub_bytes = 0;
cub::DeviceReduce::Reduce(nullptr,
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
dh::TemporaryArray<char> cub_temp(cub_bytes);
cub::DeviceReduce::Reduce(reinterpret_cast<void*>(cub_temp.data().get()),
cub_bytes, d_split_candidates.data(),
d_result.data(), d_split_candidates.size(), op,
DeviceSplitCandidate(), streams[i]);
}
dh::safe_cuda(cudaMemcpy(result_all.data(), d_result_all.data().get(),
sizeof(DeviceSplitCandidate) * d_result_all.size(),
cudaMemcpyDeviceToHost));
return std::vector<DeviceSplitCandidate>(result_all.begin(), result_all.end());
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::safe_cuda(
cudaMemcpyAsync(node_sum_gradients.data().get(), host_node_sum_gradients.data(),
sizeof(GradientPair) * host_node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
GradStats left_stats{};
left_stats.Add(candidate.split.left_sum);
GradStats right_stats{};
right_stats.Add(candidate.split.right_sum);
GradStats parent_sum{};
parent_sum.Add(left_stats);
parent_sum.Add(right_stats);
node_value_constraints.resize(tree.GetNodes().size());
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(param, parent_sum);
auto left_weight =
node_value_constraints[candidate.nid].CalcWeight(param, left_stats)*param.learning_rate;
auto right_weight =
node_value_constraints[candidate.nid].CalcWeight(param, right_stats)*param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.sum_hess,
left_stats.GetHess(), right_stats.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), left_stats, right_stats,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
host_node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
host_node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer, int64_t num_columns) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
dh::safe_cuda(cudaMemcpyAsync(node_sum_gradients.data().get(), &root_sum, sizeof(root_sum),
cudaMemcpyHostToDevice));
reducer->AllReduceSum(
reinterpret_cast<float*>(node_sum_gradients.data().get()),
reinterpret_cast<float*>(node_sum_gradients.data().get()), 2);
reducer->Synchronize();
dh::safe_cuda(cudaMemcpyAsync(host_node_sum_gradients.data(),
node_sum_gradients.data().get(), sizeof(GradientPair),
cudaMemcpyDeviceToHost));
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
p_tree->Stat(kRootNIdx).sum_hess = host_node_sum_gradients[kRootNIdx].GetHess();
auto weight = CalcWeight(param, host_node_sum_gradients[kRootNIdx]);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateSplits({kRootNIdx}, *p_tree, num_columns);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split.at(0), 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer, p_fmat->Info().num_col_);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx},
*p_tree, p_fmat->Info().num_col_);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
inline void GPUHistMakerDevice<GradientSumT>::InitHistogram() {
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
host_node_sum_gradients.resize(param.MaxNodes());
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
}
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale learning rate according to size of trees
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
monitor_.StartCuda("InitHistogram");
dh::safe_cuda(cudaSetDevice(device_));
maker->InitHistogram();
monitor_.StopCuda("InitHistogram");
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
    // The passed-in args can be empty; if we simply purged the old maker without
    // preserving its parameters, we could no longer call Update on it.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
84b953c9757e4627dc8ae686744d49908d9b9afe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<stdio.h>
#include "CUDA.h"
#include "Random.h"
#include "mycomplex.h"
#include "model.h"
using namespace std;
/* ----------------------------------------*/
/**************************/
double *yzero;
__device__ double const tau=1.;
__device__ double const one_over_tau=1./tau;
double const iniy_max=10.;
double const iniy_min=-10.;
__device__ inline double kappa(double zz){
double amp=1.;
/* double zmax=100.;
double mu=0.1;
double zabs=abs(zz);
if (zabs > zmax){
return mu*zmax*zmax;
}
else{
return mu*zz*zz;
} */
return amp;
}
/* ----------------------------------------*/
__global__ void inirand_evolve(unsigned long long seed[]);
/* ----------------------------------------*/
/*__device__ double telegraph(double nu, double tt, int local_index, hiprandState_t mystate){
double poisson_mean=nu*tt;
double pr=Poisson(poisson_mean,&mystate);
int ppower=(int) fmod(pr,2.);
double tele_ran = powf(-1,ppower);
return tele_ran;
}*/
/* ----------------------------------------*/
__device__ void eval_rhs(double rhs[],double tt,double yy[],int lindex){
double v1=yy[lindex+3];
double v2=yy[lindex+4];
double v3=yy[lindex+5];
double zdot1 = v1;
double zdot2 = v2;
double zdot3 = v3;
/* --The stochastic part of the equation is added outside the usual integrator - */
double vdot1 = -one_over_tau*v1;
double vdot2 = -one_over_tau*v2;
double vdot3 = -one_over_tau*v3;
/* ---------------------------------------------------- */
rhs[0]=zdot1;
rhs[1]=zdot2;
rhs[2]=zdot3;
rhs[3]=vdot1;
rhs[4]=vdot2;
rhs[5]=vdot3;
}
/* ----------------------------------------*/
__device__ void stochastic(double yy[],hiprandState_t global_state[], double tlocal,
double deltat,int lindex)
{
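// Add the Gaussian random forcing to the three velocity components; the sqrt(deltat)
// factor is the Euler-Maruyama scaling of the noise increment.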
double pi = 4.*atan(1.);
//double zz=yy[lindex];
//double r = fmod(zz,pi);
double mean=0;
double sigma=1.;
//double sigma=kappa(zz);
int tid=lindex/pdim;
hiprandState_t local_state=global_state[tid];
double uu1 = Gaussian(mean,sigma,&local_state);
double uu2 = Gaussian(mean,sigma,&local_state);
double uu3 = Gaussian(mean,sigma,&local_state);
global_state[tid] = local_state;
yy[lindex+3]=yy[lindex+3]+one_over_tau*uu1*sqrt(deltat);
yy[lindex+4]=yy[lindex+4]+one_over_tau*uu2*sqrt(deltat);
yy[lindex+5]=yy[lindex+5]+one_over_tau*uu3*sqrt(deltat);
}
/*---------------
__global__ void inirand_evolve(unsigned long long seed[], dev_global_state[]){
int tid = blockIdx.x;
unsigned long long local_seed = seed[tid];
hiprandState_t local_state;
local_state = dev_global_state[tid];
hiprand_init(local_seed,tid,0, &local_state);
dev_global_state[tid] = local_state;
}*/
/* ----------------------------------------*/
void iniconf(double y[],int Nensemble, hiprandState_t rand_state[]){
hiprandState_t *dev_iniran_state;
double rand[Nensemble],rand2[Nensemble];
double *dev_rand;
unsigned long long seed[Nensemble];
unsigned long long *dev_seed;
for(int i=0;i<Nensemble;i++){
seed[i]=37*i+53*i*i;
rand[i]=0.;
rand2[i]=0.;
}
dev_rand= host2dev(Nensemble,rand);
dev_seed = host2dev(Nensemble,seed);
hipMalloc( (void**)&dev_iniran_state, Nensemble*sizeof(hiprandState_t) );
hipLaunchKernelGGL(( init_random), dim3(Nensemble),dim3(1), 0, 0, dev_seed,dev_iniran_state);
hipLaunchKernelGGL(( UniformRandom), dim3(Nensemble),dim3(1), 0, 0, dev_rand, dev_iniran_state);
dev2host(rand,Nensemble,dev_rand);
hipLaunchKernelGGL(( UniformRandom), dim3(Nensemble),dim3(1), 0, 0, dev_rand, dev_iniran_state);
dev2host(rand2,Nensemble,dev_rand);
for(int j=0;j<Nensemble;j++){
// Uniformly distributed initial position between iniy_min to iniy_max
y[0+j*pdim]=iniy_min+rand[j]*(iniy_max-iniy_min);
// and random initial velocity
y[1+j*pdim]=rand2[j];
printf("y0,y1,%lf,%lf\n",y[0],y[1]);
}
/* copy the state of the random no. generator to host */
dev2host(rand_state,Nensemble,dev_iniran_state);
// inirand_evolve<<<Nensemble,1>>>(dev_seed, dev_rand_state);
}
/* ----------------------------------------*/
__host__ void diag(double tt, double y[], int Nensemble, FILE* tseries, FILE* diagf){
int ndim=pdim*Nensemble;
if (tt == 0.) {
yzero=(double*)malloc(ndim*sizeof(double));
for (int i=0;i<ndim;i++){
yzero[i]=y[i];
}
}
//printf("%lf\t%lf\t%lf\t%lf\t%lf\n",tt,y[0],y[1],y[2],y[3]);
fprintf(tseries,"%lf\t",tt);
for (int i=0;i<ndim-1;i++){
fprintf(tseries,"%lf\t",y[i]);
}
fprintf(tseries,"%lf\n",y[ndim-1]);
double meanz=0.;
double vsqrm=0.;
double dzrms=0;
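// Sum over the ensemble: first coordinate, squared displacement from the initial
// condition, and squared speed; averages are taken after the loop.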
for(int i=0; i<Nensemble; i++){
int lindex=pdim*i;
double zz1=y[lindex+0];
double zz2=y[lindex+1];
double zz3=y[lindex+2];
double vv1=y[lindex+3];
double vv2=y[lindex+4];
double vv3=y[lindex+5];
double dz=zz1-yzero[lindex];
meanz= zz1+meanz ;
dzrms= dz*dz+dzrms ;
vsqrm= vv1*vv1+vv2*vv2+vv3*vv3+vsqrm;
}
double zz=y[0];
meanz=meanz/Nensemble;
vsqrm=vsqrm/Nensemble;
dzrms=sqrt(dzrms)/Nensemble;
printf("%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,vsqrm);
//printf("%lf\t%lf\t%lf\n",tt,u1,u2);
fprintf(diagf,"%lf\t%lf\t%lf\t%lf\n",tt,zz,dzrms,meanz,vsqrm);
}
/* ----------------------------------------*/
| 84b953c9757e4627dc8ae686744d49908d9b9afe.cu | #include<math.h>
#include<stdio.h>
#include "CUDA.h"
#include "Random.h"
#include "mycomplex.h"
#include "model.h"
using namespace std;
/* ----------------------------------------*/
/**************************/
double *yzero;
__device__ double const tau=1.;
__device__ double const one_over_tau=1./tau;
double const iniy_max=10.;
double const iniy_min=-10.;
__device__ inline double kappa(double zz){
double amp=1.;
/* double zmax=100.;
double mu=0.1;
double zabs=abs(zz);
if (zabs > zmax){
return mu*zmax*zmax;
}
else{
return mu*zz*zz;
} */
return amp;
}
/* ----------------------------------------*/
__global__ void inirand_evolve(unsigned long long seed[]);
/* ----------------------------------------*/
/*__device__ double telegraph(double nu, double tt, int local_index, curandState mystate){
double poisson_mean=nu*tt;
double pr=Poisson(poisson_mean,&mystate);
int ppower=(int) fmod(pr,2.);
double tele_ran = powf(-1,ppower);
return tele_ran;
}*/
/* ----------------------------------------*/
__device__ void eval_rhs(double rhs[],double tt,double yy[],int lindex){
double v1=yy[lindex+3];
double v2=yy[lindex+4];
double v3=yy[lindex+5];
double zdot1 = v1;
double zdot2 = v2;
double zdot3 = v3;
/* --The stochastic part of the equation is added outside the usual integrator - */
double vdot1 = -one_over_tau*v1;
double vdot2 = -one_over_tau*v2;
double vdot3 = -one_over_tau*v3;
/* ---------------------------------------------------- */
rhs[0]=zdot1;
rhs[1]=zdot2;
rhs[2]=zdot3;
rhs[3]=vdot1;
rhs[4]=vdot2;
rhs[5]=vdot3;
}
/* ----------------------------------------*/
__device__ void stochastic(double yy[],curandState global_state[], double tlocal,
double deltat,int lindex)
{
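// Add the Gaussian random forcing to the three velocity components; the sqrt(deltat)
// factor is the Euler-Maruyama scaling of the noise increment.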
double pi = 4.*atan(1.);
//double zz=yy[lindex];
//double r = fmod(zz,pi);
double mean=0;
double sigma=1.;
//double sigma=kappa(zz);
int tid=lindex/pdim;
curandState local_state=global_state[tid];
double uu1 = Gaussian(mean,sigma,&local_state);
double uu2 = Gaussian(mean,sigma,&local_state);
double uu3 = Gaussian(mean,sigma,&local_state);
global_state[tid] = local_state;
yy[lindex+3]=yy[lindex+3]+one_over_tau*uu1*sqrt(deltat);
yy[lindex+4]=yy[lindex+4]+one_over_tau*uu2*sqrt(deltat);
yy[lindex+5]=yy[lindex+5]+one_over_tau*uu3*sqrt(deltat);
}
/*---------------
__global__ void inirand_evolve(unsigned long long seed[], dev_global_state[]){
int tid = blockIdx.x;
unsigned long long local_seed = seed[tid];
curandState local_state;
local_state = dev_global_state[tid];
curand_init(local_seed,tid,0, &local_state);
dev_global_state[tid] = local_state;
}*/
/* ----------------------------------------*/
void iniconf(double y[],int Nensemble, curandState rand_state[]){
curandState *dev_iniran_state;
double rand[Nensemble],rand2[Nensemble];
double *dev_rand;
unsigned long long seed[Nensemble];
unsigned long long *dev_seed;
for(int i=0;i<Nensemble;i++){
seed[i]=37*i+53*i*i;
rand[i]=0.;
rand2[i]=0.;
}
dev_rand= host2dev(Nensemble,rand);
dev_seed = host2dev(Nensemble,seed);
cudaMalloc( (void**)&dev_iniran_state, Nensemble*sizeof(curandState) );
init_random<<<Nensemble,1>>>(dev_seed,dev_iniran_state);
UniformRandom<<<Nensemble,1>>>(dev_rand, dev_iniran_state);
dev2host(rand,Nensemble,dev_rand);
UniformRandom<<<Nensemble,1>>>(dev_rand, dev_iniran_state);
dev2host(rand2,Nensemble,dev_rand);
for(int j=0;j<Nensemble;j++){
// Uniformly distributed initial position between iniy_min to iniy_max
y[0+j*pdim]=iniy_min+rand[j]*(iniy_max-iniy_min);
// and random initial velocity
y[1+j*pdim]=rand2[j];
printf("y0,y1,%lf,%lf\n",y[0],y[1]);
}
/* copy the state of the random no. generator to host */
dev2host(rand_state,Nensemble,dev_iniran_state);
// inirand_evolve<<<Nensemble,1>>>(dev_seed, dev_rand_state);
}
/* ----------------------------------------*/
__host__ void diag(double tt, double y[], int Nensemble, FILE* tseries, FILE* diagf){
int ndim=pdim*Nensemble;
if (tt == 0.) {
yzero=(double*)malloc(ndim*sizeof(double));
for (int i=0;i<ndim;i++){
yzero[i]=y[i];
}
}
//printf("%lf\t%lf\t%lf\t%lf\t%lf\n",tt,y[0],y[1],y[2],y[3]);
fprintf(tseries,"%lf\t",tt);
for (int i=0;i<ndim-1;i++){
fprintf(tseries,"%lf\t",y[i]);
}
fprintf(tseries,"%lf\n",y[ndim-1]);
double meanz=0.;
double vsqrm=0.;
double dzrms=0;
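// Sum over the ensemble: first coordinate, squared displacement from the initial
// condition, and squared speed; averages are taken after the loop.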
for(int i=0; i<Nensemble; i++){
int lindex=pdim*i;
double zz1=y[lindex+0];
double zz2=y[lindex+1];
double zz3=y[lindex+2];
double vv1=y[lindex+3];
double vv2=y[lindex+4];
double vv3=y[lindex+5];
double dz=zz1-yzero[lindex];
meanz= zz1+meanz ;
dzrms= dz*dz+dzrms ;
vsqrm= vv1*vv1+vv2*vv2+vv3*vv3+vsqrm;
}
double zz=y[0];
meanz=meanz/Nensemble;
vsqrm=vsqrm/Nensemble;
dzrms=sqrt(dzrms)/Nensemble;
printf("%lf\t%lf\t%lf\t%lf\n",tt,dzrms,meanz,vsqrm);
//printf("%lf\t%lf\t%lf\n",tt,u1,u2);
fprintf(diagf,"%lf\t%lf\t%lf\t%lf\n",tt,zz,dzrms,meanz,vsqrm);
}
/* ----------------------------------------*/
|
ebf3bd0e058a278b3e8c9d5c8445761bcce1b7e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/mesh_indexing/mesh_indexing.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megdnn/basic_types.h"
#include "megdnn/dtype.h"
#include "src/common/indexing_multi_axis_vec_kdef.h"
#include "src/cuda/indexing_multi_axis_vec/kern.cuh"
#include "src/cuda/mesh_indexing/mesh_indexing.cuh"
#include "src/cuda/utils.cuh"
#define KERN_APPLY_OPR_INDEXING ::megdnn::indexing_multi_axis_vec_kdef::OprFwd
#define KERN_APPLY_OPR_INCR ::megdnn::cuda::indexing_multi_axis_vec::OprAtomicIncr
#define KERN_APPLY_OPR_SET ::megdnn::indexing_multi_axis_vec_kdef::OprSet
namespace {
using namespace megdnn;
using namespace cuda;
using namespace mesh_indexing;
template <typename T, class Opr>
__global__ void mesh_indexing_general_kernel(
T* src, T* dst, const KernIndexer indexer) {
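// One thread per destination element: the indexer maps the destination offset back
// to the matching source offset before Opr is applied.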
uint32_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx < indexer.size) {
int src_idx = indexer.convert_indxer(dst_idx);
Opr::apply(src[src_idx], dst[dst_idx]);
}
}
} // namespace
namespace megdnn {
namespace cuda {
namespace mesh_indexing {
template <typename T, class Opr>
void mesh_indexing_proxy(T* src, T* dst, KernIndexer* indexer, hipStream_t stream) {
hipLaunchKernelGGL(( mesh_indexing_general_kernel<T, Opr>)
, dim3(DIVUP(indexer->size, NR_THREADS)), dim3(NR_THREADS), 0, stream,
src, dst, *indexer);
}
#define INST(_ctype) \
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_INDEXING>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, hipStream_t stream); \
\
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_SET>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, hipStream_t stream);
#define INST_ATOMIC_ADD(_ctype) \
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_INCR>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, hipStream_t stream);
#define cb(_dtype) INST(DTypeTrait<_dtype>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#define cb(_dtype) INST_ATOMIC_ADD(DTypeTrait<_dtype>::ctype)
cb(dtype::Float32);
cb(dtype::Int32)
#undef cb
#undef INST
} // namespace mesh_indexing
} // namespace cuda
} // namespace megdnn
| ebf3bd0e058a278b3e8c9d5c8445761bcce1b7e4.cu | /**
* \file dnn/src/cuda/mesh_indexing/mesh_indexing.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "megdnn/basic_types.h"
#include "megdnn/dtype.h"
#include "src/common/indexing_multi_axis_vec_kdef.h"
#include "src/cuda/indexing_multi_axis_vec/kern.cuh"
#include "src/cuda/mesh_indexing/mesh_indexing.cuh"
#include "src/cuda/utils.cuh"
#define KERN_APPLY_OPR_INDEXING ::megdnn::indexing_multi_axis_vec_kdef::OprFwd
#define KERN_APPLY_OPR_INCR ::megdnn::cuda::indexing_multi_axis_vec::OprAtomicIncr
#define KERN_APPLY_OPR_SET ::megdnn::indexing_multi_axis_vec_kdef::OprSet
namespace {
using namespace megdnn;
using namespace cuda;
using namespace mesh_indexing;
template <typename T, class Opr>
__global__ void mesh_indexing_general_kernel(
T* src, T* dst, const KernIndexer indexer) {
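// One thread per destination element: the indexer maps the destination offset back
// to the matching source offset before Opr is applied.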
uint32_t dst_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (dst_idx < indexer.size) {
int src_idx = indexer.convert_indxer(dst_idx);
Opr::apply(src[src_idx], dst[dst_idx]);
}
}
} // namespace
namespace megdnn {
namespace cuda {
namespace mesh_indexing {
template <typename T, class Opr>
void mesh_indexing_proxy(T* src, T* dst, KernIndexer* indexer, cudaStream_t stream) {
mesh_indexing_general_kernel<T, Opr>
<<<DIVUP(indexer->size, NR_THREADS), NR_THREADS, 0, stream>>>(
src, dst, *indexer);
}
#define INST(_ctype) \
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_INDEXING>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, cudaStream_t stream); \
\
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_SET>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, cudaStream_t stream);
#define INST_ATOMIC_ADD(_ctype) \
template void mesh_indexing_proxy<_ctype, KERN_APPLY_OPR_INCR>( \
_ctype * src, _ctype * dst, KernIndexer * indexer, cudaStream_t stream);
#define cb(_dtype) INST(DTypeTrait<_dtype>::ctype)
MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
#define cb(_dtype) INST_ATOMIC_ADD(DTypeTrait<_dtype>::ctype)
cb(dtype::Float32);
cb(dtype::Int32)
#undef cb
#undef INST
} // namespace mesh_indexing
} // namespace cuda
} // namespace megdnn
|
c87e4ad856706a371bc0f563caa190fa178e26d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
namespace Neighbours
{
class FileHandle
{
public:
int InputSize(void)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
int N = 0;
while(fgets(line, 30, input) != NULL)
N++;
fclose(input);
return N;
}
void ReadFromFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
for(int i = 0; i < (*N); i++)
{
fgets(line, 30, input);
sscanf(line, "%lf %lf %lf", &x[i], &y[i], &z[i]);
b[i] = true;
}
fclose(input);
printf("Data imported from input.txt successfully!\n");
}
void WriteToFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *output = NULL;
output = fopen("output.txt", "w");
for(int i = 0; i < (*N); i++)
{
if(b[i] == true)
fprintf(output, "%.1lf %.1lf %.1lf\n", x[i], y[i], z[i]);
}
fclose(output);
printf("Data exported to output.txt successfully!\n");
}
};
__global__ void kernel(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
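// One thread per point: clear the flag of every point lying outside the sphere of
// radius r centred at (x, y, z), so only neighbours remain marked.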
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < *d_N)
{
if((pow((*x)-d_xx[index], 2) + pow((*y)-d_yy[index], 2) + pow((*z)-d_zz[index], 2)) > pow(*r, 2))
d_bb[index] = false;
}
}
class NeighbourSearch
{
public:
void FindNeighbours(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
int grid_size, block_size = 256;
grid_size = ((*d_N) + block_size) / block_size;
hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(block_size), 0, 0, d_xx, d_yy, d_zz, d_bb, d_N, x, y, z, r);
hipDeviceSynchronize();
}
};
} // namespace Neighbours
int main()
{
Neighbours::FileHandle fh = Neighbours::FileHandle();
double *x, *y, *z;
double *r;
double *xx, *yy, *zz;
bool *bb;
int *N;
hipMallocManaged(&N, sizeof(int));
*N = fh.InputSize();
hipMallocManaged(&x, sizeof(double));
hipMallocManaged(&y, sizeof(double));
hipMallocManaged(&z, sizeof(double));
hipMallocManaged(&r, sizeof(double));
hipMallocManaged(&xx, sizeof(double)*(*N));
hipMallocManaged(&yy, sizeof(double)*(*N));
hipMallocManaged(&zz, sizeof(double)*(*N));
hipMallocManaged(&bb, sizeof(double)*(*N));
fh.ReadFromFile(xx, yy, zz, bb, N);
Neighbours::NeighbourSearch ns = Neighbours::NeighbourSearch();
while(1)
{
printf("Enter the x, y and z coordinates of the point and the search distance:\t");
scanf("%lf %lf %lf %lf", x, y, z, r);
if((*r) <= 0)
break;
else
{
ns.FindNeighbours(xx, yy, zz, bb, N, x, y, z, r);
fh.WriteToFile(xx, yy, zz, bb, N);
}
}
hipFree(xx);
hipFree(yy);
hipFree(zz);
hipFree(bb);
hipFree(N);
hipFree(x);
hipFree(y);
hipFree(z);
hipFree(r);
printf("Program terminated.\n");
return 0;
}
| c87e4ad856706a371bc0f563caa190fa178e26d9.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime.h>
namespace Neighbours
{
class FileHandle
{
public:
int InputSize(void)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
int N = 0;
while(fgets(line, 30, input) != NULL)
N++;
fclose(input);
return N;
}
void ReadFromFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *input = NULL;
input = fopen("input.txt", "r");
char line[30];
for(int i = 0; i < (*N); i++)
{
fgets(line, 30, input);
sscanf(line, "%lf %lf %lf", &x[i], &y[i], &z[i]);
b[i] = true;
}
fclose(input);
printf("Data imported from input.txt successfully!\n");
}
void WriteToFile(double *x, double *y, double *z, bool *b, int *N)
{
FILE *output = NULL;
output = fopen("output.txt", "w");
for(int i = 0; i < (*N); i++)
{
if(b[i] == true)
fprintf(output, "%.1lf %.1lf %.1lf\n", x[i], y[i], z[i]);
}
fclose(output);
printf("Data exported to output.txt successfully!\n");
}
};
__global__ void kernel(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
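// One thread per point: clear the flag of every point lying outside the sphere of
// radius r centred at (x, y, z), so only neighbours remain marked.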
int index = blockIdx.x * blockDim.x + threadIdx.x;
if(index < *d_N)
{
if((pow((*x)-d_xx[index], 2) + pow((*y)-d_yy[index], 2) + pow((*z)-d_zz[index], 2)) > pow(*r, 2))
d_bb[index] = false;
}
}
class NeighbourSearch
{
public:
void FindNeighbours(double *d_xx, double *d_yy, double *d_zz, bool *d_bb, int *d_N, double *x, double *y, double *z, double *r)
{
int grid_size, block_size = 256;
grid_size = ((*d_N) + block_size) / block_size;
kernel<<<grid_size, block_size>>>(d_xx, d_yy, d_zz, d_bb, d_N, x, y, z, r);
cudaDeviceSynchronize();
}
};
} // namespace Neighbours
int main()
{
Neighbours::FileHandle fh = Neighbours::FileHandle();
double *x, *y, *z;
double *r;
double *xx, *yy, *zz;
bool *bb;
int *N;
cudaMallocManaged(&N, sizeof(int));
*N = fh.InputSize();
cudaMallocManaged(&x, sizeof(double));
cudaMallocManaged(&y, sizeof(double));
cudaMallocManaged(&z, sizeof(double));
cudaMallocManaged(&r, sizeof(double));
cudaMallocManaged(&xx, sizeof(double)*(*N));
cudaMallocManaged(&yy, sizeof(double)*(*N));
cudaMallocManaged(&zz, sizeof(double)*(*N));
cudaMallocManaged(&bb, sizeof(double)*(*N));
fh.ReadFromFile(xx, yy, zz, bb, N);
Neighbours::NeighbourSearch ns = Neighbours::NeighbourSearch();
while(1)
{
printf("Enter the x, y and z coordinates of the point and the search distance:\t");
scanf("%lf %lf %lf %lf", x, y, z, r);
if((*r) <= 0)
break;
else
{
ns.FindNeighbours(xx, yy, zz, bb, N, x, y, z, r);
fh.WriteToFile(xx, yy, zz, bb, N);
}
}
cudaFree(xx);
cudaFree(yy);
cudaFree(zz);
cudaFree(bb);
cudaFree(N);
cudaFree(x);
cudaFree(y);
cudaFree(z);
cudaFree(r);
printf("Program terminated.\n");
return 0;
}
|
9825e06dd75711802246251a84d51fbdb594b06c.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include <hip/hip_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 8M
const unsigned int BLOCK_SIZE = 1024; // block size
/*
*
 * Many blocks: every thread executes one computation task, and all computation tasks execute on the device
*/
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < DATE_SIZE)
{
// Accumulate atomically: a plain "+=" from many threads would race on *pResult
atomicAdd(pResult, pInputData[tid] * pInputData[tid]);
}
}
int main(int argc, char* argv[])
{
// Get cuda device count
int iCount;
hipGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
hipDeviceProp_t prop;
if(hipGetDeviceProperties(&prop, i) == hipSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
hipSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(hipMalloc((void**)&pDeviceResult, sizeof(int)));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyHostToDevice);
HANDLE_CUDA_ERROR(hipMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, hipMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
unsigned int GRID_SIZE = (DATE_SIZE + BLOCK_SIZE - 1)/BLOCK_SIZE;
hipLaunchKernelGGL(( SquareSum), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, pDeviceData, pDeviceResult);
hipError_t err = hipGetLastError();
if(err != hipSuccess)
{
printf("%s\n", hipGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(hipMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(hipMemcpy(&hostResult, pDeviceResult, sizeof(int), hipMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(hipMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(hipFree(pDeviceData));
HANDLE_CUDA_ERROR(hipFree(pDeviceResult));
// Print result
printf("Square Sum Computed Via Result GPU is %d.\n", hostResult);
// hipDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(hipDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
| 9825e06dd75711802246251a84d51fbdb594b06c.cu | #include "stdio.h"
#include <cuda_runtime.h>
#include "../../../CudaHelper.h"
const unsigned int DATE_SIZE = 1 << 24; // 8M
const unsigned int BLOCK_SIZE = 1024; // block size
/*
*
 * Many blocks: every thread executes one computation task, and all computation tasks execute on the device
*/
// Kernel function to compute square sum of an int array to a result
__global__ void SquareSum(int *pInputData, int *pResult)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < DATE_SIZE)
{
// Accumulate atomically: a plain "+=" from many threads would race on *pResult
atomicAdd(pResult, pInputData[tid] * pInputData[tid]);
}
}
int main(int argc, char* argv[])
{
// Get cuda device count
int iCount;
cudaGetDeviceCount(&iCount);
if(0 == iCount)
{
printf("There is no cuda device\n");
return false;
}
// Find the first suitable device
int i;
for (i = 0; i < iCount; i++)
{
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop, i) == cudaSuccess)
{
// find a prop > CUDA 1.X device and break
if(prop.major >= 1)
{
break;
}
}
}
// can not find a prop > CUDA 1.X device and return false
if(i == iCount)
{
printf("There is no CUDA 1.X device\n");
return false;
}
// Set the suitable device to current
cudaSetDevice(i);
// Malloc host data
int *pHostData = (int*)malloc(sizeof(int)*DATE_SIZE);
int hostResult = 0;
if( 0 == pHostData)
{
printf("malloc host data failed!!!\n");
return -1;
}
// Generate 16M rand data range from 0 to 4
for(int i = 0; i < DATE_SIZE; i++)
{
pHostData[i] = rand() % 5;
}
// Malloc device data
int *pDeviceData = NULL;
int *pDeviceResult = NULL;
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceData, sizeof(int) * DATE_SIZE));
HANDLE_CUDA_ERROR(cudaMalloc((void**)&pDeviceResult, sizeof(int)));
printf("\nGPU COMPUTE BEGIN********************\n");
// Record total time elapsed via GPU
TIME_TRACE_CUDA_EVENT_START(TotalElpasedTimeViaGPU);
// Copy host data to device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyHostToDevice);
HANDLE_CUDA_ERROR(cudaMemcpy(pDeviceData, pHostData, sizeof(int) * DATE_SIZE, cudaMemcpyHostToDevice));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyHostToDevice);
// Execute Kernel
TIME_TRACE_CUDA_EVENT_START(SqureSumKernel);
unsigned int GRID_SIZE = (DATE_SIZE + BLOCK_SIZE - 1)/BLOCK_SIZE;
SquareSum<<<GRID_SIZE, BLOCK_SIZE>>>(pDeviceData, pDeviceResult);
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess)
{
printf("%s\n", cudaGetErrorString(err));
}
TIME_TRACE_CUDA_EVENT_STOP(SqureSumKernel);
// Copy result from device
TIME_TRACE_CUDA_EVENT_START(cudaMemcpyDeviceToHost);
HANDLE_CUDA_ERROR(cudaMemcpy(&hostResult, pDeviceResult, sizeof(int), cudaMemcpyDeviceToHost));
TIME_TRACE_CUDA_EVENT_STOP(cudaMemcpyDeviceToHost);
TIME_TRACE_CUDA_EVENT_STOP(TotalElpasedTimeViaGPU);
// Free device memory
HANDLE_CUDA_ERROR(cudaFree(pDeviceData));
HANDLE_CUDA_ERROR(cudaFree(pDeviceResult));
// Print result
printf("Square Sum Computed Via Result GPU is %d.\n", hostResult);
// cudaDeviceReset to ensure Visual Profile run correctly
HANDLE_CUDA_ERROR(cudaDeviceReset());
printf("\nGPU COMPUTE END********************\n");
printf("\nCPU COMPUTE BEGIN********************\n");
// Compute in CPU for comparision
hostResult = 0;
TIME_TRACE_CPU_START(TotalElpasedTimeViaCPU);
for (int i = 0 ; i < DATE_SIZE; i++)
{
hostResult += pHostData[i] * pHostData[i];
}
TIME_TRACE_CPU_STOP(TotalElpasedTimeViaCPU);
// Free host memory
free(pHostData); pHostData = NULL;
// Print result
printf("Square Sum Computed Result Via CPU is %d.\n", hostResult);
printf("\nCPU COMPUTE END********************\n");
return 0;
}
|
b13ad6171c033b49d437e17ec9c068b31d619134.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Junyuan Xie
*/
#include "./optimizer_op-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
template<int req>
struct SGDMomStdDnsRspDnsKernel<req, gpu> {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, index_t row_length, DType* out_data,
DType* mom_data, const DType* weight_data, const IType* grad_idx,
const DType* grad_data, const RType* prefix_sum, const DType clip_gradient,
const DType momentum, const DType lr, const DType wd, const DType rescale_grad) {
using nnvm::dim_t;
const DType rate = lr * wd;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const dim_t nnr = prefix_sum[row_id];
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0
: nnr > prefix_sum[row_id - 1];
const RType grad_i = (nnr - 1) * row_length + col_id;
const DType grad = non_zero ? grad_data[grad_i]
: static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
mom_data[i] = momentum * mom_data[i]
- rate * weight_data[i]
- lr * mshadow_op::clip::Map(rescale_grad * grad, clip_gradient);
} else {
mom_data[i] = momentum * mom_data[i]
- rate * weight_data[i] - lr * rescale_grad * grad;
}
KERNEL_ASSIGN(out_data[i], req, weight_data[i] + mom_data[i]);
}
};
template<>
void SGDMomStdUpdateDnsRspDnsImpl<gpu>(const SGDMomParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mom,
const OpReqType& req,
TBlob *out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp) return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse sgd_mom_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mom.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
DType* weight_data = weight.dptr<DType>();
IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
DType* grad_val = grad.data().dptr<DType>();
DType* mom_data = mom.dptr<DType>();
DType* out_data = out->dptr<DType>();
nnvm::dim_t num_rows = weight.shape_[0];
nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(nnvm::dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0],
prefix_sum, grad_idx);
// calculate inclusive prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
}
size_t num_threads = num_rows * row_length;
Kernel<SGDMomStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(s, num_threads, row_length,
out_data, mom_data, weight_data, grad_idx, grad_val, prefix_sum,
static_cast<DType>(param.clip_gradient), static_cast<DType>(param.momentum),
static_cast<DType>(param.lr), static_cast<DType>(param.wd),
static_cast<DType>(param.rescale_grad));
});
});
});
}
template<int req>
struct AdamStdDnsRspDnsKernel<req, gpu> {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, const nnvm::dim_t row_length, DType* out_data,
DType* mean_data, DType* var_data, const DType* weight_data, const IType* grad_idx,
const DType* grad_data, const RType* prefix_sum, const DType clip_gradient,
const DType beta1, const DType beta2, const DType lr, const DType wd,
const DType epsilon, const DType rescale_grad) {
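// Same row-sparse lookup as the SGD momentum kernel: prefix_sum locates the row in the
// compacted gradient, and rows without a gradient entry contribute only their weight-decay term.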
using namespace mshadow_op;
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0
: prefix_sum[row_id] > prefix_sum[row_id - 1];
const RType grad_offset = (prefix_sum[row_id] - 1) * row_length + col_id;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_offset] * rescale_grad
+ weight_data[i] * wd)
: static_cast<DType>(weight_data[i] * wd);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}
mean_data[i] = beta1 * mean_data[i] + (1.f - beta1) * grad_rescaled;
var_data[i] = beta2 * var_data[i] +
(1.f - beta2) * square::Map(grad_rescaled);
KERNEL_ASSIGN(out_data[i], req, weight_data[i] - lr * mean_data[i] /
(square_root::Map(var_data[i]) + epsilon));
}
};
template<>
void AdamStdUpdateDnsRspDnsImpl<gpu>(const AdamParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mean,
const TBlob& var,
const OpReqType& req,
TBlob *out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp) return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse adam_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mean.shape_.Size(), 0);
CHECK_GT(var.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
const DType* weight_data = weight.dptr<DType>();
const IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
const DType* grad_val = grad.data().dptr<DType>();
DType* mean_data = mean.dptr<DType>();
DType* var_data = var.dptr<DType>();
DType* out_data = out->dptr<DType>();
const nnvm::dim_t num_rows = weight.shape_[0];
const nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(nnvm::dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0],
prefix_sum, grad_idx);
// calculate inclusive prefix sum
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
}
Kernel<AdamStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(s, weight.shape_.Size(),
row_length, out_data, mean_data, var_data, weight_data, grad_idx, grad_val, prefix_sum,
static_cast<DType>(param.clip_gradient), static_cast<DType>(param.beta1),
static_cast<DType>(param.beta2), static_cast<DType>(param.lr),
static_cast<DType>(param.wd), static_cast<DType>(param.epsilon),
static_cast<DType>(param.rescale_grad));
});
});
});
}
NNVM_REGISTER_OP(signsgd_update)
.set_attr<FCompute>("FCompute<gpu>", SignSGDUpdate<gpu>);
NNVM_REGISTER_OP(signum_update)
.set_attr<FCompute>("FCompute<gpu>", SignumUpdate<gpu>);
NNVM_REGISTER_OP(sgd_update)
.set_attr<FCompute>("FCompute<gpu>", SGDUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDUpdateEx<gpu>);
NNVM_REGISTER_OP(sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", SGDMomUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDMomUpdateEx<gpu>);
NNVM_REGISTER_OP(mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MP_SGDUpdate<gpu>);
NNVM_REGISTER_OP(mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MP_SGDMomUpdate<gpu>);
NNVM_REGISTER_OP(multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, type_identity, 2>);
NNVM_REGISTER_OP(multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, type_identity, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, single_precision, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, single_precision, 4>);
NNVM_REGISTER_OP(ftml_update)
.set_attr<FCompute>("FCompute<gpu>", FTMLUpdate<gpu>);
NNVM_REGISTER_OP(adam_update)
.set_attr<FCompute>("FCompute<gpu>", AdamUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdamUpdateEx<gpu>);
NNVM_REGISTER_OP(rmsprop_update)
.set_attr<FCompute>("FCompute<gpu>", RMSPropUpdate<gpu>);
NNVM_REGISTER_OP(rmspropalex_update)
.set_attr<FCompute>("FCompute<gpu>", RMSPropAlexUpdate<gpu>);
NNVM_REGISTER_OP(ftrl_update)
.set_attr<FCompute>("FCompute<gpu>", FtrlUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", FtrlUpdateEx<gpu>);
NNVM_REGISTER_OP(_sparse_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdagradUpdateEx<gpu>);
} // namespace op
} // namespace mxnet
| b13ad6171c033b49d437e17ec9c068b31d619134.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file optimizer_op.cu
* \brief Optimizer operators
* \author Junyuan Xie
*/
#include "./optimizer_op-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
template<int req>
struct SGDMomStdDnsRspDnsKernel<req, gpu> {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, index_t row_length, DType* out_data,
DType* mom_data, const DType* weight_data, const IType* grad_idx,
const DType* grad_data, const RType* prefix_sum, const DType clip_gradient,
const DType momentum, const DType lr, const DType wd, const DType rescale_grad) {
using nnvm::dim_t;
const DType rate = lr * wd;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const dim_t nnr = prefix_sum[row_id];
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0
: nnr > prefix_sum[row_id - 1];
const RType grad_i = (nnr - 1) * row_length + col_id;
const DType grad = non_zero ? grad_data[grad_i]
: static_cast<DType>(0);
if (clip_gradient >= 0.0f) {
mom_data[i] = momentum * mom_data[i]
- rate * weight_data[i]
- lr * mshadow_op::clip::Map(rescale_grad * grad, clip_gradient);
} else {
mom_data[i] = momentum * mom_data[i]
- rate * weight_data[i] - lr * rescale_grad * grad;
}
KERNEL_ASSIGN(out_data[i], req, weight_data[i] + mom_data[i]);
}
};
template<>
void SGDMomStdUpdateDnsRspDnsImpl<gpu>(const SGDMomParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mom,
const OpReqType& req,
TBlob *out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp) return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse sgd_mom_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mom.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
DType* weight_data = weight.dptr<DType>();
IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
DType* grad_val = grad.data().dptr<DType>();
DType* mom_data = mom.dptr<DType>();
DType* out_data = out->dptr<DType>();
nnvm::dim_t num_rows = weight.shape_[0];
nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(nnvm::dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0],
prefix_sum, grad_idx);
// calculate inclusive prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
}
size_t num_threads = num_rows * row_length;
Kernel<SGDMomStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(s, num_threads, row_length,
out_data, mom_data, weight_data, grad_idx, grad_val, prefix_sum,
static_cast<DType>(param.clip_gradient), static_cast<DType>(param.momentum),
static_cast<DType>(param.lr), static_cast<DType>(param.wd),
static_cast<DType>(param.rescale_grad));
});
});
});
}
template<int req>
struct AdamStdDnsRspDnsKernel<req, gpu> {
template<typename DType, typename IType, typename RType>
MSHADOW_XINLINE static void Map(int i, const nnvm::dim_t row_length, DType* out_data,
DType* mean_data, DType* var_data, const DType* weight_data, const IType* grad_idx,
const DType* grad_data, const RType* prefix_sum, const DType clip_gradient,
const DType beta1, const DType beta2, const DType lr, const DType wd,
const DType epsilon, const DType rescale_grad) {
using namespace mshadow_op;
using nnvm::dim_t;
const dim_t row_id = i / row_length;
const dim_t col_id = i % row_length;
const bool non_zero = (row_id == 0) ? prefix_sum[0] > 0
: prefix_sum[row_id] > prefix_sum[row_id - 1];
const RType grad_offset = (prefix_sum[row_id] - 1) * row_length + col_id;
DType grad_rescaled = non_zero ? static_cast<DType>(grad_data[grad_offset] * rescale_grad
+ weight_data[i] * wd)
: static_cast<DType>(weight_data[i] * wd);
if (clip_gradient >= 0.0f) {
grad_rescaled = clip::Map(grad_rescaled, clip_gradient);
}
mean_data[i] = beta1 * mean_data[i] + (1.f - beta1) * grad_rescaled;
var_data[i] = beta2 * var_data[i] +
(1.f - beta2) * square::Map(grad_rescaled);
KERNEL_ASSIGN(out_data[i], req, weight_data[i] - lr * mean_data[i] /
(square_root::Map(var_data[i]) + epsilon));
}
};
template<>
void AdamStdUpdateDnsRspDnsImpl<gpu>(const AdamParam& param,
const OpContext& ctx,
const TBlob& weight,
const NDArray& grad,
const TBlob& mean,
const TBlob& var,
const OpReqType& req,
TBlob *out) {
using namespace mxnet_op;
using namespace rowsparse;
using namespace mshadow;
Stream<gpu>* s = ctx.get_stream<gpu>();
if (req == kNullOp) return;
CHECK_EQ(req, kWriteInplace) << "kWriteInplace is expected for sparse adam_update";
CHECK_GT(weight.shape_.Size(), 0);
CHECK_GT(mean.shape_.Size(), 0);
CHECK_GT(var.shape_.Size(), 0);
MSHADOW_REAL_TYPE_SWITCH(weight.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(grad.aux_type(kIdx), IType, {
MXNET_ASSIGN_REQ_SWITCH(req, req_type, {
const DType* weight_data = weight.dptr<DType>();
const IType* grad_idx = grad.aux_data(kIdx).dptr<IType>();
const DType* grad_val = grad.data().dptr<DType>();
DType* mean_data = mean.dptr<DType>();
DType* var_data = var.dptr<DType>();
DType* out_data = out->dptr<DType>();
const nnvm::dim_t num_rows = weight.shape_[0];
const nnvm::dim_t row_length = weight.shape_.ProdShape(1, weight.ndim());
nnvm::dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(nnvm::dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<nnvm::dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(nnvm::dim_t);
// mark row flags
Fill<false>(s, TBlob(prefix_sum, Shape1(num_rows), gpu::kDevMask), kWriteTo, 0);
if (grad.storage_initialized()) {
Kernel<MarkRowFlgKernel, gpu>::Launch(s, grad.aux_shape(kIdx)[0],
prefix_sum, grad_idx);
// calculate inclusive prefix sum
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
}
Kernel<AdamStdDnsRspDnsKernel<req_type, gpu>, gpu>::Launch(s, weight.shape_.Size(),
row_length, out_data, mean_data, var_data, weight_data, grad_idx, grad_val, prefix_sum,
static_cast<DType>(param.clip_gradient), static_cast<DType>(param.beta1),
static_cast<DType>(param.beta2), static_cast<DType>(param.lr),
static_cast<DType>(param.wd), static_cast<DType>(param.epsilon),
static_cast<DType>(param.rescale_grad));
});
});
});
}
NNVM_REGISTER_OP(signsgd_update)
.set_attr<FCompute>("FCompute<gpu>", SignSGDUpdate<gpu>);
NNVM_REGISTER_OP(signum_update)
.set_attr<FCompute>("FCompute<gpu>", SignumUpdate<gpu>);
NNVM_REGISTER_OP(sgd_update)
.set_attr<FCompute>("FCompute<gpu>", SGDUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDUpdateEx<gpu>);
NNVM_REGISTER_OP(sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", SGDMomUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", SGDMomUpdateEx<gpu>);
NNVM_REGISTER_OP(mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MP_SGDUpdate<gpu>);
NNVM_REGISTER_OP(mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MP_SGDMomUpdate<gpu>);
NNVM_REGISTER_OP(multi_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, type_identity, 2>);
NNVM_REGISTER_OP(multi_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, type_identity, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDUpdate<gpu, single_precision, 3>);
NNVM_REGISTER_OP(multi_mp_sgd_mom_update)
.set_attr<FCompute>("FCompute<gpu>", MultiSGDMomUpdate<gpu, single_precision, 4>);
NNVM_REGISTER_OP(ftml_update)
.set_attr<FCompute>("FCompute<gpu>", FTMLUpdate<gpu>);
NNVM_REGISTER_OP(adam_update)
.set_attr<FCompute>("FCompute<gpu>", AdamUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdamUpdateEx<gpu>);
NNVM_REGISTER_OP(rmsprop_update)
.set_attr<FCompute>("FCompute<gpu>", RMSPropUpdate<gpu>);
NNVM_REGISTER_OP(rmspropalex_update)
.set_attr<FCompute>("FCompute<gpu>", RMSPropAlexUpdate<gpu>);
NNVM_REGISTER_OP(ftrl_update)
.set_attr<FCompute>("FCompute<gpu>", FtrlUpdate<gpu>)
.set_attr<FComputeEx>("FComputeEx<gpu>", FtrlUpdateEx<gpu>);
NNVM_REGISTER_OP(_sparse_adagrad_update)
.set_attr<FComputeEx>("FComputeEx<gpu>", AdagradUpdateEx<gpu>);
} // namespace op
} // namespace mxnet
|
04d5fecda3d930fa17aa8bd773719161f6a60b50.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/manifold/umapparams.h>
#include <cuml/manifold/umap.hpp>
#include "runner.cuh"
#include <iostream>
namespace ML {
static const int TPB_X = 256;
void transform(const raft::handle_t &handle, float *X, int n, int d,
int64_t *knn_indices, float *knn_dists, float *orig_X,
int orig_n, float *embedding, int embedding_n,
UMAPParams *params, float *transformed) {
UMAPAlgo::_transform<float, TPB_X>(handle, X, n, d, knn_indices, knn_dists,
orig_X, orig_n, embedding, embedding_n,
params, transformed);
}
void fit(const raft::handle_t &handle,
float *X, // input matrix
float *y, // labels
int n, int d, int64_t *knn_indices, float *knn_dists,
UMAPParams *params, float *embeddings) {
UMAPAlgo::_fit<float, TPB_X>(handle, X, y, n, d, knn_indices, knn_dists,
params, embeddings);
}
void fit(const raft::handle_t &handle,
float *X, // input matrix
int n, // rows
int d, // cols
int64_t *knn_indices, float *knn_dists, UMAPParams *params,
float *embeddings) {
UMAPAlgo::_fit<float, TPB_X>(handle, X, n, d, knn_indices, knn_dists, params,
embeddings);
}
void find_ab(const raft::handle_t &handle, UMAPParams *params) {
hipStream_t stream = handle.get_stream();
auto d_alloc = handle.get_device_allocator();
UMAPAlgo::find_ab(params, d_alloc, stream);
}
UMAP_API::UMAP_API(const raft::handle_t &handle, UMAPParams *params)
: params(params) {
this->handle = const_cast<raft::handle_t *>(&handle);
orig_X = nullptr;
orig_n = 0;
};
UMAP_API::~UMAP_API() {}
/**
* Fits a UMAP model
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param knn_indices
* an array containing the n_neighbors nearest neighbor indices for each sample
* @param knn_dists
* an array containing the n_neighbors nearest neighbor distances for each sample
* @param embeddings
* an array to return the output embeddings of size (n_samples, n_components)
*/
void UMAP_API::fit(float *X, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embeddings) {
this->orig_X = X;
this->orig_n = n;
UMAPAlgo::_fit<float, TPB_X>(*this->handle, X, n, d, knn_indices, knn_dists,
get_params(), embeddings);
}
void UMAP_API::fit(float *X, float *y, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embeddings) {
this->orig_X = X;
this->orig_n = n;
UMAPAlgo::_fit<float, TPB_X>(*this->handle, X, y, n, d, knn_indices,
knn_dists, get_params(), embeddings);
}
/**
* Project a set of X vectors into the embedding space.
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param knn_indices
* an array containing the n_neighbors nearest neighbor indices for each sample
* @param knn_dists
* an array containing the n_neighbors nearest neighbor distances for each sample
* @param embedding
* pointer to embedding array of size (embedding_n, n_components) that has been created with fit()
* @param embedding_n
* n_samples in embedding array
* @param out
* pointer to array for storing output embeddings (n, n_components)
*/
void UMAP_API::transform(float *X, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embedding, int embedding_n,
float *out) {
UMAPAlgo::_transform<float, TPB_X>(*this->handle, X, n, d, knn_indices,
knn_dists, this->orig_X, this->orig_n,
embedding, embedding_n, get_params(), out);
}
/**
* Get the UMAPParams instance
*/
UMAPParams *UMAP_API::get_params() { return this->params; }
} // namespace ML
| 04d5fecda3d930fa17aa8bd773719161f6a60b50.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuml/manifold/umapparams.h>
#include <cuml/manifold/umap.hpp>
#include "runner.cuh"
#include <iostream>
namespace ML {
static const int TPB_X = 256;
void transform(const raft::handle_t &handle, float *X, int n, int d,
int64_t *knn_indices, float *knn_dists, float *orig_X,
int orig_n, float *embedding, int embedding_n,
UMAPParams *params, float *transformed) {
UMAPAlgo::_transform<float, TPB_X>(handle, X, n, d, knn_indices, knn_dists,
orig_X, orig_n, embedding, embedding_n,
params, transformed);
}
void fit(const raft::handle_t &handle,
float *X, // input matrix
float *y, // labels
int n, int d, int64_t *knn_indices, float *knn_dists,
UMAPParams *params, float *embeddings) {
UMAPAlgo::_fit<float, TPB_X>(handle, X, y, n, d, knn_indices, knn_dists,
params, embeddings);
}
void fit(const raft::handle_t &handle,
float *X, // input matrix
int n, // rows
int d, // cols
int64_t *knn_indices, float *knn_dists, UMAPParams *params,
float *embeddings) {
UMAPAlgo::_fit<float, TPB_X>(handle, X, n, d, knn_indices, knn_dists, params,
embeddings);
}
void find_ab(const raft::handle_t &handle, UMAPParams *params) {
cudaStream_t stream = handle.get_stream();
auto d_alloc = handle.get_device_allocator();
UMAPAlgo::find_ab(params, d_alloc, stream);
}
UMAP_API::UMAP_API(const raft::handle_t &handle, UMAPParams *params)
: params(params) {
this->handle = const_cast<raft::handle_t *>(&handle);
orig_X = nullptr;
orig_n = 0;
};
UMAP_API::~UMAP_API() {}
/**
* Fits a UMAP model
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param knn_indices
* an array containing the n_neighbors nearest neighbor indices for each sample
* @param knn_dists
* an array containing the n_neighbors nearest neighbor distances for each sample
* @param embeddings
* an array to return the output embeddings of size (n_samples, n_components)
*/
void UMAP_API::fit(float *X, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embeddings) {
this->orig_X = X;
this->orig_n = n;
UMAPAlgo::_fit<float, TPB_X>(*this->handle, X, n, d, knn_indices, knn_dists,
get_params(), embeddings);
}
void UMAP_API::fit(float *X, float *y, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embeddings) {
this->orig_X = X;
this->orig_n = n;
UMAPAlgo::_fit<float, TPB_X>(*this->handle, X, y, n, d, knn_indices,
knn_dists, get_params(), embeddings);
}
/**
* Project a set of X vectors into the embedding space.
* @param X
* pointer to an array in row-major format (note: this will be col-major soon)
* @param n
* n_samples in X
* @param d
* d_features in X
* @param knn_indices
* an array containing the n_neighbors nearest neighbor indices for each sample
* @param knn_dists
* an array containing the n_neighbors nearest neighbor distances for each sample
* @param embedding
* pointer to embedding array of size (embedding_n, n_components) that has been created with fit()
* @param embedding_n
* n_samples in embedding array
* @param out
* pointer to array for storing output embeddings (n, n_components)
*/
void UMAP_API::transform(float *X, int n, int d, int64_t *knn_indices,
float *knn_dists, float *embedding, int embedding_n,
float *out) {
UMAPAlgo::_transform<float, TPB_X>(*this->handle, X, n, d, knn_indices,
knn_dists, this->orig_X, this->orig_n,
embedding, embedding_n, get_params(), out);
}
/**
* Get the UMAPParams instance
*/
UMAPParams *UMAP_API::get_params() { return this->params; }
} // namespace ML
|
8cc6bb17d6911ae0b2a37e51b1962ac21285d97f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include "util/mgpucontext.h"
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
hipEventRecord(start);
hipDeviceSynchronize();
}
double CudaTimer::Split() {
hipEventRecord(end);
hipDeviceSynchronize();
float t;
hipEventElapsedTime(&t, start, end);
start.Swap(end);
//return (t / 1000.0);
return t;
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
__global__ void KernelVersionShim() { }
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
hipError_t error = hipGetDeviceCount(&numCudaDevices);
if(hipSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
hipError_t error = hipGetDeviceProperties(&device->_prop,
ordinal);
if(hipSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
hipSetDevice(ordinal);
hipFuncAttributes attr;
error = hipFuncGetAttributes(&attr, KernelVersionShim);
if(hipSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
hipDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
hipError_t error = hipSetDevice(_ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
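// Worked example (illustrative numbers): a device reporting memoryClockRate = 3004000
// (kHz) on a 384-bit bus gives 3.004e9 * (384 / 8 * 2) / 1e9 ~= 288 GB/s in the
// formula below.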
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(hipEventDisableTiming /*| hipEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) hipStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
hipError_t error = hipHostMalloc((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = hipStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
hipHostFree(_pageLocked);
if(_ownStream && _stream)
hipStreamDestroy(_stream);
if(_auxStream)
hipStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
hipError_t error = hipMemGetInfo(&freeMem, &totalMem);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
hipError_t error = hipGetDevice(&ordinal);
if(hipSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, hipStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(hipStream_t stream) {
int ordinal;
hipGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
hipError_t CudaAllocSimple::Malloc(size_t size, void** p) {
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, size);
if(hipSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
hipError_t error = hipSuccess;
if(p) error = hipFree(p);
return hipSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
hipError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return hipSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
hipError_t error = hipSuccess;
*p = 0;
if(size) error = hipMalloc(p, allocSize);
while((hipErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
error = hipMalloc(p, size);
}
if(hipSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return hipSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
// If the pointer was not found in the address map, hipFree it anyways
// but return false.
if(p) hipFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) hipFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
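// Worked example (illustrative): a 5000-byte request falls between the 4096 and 8192
// buckets, so lower_bound returns index 5 and Malloc rounds the allocation up to
// BucketSizes[5] == 8192. Requests above _maxObjectSize (64MB with the default
// CreateDefaultAlloc capacity) map to NumBuckets and are freed immediately rather
// than cached.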
} // namespace mgpu
| 8cc6bb17d6911ae0b2a37e51b1962ac21285d97f.cu | /******************************************************************************
* Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
*
* Code and text by Sean Baxter, NVIDIA Research
* See http://nvlabs.github.io/moderngpu for repository and documentation.
*
******************************************************************************/
#include "util/mgpucontext.h"
namespace mgpu {
////////////////////////////////////////////////////////////////////////////////
// CudaTimer
void CudaTimer::Start() {
cudaEventRecord(start);
cudaDeviceSynchronize();
}
double CudaTimer::Split() {
cudaEventRecord(end);
cudaDeviceSynchronize();
float t;
cudaEventElapsedTime(&t, start, end);
start.Swap(end);
//return (t / 1000.0);
return t;
}
double CudaTimer::Throughput(int count, int numIterations) {
double elapsed = Split();
return (double)numIterations * count / elapsed;
}
////////////////////////////////////////////////////////////////////////////////
// CudaDevice
__global__ void KernelVersionShim() { }
struct DeviceGroup {
int numCudaDevices;
CudaDevice** cudaDevices;
DeviceGroup() {
numCudaDevices = -1;
cudaDevices = 0;
}
int GetDeviceCount() {
if(-1 == numCudaDevices) {
cudaError_t error = cudaGetDeviceCount(&numCudaDevices);
if(cudaSuccess != error || numCudaDevices <= 0) {
fprintf(stderr, "ERROR ENUMERATING CUDA DEVICES.\nExiting.\n");
exit(0);
}
cudaDevices = new CudaDevice*[numCudaDevices];
memset(cudaDevices, 0, sizeof(CudaDevice*) * numCudaDevices);
}
return numCudaDevices;
}
CudaDevice* GetByOrdinal(int ordinal) {
if(ordinal >= GetDeviceCount()) return 0;
if(!cudaDevices[ordinal]) {
// Retrieve the device properties.
CudaDevice* device = cudaDevices[ordinal] = new CudaDevice;
device->_ordinal = ordinal;
cudaError_t error = cudaGetDeviceProperties(&device->_prop,
ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "FAILURE TO CREATE CUDA DEVICE %d\n", ordinal);
exit(0);
}
// Get the compiler version for this device.
cudaSetDevice(ordinal);
cudaFuncAttributes attr;
error = cudaFuncGetAttributes(&attr, KernelVersionShim);
if(cudaSuccess == error)
device->_ptxVersion = 10 * attr.ptxVersion;
else {
printf("NOT COMPILED WITH COMPATIBLE PTX VERSION FOR DEVICE"
" %d\n", ordinal);
// The module wasn't compiled with support for this device.
device->_ptxVersion = 0;
}
}
return cudaDevices[ordinal];
}
~DeviceGroup() {
if(cudaDevices) {
for(int i = 0; i < numCudaDevices; ++i)
delete cudaDevices[i];
delete [] cudaDevices;
}
cudaDeviceReset();
}
};
std::auto_ptr<DeviceGroup> deviceGroup;
int CudaDevice::DeviceCount() {
if(!deviceGroup.get())
deviceGroup.reset(new DeviceGroup);
return deviceGroup->GetDeviceCount();
}
CudaDevice& CudaDevice::ByOrdinal(int ordinal) {
if(ordinal < 0 || ordinal >= DeviceCount()) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
return *deviceGroup->GetByOrdinal(ordinal);
}
CudaDevice& CudaDevice::Selected() {
int ordinal;
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
return ByOrdinal(ordinal);
}
void CudaDevice::SetActive() {
cudaError_t error = cudaSetDevice(_ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR SETTING CUDA DEVICE TO ORDINAL %d\n", _ordinal);
exit(0);
}
}
std::string CudaDevice::DeviceString() const {
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
_ordinal);
exit(0);
}
double memBandwidth = (_prop.memoryClockRate * 1000.0) *
(_prop.memoryBusWidth / 8 * 2) / 1.0e9;
std::string s = stringprintf(
"%s : %8.3lf Mhz (Ordinal %d)\n"
"%d SMs enabled. Compute Capability sm_%d%d\n"
"FreeMem: %6dMB TotalMem: %6dMB %2d-bit pointers.\n"
"Mem Clock: %8.3lf Mhz x %d bits (%5.1lf GB/s)\n"
"ECC %s\n\n",
_prop.name, _prop.clockRate / 1000.0, _ordinal,
_prop.multiProcessorCount, _prop.major, _prop.minor,
(int)(freeMem / (1<< 20)), (int)(totalMem / (1<< 20)), 8 * sizeof(int*),
_prop.memoryClockRate / 1000.0, _prop.memoryBusWidth, memBandwidth,
_prop.ECCEnabled ? "Enabled" : "Disabled");
return s;
}
////////////////////////////////////////////////////////////////////////////////
// CudaContext
struct ContextGroup {
CudaContext** standardContexts;
int numDevices;
ContextGroup() {
numDevices = CudaDevice::DeviceCount();
standardContexts = new CudaContext*[numDevices];
memset(standardContexts, 0, sizeof(CudaContext*) * numDevices);
}
CudaContext* GetByOrdinal(int ordinal) {
if(!standardContexts[ordinal]) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
standardContexts[ordinal] = new CudaContext(device, false, true);
}
return standardContexts[ordinal];
}
~ContextGroup() {
if(standardContexts) {
for(int i = 0; i < numDevices; ++i)
delete standardContexts[i];
delete [] standardContexts;
}
}
};
std::auto_ptr<ContextGroup> contextGroup;
CudaContext::CudaContext(CudaDevice& device, bool newStream, bool standard) :
_event(cudaEventDisableTiming /*| cudaEventBlockingSync */),
_stream(0), _noRefCount(standard), _pageLocked(0) {
// Create an allocator.
if(standard)
_alloc.reset(new CudaAllocSimple(device));
else
_alloc = CreateDefaultAlloc(device);
if(newStream) cudaStreamCreate(&_stream);
_ownStream = newStream;
// Allocate 4KB of page-locked memory.
cudaError_t error = cudaMallocHost((void**)&_pageLocked, 4096);
// Allocate an auxiliary stream.
error = cudaStreamCreate(&_auxStream);
}
CudaContext::~CudaContext() {
if(_pageLocked)
cudaFreeHost(_pageLocked);
if(_ownStream && _stream)
cudaStreamDestroy(_stream);
if(_auxStream)
cudaStreamDestroy(_auxStream);
}
AllocPtr CudaContext::CreateDefaultAlloc(CudaDevice& device) {
intrusive_ptr<CudaAllocBuckets> alloc(new CudaAllocBuckets(device));
size_t freeMem, totalMem;
cudaError_t error = cudaMemGetInfo(&freeMem, &totalMem);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING MEM INFO FOR CUDA DEVICE %d\n",
device.Ordinal());
exit(0);
}
// Maintain a buffer of 128MB with max objects of 64MB.
alloc->SetCapacity(128<< 20, 64<< 20);
return AllocPtr(alloc.get());
}
CudaContext& CudaContext::StandardContext(int ordinal) {
bool setActive = -1 != ordinal;
if(-1 == ordinal) {
cudaError_t error = cudaGetDevice(&ordinal);
if(cudaSuccess != error) {
fprintf(stderr, "ERROR RETRIEVING CUDA DEVICE ORDINAL\n");
exit(0);
}
}
int numDevices = CudaDevice::DeviceCount();
if(ordinal < 0 || ordinal >= numDevices) {
fprintf(stderr, "CODE REQUESTED INVALID CUDA DEVICE %d\n", ordinal);
exit(0);
}
if(!contextGroup.get())
contextGroup.reset(new ContextGroup);
CudaContext& context = //*contextGroup->standardContexts[ordinal];
*contextGroup->GetByOrdinal(ordinal);
if(!context.PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context.ArchVersion() / 10);
exit(0);
}
if(setActive) context.SetActive();
return context;
}
ContextPtr CreateCudaDevice(int ordinal) {
CudaDevice& device = CudaDevice::ByOrdinal(ordinal);
ContextPtr context(new CudaContext(device, false, false));
return context;
}
ContextPtr CreateCudaDevice(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDevice(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceStream(int ordinal) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), true, false));
return context;
}
ContextPtr CreateCudaDeviceStream(int argc, char** argv, bool printInfo) {
int ordinal = 0;
if(argc >= 2 && !sscanf(argv[1], "%d", &ordinal)) {
fprintf(stderr, "INVALID COMMAND LINE ARGUMENT - NOT A CUDA ORDINAL\n");
exit(0);
}
ContextPtr context = CreateCudaDeviceStream(ordinal);
if(!context->PTXVersion()) {
fprintf(stderr, "This CUDA executable was not compiled with support"
" for device %d (sm_%2d)\n", ordinal, context->ArchVersion() / 10);
exit(0);
}
context->SetActive();
if(printInfo)
printf("%s\n", context->Device().DeviceString().c_str());
return context;
}
ContextPtr CreateCudaDeviceAttachStream(int ordinal, cudaStream_t stream) {
ContextPtr context(new CudaContext(
CudaDevice::ByOrdinal(ordinal), false, false));
context->_stream = stream;
return context;
}
ContextPtr CreateCudaDeviceAttachStream(cudaStream_t stream) {
int ordinal;
cudaGetDevice(&ordinal);
return CreateCudaDeviceAttachStream(ordinal, stream);
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocSimple
cudaError_t CudaAllocSimple::Malloc(size_t size, void** p) {
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, size);
if(cudaSuccess != error) {
printf("CUDA MALLOC ERROR %d\n", error);
exit(0);
}
return error;
}
bool CudaAllocSimple::Free(void* p) {
cudaError_t error = cudaSuccess;
if(p) error = cudaFree(p);
return cudaSuccess == error;
}
////////////////////////////////////////////////////////////////////////////////
// CudaAllocBuckets
CudaAllocBuckets::CudaAllocBuckets(CudaDevice& device) : CudaAlloc(device) {
_maxObjectSize = _capacity = _allocated = _committed = 0;
_counter = 0;
}
CudaAllocBuckets::~CudaAllocBuckets() {
SetCapacity(0, 0);
assert(!_allocated);
}
bool CudaAllocBuckets::SanityCheck() const {
// Iterate through all allocated objects and verify sizes.
size_t allocatedCount = 0, committedCount = 0;
for(AddressMap::const_iterator i = _addressMap.begin();
i != _addressMap.end(); ++i) {
int bucket = i->second->bucket;
size_t size = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
allocatedCount += size;
if(i->second->priority == _priorityMap.end())
committedCount += size;
}
return allocatedCount == _allocated && committedCount == _committed;
}
cudaError_t CudaAllocBuckets::Malloc(size_t size, void** p) {
// Locate the bucket index and adjust the size of the allocation to the
// bucket size.
size_t allocSize = size;
size_t commitSize = 0;
int bucket = LocateBucket(size);
if(bucket < NumBuckets)
allocSize = commitSize = BucketSizes[bucket];
// Peel off an already-allocated node and reuse it.
MemList& list = _memLists[bucket];
if(list.size() && list.front().priority != _priorityMap.end()) {
MemList::iterator memIt = list.begin();
_priorityMap.erase(memIt->priority);
memIt->priority = _priorityMap.end();
list.splice(list.end(), list, memIt);
_committed += commitSize;
*p = memIt->address->first;
return cudaSuccess;
}
// Shrink if this allocation would put us over the limit.
Compact(commitSize);
cudaError_t error = cudaSuccess;
*p = 0;
if(size) error = cudaMalloc(p, allocSize);
while((cudaErrorMemoryAllocation == error) && (_committed < _allocated)) {
SetCapacity(_capacity - _capacity / 10, _maxObjectSize);
error = cudaMalloc(p, size);
}
if(cudaSuccess != error) return error;
MemList::iterator memIt =
_memLists[bucket].insert(_memLists[bucket].end(), MemNode());
memIt->bucket = bucket;
memIt->address = _addressMap.insert(std::make_pair(*p, memIt)).first;
memIt->priority = _priorityMap.end();
_allocated += commitSize;
_committed += commitSize;
assert(SanityCheck());
return cudaSuccess;
}
bool CudaAllocBuckets::Free(void* p) {
AddressMap::iterator it = _addressMap.find(p);
if(it == _addressMap.end()) {
// If the pointer was not found in the address map, cudaFree it anyways
// but return false.
if(p) cudaFree(p);
return false;
}
// Because we're freeing a page, it had better not be in the priority queue.
MemList::iterator memIt = it->second;
assert(memIt->priority == _priorityMap.end());
// Always free allocations larger than the largest bucket
it->second->priority = _priorityMap.insert(
std::make_pair(_counter++ - memIt->bucket, memIt));
// Freed nodes are moved to the front, committed nodes are moved to the
// end.
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
MemList& list = _memLists[bucket];
list.splice(list.begin(), list, memIt);
_committed -= commitSize;
// Delete data that's not cached.
if(NumBuckets == bucket)
FreeNode(memIt);
Compact(0);
return true;
}
void CudaAllocBuckets::Clear() {
Compact(_allocated);
}
void CudaAllocBuckets::FreeNode(CudaAllocBuckets::MemList::iterator memIt) {
if(memIt->address->first) cudaFree(memIt->address->first);
int bucket = memIt->bucket;
size_t commitSize = (bucket < NumBuckets) ? BucketSizes[bucket] : 0;
_addressMap.erase(memIt->address);
if(memIt->priority != _priorityMap.end())
_priorityMap.erase(memIt->priority);
else
_committed -= commitSize;
_allocated -= commitSize;
_memLists[bucket].erase(memIt);
assert(SanityCheck());
}
void CudaAllocBuckets::Compact(size_t extra) {
while(_allocated + extra > _capacity && _allocated > _committed) {
// Walk the priority queue from beginning to end removing nodes.
MemList::iterator memIt = _priorityMap.begin()->second;
FreeNode(memIt);
}
}
// Exponentially spaced buckets.
const size_t CudaAllocBuckets::BucketSizes[CudaAllocBuckets::NumBuckets] = {
256, 512, 1024, 2048, 4096, 8192,
12288, 16384, 24576, 32768, 49152, 65536,
98304, 131072, 174848, 218624, 262144, 349696,
436992, 524288, 655360, 786432, 917504, 1048576,
1310720, 1572864, 1835008, 2097152, 2516736, 2936064,
3355648, 3774976, 4194304, 4893440, 5592576, 6291456,
6990592, 7689728, 8388608, 9786880, 11184896, 12582912,
13981184, 15379200, 16777216, 18874368, 20971520, 23068672,
25165824, 27262976, 29360128, 31457280, 33554432, 36910080,
40265472, 43620864, 46976256, 50331648, 53687296, 57042688,
60398080, 63753472, 67108864, 72701440, 78293760, 83886080,
89478656, 95070976, 100663296, 106255872, 111848192, 117440512,
123033088, 128625408, 134217728, 143804928, 153391872, 162978816,
172565760, 182152704, 191739648, 201326592, 210913792, 220500736
};
int CudaAllocBuckets::LocateBucket(size_t size) const {
if(size > _maxObjectSize || size > BucketSizes[NumBuckets - 1])
return NumBuckets;
return (int)(std::lower_bound(BucketSizes, BucketSizes + NumBuckets, size) -
BucketSizes);
}
} // namespace mgpu
|
38c6396e7c938c7d7b4deb25c06a5709e80f68fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include "cudaWrap.h"
#include <stdio.h>
#define BLKX 32
#define BLKY 32
hipStream_t gstream;
#define CUDA_CALL_SAFE(f) \
do { \
hipError_t _e = f; \
if(_e != hipSuccess) { \
fprintf(stderr, "Cuda error %s %d %s:: %s\n", __FILE__,__LINE__, __func__, hipGetErrorString(_e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
void hostCopy(void *src, void *dest, size_t size){
CUDA_CALL_SAFE(hipMemcpyAsync(dest, src, size, hipMemcpyDeviceToHost,gstream));
}
void deviceCopy(void *src, void *dest, size_t size){
CUDA_CALL_SAFE(hipMemcpyAsync(dest, src, size, hipMemcpyHostToDevice,gstream));
}
void setDevice(int id){
CUDA_CALL_SAFE(hipSetDevice(id));
}
int getProperties(){
int nDevices;
hipGetDeviceCount(&nDevices);
return nDevices;
}
void initStream(){
hipStreamCreate(&gstream);
}
void syncStream(){
CUDA_CALL_SAFE(hipStreamSynchronize(gstream));
}
void destroyStream(){
CUDA_CALL_SAFE(hipStreamDestroy(gstream));
}
void freeCuda( void *ptr ){
CUDA_CALL_SAFE(hipFree(ptr));
}
void freeCudaHost(void *ptr){
CUDA_CALL_SAFE(hipHostFree(ptr));
}
__global__ void initData(int nbLines, int M, double *h, double *g)
{
long idX = threadIdx.x + blockIdx.x * blockDim.x;
if (idX > nbLines * M)
return;
h[idX] = 0.0L;
g[idX] = 0.0L;
if ( idX >= M +1 && idX < 2*M-1 ){
h[idX] = 100.0;
g[idX] = 100.0;
}
}
__global__ void gpuWork(double *g, double *h, double *error, int M, int nbLines){
// This moves thread (0,0) to position (1,1) on the grid
long idX = threadIdx.x + blockIdx.x * blockDim.x +1;
long idY = threadIdx.y + blockIdx.y * blockDim.y +1;
long threadId = threadIdx.y * blockDim.x + threadIdx.x;
long tidX = threadIdx.x + blockIdx.x * blockDim.x;
long tidY = threadIdx.y + blockIdx.y * blockDim.y;
register double temp;
long xSize = M+2;
__shared__ double errors[BLKX*BLKY];
errors[threadId] = 0.0;
if (tidX < M && tidY < nbLines ){
temp = 0.25*(h[(idY-1)*xSize +idX]
+h[((idY+1)*xSize)+idX]
+h[(idY*xSize)+idX-1]
+h[(idY*xSize)+idX+1]);
errors[threadId] = fabs(temp - h[(idY*xSize)+idX]);
g[(idY*xSize)+idX] = temp;
}
else{
return;
}
__syncthreads();
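// Block-wide max reduction over the per-thread absolute errors in shared memory;
// after the loop, errors[0] holds the largest residual found by this block.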
for (unsigned long s = (blockDim.x*blockDim.y)/2; s>0; s=s>>1){
if ( threadId < s ){
errors[threadId] = fmax(errors[threadId], errors[threadId+s]);
}
__syncthreads();
}
if ( threadId == 0 ){
int id = blockIdx.y * (gridDim.x) + blockIdx.x;
error[id] = errors[0];
}
return;
}
void allocateMemory(void **ptr, size_t size){
CUDA_CALL_SAFE(hipMallocManaged(ptr, size));
return;
}
void allocateSafeHost(void **ptr, size_t size){
CUDA_CALL_SAFE(hipHostMalloc(ptr, size));
return;
}
void allocateErrorMemory( void **lerror, void **derror, long xElem, long yElem){
long numGridsX = ceil(xElem/BLKX);
long numGridsY = ceil(yElem)/BLKY;
*lerror = (double*) malloc(sizeof(double)*numGridsX*numGridsY);
allocateMemory(derror, sizeof(double) * numGridsX*numGridsY );
}
double executekernel(long xElem, long yElem, double *in, double *out, double *halo[2], double *dError, double *lError, int rank){
long numGridsX = ceil(xElem/BLKX);
long numGridsY = ceil(yElem)/BLKY;
dim3 dimGrid(numGridsX,numGridsY);
dim3 dimBlock(BLKX,BLKY);
double localError;
hipLaunchKernelGGL(( gpuWork), dim3(dimGrid),dim3(dimBlock),0,gstream, out, in, dError, xElem , yElem);
CUDA_CALL_SAFE(hipPeekAtLastError());
hostCopy(dError,lError, sizeof(double) *((xElem*yElem)/(numGridsX*numGridsY)+1));
localError=0.0;
for (long j = 0; j < numGridsX*numGridsY; j++){
localError = fmax(localError, lError[j]);
}
return localError;
}
void init(double *h, double *g, long Y, long X){
long numBlocks = ceil((X*Y)/1024.0);
hipLaunchKernelGGL(( initData), dim3(numBlocks) ,dim3(1024),0,gstream, Y, X, h, g);
CUDA_CALL_SAFE(hipStreamSynchronize(gstream));
}
| 38c6396e7c938c7d7b4deb25c06a5709e80f68fd.cu | #include <cuda_runtime_api.h>
#include "cudaWrap.h"
#include <stdio.h>
#define BLKX 32
#define BLKY 32
cudaStream_t gstream;
#define CUDA_CALL_SAFE(f) \
do { \
cudaError_t _e = f; \
if(_e != cudaSuccess) { \
fprintf(stderr, "Cuda error %s %d %s:: %s\n", __FILE__,__LINE__, __func__, cudaGetErrorString(_e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
void hostCopy(void *src, void *dest, size_t size){
CUDA_CALL_SAFE(cudaMemcpyAsync(dest, src, size, cudaMemcpyDeviceToHost,gstream));
}
void deviceCopy(void *src, void *dest, size_t size){
CUDA_CALL_SAFE(cudaMemcpyAsync(dest, src, size, cudaMemcpyHostToDevice,gstream));
}
void setDevice(int id){
CUDA_CALL_SAFE(cudaSetDevice(id));
}
int getProperties(){
int nDevices;
cudaGetDeviceCount(&nDevices);
return nDevices;
}
void initStream(){
cudaStreamCreate(&gstream);
}
void syncStream(){
CUDA_CALL_SAFE(cudaStreamSynchronize(gstream));
}
void destroyStream(){
CUDA_CALL_SAFE(cudaStreamDestroy(gstream));
}
void freeCuda( void *ptr ){
CUDA_CALL_SAFE(cudaFree(ptr));
}
void freeCudaHost(void *ptr){
CUDA_CALL_SAFE(cudaFreeHost(ptr));
}
__global__ void initData(int nbLines, int M, double *h, double *g)
{
long idX = threadIdx.x + blockIdx.x * blockDim.x;
if (idX > nbLines * M)
return;
h[idX] = 0.0L;
g[idX] = 0.0L;
if ( idX >= M +1 && idX < 2*M-1 ){
h[idX] = 100.0;
g[idX] = 100.0;
}
}
__global__ void gpuWork(double *g, double *h, double *error, int M, int nbLines){
// This moves thread (0,0) to position (1,1) on the grid
long idX = threadIdx.x + blockIdx.x * blockDim.x +1;
long idY = threadIdx.y + blockIdx.y * blockDim.y +1;
long threadId = threadIdx.y * blockDim.x + threadIdx.x;
long tidX = threadIdx.x + blockIdx.x * blockDim.x;
long tidY = threadIdx.y + blockIdx.y * blockDim.y;
register double temp;
long xSize = M+2;
__shared__ double errors[BLKX*BLKY];
errors[threadId] = 0.0;
if (tidX < M && tidY < nbLines ){
temp = 0.25*(h[(idY-1)*xSize +idX]
+h[((idY+1)*xSize)+idX]
+h[(idY*xSize)+idX-1]
+h[(idY*xSize)+idX+1]);
errors[threadId] = fabs(temp - h[(idY*xSize)+idX]);
g[(idY*xSize)+idX] = temp;
}
else{
return;
}
__syncthreads();
for (unsigned long s = (blockDim.x*blockDim.y)/2; s>0; s=s>>1){
if ( threadId < s ){
errors[threadId] = fmax(errors[threadId], errors[threadId+s]);
}
__syncthreads();
}
if ( threadId == 0 ){
int id = blockIdx.y * (gridDim.x) + blockIdx.x;
error[id] = errors[0];
}
return;
}
void allocateMemory(void **ptr, size_t size){
CUDA_CALL_SAFE(cudaMallocManaged(ptr, size));
return;
}
void allocateSafeHost(void **ptr, size_t size){
CUDA_CALL_SAFE(cudaMallocHost(ptr, size));
return;
}
void allocateErrorMemory( void **lerror, void **derror, long xElem, long yElem){
long numGridsX = ceil(xElem/BLKX);
long numGridsY = ceil(yElem)/BLKY;
*lerror = (double*) malloc(sizeof(double)*numGridsX*numGridsY);
allocateMemory(derror, sizeof(double) * numGridsX*numGridsY );
}
double executekernel(long xElem, long yElem, double *in, double *out, double *halo[2], double *dError, double *lError, int rank){
long numGridsX = ceil(xElem/BLKX);
long numGridsY = ceil(yElem)/BLKY;
dim3 dimGrid(numGridsX,numGridsY);
dim3 dimBlock(BLKX,BLKY);
double localError;
gpuWork<<<dimGrid,dimBlock,0,gstream>>>(out, in, dError, xElem , yElem);
CUDA_CALL_SAFE(cudaPeekAtLastError());
hostCopy(dError,lError, sizeof(double) *((xElem*yElem)/(numGridsX*numGridsY)+1));
localError=0.0;
for (long j = 0; j < numGridsX*numGridsY; j++){
localError = fmax(localError, lError[j]);
}
return localError;
}
void init(double *h, double *g, long Y, long X){
long numBlocks = ceil((X*Y)/1024.0);
initData<<<numBlocks ,1024,0,gstream>>>(Y, X, h, g);
CUDA_CALL_SAFE(cudaStreamSynchronize(gstream));
}
|
eb39b112f6090ef865322f4395fc8ec9bb653815.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex.h"
#include "normXcorr_GPUKernel.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
/*SoA_Corr *CorrH;
CorrH = (SoA_Corr *)malloc(sizeof(SoA_Corr)*DisplacementSize);
for(int k=0;k<DisplacementSize;k++){
CorrH[k].Corr_Points = (float*) malloc(Corr_size*sizeof(float));
}*/
float *CorrH;
CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
float elapsedTime_inc;
hipEvent_t startEvent_inc, stopEvent_inc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
hipEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters); // Execution Model for GPU is set up in this function
hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
for(int h=0;h<DisplacementSize;h++){
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
// Allocate SoA on the device ?????
float *CorrD;
hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//hipMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
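// One thread block per displacement estimate (numX x numY grid); each thread of a
// (searchX x searchY) block computes one entry of that window's cross-correlation.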
dim3 dimBlock(parameters.searchX, parameters.searchY);
//dim3 dimBlock(2*parameters.searchX, 2*parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float);
// Launch the device computation threads!
hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock),sharedmemsize, 0, Pred,Postd,CorrD,parameters,preMean,preVar);
// Copying SoA from Device to Host
//CopyFromDeviceMatrix(Corr, Corrd);
//hipMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
hipFree(CorrD);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
fp = fopen("trialNumbers.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
| eb39b112f6090ef865322f4395fc8ec9bb653815.cu | // Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex.h"
#include "normXcorr_GPUKernel.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void CorrelationOnDevice(const Matrix M, const Matrix N, float *CorrH, params parameters);
int main(int argc,char** argv) {
// Input Parameters
if(argc!=9)
{
printf("Usage %s Parameters missing\n",argv[0]);
return 1;
}
int imageWidth = atoi(argv[1]);
int imageHeight = atoi(argv[2]);
int SEARCH_X = atoi(argv[3]);
int SEARCH_Y = atoi(argv[4]);
int KERNEL_X = atoi(argv[5]);
int KERNEL_Y = atoi(argv[6]);
int numX = atoi(argv[7]);
int numY = atoi(argv[8]);
int DisplacementSize = numX*numY;
int Corr_size = SEARCH_X*SEARCH_Y;
Matrix Pre;
Matrix Post;
float OVERLAP = 50.0;
params parameters = {SEARCH_Y,SEARCH_X,KERNEL_Y,KERNEL_X,OVERLAP,numX,numY};
Pre = AllocateMatrix(imageHeight,imageWidth, 1);
Post = AllocateMatrix(imageHeight,imageWidth, 1);
float gpuTime=0.f;
// Allocating Host-side Memory for Cross-correlation
/*SoA_Corr *CorrH;
CorrH = (SoA_Corr *)malloc(sizeof(SoA_Corr)*DisplacementSize);
for(int k=0;k<DisplacementSize;k++){
CorrH[k].Corr_Points = (float*) malloc(Corr_size*sizeof(float));
}*/
float *CorrH;
CorrH = (float*)malloc(Corr_size*DisplacementSize*sizeof(float));
float elapsedTime_inc;
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
CorrelationOnDevice(Pre, Post, CorrH, parameters); // Execution Model for GPU is set up in this function
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
gpuTime = elapsedTime_inc;
// Printing Cross-correlation Matrix for Block:0
for(int h=0;h<DisplacementSize;h++){
for(int z=0;z<SEARCH_X;z++){
for(int g=0;g<SEARCH_Y;g++){
printf("%f ",CorrH[g+SEARCH_X*(z+SEARCH_Y*h)]);
}
printf("\n");
}
printf("\n");
}
printf("\n");
// Free matrices
FreeMatrix(&Pre);
FreeMatrix(&Post);
return 0;
}
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, float *CorrH, params parameters)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
// Allocate SoA on the device ?????
float *CorrD;
cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//cudaMalloc((SoA_Corr **)&CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY);
// Setup the execution configuration
dim3 dimBlock(parameters.searchX, parameters.searchY);
//dim3 dimBlock(2*parameters.searchX, 2*parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
int sharedmemsize = 2*parameters.searchX*parameters.searchY*sizeof(float);
// Launch the device computation threads!
normXcorr_GPU<<<dimGrid, dimBlock,sharedmemsize>>>(Pred,Postd,CorrD,parameters,preMean,preVar);
// Copying SoA from Device to Host
//CopyFromDeviceMatrix(Corr, Corrd);
//cudaMemcpy(CorrH,CorrD,sizeof(SoA_Corr)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
cudaFree(CorrD);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
fp = fopen("trialNumbers.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
f93522fda87342031ed19073037c4a70249cbef8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Matrix_MultiplElementWise_naiveOLD(const float * A , int Acount, int Acols, const float * B , int Bcount, int Bcols, float * out0 , int out0count, int out0cols)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int id_row,id_col;
if (id<Acount)
{
if (Acount==Bcount) // matrix .* matrix
{
out0[id] = A[id]*B[id];
}
else if (Bcols==1) // matrix .* row vector
{
id_row = id/Acols;
out0[id] = A[id]*B[id_row];
}
else // matrix .* column vector
{
id_col = id%Acols;
out0[id] = A[id]*B[id_col];
}
}
} | f93522fda87342031ed19073037c4a70249cbef8.cu | #include "includes.h"
__global__ void Matrix_MultiplElementWise_naiveOLD(const float * A , int Acount, int Acols, const float * B , int Bcount, int Bcols, float * out0 , int out0count, int out0cols)
{
int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int id_row,id_col;
if (id<Acount)
{
if (Acount==Bcount) // matrix .* matrix
{
out0[id] = A[id]*B[id];
}
else if (Bcols==1) // matrix .* row vector
{
id_row = id/Acols;
out0[id] = A[id]*B[id_row];
}
else // matrix .* column vector
{
id_col = id%Acols;
out0[id] = A[id]*B[id_col];
}
}
} |
e227d77186e3474823a8053e22f045a8a2bbba41.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void magnitude(float *vec, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) { vec[xIndex] = abs(vec[xIndex]); }
} | e227d77186e3474823a8053e22f045a8a2bbba41.cu | #include "includes.h"
__global__ void magnitude(float *vec, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
if (xIndex < n) { vec[xIndex] = abs(vec[xIndex]); }
} |
2295618f2b0c0e8062d2d6b40fcbabf9373c5f99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include "checker_helper.cu"
#define BLOCK_SIZE 4
#define NUM_BLOCKS 4
#define GLOBAL_BUF_SIZE 16384
#define INTERNAL_BUF_SIZE BLOCK_SIZE
__device__ volatile int globalHead;
__device__ volatile int globalTail;
__device__ volatile int blockFence;
// __device__ volatile int tailSem = 0;
// __device__ volatile int headSem = 0;
// __device__ void acquire_semaphore(volatile int *lock){
// while (atomicCAS((int *) lock, 0, 1) != 0);
// }
// __device__ void release_semaphore(volatile int *lock){
// *lock = 0;
// __threadfence();
// }
__global__ void initialize(int *buffer, int width, size_t pitch){
globalHead = 0;
globalTail = 0;
blockFence = 0;
int numQueens = 5;
for (int i = 0; i < ceilf(width / 2); ++i)
for (int j = 0; j < ceilf(width / 2); ++j){
int *row = (int*) ((char*) buffer + (globalTail++) * pitch);
row[0] = i * width + j;
// printf("globalTail: %4d\telement: %4d\n", globalTail, row[0]);
}
for (int i = 0; i < width * width; ++i){
int *row = (int*) ((char*) buffer + i * pitch);
for (int q = 0; q < numQueens; ++q){
printf("%4d\t", row[q]);
}
printf("\n");
row[0] = 2;
row[1] = 20;
row[2] = 11;
row[3] = 30;
row[4] = 60;
if (!checkerFunc (row, width, numQueens)) {printf("this is Not a Solution!\n");}
row[0] = 27;
row[1] = 33;
row[2] = 56;
row[3] = 4;
row[4] = 55;
if (!checkerFunc (row, width, numQueens)) {printf("s2 is Not a Solution!\n");}
}
}
__global__ void qgdKernel(int width, size_t pitch, int numQueens, int *globalBuffer){
int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
int internalIndex = threadIdx.x;
int blockIndex = blockIdx.x;
__shared__ int internalBuffer[16][INTERNAL_BUF_SIZE];
__shared__ int internalHead;
__shared__ int internalTail;
if (internalIndex == 0){
internalHead = 0;
internalTail = 0;
}
__syncthreads();
for (int i = 0; i < 16; ++i)
internalBuffer[i][internalIndex] = -1;
__syncthreads();
while(globalHead < globalTail){
if (internalIndex == 0){
while(blockFence != blockIndex);
while(true){
if (internalTail == INTERNAL_BUF_SIZE)
break;
if (globalHead == globalTail)
break;
int *row = (int *)((char *) globalBuffer + globalHead * pitch);
for (int i = 0; i < numQueens; ++i){
internalBuffer[i][internalTail] = row[i];
row[i] = -1;
}
++internalTail;
++globalHead;
if (globalHead == globalTail){
globalHead = 0;
globalTail = 0;
}
}
if (++blockFence == NUM_BLOCKS)
blockFence = 0;
}
__syncthreads();
if (internalIndex == 0){
while(blockFence != blockIndex);
while(true){
if (globalTail == GLOBAL_BUF_SIZE)
break;
if (internalHead == internalTail){
internalHead = 0;
internalTail = 0;
break;
}
int *row = (int *)((char *) globalBuffer + globalTail * pitch);
for (int i = 0; i < numQueens; ++i){
row[i] = internalBuffer[i][internalHead];
internalBuffer[i][internalHead] = -1;
}
++internalHead;
++globalTail;
if (internalHead == internalTail){
internalHead = 0;
internalTail = 0;
}
}
if (++blockFence == NUM_BLOCKS)
blockFence = 0;
}
__syncthreads();
break;
}
}
int main(void){
int width = 8;
int numQueens = 5;
int *buffer;
size_t pitch;
hipMallocPitch((void**) &buffer, &pitch, numQueens * sizeof(int), width * width);
hipMemset2D(buffer, pitch, 255, numQueens * sizeof(int), width * width);
hipLaunchKernelGGL(( initialize), dim3(1), dim3(1) , 0, 0, buffer, width, pitch);
hipDeviceSynchronize();
hipLaunchKernelGGL(( qgdKernel), dim3(4), dim3(4) , 0, 0, width, pitch, numQueens, buffer);
hipDeviceSynchronize();
hipFree(buffer);
}
| 2295618f2b0c0e8062d2d6b40fcbabf9373c5f99.cu | #include <iostream>
#include <stdio.h>
#include "checker_helper.cu"
#define BLOCK_SIZE 4
#define NUM_BLOCKS 4
#define GLOBAL_BUF_SIZE 16384
#define INTERNAL_BUF_SIZE BLOCK_SIZE
__device__ volatile int globalHead;
__device__ volatile int globalTail;
__device__ volatile int blockFence;
// __device__ volatile int tailSem = 0;
// __device__ volatile int headSem = 0;
// __device__ void acquire_semaphore(volatile int *lock){
// while (atomicCAS((int *) lock, 0, 1) != 0);
// }
// __device__ void release_semaphore(volatile int *lock){
// *lock = 0;
// __threadfence();
// }
__global__ void initialize(int *buffer, int width, size_t pitch){
globalHead = 0;
globalTail = 0;
blockFence = 0;
int numQueens = 5;
for (int i = 0; i < ceilf(width / 2); ++i)
for (int j = 0; j < ceilf(width / 2); ++j){
int *row = (int*) ((char*) buffer + (globalTail++) * pitch);
row[0] = i * width + j;
// printf("globalTail: %4d\telement: %4d\n", globalTail, row[0]);
}
for (int i = 0; i < width * width; ++i){
int *row = (int*) ((char*) buffer + i * pitch);
for (int q = 0; q < numQueens; ++q){
printf("%4d\t", row[q]);
}
printf("\n");
row[0] = 2;
row[1] = 20;
row[2] = 11;
row[3] = 30;
row[4] = 60;
if (!checkerFunc (row, width, numQueens)) {printf("this is Not a Solution!\n");}
row[0] = 27;
row[1] = 33;
row[2] = 56;
row[3] = 4;
row[4] = 55;
if (!checkerFunc (row, width, numQueens)) {printf("s2 is Not a Solution!\n");}
}
}
__global__ void qgdKernel(int width, size_t pitch, int numQueens, int *globalBuffer){
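// Per-block staging of the global work queue: thread 0 waits for its turn via blockFence,
// drains board states from globalBuffer into the block's shared-memory buffer, and after a
// barrier writes them back out. blockFence rotates through the blocks so only one block at a
// time touches the shared globalHead/globalTail indices.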
int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
int internalIndex = threadIdx.x;
int blockIndex = blockIdx.x;
__shared__ int internalBuffer[16][INTERNAL_BUF_SIZE];
__shared__ int internalHead;
__shared__ int internalTail;
if (internalIndex == 0){
internalHead = 0;
internalTail = 0;
}
__syncthreads();
for (int i = 0; i < 16; ++i)
internalBuffer[i][internalIndex] = -1;
__syncthreads();
while(globalHead < globalTail){
if (internalIndex == 0){
while(blockFence != blockIndex);
while(true){
if (internalTail == INTERNAL_BUF_SIZE)
break;
if (globalHead == globalTail)
break;
int *row = (int *)((char *) globalBuffer + globalHead * pitch);
for (int i = 0; i < numQueens; ++i){
internalBuffer[i][internalTail] = row[i];
row[i] = -1;
}
++internalTail;
++globalHead;
if (globalHead == globalTail){
globalHead = 0;
globalTail = 0;
}
}
if (++blockFence == NUM_BLOCKS)
blockFence = 0;
}
__syncthreads();
if (internalIndex == 0){
while(blockFence != blockIndex);
while(true){
if (globalTail == GLOBAL_BUF_SIZE)
break;
if (internalHead == internalTail){
internalHead = 0;
internalTail = 0;
break;
}
int *row = (int *)((char *) globalBuffer + globalTail * pitch);
for (int i = 0; i < numQueens; ++i){
row[i] = internalBuffer[i][internalHead];
internalBuffer[i][internalHead] = -1;
}
++internalHead;
++globalTail;
if (internalHead == internalTail){
internalHead = 0;
internalTail = 0;
}
}
if (++blockFence == NUM_BLOCKS)
blockFence = 0;
}
__syncthreads();
break;
}
}
int main(void){
int width = 8;
int numQueens = 5;
int *buffer;
size_t pitch;
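// Pitched 2-D buffer: one row per queued board state (numQueens ints). The 0xFF memset below
// initializes every int slot to -1, i.e. "empty".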
cudaMallocPitch((void**) &buffer, &pitch, numQueens * sizeof(int), width * width);
cudaMemset2D(buffer, pitch, 255, numQueens * sizeof(int), width * width);
initialize<<< 1, 1 >>>(buffer, width, pitch);
cudaDeviceSynchronize();
qgdKernel<<< 4, 4 >>>(width, pitch, numQueens, buffer);
cudaDeviceSynchronize();
cudaFree(buffer);
}
|
8553f08b5fa489e9a06e7a9c011a73178bf4a573.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand_kernel.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <iostream>
#include <numeric>
using namespace std;
const int threadSideSize = 32;
const int blocksSideSize = 255;
__global__ void calculate(int* sum) {
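// Each thread checks one lattice point of an R x R grid (R = blocksSideSize * threadSideSize)
// and atomically counts the points inside the quarter circle of radius R; the host estimates
// pi as 4 * count / R^2.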
int xBlock = blockIdx.x / blocksSideSize;
int yBlock = blockIdx.x % blocksSideSize;
int xThread = threadIdx.x / threadSideSize;
int yThread = threadIdx.x % threadSideSize;
int positionX = xBlock * threadSideSize + xThread;
int positionY = yBlock * threadSideSize + yThread;
if (sqrt((float) ((positionX * positionX) + (positionY * positionY))) <= (blocksSideSize * threadSideSize)) {
atomicAdd(sum, 1);
}
}
int main() {
int blockSize = threadSideSize * threadSideSize;
int numBlocks = blocksSideSize * blocksSideSize;
int sum = 0;
int *sumDevice;
hipMalloc(&sumDevice, sizeof(int));
hipLaunchKernelGGL(calculate, dim3(numBlocks), dim3(blockSize), 0, 0, sumDevice);
hipMemcpy(&sum, sumDevice, sizeof(int), hipMemcpyDeviceToHost);
cout << "pi = " << (double)(4 * sum) / (double)((blocksSideSize * threadSideSize) * (blocksSideSize * threadSideSize)) << endl;
hipFree(sumDevice);
return 0;
}
| 8553f08b5fa489e9a06e7a9c011a73178bf4a573.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#include <cuda.h>
#include <curand.h>
#include <iostream>
#include <numeric>
using namespace std;
const int threadSideSize = 32;
const int blocksSideSize = 255;
__global__ void calculate(int* sum) {
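// Each thread checks one lattice point of an R x R grid (R = blocksSideSize * threadSideSize)
// and atomically counts the points inside the quarter circle of radius R; the host estimates
// pi as 4 * count / R^2.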
int xBlock = blockIdx.x / blocksSideSize;
int yBlock = blockIdx.x % blocksSideSize;
int xThread = threadIdx.x / threadSideSize;
int yThread = threadIdx.x % threadSideSize;
int positionX = xBlock * threadSideSize + xThread;
int positionY = yBlock * threadSideSize + yThread;
if (sqrt((float) ((positionX * positionX) + (positionY * positionY))) <= (blocksSideSize * threadSideSize)) {
atomicAdd(sum, 1);
}
}
int main() {
int blockSize = threadSideSize * threadSideSize;
int numBlocks = blocksSideSize * blocksSideSize;
int sum = 0;
int *sumDevice;
cudaMalloc(&sumDevice, sizeof(int));
calculate<<<numBlocks, blockSize>>>(sumDevice);
cudaMemcpy(&sum, sumDevice, sizeof(int), cudaMemcpyDeviceToHost);
cout << "pi = " << (double)(4 * sum) / (double)((blocksSideSize * threadSideSize) * (blocksSideSize * threadSideSize)) << endl;
cudaFree(sumDevice);
return 0;
}
|
61427059ba2651c790546e824b169d7bd8a874a0.hip | // !!! This is a file automatically generated by hipify!!!
#include<utility>
#include<stdio.h>
#include<assert.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
#include <time.h>
#include <iomanip>
// HEADER FILES
#include "Helper/jacobi.h"
#include "Helper/residual.h"
#include "Helper/solution_error.h"
#include "Helper/setGPU.h"
// #define RUN_CPU_FLAG 1
#define RUN_GPU_FLAG 1
// #define RUN_SHARED_FLAG 1
// Determine which header files to include based on which directives are active
#ifdef RUN_CPU_FLAG
#include "jacobi-1D-cpu.h"
#endif
#ifdef RUN_GPU_FLAG
#include "jacobi-1D-gpu.h"
#endif
#ifdef RUN_SHARED_FLAG
#include "jacobi-1D-shared.h"
#endif
int main(int argc, char *argv[])
{
// INPUTS ///////////////////////////////////////////////////////////////
// SET CUDA DEVICE TO USE (IMPORTANT FOR ENDEAVOUR WHICH HAS 2!)
// NAVIER-STOKES GPUs: "Quadro K420"
// ENDEAVOUR GPUs: "TITAN V" OR "GeForce GTX 1080 Ti"
// Supercloud has V100s that we'll want to use soon
// std::string gpuToUse = "Quadro K420";
// setGPU(gpuToUse);
// INPUTS AND OUTPUT FILE NAMES
const int nDim = 1024; // 4096; // 65536; //524288; //65536; //atoi(argv[1]);
const int threadsPerBlock = 1024; //32; //512; // 32;
const double TOL = 1.0; //atoi(argv[4]);
// const double residualReductionFactor = 10000.0; //atoi(argv[4]);
// const double errorReductionFactor = 0.95; // 10000000.0; //atoi(argv[4]);
const int OVERLAP = 0;
const int subIterations = threadsPerBlock / 2;
const int numTrials = 20;
std::string CPU_FILE_NAME = "RESULTS/CPU_N1024_TOL1_DOUBLES.txt";
std::string GPU_FILE_NAME = "RESULTS/GPU_N1024_TOL1_DOUBLES.txt";
std::string SHARED_FILE_NAME = "RESULTS/SHARED_N1024_TOL1_DOUBLES.txt";
// SHARED_N1024_ERRORREDUCE100.txt";
/////////////////////////////////////////////////////////////////////////
// INITIALIZE ARRAYS
int nGrids = nDim + 2;
double * initX = new double[nGrids];
double * rhs = new double[nGrids];
// FILL IN INITIAL CONDITION AND RHS VALUES
for (int iGrid = 0; iGrid < nGrids; ++iGrid) {
if (iGrid == 0 || iGrid == nGrids-1) {
initX[iGrid] = 0.0f;
}
else {
initX[iGrid] = 1.0f;
}
rhs[iGrid] = 1.0f;
}
// LOAD EXACT SOLUTION
// double * solution_exact = new double[nGrids];
// std::string SOLUTIONEXACT_FILENAME = "solution_exact_N65536_long.txt";
// loadSolutionExact(solution_exact, SOLUTIONEXACT_FILENAME, nGrids);
/*for (int i = 1; i < nGrids; ++i) {
initX[i] = solution_exact[i];
}*/
// COMPUTE INITIAL RESIDUAL AND SET TOLERANCE
double initResidual = residual1DPoisson(initX, rhs, nGrids);
// double initSolutionError = solutionError1DPoisson(initX, solution_exact, nGrids);
// const double TOL = initResidual / residualReductionFactor; //atoi(argv[4]);
// const double TOL = initSolutionError * errorReductionFactor; // initSolutionError / errorReductionFactor; //atoi(argv[4]);
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
// printf("GPU Name: %s\n", gpuToUse.c_str());
printf("Number of unknowns: %d\n", nDim);
printf("Threads Per Block: %d\n", threadsPerBlock);
printf("Residual of initial solution: %f\n", initResidual);
// printf("Solution Error of initial solution: %f\n", initSolutionError);
printf("Desired TOL of residual/solution error: %f\n", TOL);
// printf("Residual reduction factor: %f\n", errorReductionFactor);
printf("Number of Trials: %d\n", numTrials);
printf("======================================================\n");
// CPU - JACOBI
#ifdef RUN_CPU_FLAG
int cpuIterations = jacobiCpuIterationCountResidual(initX, rhs, nGrids, TOL);
// int cpuIterations = jacobiCpuIterationCount(initX, solution_exact, rhs, nGrids, TOL);
float cpuJacobiTimeTrial;
float cpuJacobiTimeAverage;
float cpuTotalTime = 0.0;
double cpuJacobiResidual;
double cpuJacobiSolutionError;
double * solutionJacobiCpu;
for (int iter = 0; iter < numTrials; iter++) {
clock_t cpuJacobiStartTime = clock();
solutionJacobiCpu = jacobiCpu(initX, rhs, nGrids, cpuIterations);
clock_t cpuJacobiEndTime = clock();
cpuJacobiTimeTrial = (cpuJacobiEndTime - cpuJacobiStartTime) / (double) CLOCKS_PER_SEC;
cpuJacobiTimeTrial = cpuJacobiTimeTrial * (1e3); // Convert to ms
cpuTotalTime = cpuTotalTime + cpuJacobiTimeTrial;
printf("Completed CPU trial %d\n", iter);
}
cpuJacobiTimeAverage = cpuTotalTime / numTrials;
cpuJacobiResidual = residual1DPoisson(solutionJacobiCpu, rhs, nGrids);
// cpuJacobiSolutionError = solutionError1DPoisson(solutionJacobiCpu, solution_exact, nGrids);
#endif
// GPU - JACOBI
#ifdef RUN_GPU_FLAG
int gpuIterations = jacobiGpuIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock);
// int gpuIterations = jacobiGpuIterationCount(initX, solution_exact, rhs, nGrids, TOL, threadsPerBlock);
float gpuJacobiTimeTrial;
float gpuJacobiTimeAverage;
float gputotalTime = 0.0;
double gpuJacobiResidual;
double gpuJacobiSolutionError;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
double * solutionJacobiGpu;
for (int iter = 0; iter < numTrials; iter++) {
hipEventRecord(start, 0);
solutionJacobiGpu = jacobiGpu(initX, rhs, nGrids, gpuIterations, threadsPerBlock);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuJacobiTimeTrial, start, stop);
gputotalTime = gputotalTime + gpuJacobiTimeTrial;
printf("Completed GPU trial %d\n", iter);
}
gpuJacobiTimeAverage = gputotalTime / numTrials;
gpuJacobiResidual = residual1DPoisson(solutionJacobiGpu, rhs, nGrids);
// gpuJacobiSolutionError = solutionError1DPoisson(solutionJacobiGpu, solution_exact, nGrids);
#endif
// SHARED - JACOBI
#ifdef RUN_SHARED_FLAG
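// Eight-byte shared-memory banks put consecutive doubles in separate banks, avoiding bank
// conflicts in the double-precision shared-memory Jacobi kernel.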
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
// int sharedCycles = jacobiSharedIterationCountSolutionError(initX, solution_exact, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations);
int sharedCycles = jacobiSharedIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations);
float sharedJacobiTimeTrial;
float sharedJacobiTimeAverage;
float sharedtotalTime = 0;
double sharedJacobiResidual;
double sharedJacobiSolutionError;
hipEvent_t start_sh, stop_sh;
hipEventCreate(&start_sh);
hipEventCreate(&stop_sh);
double * solutionJacobiShared;
for (int iter = 0; iter < numTrials; iter++) {
hipEventRecord(start_sh, 0);
solutionJacobiShared = jacobiShared(initX, rhs, nGrids, sharedCycles, threadsPerBlock, OVERLAP, subIterations);
hipEventRecord(stop_sh, 0);
hipEventSynchronize(stop_sh);
hipEventElapsedTime(&sharedJacobiTimeTrial, start_sh, stop_sh);
sharedtotalTime = sharedtotalTime + sharedJacobiTimeTrial;
printf("Completed GPU Shared trial %d\n", iter);
}
sharedJacobiTimeAverage = sharedtotalTime / numTrials;
sharedJacobiResidual = residual1DPoisson(solutionJacobiShared, rhs, nGrids);
// sharedJacobiSolutionError = solutionError1DPoisson(solutionJacobiShared, solution_exact, nGrids);
#endif
double dx = 1.0 / (nGrids - 1);
std::cout << std::fixed << std::setprecision(10) << dx*dx << std::endl;
double * residualArray = new double[nGrids];
double * residualArrayExact = new double[nGrids];
residualArray[0] = 0.0;
residualArray[nGrids-1] = 0.0;
/* for (int i = 1; i < nGrids-1; i++) {
// residualArray[i] = abs(rhs[i] + (solutionJacobiCpu[i-1] - 2.0*solutionJacobiCpu[i] + solutionJacobiCpu[i+1]) / (dx*dx));
// residualArray[i] = abs(rhs[i] + (solutionJacobiGpu[i-1] - 2.0*solutionJacobiGpu[i] + solutionJacobiGpu[i+1]) / (dx*dx));
residualArray[i] = abs(rhs[i] + (solutionJacobiShared[i-1] - 2.0*solutionJacobiShared[i] + solutionJacobiShared[i+1]) / (dx*dx));
residualArrayExact[i] = abs(rhs[i] + (solution_exact[i-1] - 2.0*solution_exact[i] + solution_exact[i+1]) / (dx*dx));
}*/
// PRINT SOLUTION - NEEDS ADJUSTING BASED ON WHICH FLAGS ARE ON
for (int i = 0; i < nGrids; i++) {
// printf("Grid %d = %f\n", i, solutionJacobiShared[i]);
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiCpu[i] << "\t" << abs(solution_exact[i] - solutionJacobiCpu[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiGpu[i] << "\t" << abs(solution_exact[i] - solutionJacobiGpu[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiShared[i] << "\t" << abs(solution_exact[i] - solutionJacobiShared[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
}
// CPU RESULTS
#ifdef RUN_CPU_FLAG
printf("===============CPU============================\n");
printf("Number of Iterations needed for Jacobi CPU: %d \n", cpuIterations);
printf("Time needed for the Jacobi CPU: %f ms\n", cpuJacobiTimeAverage);
printf("Residual of the Jacobi CPU solution is %f\n", cpuJacobiResidual);
// printf("Solution Error of the Jacobi CPU solution is %f\n", cpuJacobiSolutionError);
std::ofstream cpuResults;
cpuResults.open(CPU_FILE_NAME, std::ios::app);
// cpuResults << nDim << " " << residualReductionFactor << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiResidual << "\n";
// cpuResults << nDim << " " << errorReductionFactor << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiSolutionError << "\n";
cpuResults << nDim << " " << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiResidual << "\n";
cpuResults.close();
#endif
// GPU RESULTS
#ifdef RUN_GPU_FLAG
printf("===============GPU============================\n");
printf("Number of Iterations needed for Jacobi GPU: %d \n", gpuIterations);
printf("Time needed for the Jacobi GPU: %f ms\n", gpuJacobiTimeAverage);
printf("Residual of the Jacobi GPU solution is %f\n", gpuJacobiResidual);
// printf("Solution Error of the Jacobi GPU solution is %f\n", gpuJacobiSolutionError);
std::ofstream gpuResults;
gpuResults.open(GPU_FILE_NAME, std::ios::app);
// gpuResults << nDim << " " << threadsPerBlock << " " << residualReductionFactor << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiResidual << "\n";
// gpuResults << nDim << " " << threadsPerBlock << " " << errorReductionFactor << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiSolutionError << "\n";
gpuResults << nDim << " " << threadsPerBlock << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiResidual << "\n";
gpuResults.close();
#endif
// SHARED RESULTS
#ifdef RUN_SHARED_FLAG
printf("===============SHARED============================\n");
printf("Number of Cycles needed for Jacobi Shared: %d (%d) \n", sharedCycles, threadsPerBlock/2);
printf("Time needed for the Jacobi Shared: %f ms\n", sharedJacobiTimeAverage);
printf("Residual of the Jacobi Shared solution is %f\n", sharedJacobiResidual);
// printf("Solution Error of the Jacobi Shared solution is %f\n", sharedJacobiSolutionError);
std::ofstream sharedResults;
sharedResults.open(SHARED_FILE_NAME, std::ios::app);
// sharedResults << nDim << " " << threadsPerBlock << " " << residualReductionFactor << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiResidual << "\n";
// sharedResults << nDim << " " << threadsPerBlock << " " << errorReductionFactor << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiSolutionError << "\n";
sharedResults << nDim << " " << threadsPerBlock << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiResidual << "\n";
sharedResults.close();
#endif
// FREE MEMORY
delete[] initX;
delete[] rhs;
// delete[] solution_exact;
#ifdef RUN_CPU_FLAG
delete[] solutionJacobiCpu;
#endif
#ifdef RUN_GPU_FLAG
delete[] solutionJacobiGpu;
#endif
#ifdef RUN_SHARED_FLAG
delete[] solutionJacobiShared;
#endif
return 0;
}
| 61427059ba2651c790546e824b169d7bd8a874a0.cu | #include<utility>
#include<stdio.h>
#include<assert.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <ostream>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <fstream>
#include <omp.h>
#include <time.h>
#include <string.h>
#include <utility>
#include <time.h>
#include <iomanip>
// HEADER FILES
#include "Helper/jacobi.h"
#include "Helper/residual.h"
#include "Helper/solution_error.h"
#include "Helper/setGPU.h"
// #define RUN_CPU_FLAG 1
#define RUN_GPU_FLAG 1
// #define RUN_SHARED_FLAG 1
// Determine which header files to include based on which directives are active
#ifdef RUN_CPU_FLAG
#include "jacobi-1D-cpu.h"
#endif
#ifdef RUN_GPU_FLAG
#include "jacobi-1D-gpu.h"
#endif
#ifdef RUN_SHARED_FLAG
#include "jacobi-1D-shared.h"
#endif
int main(int argc, char *argv[])
{
// INPUTS ///////////////////////////////////////////////////////////////
// SET CUDA DEVICE TO USE (IMPORTANT FOR ENDEAVOUR WHICH HAS 2!)
// NAVIER-STOKES GPUs: "Quadro K420"
// ENDEAVOUR GPUs: "TITAN V" OR "GeForce GTX 1080 Ti"
// Supercloud has V100s that we'll want to use soon
// std::string gpuToUse = "Quadro K420";
// setGPU(gpuToUse);
// INPUTS AND OUTPUT FILE NAMES
const int nDim = 1024; // 4096; // 65536; //524288; //65536; //atoi(argv[1]);
const int threadsPerBlock = 1024; //32; //512; // 32;
const double TOL = 1.0; //atoi(argv[4]);
// const double residualReductionFactor = 10000.0; //atoi(argv[4]);
// const double errorReductionFactor = 0.95; // 10000000.0; //atoi(argv[4]);
const int OVERLAP = 0;
const int subIterations = threadsPerBlock / 2;
const int numTrials = 20;
std::string CPU_FILE_NAME = "RESULTS/CPU_N1024_TOL1_DOUBLES.txt";
std::string GPU_FILE_NAME = "RESULTS/GPU_N1024_TOL1_DOUBLES.txt";
std::string SHARED_FILE_NAME = "RESULTS/SHARED_N1024_TOL1_DOUBLES.txt";
// SHARED_N1024_ERRORREDUCE100.txt";
/////////////////////////////////////////////////////////////////////////
// INITIALIZE ARRAYS
int nGrids = nDim + 2;
double * initX = new double[nGrids];
double * rhs = new double[nGrids];
// FILL IN INITIAL CONDITION AND RHS VALUES
for (int iGrid = 0; iGrid < nGrids; ++iGrid) {
if (iGrid == 0 || iGrid == nGrids-1) {
initX[iGrid] = 0.0f;
}
else {
initX[iGrid] = 1.0f;
}
rhs[iGrid] = 1.0f;
}
// LOAD EXACT SOLUTION
// double * solution_exact = new double[nGrids];
// std::string SOLUTIONEXACT_FILENAME = "solution_exact_N65536_long.txt";
// loadSolutionExact(solution_exact, SOLUTIONEXACT_FILENAME, nGrids);
/*for (int i = 1; i < nGrids; ++i) {
initX[i] = solution_exact[i];
}*/
// COMPUTE INITIAL RESIDUAL AND SET TOLERANCE
double initResidual = residual1DPoisson(initX, rhs, nGrids);
// double initSolutionError = solutionError1DPoisson(initX, solution_exact, nGrids);
// const double TOL = initResidual / residualReductionFactor; //atoi(argv[4]);
// const double TOL = initSolutionError * errorReductionFactor; // initSolutionError / errorReductionFactor; //atoi(argv[4]);
// Print parameters of the problem to screen
printf("===============INFORMATION============================\n");
// printf("GPU Name: %s\n", gpuToUse.c_str());
printf("Number of unknowns: %d\n", nDim);
printf("Threads Per Block: %d\n", threadsPerBlock);
printf("Residual of initial solution: %f\n", initResidual);
// printf("Solution Error of initial solution: %f\n", initSolutionError);
printf("Desired TOL of residual/solution error: %f\n", TOL);
// printf("Residual reduction factor: %f\n", errorReductionFactor);
printf("Number of Trials: %d\n", numTrials);
printf("======================================================\n");
// CPU - JACOBI
#ifdef RUN_CPU_FLAG
int cpuIterations = jacobiCpuIterationCountResidual(initX, rhs, nGrids, TOL);
// int cpuIterations = jacobiCpuIterationCount(initX, solution_exact, rhs, nGrids, TOL);
float cpuJacobiTimeTrial;
float cpuJacobiTimeAverage;
float cpuTotalTime = 0.0;
double cpuJacobiResidual;
double cpuJacobiSolutionError;
double * solutionJacobiCpu;
for (int iter = 0; iter < numTrials; iter++) {
clock_t cpuJacobiStartTime = clock();
solutionJacobiCpu = jacobiCpu(initX, rhs, nGrids, cpuIterations);
clock_t cpuJacobiEndTime = clock();
cpuJacobiTimeTrial = (cpuJacobiEndTime - cpuJacobiStartTime) / (double) CLOCKS_PER_SEC;
cpuJacobiTimeTrial = cpuJacobiTimeTrial * (1e3); // Convert to ms
cpuTotalTime = cpuTotalTime + cpuJacobiTimeTrial;
printf("Completed CPU trial %d\n", iter);
}
cpuJacobiTimeAverage = cpuTotalTime / numTrials;
cpuJacobiResidual = residual1DPoisson(solutionJacobiCpu, rhs, nGrids);
// cpuJacobiSolutionError = solutionError1DPoisson(solutionJacobiCpu, solution_exact, nGrids);
#endif
// GPU - JACOBI
#ifdef RUN_GPU_FLAG
int gpuIterations = jacobiGpuIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock);
// int gpuIterations = jacobiGpuIterationCount(initX, solution_exact, rhs, nGrids, TOL, threadsPerBlock);
float gpuJacobiTimeTrial;
float gpuJacobiTimeAverage;
float gputotalTime = 0.0;
double gpuJacobiResidual;
double gpuJacobiSolutionError;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
double * solutionJacobiGpu;
for (int iter = 0; iter < numTrials; iter++) {
cudaEventRecord(start, 0);
solutionJacobiGpu = jacobiGpu(initX, rhs, nGrids, gpuIterations, threadsPerBlock);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuJacobiTimeTrial, start, stop);
gputotalTime = gputotalTime + gpuJacobiTimeTrial;
printf("Completed GPU trial %d\n", iter);
}
gpuJacobiTimeAverage = gputotalTime / numTrials;
gpuJacobiResidual = residual1DPoisson(solutionJacobiGpu, rhs, nGrids);
// gpuJacobiSolutionError = solutionError1DPoisson(solutionJacobiGpu, solution_exact, nGrids);
#endif
// SHARED - JACOBI
#ifdef RUN_SHARED_FLAG
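// Eight-byte shared-memory banks put consecutive doubles in separate banks, avoiding bank
// conflicts in the double-precision shared-memory Jacobi kernel.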
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
// int sharedCycles = jacobiSharedIterationCountSolutionError(initX, solution_exact, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations);
int sharedCycles = jacobiSharedIterationCountResidual(initX, rhs, nGrids, TOL, threadsPerBlock, OVERLAP, subIterations);
float sharedJacobiTimeTrial;
float sharedJacobiTimeAverage;
float sharedtotalTime = 0;
double sharedJacobiResidual;
double sharedJacobiSolutionError;
cudaEvent_t start_sh, stop_sh;
cudaEventCreate(&start_sh);
cudaEventCreate(&stop_sh);
double * solutionJacobiShared;
for (int iter = 0; iter < numTrials; iter++) {
cudaEventRecord(start_sh, 0);
solutionJacobiShared = jacobiShared(initX, rhs, nGrids, sharedCycles, threadsPerBlock, OVERLAP, subIterations);
cudaEventRecord(stop_sh, 0);
cudaEventSynchronize(stop_sh);
cudaEventElapsedTime(&sharedJacobiTimeTrial, start_sh, stop_sh);
sharedtotalTime = sharedtotalTime + sharedJacobiTimeTrial;
printf("Completed GPU Shared trial %d\n", iter);
}
sharedJacobiTimeAverage = sharedtotalTime / numTrials;
sharedJacobiResidual = residual1DPoisson(solutionJacobiShared, rhs, nGrids);
// sharedJacobiSolutionError = solutionError1DPoisson(solutionJacobiShared, solution_exact, nGrids);
#endif
double dx = 1.0 / (nGrids - 1);
std::cout << std::fixed << std::setprecision(10) << dx*dx << std::endl;
double * residualArray = new double[nGrids];
double * residualArrayExact = new double[nGrids];
residualArray[0] = 0.0;
residualArray[nGrids-1] = 0.0;
/* for (int i = 1; i < nGrids-1; i++) {
// residualArray[i] = abs(rhs[i] + (solutionJacobiCpu[i-1] - 2.0*solutionJacobiCpu[i] + solutionJacobiCpu[i+1]) / (dx*dx));
// residualArray[i] = abs(rhs[i] + (solutionJacobiGpu[i-1] - 2.0*solutionJacobiGpu[i] + solutionJacobiGpu[i+1]) / (dx*dx));
residualArray[i] = abs(rhs[i] + (solutionJacobiShared[i-1] - 2.0*solutionJacobiShared[i] + solutionJacobiShared[i+1]) / (dx*dx));
residualArrayExact[i] = abs(rhs[i] + (solution_exact[i-1] - 2.0*solution_exact[i] + solution_exact[i+1]) / (dx*dx));
}*/
// PRINT SOLUTION - NEEDS ADJUSTING BASED ON WHICH FLAGS ARE ON
for (int i = 0; i < nGrids; i++) {
// printf("Grid %d = %f\n", i, solutionJacobiShared[i]);
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiCpu[i] << "\t" << abs(solution_exact[i] - solutionJacobiCpu[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiGpu[i] << "\t" << abs(solution_exact[i] - solutionJacobiGpu[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
// std::cout << std::fixed << std::setprecision(10) << solution_exact[i] << "\t" << solutionJacobiShared[i] << "\t" << abs(solution_exact[i] - solutionJacobiShared[i]) << "\t" << residualArray[i] << "\t" << residualArrayExact[i] << std::endl;
}
// CPU RESULTS
#ifdef RUN_CPU_FLAG
printf("===============CPU============================\n");
printf("Number of Iterations needed for Jacobi CPU: %d \n", cpuIterations);
printf("Time needed for the Jacobi CPU: %f ms\n", cpuJacobiTimeAverage);
printf("Residual of the Jacobi CPU solution is %f\n", cpuJacobiResidual);
// printf("Solution Error of the Jacobi CPU solution is %f\n", cpuJacobiSolutionError);
std::ofstream cpuResults;
cpuResults.open(CPU_FILE_NAME, std::ios::app);
// cpuResults << nDim << " " << residualReductionFactor << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiResidual << "\n";
// cpuResults << nDim << " " << errorReductionFactor << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiSolutionError << "\n";
cpuResults << nDim << " " << " " << numTrials << " " << cpuJacobiTimeAverage << " " << cpuIterations << " " << cpuJacobiResidual << "\n";
cpuResults.close();
#endif
// GPU RESULTS
#ifdef RUN_GPU_FLAG
printf("===============GPU============================\n");
printf("Number of Iterations needed for Jacobi GPU: %d \n", gpuIterations);
printf("Time needed for the Jacobi GPU: %f ms\n", gpuJacobiTimeAverage);
printf("Residual of the Jacobi GPU solution is %f\n", gpuJacobiResidual);
// printf("Solution Error of the Jacobi GPU solution is %f\n", gpuJacobiSolutionError);
std::ofstream gpuResults;
gpuResults.open(GPU_FILE_NAME, std::ios::app);
// gpuResults << nDim << " " << threadsPerBlock << " " << residualReductionFactor << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiResidual << "\n";
// gpuResults << nDim << " " << threadsPerBlock << " " << errorReductionFactor << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiSolutionError << "\n";
gpuResults << nDim << " " << threadsPerBlock << " " << numTrials << " " << gpuJacobiTimeAverage << " " << gpuIterations << " " << gpuJacobiResidual << "\n";
gpuResults.close();
#endif
// SHARED RESULTS
#ifdef RUN_SHARED_FLAG
printf("===============SHARED============================\n");
printf("Number of Cycles needed for Jacobi Shared: %d (%d) \n", sharedCycles, threadsPerBlock/2);
printf("Time needed for the Jacobi Shared: %f ms\n", sharedJacobiTimeAverage);
printf("Residual of the Jacobi Shared solution is %f\n", sharedJacobiResidual);
// printf("Solution Error of the Jacobi Shared solution is %f\n", sharedJacobiSolutionError);
std::ofstream sharedResults;
sharedResults.open(SHARED_FILE_NAME, std::ios::app);
// sharedResults << nDim << " " << threadsPerBlock << " " << residualReductionFactor << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiResidual << "\n";
// sharedResults << nDim << " " << threadsPerBlock << " " << errorReductionFactor << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiSolutionError << "\n";
sharedResults << nDim << " " << threadsPerBlock << " " << numTrials << " " << sharedJacobiTimeAverage << " " << sharedCycles << " " << subIterations << " " << sharedJacobiResidual << "\n";
sharedResults.close();
#endif
// FREE MEMORY
delete[] initX;
delete[] rhs;
// delete[] solution_exact;
#ifdef RUN_CPU_FLAG
delete[] solutionJacobiCpu;
#endif
#ifdef RUN_GPU_FLAG
delete[] solutionJacobiGpu;
#endif
#ifdef RUN_SHARED_FLAG
delete[] solutionJacobiShared;
#endif
return 0;
}
|
615cf8368e36be2682ac9f1b55d8cf9cab45c718.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
namespace output {
const int OutputBufferSize = 1e6+5;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x<0) print('-'), x=-x;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld x) {
// printf("%.2f", x);
static char buf[100];
sprintf(buf, "%.2f", x);
print(buf);
}
}
struct ios {
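// Buffered stdin reader with operator>> overloads for integers, strings and decimals;
// used to parse the matrix dimensions and entries from input.txt quickly.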
static const int IN_LEN=1<<18|1;
char buf[IN_LEN],*s,*t;
inline char read(){
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++;
}
inline bool isEOF() {
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t;
}
inline ios & operator >> (int &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios & operator >> (LL &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios &operator >> (char *s) {
int len = 0;
char ch;
for (ch=read(); ch=='\n' || ch == ' '; ch=read());
if (ch == -1) {
s[len] = 0;
return *this;
}
for (; ch!='\n' && ch != ' ' && ch != -1;ch=read())
s[len++] = ch;
s[len] = 0;
return *this;
}
inline ios &operator>>(ld &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
inline ios &operator>>(long double &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
} io;
inline void handleCudaError(hipError_t err, string name = "fuck") {
if (err != hipSuccess) {
cerr << name << endl;
cerr << hipGetErrorString(err) << endl;
exit(0);
}
}
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
handleCudaError(hipMalloc(&dst, size), "hipMalloc in copyMatrix");
handleCudaError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), "memcpy in copyMatrix");
// handleCudaError(hipMemcpy(src, dst, size, hipMemcpyDeviceToHost), "check in copyMatrix");
// cerr << "end in copyMatrix" << endl;
}
// ld *copyMatrixBack(const ld *src, int n, int m) {
// ld *res;
// int size = sizeof(ld) * n * m;
// res = (ld*)malloc(size);
// cerr << "in copyMatrixBack: size=" << size << endl;
// handleCudaError(hipMemcpy(res, src, size, hipMemcpyDeviceToHost), "memcpy in copyMatrixBack");
// // memcpy(res.a, ptr, size);)
// return res;
// }
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
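// One thread per output element: thread (i, j) accumulates the dot product of row i of d_a
// with column j of d_b and stores it into d_c in row-major order.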
int i = blockDim.x * blockIdx.x + threadIdx.x,
j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= an || j >= bm) return;
ld sum = 0;
if (i < an && j < bm) {
for (int k=0; k<am; ++k)
sum += d_a[i * am + k] * d_b[k * bm + j];
}
if (i * bm + j < an * bm)
d_c[i * bm + j] = sum;
// int index = threadIdx.x;
// if (index < an * bm)
// d_c[index] = 1;
}
void outputMatrix(ld *a, int n, int m) {
// output::print(n); output::print(',');
// output::print(m); output::print('\n');
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
int main()
{
// #ifndef Weaverzhu
freopen("input.txt", "r", stdin);
freopen("output.txt", "w", stdout);
// #endif
io >> an >> am; h_a = (ld*)malloc(sizeof(ld) * an * am);
for (int i=0; i<an; ++i)
for (int j=0; j<am; ++j)
io >> h_a[i*am + j];
io >> bn >> bm; h_b = (ld*)malloc(sizeof(ld) * bn * bm);
for (int i=0; i<bn; ++i)
for (int j=0; j<bm; ++j)
io >> h_b[i*bm + j];
// B.readtrans();
// outputMatrix(h_a, an, am);
// outputMatrix(h_b, bn, bm);
int block_size = 16;
dim3 threads(block_size, block_size);
dim3 grid((an + threads.x - 1) / threads.x, (bm + threads.y - 1) / threads.y);
n = an;
m = bm;
// fprintf(stderr, "grid= %d,%d,%d threads= %d,%d,%d\n", grid.x, grid.y, grid.z, threads.x, threads.y, threads.z);
// read into main memory
copyMatrix(h_a, d_a, an, am);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(hipMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c");
// puts("entering danger");
hipLaunchKernelGGL(matrixMult, grid, threads, 0, 0, d_a, d_b, d_c, an, bm, am); // grid dimensions first, then block dimensions
// if (hipGetLastError() != hipSuccess) {
// cerr << "failed in matrixMult" << endl;
// exit(0);
// } else cerr << "looks good in matrixMult" << endl;
// puts("FUCK");
// ld *c = copyMatrixBack(d_c, n, m);
h_c = (ld*)malloc(sizeof(ld) * n * m);
int size = sizeof(ld) * n * m;
handleCudaError(hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost), "memcpy back");
outputMatrix(h_c, n, m);
output::flush();
return 0;
}
| 615cf8368e36be2682ac9f1b55d8cf9cab45c718.cu | #include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
namespace output {
const int OutputBufferSize = 1e6+5;
char buffer[OutputBufferSize];
char *s = buffer;
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x<0) print('-'), x=-x;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
inline void print(ld x) {
// printf("%.2f", x);
static char buf[100];
sprintf(buf, "%.2f", x);
print(buf);
}
}
struct ios {
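// Buffered stdin reader with operator>> overloads for integers, strings and decimals;
// used to parse the matrix dimensions and entries from input.txt quickly.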
static const int IN_LEN=1<<18|1;
char buf[IN_LEN],*s,*t;
inline char read(){
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++;
}
inline bool isEOF() {
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t;
}
inline ios & operator >> (int &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios & operator >> (LL &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
inline ios &operator >> (char *s) {
int len = 0;
char ch;
for (ch=read(); ch=='\n' || ch == ' '; ch=read());
if (ch == -1) {
s[len] = 0;
return *this;
}
for (; ch!='\n' && ch != ' ' && ch != -1;ch=read())
s[len++] = ch;
s[len] = 0;
return *this;
}
inline ios &operator>>(ld &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
inline ios &operator>>(long double &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
} io;
inline void handleCudaError(cudaError_t err, string name = "fuck") {
if (err != cudaSuccess) {
cerr << name << endl;
cerr << cudaGetErrorString(err) << endl;
exit(0);
}
}
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
int size = sizeof(ld) * n * m;
handleCudaError(cudaMalloc(&dst, size), "cudaMalloc in copyMatrix");
handleCudaError(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice), "memcpy in copyMatrix");
// handleCudaError(cudaMemcpy(src, dst, size, cudaMemcpyDeviceToHost), "check in copyMatrix");
// cerr << "end in copyMatrix" << endl;
}
// ld *copyMatrixBack(const ld *src, int n, int m) {
// ld *res;
// int size = sizeof(ld) * n * m;
// res = (ld*)malloc(size);
// cerr << "in copyMatrixBack: size=" << size << endl;
// handleCudaError(cudaMemcpy(res, src, size, cudaMemcpyDeviceToHost), "memcpy in copyMatrixBack");
// // memcpy(res.a, ptr, size);)
// return res;
// }
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
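// One thread per output element: thread (i, j) accumulates the dot product of row i of d_a
// with column j of d_b and stores it into d_c in row-major order.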
int i = blockDim.x * blockIdx.x + threadIdx.x,
j = blockDim.y * blockIdx.y + threadIdx.y;
if (i >= an || j >= bm) return;
ld sum = 0;
if (i < an && j < bm) {
for (int k=0; k<am; ++k)
sum += d_a[i * am + k] * d_b[k * bm + j];
}
if (i * bm + j < an * bm)
d_c[i * bm + j] = sum;
// int index = threadIdx.x;
// if (index < an * bm)
// d_c[index] = 1;
}
void outputMatrix(ld *a, int n, int m) {
// output::print(n); output::print(',');
// output::print(m); output::print('\n');
for (int i=0; i<n; ++i) {
int base = i * m;
output::print(a[base]);
for (int j=1; j<m; ++j) {
output::print(',');
output::print(a[base + j]);
}
output::print('\n');
}
}
int main()
{
// #ifndef Weaverzhu
freopen("input.txt", "r", stdin);
freopen("output.txt", "w", stdout);
// #endif
io >> an >> am; h_a = (ld*)malloc(sizeof(ld) * an * am);
for (int i=0; i<an; ++i)
for (int j=0; j<am; ++j)
io >> h_a[i*am + j];
io >> bn >> bm; h_b = (ld*)malloc(sizeof(ld) * bn * bm);
for (int i=0; i<bn; ++i)
for (int j=0; j<bm; ++j)
io >> h_b[i*bm + j];
// B.readtrans();
// outputMatrix(h_a, an, am);
// outputMatrix(h_b, bn, bm);
int block_size = 16;
dim3 threads(block_size, block_size);
dim3 grid((an + threads.x - 1) / threads.x, (bm + threads.y - 1) / threads.y);
n = an;
m = bm;
// fprintf(stderr, "grid= %d,%d,%d threads= %d,%d,%d\n", grid.x, grid.y, grid.z, threads.x, threads.y, threads.z);
// read into main memory
copyMatrix(h_a, d_a, an, am);
copyMatrix(h_b, d_b, bn, bm);
handleCudaError(cudaMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c");
// puts("entering danger");
matrixMult<<<grid, threads>>>(d_a, d_b, d_c, an, bm, am); // grid dimensions first, then block dimensions
// if (cudaGetLastError() != cudaSuccess) {
// cerr << "failed in matrixMult" << endl;
// exit(0);
// } else cerr << "looks good in matrixMult" << endl;
// puts("FUCK");
// ld *c = copyMatrixBack(d_c, n, m);
h_c = (ld*)malloc(sizeof(ld) * n * m);
int size = sizeof(ld) * n * m;
handleCudaError(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost), "memcpy back");
outputMatrix(h_c, n, m);
output::flush();
return 0;
}
|
9dd7cd92a316ad4873a4e04f0137946142b23ed7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bvh/cuda/lbvh_builder.h"
#include "bintree/bintree_gen.h"
#include "sampling/random.h"
#include "time/timer.h"
#include "basic/cuda_domains.h"
#include "tree/model.h"
#include "tree/cuda/reduce.h"
namespace nih {
struct bbox_functor
{
NIH_HOST_DEVICE Bbox4f operator() (
const Vector4f op1,
const Vector4f op2) const
{
Bbox4f result;
result.insert( op1 );
result.insert( op2 );
return result;
}
NIH_HOST_DEVICE Bbox4f operator() (
const Bbox4f op1,
const Bbox4f op2) const
{
Bbox4f result;
result.insert( op1 );
result.insert( op2 );
return result;
}
};
/// A simple binary tree context implementation to be used with
/// the Bvh generate() function.
struct LBVH_context
{
/// Cuda accessor struct
struct Context
{
NIH_HOST_DEVICE Context() {}
NIH_HOST_DEVICE Context(Bvh_node* nodes, uint2* leaves) :
m_nodes(nodes), m_leaves(leaves) {}
/// write a new node
NIH_HOST_DEVICE void write_node(const uint32 node, bool p1, bool p2, const uint32 offset, const uint32 skip_node, const uint32 level, const uint32 begin, const uint32 end, const uint32 split_index)
{
const uint32 type = p1 == false && p2 == false ? Bvh_node::kLeaf : Bvh_node::kInternal;
m_nodes[ node ] = Bvh_node( type, offset, skip_node );
}
/// write a new leaf
NIH_HOST_DEVICE void write_leaf(const uint32 index, const uint32 begin, const uint32 end)
{
m_leaves[ index ] = make_uint2( begin, end );
}
Bvh_node* m_nodes; ///< node pointer
uint2* m_leaves; ///< leaf pointer
};
/// constructor
LBVH_context(
thrust::host_vector<Bvh_node>* nodes,
thrust::host_vector<uint2>* leaves) :
m_nodes( nodes ), m_leaves( leaves ) {}
/// reserve space for more nodes
void reserve_nodes(const uint32 n) { if (m_nodes->size() < n) m_nodes->resize(n); }
/// reserve space for more leaves
void reserve_leaves(const uint32 n) { if (m_leaves->size() < n) m_leaves->resize(n); }
/// return a cuda context
Context get_context()
{
return Context(
thrust::raw_pointer_cast( &m_nodes->front() ),
thrust::raw_pointer_cast( &m_leaves->front() ) );
}
thrust::host_vector<Bvh_node>* m_nodes;
thrust::host_vector<uint2>* m_leaves;
};
void lbvh_test()
{
fprintf(stderr, "lbvh test... started\n");
const uint32 n_points = 4*1024*1024;
const uint32 n_tests = 100;
thrust::host_vector<Vector4f> h_points( n_points );
Random random;
for (uint32 i = 0; i < n_points; ++i)
h_points[i] = Vector4f( random.next(), random.next(), random.next(), 1.0f );
thrust::device_vector<Vector4f> d_points( h_points );
thrust::device_vector<Vector4f> d_unsorted_points( h_points );
thrust::device_vector<Bvh_node> bvh_nodes;
thrust::device_vector<uint2> bvh_leaves;
thrust::device_vector<uint32> bvh_index;
cuda::LBVH_builder<uint64> builder( bvh_nodes, bvh_leaves, bvh_index );
hipEvent_t start, stop;
hipEventCreate( &start );
hipEventCreate( &stop );
float time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
d_points = d_unsorted_points;
hipDeviceSynchronize();
float dtime;
hipEventRecord( start, 0 );
builder.build(
Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
d_points.begin(),
d_points.end(),
16u );
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
hipEventDestroy( start );
hipEventDestroy( stop );
{
thrust::host_vector<uint64> h_codes( builder.m_codes );
thrust::host_vector<Bvh_node> h_nodes;
thrust::host_vector<uint2> h_leaves;
const uint32 n_codes = n_points;
LBVH_context tree( &h_nodes, &h_leaves );
generate(
n_codes,
&h_codes[0],
60,
16u,
false,
tree );
thrust::host_vector<Bvh_node> d_nodes( bvh_nodes );
thrust::host_vector<uint2> d_leaves( bvh_leaves );
// traverse both trees top-down to see whether there's any inconsistencies...
uint32 h_node_id = 0;
uint32 d_node_id = 0;
uint32 node_index = 0;
uint32 leaf_index = 0;
while (h_node_id != uint32(-1))
{
if (d_node_id == uint32(-1))
{
fprintf(stderr, "device node is invalid!\n");
break;
}
Bvh_node h_node = h_nodes[ h_node_id ];
Bvh_node d_node = d_nodes[ d_node_id ];
if (h_node.is_leaf() != d_node.is_leaf())
{
fprintf(stderr, "host node and device node have different topology! (%u) (%s, %s)\n", node_index, h_node.is_leaf() ? "leaf" : "split", d_node.is_leaf() ? "leaf" : "split" );
break;
}
if (h_node.is_leaf())
{
const uint2 h_leaf = h_leaves[ h_node.get_leaf_index() ];
const uint2 d_leaf = d_leaves[ d_node.get_leaf_index() ];
if (h_leaf.x != d_leaf.x ||
h_leaf.y != d_leaf.y)
{
fprintf(stderr, "host and device leaves differ! [%u,%u) != [%u,%u) (%u:%u)\n",
h_leaf.x, h_leaf.y,
d_leaf.x, d_leaf.y,
node_index, leaf_index );
break;
}
h_node_id = h_node.get_skip_node();
d_node_id = d_node.get_skip_node();
leaf_index++;
}
else
{
h_node_id = h_node.get_child(0);
d_node_id = d_node.get_child(0);
}
node_index++;
}
}
fprintf(stderr, "lbvh test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " points/sec : %f M\n", (n_points / time) / 1.0e6f );
fprintf(stderr, " nodes : %u\n", builder.m_node_count );
fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
for (uint32 level = 0; level < 60; ++level)
fprintf(stderr, " level %u : %u nodes\n", level, builder.m_levels[level+1] - builder.m_levels[level] );
fprintf(stderr, "lbvh bbox reduction test... started\n");
BFTree<Bvh_node*,device_domain> bvh(
thrust::raw_pointer_cast( &bvh_nodes.front() ),
builder.m_leaf_count,
thrust::raw_pointer_cast( &bvh_leaves.front() ),
60u,
builder.m_levels );
thrust::device_vector<Bbox4f> d_leaf_bboxes( builder.m_leaf_count );
thrust::device_vector<Bbox4f> d_node_bboxes( builder.m_node_count );
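// The timed loop below runs a bottom-up reduction over the BVH: point bounding boxes are
// combined into per-leaf and then per-node boxes using bbox_functor, starting from the
// empty Bbox4f() identity.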
hipEventCreate( &start );
hipEventCreate( &stop );
time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
float dtime;
hipEventRecord( start, 0 );
cuda::tree_reduce(
bvh,
thrust::raw_pointer_cast( &d_points.front() ),
thrust::raw_pointer_cast( &d_leaf_bboxes.front() ),
thrust::raw_pointer_cast( &d_node_bboxes.front() ),
bbox_functor(),
Bbox4f() );
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
hipEventDestroy( start );
hipEventDestroy( stop );
fprintf(stderr, "lbvh bbox reduction test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " points/sec : %f M\n", (n_points / time) / 1.0e6f );
}
} // namespace nih
| 9dd7cd92a316ad4873a4e04f0137946142b23ed7.cu | /*
* Copyright (c) 2010-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "bvh/cuda/lbvh_builder.h"
#include "bintree/bintree_gen.h"
#include "sampling/random.h"
#include "time/timer.h"
#include "basic/cuda_domains.h"
#include "tree/model.h"
#include "tree/cuda/reduce.h"
namespace nih {
struct bbox_functor
{
NIH_HOST_DEVICE Bbox4f operator() (
const Vector4f op1,
const Vector4f op2) const
{
Bbox4f result;
result.insert( op1 );
result.insert( op2 );
return result;
}
NIH_HOST_DEVICE Bbox4f operator() (
const Bbox4f op1,
const Bbox4f op2) const
{
Bbox4f result;
result.insert( op1 );
result.insert( op2 );
return result;
}
};
/// A simple binary tree context implementation to be used with
/// the Bvh generate() function.
struct LBVH_context
{
/// Cuda accessor struct
struct Context
{
NIH_HOST_DEVICE Context() {}
NIH_HOST_DEVICE Context(Bvh_node* nodes, uint2* leaves) :
m_nodes(nodes), m_leaves(leaves) {}
/// write a new node
NIH_HOST_DEVICE void write_node(const uint32 node, bool p1, bool p2, const uint32 offset, const uint32 skip_node, const uint32 level, const uint32 begin, const uint32 end, const uint32 split_index)
{
const uint32 type = p1 == false && p2 == false ? Bvh_node::kLeaf : Bvh_node::kInternal;
m_nodes[ node ] = Bvh_node( type, offset, skip_node );
}
/// write a new leaf
NIH_HOST_DEVICE void write_leaf(const uint32 index, const uint32 begin, const uint32 end)
{
m_leaves[ index ] = make_uint2( begin, end );
}
Bvh_node* m_nodes; ///< node pointer
uint2* m_leaves; ///< leaf pointer
};
/// constructor
LBVH_context(
thrust::host_vector<Bvh_node>* nodes,
thrust::host_vector<uint2>* leaves) :
m_nodes( nodes ), m_leaves( leaves ) {}
/// reserve space for more nodes
void reserve_nodes(const uint32 n) { if (m_nodes->size() < n) m_nodes->resize(n); }
/// reserve space for more leaves
void reserve_leaves(const uint32 n) { if (m_leaves->size() < n) m_leaves->resize(n); }
/// return a cuda context
Context get_context()
{
return Context(
thrust::raw_pointer_cast( &m_nodes->front() ),
thrust::raw_pointer_cast( &m_leaves->front() ) );
}
thrust::host_vector<Bvh_node>* m_nodes;
thrust::host_vector<uint2>* m_leaves;
};
void lbvh_test()
{
fprintf(stderr, "lbvh test... started\n");
const uint32 n_points = 4*1024*1024;
const uint32 n_tests = 100;
thrust::host_vector<Vector4f> h_points( n_points );
Random random;
for (uint32 i = 0; i < n_points; ++i)
h_points[i] = Vector4f( random.next(), random.next(), random.next(), 1.0f );
thrust::device_vector<Vector4f> d_points( h_points );
thrust::device_vector<Vector4f> d_unsorted_points( h_points );
thrust::device_vector<Bvh_node> bvh_nodes;
thrust::device_vector<uint2> bvh_leaves;
thrust::device_vector<uint32> bvh_index;
cuda::LBVH_builder<uint64> builder( bvh_nodes, bvh_leaves, bvh_index );
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
float time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
d_points = d_unsorted_points;
cudaThreadSynchronize();
float dtime;
cudaEventRecord( start, 0 );
builder.build(
Bbox3f( Vector3f(0.0f), Vector3f(1.0f) ),
d_points.begin(),
d_points.end(),
16u );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
cudaEventDestroy( start );
cudaEventDestroy( stop );
{
thrust::host_vector<uint64> h_codes( builder.m_codes );
thrust::host_vector<Bvh_node> h_nodes;
thrust::host_vector<uint2> h_leaves;
const uint32 n_codes = n_points;
LBVH_context tree( &h_nodes, &h_leaves );
generate(
n_codes,
&h_codes[0],
60,
16u,
false,
tree );
thrust::host_vector<Bvh_node> d_nodes( bvh_nodes );
thrust::host_vector<uint2> d_leaves( bvh_leaves );
// traverse both trees top-down to see whether there's any inconsistencies...
uint32 h_node_id = 0;
uint32 d_node_id = 0;
uint32 node_index = 0;
uint32 leaf_index = 0;
while (h_node_id != uint32(-1))
{
if (d_node_id == uint32(-1))
{
fprintf(stderr, "device node is invalid!\n");
break;
}
Bvh_node h_node = h_nodes[ h_node_id ];
Bvh_node d_node = d_nodes[ d_node_id ];
if (h_node.is_leaf() != d_node.is_leaf())
{
fprintf(stderr, "host node and device node have different topology! (%u) (%s, %s)\n", node_index, h_node.is_leaf() ? "leaf" : "split", d_node.is_leaf() ? "leaf" : "split" );
break;
}
if (h_node.is_leaf())
{
const uint2 h_leaf = h_leaves[ h_node.get_leaf_index() ];
const uint2 d_leaf = d_leaves[ d_node.get_leaf_index() ];
if (h_leaf.x != d_leaf.x ||
h_leaf.y != d_leaf.y)
{
fprintf(stderr, "host and device leaves differ! [%u,%u) != [%u,%u) (%u:%u)\n",
h_leaf.x, h_leaf.y,
d_leaf.x, d_leaf.y,
node_index, leaf_index );
break;
}
h_node_id = h_node.get_skip_node();
d_node_id = d_node.get_skip_node();
leaf_index++;
}
else
{
h_node_id = h_node.get_child(0);
d_node_id = d_node.get_child(0);
}
node_index++;
}
}
fprintf(stderr, "lbvh test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " points/sec : %f M\n", (n_points / time) / 1.0e6f );
fprintf(stderr, " nodes : %u\n", builder.m_node_count );
fprintf(stderr, " leaves : %u\n", builder.m_leaf_count );
for (uint32 level = 0; level < 60; ++level)
fprintf(stderr, " level %u : %u nodes\n", level, builder.m_levels[level+1] - builder.m_levels[level] );
fprintf(stderr, "lbvh bbox reduction test... started\n");
BFTree<Bvh_node*,device_domain> bvh(
thrust::raw_pointer_cast( &bvh_nodes.front() ),
builder.m_leaf_count,
thrust::raw_pointer_cast( &bvh_leaves.front() ),
60u,
builder.m_levels );
thrust::device_vector<Bbox4f> d_leaf_bboxes( builder.m_leaf_count );
thrust::device_vector<Bbox4f> d_node_bboxes( builder.m_node_count );
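// The timed loop below runs a bottom-up reduction over the BVH: point bounding boxes are
// combined into per-leaf and then per-node boxes using bbox_functor, starting from the
// empty Bbox4f() identity.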
cudaEventCreate( &start );
cudaEventCreate( &stop );
time = 0.0f;
for (uint32 i = 0; i <= n_tests; ++i)
{
float dtime;
cudaEventRecord( start, 0 );
cuda::tree_reduce(
bvh,
thrust::raw_pointer_cast( &d_points.front() ),
thrust::raw_pointer_cast( &d_leaf_bboxes.front() ),
thrust::raw_pointer_cast( &d_node_bboxes.front() ),
bbox_functor(),
Bbox4f() );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &dtime, start, stop );
if (i) // skip the first run
time += dtime;
}
time /= 1000.0f * float(n_tests);
cudaEventDestroy( start );
cudaEventDestroy( stop );
fprintf(stderr, "lbvh bbox reduction test... done\n");
fprintf(stderr, " time : %f ms\n", time * 1000.0f );
fprintf(stderr, " points/sec : %f M\n", (n_points / time) / 1.0e6f );
}
} // namespace nih
|
bcf4db357f4454f44f4c64cab1350d5052b1c2c2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.hip"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
unordered_map<string, unordered_map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
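// Floating-point comparison functors: equality and ordering are evaluated with an
// EPSILON tolerance, so two values closer than EPSILON are treated as equal.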
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1900 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
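// Creates an empty CudaSet that shares this set's schema and metadata (column names,
// types, char sizes, decimal settings, segment info) but contains no records.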
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
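// Reads only the rows listed in prm_vh from the on-disk segment file of 'colname'.
// The segment header holds the compressed byte count, a lower (base) value and the
// per-value bit width; data is fetched in 4KB blocks and copied into 'dest'.
// Returns the base value so the caller can reconstruct the actual column values.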
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
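// Loads a whole compressed column segment from disk into the host-side column buffer.
// In interactive mode the raw file is cached in pinned host memory (the buffers map) and
// old entries are evicted in FIFO order when the cache would exceed system memory.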
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
hipHostFree(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
hipHostMalloc((void**) &buff, fileSize,hipHostMallocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s ,read only", cnt+52, f1.c_str());
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
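// Copies one segment of a column to the GPU: uncompressed data is copied directly,
// compressed data is decompressed with pfor_decompress (decimal floats are stored as
// scaled integers and converted back to float unless phase_copy is set).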
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(hipMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(hipMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
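// Marks group boundaries by comparing each row's grouping columns with the previous
// row (the data is expected to already be ordered on these columns), then stores the
// starting row index of every group in 'grp'.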
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, may need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
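// Builds a row permutation for the sort keys in 'sf' (ascending order): numeric keys are
// refined on the device via update_permutation, char keys on the host via
// update_permutation_char_host.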
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
hipFree(temp);
}
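// Writes mCount records starting at 'offset' to binary segment files, optionally sorting
// them on op_sort and splitting them into partitions. Integers and decimal floats are
// PFOR-compressed, plain floats are written raw, and char columns are dictionary-encoded
// via compress_char; per-column .header files are rewritten when needed.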
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && char_hash[colname].size() == 0 && varencoding[colname] != 'N') {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(), ios::binary | ios::ate);
if(binary_file) {
auto sz = binary_file.tellg();
binary_file.seekg(0, binary_file.beg);
char* strings = new char[sz];
binary_file.read(strings, sz);
binary_file.close();
//unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < sz/char_size[colname]; z++) {
char_hash[colname][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
hipFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << idx << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
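// Formats up to 'limit' records (all records when limit is 0) and hands each row to the
// row_cb callback as an array of C strings; dictionary-encoded char columns are resolved
// by reading the string values back from their data files.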
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- This should/will be converted to an array holding pointers to individually malloc'ed, appropriately sized structures --
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
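// Writes the set either as delimited text (binary == 0), streaming segment by segment
// when the data is filtered or compressed, or as compressed binary segments via
// compress(), updating the data dictionary in the binary case.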
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.reserve(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr)
cout << "Could not open file " << file_name << endl;
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// now we have decompressed records on the host
//call setSegments and compress columns in every segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
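// Dictionary-encodes a char column segment: each distinct string (keyed by its 64-bit
// MurmurHash) is appended once to the shared string file, the per-row dictionary indices
// are PFOR-compressed into the .idx file, and the raw hashes go into the .hash file.
// With varencoding == 'N' no deduplication is done and every row gets a fresh index.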
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
unordered_map<unsigned long long int, size_t>::iterator iter;
vector<int_type> test(mCount);
if(char_hash[colname].size() == 0 && varencoding[colname] == 'N')
char_hash[colname][0] = 0;
if(varencoding[colname] != 'N') {
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[colname].find(hash_array[i]);
if(iter == char_hash[colname].end()) {
cnt = char_hash[colname].size();
char_hash[colname][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
}
else {
test[i] = iter->second;
};
};
}
else {
auto cnt = char_hash[colname][0];
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
//cnt = char_hash[colname][0];
//char_hash[colname][0]++;
cnt++;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
};
char_hash[colname][0] = cnt;
};
memcpy(h_columns_int[colname].data(), test.data(), mCount*8);
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
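// Streams a delimited text file in chunks sized to the available GPU memory: each chunk
// is copied to the device, newline offsets are located, parse_functor splits the records
// into per-column buffers, and the fields are converted to int / decimal / timestamp /
// float / char columns. Returns true once the whole file has been consumed.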
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
hipMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(hipMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
hipMemset(dest[i],0,max_len*rec_sz);
}
else {
hipMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
hipMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, hipMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
hipFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
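// CudaSet::free releases the host-side copies of every column (and the filter
// permutation vector) and then frees the corresponding device buffers via deAllocOnDevice().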
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
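// Pre-allocates an 8*maxRecs-byte device buffer and pushes it onto the shared pool
// (alloced_mem) that the op() routines below draw their result buffers from.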
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(hipMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
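// Element-wise AND of two boolean device columns; the result overwrites column1 and
// column2's storage is freed. logical_or below works the same way.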
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
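// Constant-versus-constant comparison. op_type codes used throughout: 2 '>', 1 '<',
// 6 '>=', 5 '<=', 4 '=', anything else '!='. The single boolean result is broadcast
// into a freshly allocated device vector of mRecCount entries.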
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
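// Column-versus-constant comparison for integer/decimal columns. p1 scales the column
// values by 10^p1 and p2 scales the constant by 10^p2 so operands with different
// decimal precision are compared on the same scale.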
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>()); // second input range must start at dev_ptr2
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
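// Mixed int/float arithmetic: the integer column is converted to float first; 'reverse'
// swaps the operand order, which matters for the non-commutative MINUS and DIV cases.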
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float, writing into the pooled buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
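// Integer/decimal column versus integer constant, with decimal alignment via p1 (column
// scale) and p2 (constant scale); the result lands in a buffer taken from the allocation pool.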
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
int_type d1 = d;   // unscaled copy of the constant, used only by MUL below
if(p2)
d = d*(int_type)pow(10, p2);   // scale the constant to the column's decimal precision
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<int_type>()); // d was already scaled by 10^p2 above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<int_type>()); // d was already scaled by 10^p2 above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<int_type>()); // d was already scaled by 10^p2 above
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
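// Two-column integer/decimal arithmetic; p1/p2 align the decimal scales of the operands
// and 'reverse' swaps the operand order.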
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
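// Loads one segment of an on-disk index: rebuilds the value-to-slot dictionary on the host
// and copies the packed index values to the GPU. In interactive mode the raw file is cached
// in pinned host memory so later segments avoid re-reading from disk.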
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res = 0; // initialized: the interactive branch never reads the trailing flag byte
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
hipHostMalloc(&buff, fileSize, hipHostMallocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, hipMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek(f, -16, SEEK_CUR);
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
hipMalloc((void **) &d_str, (vals_count+2)*int_size);
hipMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, hipMemcpyHostToDevice);
if(idx_vals.count(index_name))
hipFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
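// Swaps the direction of a comparison operator when the operands are exchanged:
// '>' <-> '<' and '>=' <-> '<='; equality operators are left unchanged.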
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
hipMemGetInfo(&available, &total);
return available;
} ;
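// For a filtered set, makes sure the shared staging buffer (alloced_tmp) is large enough for
// one segment of the source table; otherwise allocates each requested column on the device.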
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
hipFree(alloced_tmp);
};
hipMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
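// Moves one column of the current segment into 'a': gathers through the filter permutation
// when prm_index is 'R', otherwise copies the whole segment.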
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
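// Widens columns that were copied in 8/16/32-bit packed form back to 64-bit values and adds
// the per-column base value recorded in cpy_init_val; float columns are then converted back
// from their scaled integer representation.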
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
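// Brings the requested columns of one segment onto the GPU. For filtered sets the filter is
// applied first (when flt is set) and device columns are optionally resized (rsz) so the new
// rows are appended after previously copied segments.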
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
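// Gathers the rows selected by the permutation prm_d from the staging buffer into the
// destination column at 'offset', dispatching on the packed width recorded in cpy_bits
// when the segment was copied in compressed form.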
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
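// Copies an entire segment from the staging buffer into the destination column at 'offset',
// again dispatching on the packed width in cpy_bits.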
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data())); // offset is applied in the copy below, as in the 8- and 32-bit cases
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<short int> d_col_source((short int*)alloced_tmp);
thrust::device_ptr<short int> d_col_dest((short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data())); // offset is applied in the copy below, as in the 8- and 32-bit cases
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
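// Loads the requested columns (plus the join key f2) of the right-hand table for segments
// [start_segment, end_segment) onto the device and returns the number of rows copied.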
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
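// Chooses a segment count so that the uncompressed size of the listed columns stays within
// a fraction of the currently free device memory.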
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
hipMemcpy( (void*)tmp, (void*) key, RecCount*len, hipMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
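// Evaluates the filter of derived set 's' against one segment of source set 'f'. The zone-map
// check may accept ('A') or reject the whole segment; otherwise ('R') the predicate is evaluated
// on the GPU and the indices of matching rows are stored in b->prm_d.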
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
hipFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
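// Loads the join column f2 of the right-hand table for the given segment range, plus
// any extra columns referenced in op_alt, and returns the number of records loaded.
// Uncompressed sets load the join column separately from the remaining columns.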
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
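// Inserts the records of set s into set f. Three paths are handled: disk-to-disk
// (segment files are copied over and string dictionaries merged), memory-to-memory
// (host vectors are appended), and memory-to-disk (the in-memory segments are
// compressed and appended to the destination files, then headers are rewritten).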
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // copy in buf-sized chunks; BUFSIZ may exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // copy in buf-sized chunks; BUFSIZ may exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
hipMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, hipMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
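// Deletes the rows of a disk-based set that match the current filter expression
// (op_type/op_value/...). Segments flagged by the zone maps are decompressed, the
// surviving rows are gathered and re-compressed in place, segment files left over
// at the end are removed, and the column headers are rewritten with the new totals.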
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(hipMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
hipFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // rewrite every column header with the new segment count and record total
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
hipFree(d);
};
};
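// Persists the data dictionary. On-disk layout (lengths and counts written as 8 bytes):
// <#tables> { <name_len><table_name> <#columns> { <name_len><column_name> <col_type:4> <col_length:4> } }
// load_col_data below reads the same layout back in.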
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
return std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end();
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
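// Like check_bitmap_file_exist, but when every referenced join bitmap index is present
// it also moves the right table's filter expression over to the left table, rewriting
// indexed column names to "<left_file>.<right_file>.<column>" so the bitmap files get
// used, and AND-ing it with any filter the left table already had.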
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
hipMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, hipMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
hipMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
hipMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, hipMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
hipFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
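// Dictionary-encodes a host column and bit-packs the codes into 64-bit words.
// File layout: <dict size:4> <sorted dictionary values, int_size bytes each>
// <values per word:4> <bits per value:4> <word count:4> <value count:4> <packed 64-bit words>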
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors, bool& free_mem) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_int_by_name(s1_val);
free_mem = 0;
}
else {
t = exe_vectors.top();
exe_vectors.pop();
free_mem = 1;
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
| bcf4db357f4454f44f4c64cab1350d5052b1c2c2.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cctype>
#include <algorithm>
#include <functional>
#include <numeric>
#include <ctime>
#include <time.h>
#include "cm.h"
#include "atof.h"
#include "compress.cu"
#include "sorts.cu"
#include "filter.h"
#include "callbacks.h"
#include "zone_map.h"
#ifdef _WIN64
#define atoll(S) _atoi64(S)
#define fseek(S, S1, S2) _fseeki64(S, S1, S2)
#include <windows.h>
#else
#include <unistd.h>
#endif
using namespace std;
using namespace thrust::placeholders;
size_t total_count = 0, total_max;
clock_t tot;
unsigned int total_segments = 0, old_segments;
size_t process_count;
size_t alloced_sz = 0;
bool fact_file_loaded = 1;
bool verbose;
bool interactive, ssd, delta, star;
unsigned int prs;
void* d_v = nullptr;
void* s_v = nullptr;
queue<string> op_sort;
queue<string> op_presort;
queue<string> op_type;
bool op_case = 0;
string grp_val;
queue<string> op_value;
queue<int_type> op_nums;
queue<float_type> op_nums_f;
queue<unsigned int> op_nums_precision;
queue<string> col_aliases;
map<string, map<string, col_data> > data_dict;
unordered_map<string, unordered_map<unsigned long long int, size_t> > char_hash;
map<string, char*> index_buffers;
map<string, unsigned long long int*> idx_vals;
map<string, char*> buffers;
map<string, size_t> buffer_sizes;
size_t total_buffer_size;
queue<string> buffer_names;
void* alloced_tmp;
bool alloced_switch = 0;
map<string,CudaSet*> varNames; // STL map to manage CudaSet variables
map<string, unsigned int> cpy_bits;
map<string, long long int> cpy_init_val;
char* readbuff = nullptr;
thrust::device_vector<unsigned int> rcol_matches;
thrust::device_vector<int_type> rcol_dev;
struct f_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) < EPSILON) && ((x-y) > -EPSILON));
}
};
struct f_less
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((y-x) > EPSILON);
}
};
struct f_greater
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON);
}
};
struct f_greater_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((x-y) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_less_equal
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return (((y-x) > EPSILON) || (((x-y) < EPSILON) && ((x-y) > -EPSILON)));
}
};
struct f_not_equal_to
{
__host__ __device__
bool operator()(const float_type x, const float_type y)
{
return ((x-y) > EPSILON) || ((x-y) < -EPSILON);
}
};
struct long_to_float_type
{
__host__ __device__
float_type operator()(const int_type x)
{
return (float_type)x;
}
};
template <typename T>
struct power_functor : public thrust::unary_function<T,T>
{
unsigned int a;
__host__ __device__
power_functor(unsigned int a_) {
a = a_;
}
__host__ __device__
T operator()(T x)
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
struct is_zero
{
__host__ __device__
bool operator()(const int &x)
{
return x == 0;
}
};
int get_utc_offset() {
time_t zero = 24*60*60L;
struct tm * timeptr;
int gmtime_hours;
/* get the local time for Jan 2, 1970 00:00 UTC */
timeptr = localtime( &zero );
gmtime_hours = timeptr->tm_hour;
/* if the local time is the "day before" the UTC, subtract 24 hours
from the hours to get the UTC offset */
if( timeptr->tm_mday < 2 )
gmtime_hours -= 24;
return gmtime_hours;
}
/*
the utc analogue of mktime,
(much like timegm on some systems)
*/
time_t tm_to_time_t_utc( struct tm * timeptr ) {
/* gets the epoch time relative to the local time zone,
and then adds the appropriate number of seconds to make it UTC */
return mktime( timeptr ) + get_utc_offset() * 3600;
}
/*class power_functor {
unsigned int a;
public:
power_functor(unsigned int a_) { a = a_; }
__host__ __device__ int_type operator()(int_type x) const
{
return x*(unsigned int)pow((double)10,(double)a);
}
};
*/
void allocColumns(CudaSet* a, queue<string> fields);
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt);
void mygather(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void mycopy(unsigned int tindex, unsigned int idx, CudaSet* a, CudaSet* t, size_t count, size_t g_size);
void write_compressed_char(string file_name, unsigned int index, size_t mCount);
size_t getFreeMem();
size_t getTotalSystemMemory();
void process_error(int severity, string err);
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
: mColumnCount(0), mRecCount(0)
{
initialize(nameRef, typeRef, sizeRef, colsRef, Recs);
source = 1;
text_source = 1;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name, unsigned int max)
: mColumnCount(0), mRecCount(0)
{
maxRecs = max;
initialize(nameRef, typeRef, sizeRef, colsRef, Recs, file_name);
source = 1;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(const size_t RecordCount, const unsigned int ColumnCount)
{
initialize(RecordCount, ColumnCount);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(queue<string> op_sel, const queue<string> op_sel_as)
{
initialize(op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::CudaSet(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
initialize(a,b, op_sel, op_sel_as);
keep = false;
source = 0;
text_source = 0;
fil_f = nullptr;
fil_s = nullptr;
};
CudaSet::~CudaSet()
{
free();
};
void CudaSet::allocColumnOnDevice(string colname, size_t RecordCount)
{
if (type[colname] != 1 ) {
d_columns_int[colname].resize(RecordCount);
}
else
d_columns_float[colname].resize(RecordCount);
};
void CudaSet::resize_join(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else
h_columns_float[columnNames[i]].resize(mRecCount);
};
};
void CudaSet::resize(size_t addRecs)
{
mRecCount = mRecCount + addRecs;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 1) {
h_columns_int[columnNames[i]].resize(mRecCount);
}
else {
h_columns_float[columnNames[i]].resize(mRecCount);
}
};
};
void CudaSet::deAllocColumnOnDevice(string colname)
{
if (type[colname] != 1 && !d_columns_int.empty() && d_columns_int.find(colname) != d_columns_int.end()) {
if(d_columns_int[colname].size() > 0) {
d_columns_int[colname].resize(0);
d_columns_int[colname].shrink_to_fit();
};
}
else
if (type[colname] == 1 && !d_columns_float.empty()) {
if (d_columns_float[colname].size() > 0) {
d_columns_float[colname].resize(0);
d_columns_float[colname].shrink_to_fit();
};
};
};
void CudaSet::allocOnDevice(size_t RecordCount)
{
for(unsigned int i=0; i < columnNames.size(); i++)
allocColumnOnDevice(columnNames[i], RecordCount);
};
void CudaSet::deAllocOnDevice()
{
for(unsigned int i=0; i < columnNames.size(); i++) {
deAllocColumnOnDevice(columnNames[i]);
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
for (auto it=d_columns_int.begin(); it != d_columns_int.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
for (auto it=d_columns_float.begin(); it != d_columns_float.end(); ++it ) {
if(it->second.size() > 0) {
it->second.resize(0);
it->second.shrink_to_fit();
};
};
if(filtered) { // dealloc the source
if(varNames.find(source_name) != varNames.end()) {
varNames[source_name]->deAllocOnDevice();
};
};
};
void CudaSet::resizeDeviceColumn(size_t RecCount, string colname)
{
if (type[colname] != 1) {
d_columns_int[colname].resize(RecCount);
}
else
d_columns_float[colname].resize(RecCount);
};
void CudaSet::resizeDevice(size_t RecCount)
{
for(unsigned int i=0; i < columnNames.size(); i++) {
resizeDeviceColumn(RecCount, columnNames[i]);
};
};
bool CudaSet::onDevice(string colname)
{
if (type[colname] != 1) {
if (!d_columns_int.empty() && d_columns_int[colname].size())
return 1;
}
else
if (!d_columns_float.empty() && d_columns_float[colname].size())
return 1;
return 0;
}
CudaSet* CudaSet::copyDeviceStruct()
{
CudaSet* a = new CudaSet(mRecCount, mColumnCount);
a->not_compressed = not_compressed;
a->segCount = segCount;
a->maxRecs = maxRecs;
a->columnNames = columnNames;
a->ts_cols = ts_cols;
a->cols = cols;
a->type = type;
a->char_size = char_size;
a->decimal = decimal;
a->decimal_zeroes = decimal_zeroes;
for(unsigned int i=0; i < columnNames.size(); i++) {
if(a->type[columnNames[i]] == 0) {
a->d_columns_int[columnNames[i]] = thrust::device_vector<int_type>();
a->h_columns_int[columnNames[i]] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >();
}
else
if(a->type[columnNames[i]] == 1) {
a->d_columns_float[columnNames[i]] = thrust::device_vector<float_type>();
a->h_columns_float[columnNames[i]] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >();
}
else {
a->h_columns_char[columnNames[i]] = nullptr;
a->d_columns_char[columnNames[i]] = nullptr;
};
};
a->load_file_name = load_file_name;
a->mRecCount = 0;
return a;
}
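// Reads only the rows listed in prm_vh straight from a compressed segment file on disk
// (in 4KB blocks), writing the decoded values into dest at the given offset. Returns the
// segment's base (minimum) value taken from the file header; for the frame-of-reference
// encoded integer columns the caller presumably adds it back.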
int_type CudaSet::readSsdSegmentsFromFile(unsigned int segNum, string colname, size_t offset, thrust::host_vector<unsigned int>& prm_vh, CudaSet* dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
//cout << "lower_val bits " << lower_val << " " << bits << endl;
if(type[colname] == 0) {
//cout << "lower_val bits " << lower_val << " " << bits << endl;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(&val_c_r[0], 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_c_r[0];
}
else
if(bits == 16) {
fread(&val_s_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_s_r[0];
}
if(bits == 32) {
fread(&val_i_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_i_r[0];
}
if(bits == 64) {
fread(&val_l_r, 4096, 1, f);
dest->h_columns_int[colname][i + offset] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest->h_columns_int[colname][i + offset] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest->h_columns_int[colname][i + offset] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest->h_columns_int[colname][i + offset] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest->h_columns_int[colname][i + offset] = val_l_r[prm_vh[i]-idx];
}
};
};
}
else
if(type[colname] == 1) {
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
fread(val_c_r, 4096, 1, f);
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[0], bits/8);
}
else {
memcpy(&dest->h_columns_float[colname][i + offset], &val_c_r[(prm_vh[i]-idx)*(bits/8)], bits/8);
};
};
}
else {
//no strings in fact tables
};
fclose(f);
return lower_val;
}
int_type CudaSet::readSsdSegmentsFromFileR(unsigned int segNum, string colname, thrust::host_vector<unsigned int>& prm_vh, thrust::host_vector<unsigned int>& dest)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
unsigned int cnt, bits;
int_type lower_val;
fread(&cnt, 4, 1, f);
fread(&lower_val, 8, 1, f);
fseek(f, cnt - (8+4) + 32, SEEK_CUR);
fread(&bits, 4, 1, f);
unsigned short int val_s_r[4096/2];
char val_c_r[4096];
unsigned int val_i_r[4096/4];
unsigned long long int val_l_r[4096/8];
unsigned int idx;
bool idx_set = 0;
for(unsigned int i = 0; i < prm_vh.size(); i++) {
if(!idx_set || prm_vh[i] >= idx + 4096/(bits/8)) {
fseek(f, 24 + prm_vh[i]*(bits/8), SEEK_SET);
idx = prm_vh[i];
idx_set = 1;
if(bits == 8) {
fread(val_c_r, 4096, 1, f);
dest[i] = val_c_r[0];
}
else
if(bits == 16) {
fread(val_s_r, 4096, 1, f);
dest[i] = val_s_r[0];
}
if(bits == 32) {
fread(val_i_r, 4096, 1, f);
dest[i] = val_i_r[0];
}
if(bits == 64) {
fread(val_l_r, 4096, 1, f);
dest[i] = val_l_r[0];
}
}
else {
if(bits == 8) {
dest[i] = val_c_r[prm_vh[i]-idx];
}
else
if(bits == 16) {
dest[i] = val_s_r[prm_vh[i]-idx];
}
if(bits == 32) {
dest[i] = val_i_r[prm_vh[i]-idx];
}
if(bits == 64) {
dest[i] = val_l_r[prm_vh[i]-idx];
}
};
};
fclose(f);
return lower_val;
}
std::clock_t tot_disk;
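// Reads one compressed segment of a column from disk into the matching host vector.
// In interactive mode the raw file is cached in pinned host buffers (older buffers are
// evicted once the total would exceed system memory) so repeated queries skip the disk.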
void CudaSet::readSegmentsFromFile(unsigned int segNum, string colname)
{
string f1 = load_file_name + "." + colname + "." + to_string(segNum);
if(type[colname] == 2)
f1 = f1 + ".idx";
std::clock_t start1 = std::clock();
if(interactive) { //check if data are in buffers
if(buffers.find(f1) == buffers.end()) { // add data to buffers
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
process_error(3, "Error opening " + string(f1) +" file " );
};
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
while(total_buffer_size + fileSize > getTotalSystemMemory() && !buffer_names.empty()) { //free some buffers
//delete [] buffers[buffer_names.front()];
cudaFreeHost(buffers[buffer_names.front()]);
total_buffer_size = total_buffer_size - buffer_sizes[buffer_names.front()];
buffer_sizes.erase(buffer_names.front());
buffers.erase(buffer_names.front());
buffer_names.pop();
};
fseek(f, 0, SEEK_SET);
char* buff;
cudaHostAlloc((void**) &buff, fileSize,cudaHostAllocDefault);
fread(buff, fileSize, 1, f);
fclose(f);
buffers[f1] = buff;
buffer_sizes[f1] = fileSize;
buffer_names.push(f1);
total_buffer_size = total_buffer_size + fileSize;
cout << "added buffer " << f1 << " " << fileSize << endl;
};
// get data from buffers
if(type[colname] != 1) {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_int[colname].size()/8 + 10)
h_columns_int[colname].resize(cnt/8 + 10);
}
else {
unsigned int cnt = ((unsigned int*)buffers[f1])[0];
if(cnt > h_columns_float[colname].size()/8 + 10)
h_columns_float[colname].resize(cnt/8 + 10);
}
}
else {
FILE* f = fopen(f1.c_str(), "rb" );
if(!f) {
cout << "Error opening " << f1 << " file " << endl;
exit(0);
};
if(type[colname] != 1) {
if(1 > h_columns_int[colname].size())
h_columns_int[colname].resize(1);
fread(h_columns_int[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_int[colname].data()))[0];
if(cnt/8+10 > h_columns_int[colname].size()) {
h_columns_int[colname].resize(cnt + 10);
};
size_t rr = fread((unsigned int*)(h_columns_int[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s, read only %d", cnt+52, f1.c_str(), (int)rr);
process_error(3, string(buf));
};
}
else {
if(1 > h_columns_float[colname].size())
h_columns_float[colname].resize(1);
fread(h_columns_float[colname].data(), 4, 1, f);
unsigned int cnt = ((unsigned int*)(h_columns_float[colname].data()))[0];
if(cnt/8+10 > h_columns_float[colname].size())
h_columns_float[colname].resize(cnt + 10);
size_t rr = fread((unsigned int*)(h_columns_float[colname].data()) + 1, 1, cnt+52, f);
if(rr != cnt+52) {
char buf[1024];
sprintf(buf, "Couldn't read %d bytes from %s, read only %d", cnt+52, f1.c_str(), (int)rr);
process_error(3, string(buf));
};
}
fclose(f);
};
tot_disk = tot_disk + (std::clock() - start1);
};
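// Copies one segment of a column to the GPU. Uncompressed data is copied straight from
// the host vector; compressed data is decompressed on the device with pfor_decompress.
// When alloced_switch is set, the scratch buffer alloced_tmp is used as the destination
// instead of the column's own device vector.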
void CudaSet::CopyColumnToGpu(string colname, unsigned int segment, size_t offset)
{
if(not_compressed) {
// calculate how many records we need to copy
if(segment < segCount-1) {
mRecCount = maxRecs;
}
else {
mRecCount = hostRecCount - maxRecs*(segCount-1);
};
if(type[colname] != 1) {
if(!alloced_switch) {
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_columns_int[colname].begin() + offset);
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(h_columns_int[colname].begin() + maxRecs*segment, h_columns_int[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
else {
if(!alloced_switch) {
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_columns_float[colname].begin() + offset);
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(h_columns_float[colname].begin() + maxRecs*segment, h_columns_float[colname].begin() + maxRecs*segment + mRecCount, d_col);
};
}
}
else {
readSegmentsFromFile(segment,colname);
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
string f1;
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(segment) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(segment);
};
if(type[colname] != 1) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + offset), buffers[f1], d_v, s_v, colname);
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
};
}
else {
if(decimal[colname]) {
if(!alloced_switch) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + offset) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + offset));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin(), long_to_float());
};
}
else {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(alloced_tmp, h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(alloced_tmp, buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)alloced_tmp);
thrust::device_ptr<float_type> d_col_float((float_type*)alloced_tmp);
thrust::transform(d_col_int,d_col_int+mRecCount, d_col_float, long_to_float());
};
//for(int i = 0; i < mRecCount;i++)
//cout << "DECOMP " << (float_type)(d_col_int[i]) << " " << d_col_float[i] << endl;
};
}
//else // uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
}
};
}
void CudaSet::CopyColumnToGpu(string colname) // copy all segments
{
if(not_compressed) {
if(type[colname] != 1)
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mRecCount, d_columns_int[colname].begin());
else
thrust::copy(h_columns_float[colname].begin(), h_columns_float[colname].begin() + mRecCount, d_columns_float[colname].begin());
}
else {
if(!d_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &d_v, 12));
if(!s_v)
CUDA_SAFE_CALL(cudaMalloc((void **) &s_v, 8));
size_t cnt = 0;
string f1;
for(unsigned int i = 0; i < segCount; i++) {
readSegmentsFromFile(i,colname);
if(type[colname] == 2) {
f1 = load_file_name + "." + colname + "." + to_string(i) + ".idx";
}
else {
f1 = load_file_name + "." + colname + "." + to_string(i);
};
if(type[colname] == 0) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), h_columns_int[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress(thrust::raw_pointer_cast(d_columns_int[colname].data() + cnt), buffers[f1], d_v, s_v, colname);
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
if(buffers.find(f1) == buffers.end()) {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , h_columns_float[colname].data(), d_v, s_v, colname);
}
else {
mRecCount = pfor_decompress( thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt) , buffers[f1], d_v, s_v, colname);
};
if(!phase_copy) {
thrust::device_ptr<long long int> d_col_int((long long int*)thrust::raw_pointer_cast(d_columns_float[colname].data() + cnt));
thrust::transform(d_col_int,d_col_int+mRecCount,d_columns_float[colname].begin() + cnt, long_to_float());
};
}
// else uncompressed float
// will have to fix it later so uncompressed data will be written by segments too
};
cnt = cnt + mRecCount;
//totalRecs = totals + mRecCount;
};
mRecCount = cnt;
};
}
void CudaSet::CopyColumnToHost(string colname, size_t offset, size_t RecCount)
{
if(type[colname] != 1) {
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin() + RecCount, h_columns_int[colname].begin() + offset);
}
else
thrust::copy(d_columns_float[colname].begin(), d_columns_float[colname].begin() + RecCount, h_columns_float[colname].begin() + offset);
}
void CudaSet::CopyColumnToHost(string colname)
{
CopyColumnToHost(colname, 0, mRecCount);
}
void CudaSet::CopyToHost(size_t offset, size_t count)
{
for(unsigned int i = 0; i < columnNames.size(); i++) {
CopyColumnToHost(columnNames[i], offset, count);
};
}
float_type* CudaSet::get_float_type_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_float[name].data());
}
int_type* CudaSet::get_int_by_name(string name)
{
return thrust::raw_pointer_cast(d_columns_int[name].data());
}
float_type* CudaSet::get_host_float_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_float[name].data());
}
int_type* CudaSet::get_host_int_by_name(string name)
{
return thrust::raw_pointer_cast(h_columns_int[name].data());
}
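// Builds group boundaries over a result set assumed to be sorted on the grouping
// columns: marks every row where any grouping column differs from the previous row
// (honouring the packed widths in cpy_bits), then stores the starting index of each
// group in grp (grp_count groups in total).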
void CudaSet::GroupBy(stack<string> columnRef)
{
thrust::device_vector<bool> grp_dev(mRecCount);
thrust::fill(grp_dev.begin(), grp_dev.end(), 0);
if(scratch.size() < mRecCount)
scratch.resize(mRecCount*sizeof(bool));
thrust::device_ptr<bool> d_group((bool*)thrust::raw_pointer_cast(scratch.data()));
d_group[mRecCount-1] = 0;
for(int i = 0; i < columnRef.size(); columnRef.pop()) {
unsigned int bits;
if(cpy_bits.empty())
bits = 0;
else
bits = cpy_bits[columnRef.top()];
if(bits == 8) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned char>());
}
else
if(bits == 16) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned short int>());
}
else
if(bits == 32) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(d_columns_int[columnRef.top()].data()));
thrust::transform(src, src + mRecCount - 1, src+1, d_group, thrust::not_equal_to<unsigned int>());
}
else {
thrust::transform(d_columns_int[columnRef.top()].begin(), d_columns_int[columnRef.top()].begin() + mRecCount - 1,
d_columns_int[columnRef.top()].begin()+1, d_group, thrust::not_equal_to<int_type>());
};
thrust::transform(d_group, d_group+mRecCount, grp_dev.begin(), grp_dev.begin(), thrust::logical_or<bool>());
};
grp_count = thrust::count(grp_dev.begin(), grp_dev.end(), 1) + 1;
//cout << "grp count " << grp_count << endl;
grp.resize(grp_count);
if(grp_count > 1)
thrust::copy_if(thrust::make_counting_iterator((unsigned int)1), thrust::make_counting_iterator((unsigned int)grp_dev.size()),
grp_dev.begin(), grp.begin()+1, thrust::identity<bool>());
grp[0] = 0;
};
void CudaSet::addDeviceColumn(int_type* col, string colname, size_t recCount)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 0;
d_columns_int[colname] = thrust::device_vector<int_type>(recCount);
h_columns_int[colname] = thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >(recCount);
}
else { // already exists, my need to resize it
if(d_columns_int[colname].size() < recCount) {
d_columns_int[colname].resize(recCount);
};
if(h_columns_int[colname].size() < recCount) {
h_columns_int[colname].resize(recCount);
};
};
// copy data to d columns
thrust::device_ptr<int_type> d_col((int_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_int[colname].begin());
thrust::copy(d_columns_int[colname].begin(), d_columns_int[colname].begin()+recCount, h_columns_int[colname].begin());
};
void CudaSet::addDeviceColumn(float_type* col, string colname, size_t recCount, bool is_decimal)
{
if (std::find(columnNames.begin(), columnNames.end(), colname) == columnNames.end()) {
columnNames.push_back(colname);
type[colname] = 1;
d_columns_float[colname] = thrust::device_vector<float_type>(recCount);
h_columns_float[colname] = thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(recCount);
}
else { // already exists, my need to resize it
if(d_columns_float[colname].size() < recCount)
d_columns_float[colname].resize(recCount);
if(h_columns_float[colname].size() < recCount)
h_columns_float[colname].resize(recCount);
};
decimal[colname] = is_decimal;
thrust::device_ptr<float_type> d_col((float_type*)col);
thrust::copy(d_col, d_col+recCount, d_columns_float[colname].begin());
};
void CudaSet::gpu_perm(queue<string> sf, thrust::device_vector<unsigned int>& permutation) {
permutation.resize(mRecCount);
thrust::sequence(permutation.begin(), permutation.begin() + mRecCount,0,1);
unsigned int* raw_ptr = thrust::raw_pointer_cast(permutation.data());
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, mRecCount*8));
string sort_type = "ASC";
while(!sf.empty()) {
if (type[sf.front()] == 0) {
update_permutation(d_columns_int[sf.front()], raw_ptr, mRecCount, sort_type, (int_type*)temp, 64);
}
else
if (type[sf.front()] == 1) {
update_permutation(d_columns_float[sf.front()], raw_ptr, mRecCount, sort_type, (float_type*)temp, 64);
}
else {
thrust::host_vector<unsigned int> permutation_h = permutation;
char* temp1 = new char[char_size[sf.front()]*mRecCount];
update_permutation_char_host(h_columns_char[sf.front()], permutation_h.data(), mRecCount, sort_type, temp1, char_size[sf.front()]);
delete [] temp1;
permutation = permutation_h;
};
sf.pop();
};
cudaFree(temp);
}
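// Compresses the current chunk of every column and appends it to the table's segment
// files, optionally sorting the chunk first (op_sort) and splitting it into
// partition_count partitions; when requested via check_type, the per-column headers
// are rewritten with the updated totals.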
void CudaSet::compress(string file_name, size_t offset, unsigned int check_type, unsigned int check_val, size_t mCount, const bool append)
{
string str(file_name);
thrust::device_vector<unsigned int> permutation;
long long int oldCount;
bool int_check = 0;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, mCount*float_size));
total_count = total_count + mCount;
if (mCount > total_max && op_sort.empty()) {
total_max = mCount;
};
if(!total_segments && append) {
string s= file_name + "." + columnNames[0] + ".header";
ifstream binary_file(s.c_str(),ios::binary);
if(binary_file) {
binary_file.read((char *)&oldCount, 8);
binary_file.read((char *)&total_segments, 4);
binary_file.read((char *)&maxRecs, 4);
if(total_max < maxRecs)
total_max = maxRecs;
binary_file.close();
total_count = oldCount + mCount;
};
};
if(!op_sort.empty()) { //sort the segment
gpu_perm(op_sort, permutation);
};
// here we need to check for partitions and if partition_count > 0 -> create partitions
if(mCount < partition_count || partition_count == 0)
partition_count = 1;
unsigned int partition_recs = mCount/partition_count;
if(!op_sort.empty()) {
if(total_max < partition_recs)
total_max = partition_recs;
};
total_segments++;
old_segments = total_segments;
size_t new_offset;
for(unsigned int i = 0; i < columnNames.size(); i++) {
std::clock_t start1 = std::clock();
string colname = columnNames[i];
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
new_offset = 0;
if(type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_int[colname].begin(), d_col);
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1) {
pfor_compress( (int_type*)d + new_offset, partition_recs*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*int_size, str, h_columns_int[colname], 0);
};
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
if(!int_check) {
thrust::copy(h_columns_int[colname].begin() + offset, h_columns_int[colname].begin() + offset + mCount, d_col);
pfor_compress( d, mCount*int_size, str, h_columns_int[colname], 0);
}
else {
pfor_compress( thrust::raw_pointer_cast(d_columns_int[colname].data()), mCount*int_size, str, h_columns_int[colname], 0);
};
};
}
else
if(type[colname] == 1) {
if(decimal[colname]) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
pfor_compress( (int_type*)d + new_offset, partition_recs*float_size, str, h_columns_float[colname], 1);
else
pfor_compress( (int_type*)d + new_offset, (mCount - partition_recs*p)*float_size, str, h_columns_float[colname], 1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
thrust::copy(h_columns_float[colname].begin() + offset, h_columns_float[colname].begin() + offset + mCount, d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+mCount,d_col_dec, float_to_long());
pfor_compress( d, mCount*float_size, str, h_columns_float[colname], 1);
};
}
else { // do not compress -- float
thrust::device_ptr<float_type> d_col((float_type*)d);
if(!op_sort.empty()) {
thrust::gather(permutation.begin(), permutation.end(), d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col+mRecCount, h_columns_float[colname].begin());
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
unsigned int curr_cnt;
if (p < partition_count - 1)
curr_cnt = partition_recs;
else
curr_cnt = mCount - partition_recs*p;
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&curr_cnt, 4);
binary_file.write((char *)(h_columns_float[colname].data() + new_offset),curr_cnt*float_size);
new_offset = new_offset + partition_recs;
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
fstream binary_file(str.c_str(),ios::out|ios::binary|fstream::app);
binary_file.write((char *)&mCount, 4);
binary_file.write((char *)(h_columns_float[colname].data() + offset),mCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
};
}
else { //char
//populate char_hash
if(append && char_hash[colname].size() == 0 && varencoding[colname] != 'N') {
string s= file_name + "." + colname;
ifstream binary_file(s.c_str(), ios::binary | ios::ate);
if(binary_file) {
auto sz = binary_file.tellg();
binary_file.seekg(0, binary_file.beg);
char* strings = new char[sz];
binary_file.read(strings, sz);
binary_file.close();
//unsigned int ind = std::find(columnNames.begin(), columnNames.end(), colname) - columnNames.begin();
for (unsigned int z = 0 ; z < sz/char_size[colname]; z++) {
char_hash[colname][MurmurHash64A(&strings[z*char_size[colname]], char_size[colname], hash_seed)/2] = z;
};
delete [] strings;
};
};
if(!op_sort.empty()) {
unsigned int* h_permutation = new unsigned int[mRecCount];
thrust::copy(permutation.begin(), permutation.end(), h_permutation);
char* t = new char[char_size[colname]*mRecCount];
apply_permutation_char_host(h_columns_char[colname], h_permutation, mRecCount, t, char_size[colname]);
delete [] h_permutation;
thrust::copy(t, t+ char_size[colname]*mRecCount, h_columns_char[colname]);
delete [] t;
for(unsigned int p = 0; p < partition_count; p++) {
str = file_name + "." + colname;
curr_file = str;
str += "." + to_string(total_segments-1);
if (p < partition_count - 1)
compress_char(str, colname, partition_recs, new_offset, total_segments-1);
else
compress_char(str, colname, mCount - partition_recs*p, new_offset, total_segments-1);
new_offset = new_offset + partition_recs;
total_segments++;
};
}
else {
compress_char(str, colname, mCount, offset, total_segments-1);
};
};
if((check_type == 1 && fact_file_loaded) || (check_type == 1 && check_val == 0)) {
if(!op_sort.empty())
writeHeader(file_name, colname, total_segments-1);
else {
writeHeader(file_name, colname, total_segments);
};
};
total_segments = old_segments;
};
cudaFree(d);
if(!op_sort.empty()) {
total_segments = (old_segments-1)+partition_count;
};
permutation.resize(0);
permutation.shrink_to_fit();
}
void CudaSet::writeHeader(string file_name, string colname, unsigned int tot_segs) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&total_count, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&total_max, 4);
binary_file.write((char *)&cnt_counts[ff], 4);
//cout << "HEADER1 " << total_count << " " << tot_segs << " " << total_max << endl;
binary_file.close();
};
void CudaSet::reWriteHeader(string file_name, string colname, unsigned int tot_segs, size_t newRecs, size_t maxRecs1) {
string str = file_name + "." + colname;
string ff = str;
str += ".header";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&newRecs, 8);
binary_file.write((char *)&tot_segs, 4);
binary_file.write((char *)&maxRecs1, 4);
//cout << "HEADER2 " << newRecs << endl;
binary_file.close();
};
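// Persists the table's sort metadata: <file>.sort (and <file>.presort) store the
// number of sort columns followed by (length, name) pairs; when the corresponding
// queue is empty the sidecar file is removed instead.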
void CudaSet::writeSortHeader(string file_name)
{
string str(file_name);
unsigned int idx;
if(!op_sort.empty()) {
str += ".sort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_sort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_sort);
while(!os.empty()) {
if(verbose)
cout << "sorted on " << os.front() << endl;
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".sort";
remove(str.c_str());
};
str = file_name;
if(!op_presort.empty()) {
str += ".presort";
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::trunc);
idx = (unsigned int)op_presort.size();
binary_file.write((char *)&idx, 4);
queue<string> os(op_presort);
while(!os.empty()) {
idx = os.front().size();
binary_file.write((char *)&idx, 4);
binary_file.write(os.front().data(), idx);
os.pop();
};
binary_file.close();
}
else {
str += ".presort";
remove(str.c_str());
};
}
using namespace mgpu;
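// Prints up to 'limit' records through the row_cb callback. Dictionary-encoded
// char columns are resolved by seeking into their string files (string_map);
// compressed or filtered data is brought back segment by segment via
// copyColumns()/CopyToHost().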
void CudaSet::Display(unsigned int limit, bool binary, bool term)
{
#define MAXCOLS 128
#define MAXFIELDSIZE 1400
//-- TODO: convert this to an array of pointers to malloc'ed, appropriately sized structures --
char bigbuf[MAXCOLS * MAXFIELDSIZE];
memset(bigbuf, 0, MAXCOLS * MAXFIELDSIZE);
char *fields[MAXCOLS];
const char *dcolumns[MAXCOLS];
size_t mCount; // num records in play
bool print_all = 0;
string ss, str;
int rows = 0;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
cout << "mRecCount=" << mRecCount << " mcount = " << mCount << " term " << term << " limit=" << limit << " print_all=" << print_all << endl;
unsigned int cc =0;
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
for(unsigned int i = 0; i < columnNames.size(); i++)
{
fields[cc] = &(bigbuf[cc*MAXFIELDSIZE]); // a hack to avoid malloc overheads - refine later
dcolumns[cc++] = columnNames[i].c_str();
if(string_map.find(columnNames[i]) != string_map.end()) {
auto s = string_map[columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
FILE *f;
f = fopen(string_map[columnNames[i]].c_str(), "rb");
file_map[string_map[columnNames[i]]] = f;
len_map[string_map[columnNames[i]]] = len;
};
};
// The goal here is to loop fast and avoid any double handling of outgoing data - pointers are good.
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) { // for each record
for(unsigned int j=0; j < columnNames.size(); j++) { // for each col
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
sprintf(fields[j], "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]])
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
//fprintf(file_pr, "%s", buffer);
//fprintf(file_pr, ".%d", rem);
sprintf(fields[j], "%s.%d", buffer,rem);
/*time_t tt = h_columns_int[columnNames[j]][i];
auto ti = localtime(&tt);
char buffer[10];
strftime(buffer,80,"%Y-%m-%d", ti);
sprintf(fields[j], "%s", buffer);
*/
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char **)dcolumns);
rows++;
};
}
else {
queue<string> op_vx;
for(unsigned int i = 0; i < columnNames.size(); i++)
op_vx.push(columnNames[i]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) { // if host arrays are empty
copyColumns(this, op_vx, curr_seg, cnt);
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
if(sum_printed + mRecCount <= mCount || print_all)
curr_count = mRecCount;
else
curr_count = mCount - sum_printed;
}
else
curr_count = mCount;
sum_printed = sum_printed + mRecCount;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end())
sprintf(fields[j], "%lld", (h_columns_int[columnNames[j]])[i] );
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(fields[j], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fields[j][len_map[string_map[columnNames[j]]]] ='\0'; // zero terminate string
};
}
else
sprintf(fields[j], "%.2f", (h_columns_float[columnNames[j]])[i] );
};
row_cb(mColumnCount, (char **)fields, (char**)dcolumns);
rows++;
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
}; // end else
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
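// Store writes the set either as delimited text (binary == 0, optionally to stdout
// when term is set) or as compressed binary segments. In the binary path the data
// dictionary is updated first, then compress() (and compress_char() for string
// columns) does the per-column work.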
void CudaSet::Store(const string file_name, const char* sep, const unsigned int limit, const bool binary, const bool append, const bool term)
{
if (mRecCount == 0 && binary == 1 && !term) { // write tails
for(unsigned int j=0; j < columnNames.size(); j++) {
writeHeader(file_name, columnNames[j], total_segments);
};
return;
};
size_t mCount;
bool print_all = 0;
string str;
if(limit != 0 && limit < mRecCount)
mCount = limit;
else {
mCount = mRecCount;
print_all = 1;
};
if(binary == 0) {
unordered_map<string, FILE*> file_map;
unordered_map<string, unsigned int> len_map;
string bf;
unsigned int max_len = 0;
for(unsigned int j=0; j < columnNames.size(); j++) {
if(string_map.find(columnNames[j]) != string_map.end()) {
auto s = string_map[columnNames[j]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if(len > max_len)
max_len = len;
FILE *f;
f = fopen(string_map[columnNames[j]].c_str(), "rb");
file_map[string_map[columnNames[j]]] = f;
len_map[string_map[columnNames[j]]] = len;
};
};
bf.resize(max_len);
FILE *file_pr;
if(!term) {
file_pr = fopen(file_name.c_str(), "w");
if (!file_pr) {
cout << "Could not open file " << file_name << endl;
return;
};
}
else
file_pr = stdout;
if(not_compressed && prm_d.size() == 0) {
for(unsigned int i=0; i < mCount; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1 ) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
}
};
if (i != mCount -1 )
fputs("\n",file_pr);
};
if(!term)
fclose(file_pr);
}
else {
queue<string> op_vx;
string ss;
for(unsigned int j=0; j < columnNames.size(); j++)
op_vx.push(columnNames[j]);
if(prm_d.size() || source) {
allocColumns(this, op_vx);
};
unsigned int curr_seg = 0;
size_t cnt = 0;
size_t curr_count, sum_printed = 0;
mRecCount = 0;
resize(maxRecs);
while(sum_printed < mCount || print_all) {
if(prm_d.size() || source) {
copyColumns(this, op_vx, curr_seg, cnt);
if(curr_seg == 0) {
if(limit != 0 && limit < mRecCount) {
mCount = limit;
print_all = 0;
}
else {
mCount = mRecCount;
print_all = 1;
};
};
// if host arrays are empty
size_t olRecs = mRecCount;
mRecCount = olRecs;
CopyToHost(0,mRecCount);
//cout << "start " << sum_printed << " " << mRecCount << " " << mCount << endl;
if(sum_printed + mRecCount <= mCount || print_all) {
curr_count = mRecCount;
}
else {
curr_count = mCount - sum_printed;
};
}
else {
curr_count = mCount;
};
sum_printed = sum_printed + mRecCount;
//cout << "sum printed " << sum_printed << " " << curr_count << " " << curr_seg << endl;
for(unsigned int i=0; i < curr_count; i++) {
for(unsigned int j=0; j < columnNames.size(); j++) {
if (type[columnNames[j]] != 1) {
if(string_map.find(columnNames[j]) == string_map.end()) {
if(decimal_zeroes[columnNames[j]]) {
str = std::to_string(h_columns_int[columnNames[j]][i]);
//cout << "decimals " << columnNames[j] << " " << decimal_zeroes[columnNames[j]] << " " << h_columns_int[columnNames[j]][i] << endl;
while(str.length() <= decimal_zeroes[columnNames[j]])
str = '0' + str;
str.insert(str.length()- decimal_zeroes[columnNames[j]], ".");
fprintf(file_pr, "%s", str.c_str());
}
else {
if(!ts_cols[columnNames[j]]) {
fprintf(file_pr, "%lld", (h_columns_int[columnNames[j]])[i]);
}
else {
time_t ts = (h_columns_int[columnNames[j]][i])/1000;
auto ti = gmtime(&ts);
char buffer[30];
auto rem = (h_columns_int[columnNames[j]][i])%1000;
strftime(buffer,30,"%Y-%m-%d %H.%M.%S", ti);
fprintf(file_pr, "%s", buffer);
fprintf(file_pr, ".%d", rem);
};
};
}
else {
fseek(file_map[string_map[columnNames[j]]], h_columns_int[columnNames[j]][i] * len_map[string_map[columnNames[j]]], SEEK_SET);
fread(&bf[0], 1, len_map[string_map[columnNames[j]]], file_map[string_map[columnNames[j]]]);
fprintf(file_pr, "%.*s", len_map[string_map[columnNames[j]]], bf.c_str());
};
fputs(sep, file_pr);
}
else {
fprintf(file_pr, "%.2f", (h_columns_float[columnNames[j]])[i]);
fputs(sep, file_pr);
};
};
if (i != mCount -1 && (curr_seg != segCount || i < curr_count))
fputs("\n",file_pr);
};
curr_seg++;
if(curr_seg == segCount)
print_all = 0;
};
if(!term) {
fclose(file_pr);
};
};
for(auto it = file_map.begin(); it != file_map.end(); it++)
fclose(it->second);
}
else {
//lets update the data dictionary
for(unsigned int j=0; j < columnNames.size(); j++) {
data_dict[file_name][columnNames[j]].col_type = type[columnNames[j]];
if(type[columnNames[j]] != 2) {
if(decimal[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = decimal_zeroes[columnNames[j]];
else
if (ts_cols[columnNames[j]])
data_dict[file_name][columnNames[j]].col_length = UINT_MAX;
else
data_dict[file_name][columnNames[j]].col_length = 0;
}
else
data_dict[file_name][columnNames[j]].col_length = char_size[columnNames[j]];
};
save_dict = 1;
if(text_source) { //writing a binary file using a text file as a source
compress(file_name, 0, 1, 0, mCount, append);
for(unsigned int i = 0; i< columnNames.size(); i++)
if(type[columnNames[i]] == 2)
deAllocColumnOnDevice(columnNames[i]);
}
else { //writing a binary file using a binary file as a source
fact_file_loaded = 1;
size_t offset = 0;
if(!not_compressed) { // records are compressed, for example after filter op.
//decompress to host
queue<string> op_vx;
for(unsigned int i = 0; i< columnNames.size(); i++) {
op_vx.push(columnNames[i]);
};
allocColumns(this, op_vx);
size_t oldCnt = mRecCount;
mRecCount = 0;
resize(oldCnt);
mRecCount = oldCnt;
for(unsigned int i = 0; i < segCount; i++) {
size_t cnt = 0;
copyColumns(this, op_vx, i, cnt);
CopyToHost(0, mRecCount);
offset = offset + mRecCount;
compress(file_name, 0, 0, i - (segCount-1), mRecCount, append);
};
}
else {
// records are already uncompressed on the host:
// split them into segments of up to process_count records and compress each segment
segCount = (mRecCount/process_count + 1);
offset = 0;
for(unsigned int z = 0; z < segCount; z++) {
if(z < segCount-1) {
if(mRecCount < process_count) {
mCount = mRecCount;
}
else {
mCount = process_count;
}
}
else {
mCount = mRecCount - (segCount-1)*process_count;
};
compress(file_name, offset, 0, z - (segCount-1), mCount, append);
offset = offset + mCount;
};
};
};
};
}
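// Dictionary-encodes one char column segment: unique strings are appended to the
// dictionary file (the file name with its segment suffix stripped), their 64-bit
// Murmur hashes go to ".hash", and the per-row dictionary indices are
// pfor-compressed into ".idx". With varencoding == 'N' every value is appended
// without deduplication.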
void CudaSet::compress_char(const string file_name, const string colname, const size_t mCount, const size_t offset, const unsigned int segment)
{
unsigned int len = char_size[colname];
string h_name, i_name, file_no_seg = file_name.substr(0, file_name.find_last_of("."));
i_name = file_no_seg + "." + to_string(segment) + ".idx";
h_name = file_no_seg + "." + to_string(segment) + ".hash";
fstream b_file_str, loc_hashes;
fstream binary_file_h(h_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file_h.write((char *)&mCount, 4);
if(segment == 0) {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::trunc);
}
else {
b_file_str.open(file_no_seg.c_str(),ios::out|ios::binary|ios::app);
};
if(h_columns_int.find(colname) == h_columns_int.end()) {
h_columns_int[colname] = thrust::host_vector<int_type >(mCount);
}
else {
if(h_columns_int[colname].size() < mCount)
h_columns_int[colname].resize(mCount);
};
if(d_columns_int.find(colname) == d_columns_int.end()) {
d_columns_int[colname] = thrust::device_vector<int_type >(mCount);
}
else {
if(d_columns_int[colname].size() < mCount)
d_columns_int[colname].resize(mCount);
};
size_t cnt;
long long int* hash_array = new long long int[mCount];
unordered_map<unsigned long long int, size_t>::iterator iter;
vector<int_type> test(mCount);
if(char_hash[colname].size() == 0 && varencoding[colname] == 'N')
char_hash[colname][0] = 0;
if(varencoding[colname] != 'N') {
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
iter = char_hash[colname].find(hash_array[i]);
if(iter == char_hash[colname].end()) {
cnt = char_hash[colname].size();
char_hash[colname][hash_array[i]] = cnt;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
}
else {
test[i] = iter->second;
};
};
}
else {
auto cnt = char_hash[colname][0];
for (unsigned int i = 0 ; i < mCount; i++) {
hash_array[i] = MurmurHash64A(h_columns_char[colname] + (i+offset)*len, len, hash_seed)/2;
//cnt = char_hash[colname][0];
//char_hash[colname][0]++;
cnt++;
b_file_str.write((char *)h_columns_char[colname] + (i+offset)*len, len);
test[i] = cnt;
};
char_hash[colname][0] = cnt;
};
memcpy(h_columns_int[colname].data(), test.data(), mCount*8);
binary_file_h.write((char *)hash_array, 8*mCount);
delete [] hash_array;
thrust::device_vector<int_type> d_col(mCount);
thrust::copy(h_columns_int[colname].begin(), h_columns_int[colname].begin() + mCount, d_col.begin());
pfor_compress(thrust::raw_pointer_cast(d_col.data()), mCount*int_size, i_name, h_columns_int[colname], 0);
binary_file_h.close();
b_file_str.close();
};
bool first_time = 1;
size_t rec_sz = 0;
size_t process_piece;
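// Loads a delimited text file in pieces of up to process_piece bytes: each piece is
// copied to the GPU, newline offsets are collected, parse_functor splits the fields,
// and the gpu_atoll/gpu_atold/gpu_atof/gpu_date/gpu_tdate functors convert them into
// the typed columns. Returns true once the whole file has been consumed.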
bool CudaSet::LoadBigFile(FILE* file_p, thrust::device_vector<char>& d_readbuff, thrust::device_vector<char*>& dest,
thrust::device_vector<unsigned int>& ind, thrust::device_vector<unsigned int>& dest_len)
{
const char* sep = separator.c_str();
unsigned int maxx = cols.rbegin()->first;
map<unsigned int, string>::iterator it;
bool done = 0;
std::clock_t start1 = std::clock();
vector<int> types;
vector<int> cl;
types.push_back(0);
for(int i = 0; i < maxx; i++) {
auto iter = cols.find(i+1);
if(iter != cols.end()) {
types.push_back(type[iter->second]);
cl.push_back(iter->first-1);
}
else
types.push_back(0);
};
if(first_time) {
if(process_count*4 > getFreeMem()) {
process_piece = getFreeMem()/4;
}
else
process_piece = process_count;
readbuff = new char[process_piece+1];
d_readbuff.resize(process_piece+1);
cout << "set a piece to " << process_piece << " " << getFreeMem() << endl;
};
thrust::device_vector<unsigned int> ind_cnt(1);
thrust::device_vector<char> sepp(1);
sepp[0] = *sep;
long long int total_processed = 0;
size_t recs_processed = 0;
bool finished = 0;
thrust::device_vector<long long int> dev_pos;
long long int offset;
unsigned int cnt = 1;
const unsigned int max_len = 23;
while(!done) {
auto rb = fread(readbuff, 1, process_piece, file_p);
if(rb < process_piece) {
done = 1;
finished = 1;
fclose(file_p);
};
if(total_processed >= process_count)
done = 1;
thrust::fill(d_readbuff.begin(), d_readbuff.end(),0);
thrust::copy(readbuff, readbuff+rb, d_readbuff.begin());
auto curr_cnt = thrust::count(d_readbuff.begin(), d_readbuff.begin() + rb, '\n') - 1;
if(recs_processed == 0 && first_time) {
rec_sz = curr_cnt;
if(finished)
rec_sz++;
total_max = curr_cnt;
};
if(first_time) {
for(unsigned int i=0; i < columnNames.size(); i++) {
auto colname = columnNames[i];
if (type[colname] == 0) {
d_columns_int[colname].resize(d_columns_int[colname].size() + rec_sz);
h_columns_int[colname].resize(h_columns_int[colname].size() + rec_sz);
}
else
if (type[colname] == 1) {
d_columns_float[colname].resize(d_columns_float[colname].size() + rec_sz);
h_columns_float[colname].resize(h_columns_float[colname].size() + rec_sz);
}
else {
char* c = new char[cnt*rec_sz*char_size[columnNames[i]]];
if(recs_processed > 0) {
memcpy(c, h_columns_char[columnNames[i]], recs_processed*char_size[columnNames[i]]);
delete [] h_columns_char[columnNames[i]];
};
h_columns_char[columnNames[i]] = c;
if(recs_processed == 0) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
cudaMemset(temp,0,char_size[columnNames[i]]*rec_sz);
d_columns_char[columnNames[i]] = (char*)temp;
};
};
if(recs_processed == 0) {
ind[i] = cl[i];
void* temp;
if(type[columnNames[i]] != 2) {
if(!ts_cols[columnNames[i]]) {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, max_len*rec_sz));
dest_len[i] = max_len;
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 23*rec_sz));
dest_len[i] = 23;
}
}
else {
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, char_size[columnNames[i]]*rec_sz));
dest_len[i] = char_size[columnNames[i]];
};
dest[i] = (char*)temp;
};
};
};
for(unsigned int i=0; i < columnNames.size(); i++) {
if(type[columnNames[i]] != 2) {
cudaMemset(dest[i],0,max_len*rec_sz);
}
else {
cudaMemset(dest[i],0,char_size[columnNames[i]]*rec_sz);
};
};
if(dev_pos.size() < curr_cnt+1)
dev_pos.resize(curr_cnt+1); //avoiding the unnecessary allocs
dev_pos[0] = -1;
thrust::copy_if(thrust::make_counting_iterator((unsigned long long int)0), thrust::make_counting_iterator((unsigned long long int)rb-1),
d_readbuff.begin(), dev_pos.begin()+1, _1 == '\n');
if(!finished) {
if(curr_cnt < rec_sz) {
offset = (dev_pos[curr_cnt] - rb)+1;
//cout << "PATH 1 " << dev_pos[curr_cnt] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = curr_cnt;
}
else {
offset = (dev_pos[rec_sz] - rb)+1;
//cout << "PATH 2 " << dev_pos[rec_sz] << " " << offset << endl;
fseek(file_p, offset, SEEK_CUR);
total_processed = total_processed + rb + offset;
mRecCount = rec_sz;
};
}
else {
mRecCount = curr_cnt + 1;
};
thrust::counting_iterator<unsigned int> begin(0);
ind_cnt[0] = mColumnCount;
parse_functor ff((const char*)thrust::raw_pointer_cast(d_readbuff.data()),(char**)thrust::raw_pointer_cast(dest.data()), thrust::raw_pointer_cast(ind.data()),
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(sepp.data()), thrust::raw_pointer_cast(dev_pos.data()), thrust::raw_pointer_cast(dest_len.data()));
thrust::for_each(begin, begin + mRecCount, ff);
ind_cnt[0] = max_len;
for(int i =0; i < mColumnCount; i++) {
if(type[columnNames[i]] == 0) { //int
thrust::device_ptr<char> p1((char*)dest[i]);
if(p1[4] == '-') { //date
if(!ts_cols[columnNames[i]]) {
gpu_date date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
else {
gpu_tdate date_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed);
thrust::for_each(begin, begin + mRecCount, date_ff);
}
}
else { //int
if(decimal[columnNames[i]]) {
thrust::device_vector<unsigned int> scale(1);
scale[0] = decimal_zeroes[columnNames[i]];
gpu_atold atold((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()), thrust::raw_pointer_cast(scale.data()));
thrust::for_each(begin, begin + mRecCount, atold);
}
else {
gpu_atoll atoll_ff((const char*)dest[i],(long long int*)thrust::raw_pointer_cast(d_columns_int[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atoll_ff);
};
};
thrust::copy(d_columns_int[columnNames[i]].begin() + recs_processed, d_columns_int[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_int[columnNames[i]].begin() + recs_processed);
}
else
if(type[columnNames[i]] == 1) {
gpu_atof atof_ff((const char*)dest[i],(double*)thrust::raw_pointer_cast(d_columns_float[columnNames[i]].data()) + recs_processed,
thrust::raw_pointer_cast(ind_cnt.data()));
thrust::for_each(begin, begin + mRecCount, atof_ff);
thrust::copy(d_columns_float[columnNames[i]].begin() + recs_processed, d_columns_float[columnNames[i]].begin()+recs_processed+mRecCount, h_columns_float[columnNames[i]].begin() + recs_processed);
}
else {//char is already done
thrust::device_ptr<char> p1((char*)dest[i]);
cudaMemcpy( h_columns_char[columnNames[i]] + char_size[columnNames[i]]*recs_processed, (void *)dest[i] , char_size[columnNames[i]]*mRecCount, cudaMemcpyDeviceToHost);
};
};
recs_processed = recs_processed + mRecCount;
cnt++;
};
if(finished) {
for(int i =0; i < mColumnCount; i++) {
if(dest[i]) {
cudaFree(dest[i]);
dest[i] = nullptr;
};
};
delete [] readbuff;
};
cout << "processed recs " << recs_processed << " " << getFreeMem() << endl;
first_time = 0;
mRecCount = recs_processed;
return finished;
};
void CudaSet::free() {
for(unsigned int i = 0; i < columnNames.size(); i++ ) {
if(type[columnNames[i]] == 0 && h_columns_int[columnNames[i]].size() ) {
h_columns_int[columnNames[i]].resize(0);
h_columns_int[columnNames[i]].shrink_to_fit();
}
else {
h_columns_float[columnNames[i]].resize(0);
h_columns_float[columnNames[i]].shrink_to_fit();
};
};
if(prm_d.size()) {
prm_d.resize(0);
prm_d.shrink_to_fit();
};
deAllocOnDevice();
};
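// Grows the scratch pool by one cudaMalloc of 8*maxRecs bytes - enough for a full
// int_type or float_type column. The op() overloads below pop a buffer from
// alloced_mem and return its raw pointer to the caller.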
void alloc_pool(unsigned int maxRecs) {
void* temp;
CUDA_SAFE_CALL(cudaMalloc((void **) &temp, 8*maxRecs));
alloced_mem.push_back(temp);
};
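// logical_and/logical_or combine two boolean device columns element-wise, writing
// the result into column1 and freeing column2.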
bool* CudaSet::logical_and(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_and<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
bool* CudaSet::logical_or(bool* column1, bool* column2)
{
thrust::device_ptr<bool> dev_ptr1(column1);
thrust::device_ptr<bool> dev_ptr2(column2);
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, dev_ptr1, thrust::logical_or<bool>());
thrust::device_free(dev_ptr2);
return column1;
}
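// compare() op_type codes used throughout: 1 '<', 2 '>', 4 '=', 5 '<=', 6 '>=',
// anything else '!='. This overload compares two constants and fills a freshly
// allocated device array of mRecCount booleans with the single result.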
bool* CudaSet::compare(int_type s, int_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if(d>s)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if(d<s)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if(d>=s)
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if(d<=s)
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if(d==s)
res = 1;
else
res = 0;
else // !=
if(d!=s)
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
};
bool* CudaSet::compare(float_type s, float_type d, int_type op_type)
{
bool res;
if (op_type == 2) // >
if ((d-s) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 1) // <
if ((s-d) > EPSILON)
res = 1;
else
res = 0;
else
if (op_type == 6) // >=
if (((d-s) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 5) // <=
if (((s-d) > EPSILON) || (((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
else
if (op_type == 4)// =
if (((d-s) < EPSILON) && ((d-s) > -EPSILON))
res = 1;
else
res = 0;
else // !=
if (!(((d-s) < EPSILON) && ((d-s) > -EPSILON)))
res = 1;
else
res = 0;
thrust::device_ptr<bool> p = thrust::device_malloc<bool>(mRecCount);
thrust::fill(p, p+mRecCount, res);
return thrust::raw_pointer_cast(p);
}
bool* CudaSet::compare(float_type* column1, float_type d, int_type op_type)
{
thrust::device_ptr<bool> res = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<float_type> dev_ptr(column1);
if (op_type == 2) // >
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_equal_to());
else // !=
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), res, f_not_equal_to());
return thrust::raw_pointer_cast(res);
}
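// Column-vs-constant comparison with decimal alignment: the column is scaled by
// 10^p1 through power_functor and the constant by 10^p2 before the element-wise
// comparison. For example (col is illustrative): compare(col, 100, 2, 0, 0) marks
// the rows where col > 100.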
bool* CudaSet::compare(int_type* column1, int_type d, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::device_ptr<int_type> dev_ptr(column1);
if(p2)
d = d*(int_type)pow(10, p2);
if (op_type == 2) // >
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::equal_to<int_type>());
else // !=
if(!p1)
thrust::transform(dev_ptr, dev_ptr+mRecCount, thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(int_type* column1, int_type* column2, int_type op_type, unsigned int p1, unsigned int p2)
{
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
if(!p1 && !p2) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater<int_type>());
}
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater<int_type>());
else
if (op_type == 1) // <
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less<int_type>());
else
if (op_type == 6) // >=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::greater_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::greater_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::greater_equal<int_type>());
else
if (op_type == 5) // <=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::less_equal<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::less_equal<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::less_equal<int_type>());
else
if (op_type == 4)// =
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::equal_to<int_type>());
else // !=
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
else
if(p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::not_equal_to<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::not_equal_to<int_type>());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, float_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
return thrust::raw_pointer_cast(temp);
}
bool* CudaSet::compare(float_type* column1, int_type* column2, int_type op_type)
{
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr(column2);
thrust::device_ptr<float_type> dev_ptr2 = thrust::device_malloc<float_type>(mRecCount);
thrust::device_ptr<bool> temp = thrust::device_malloc<bool>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr2, long_to_float_type());
if (op_type == 2) // >
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater());
else
if (op_type == 1) // <
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less());
else
if (op_type == 6) // >=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_greater_equal_to());
else
if (op_type == 5) // <=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_less_equal());
else
if (op_type == 4)// =
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_equal_to());
else // !=
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, f_not_equal_to());
thrust::device_free(dev_ptr2);
return thrust::raw_pointer_cast(temp);
}
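// The op() family performs element-wise arithmetic ("MUL", "ADD", "MINUS", anything
// else divides). 'reverse' swaps the operand order, p1/p2 rescale decimal operands by
// powers of ten, and the result lands in a buffer popped from the alloced_mem pool.
// For example (illustrative): op(col, 1.07, "MUL", 0) returns a float buffer holding col * 1.07.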
float_type* CudaSet::op(int_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::transform(dev_ptr, dev_ptr + mRecCount, temp, long_to_float_type()); // convert the int column to float into the scratch buffer
thrust::device_ptr<float_type> dev_ptr1(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type d, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
//cout << "OP " << d << " " << op_type << " " << p1 << " " << p2 << endl;
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
int_type d1 = d;
if(p2)
d = d*(unsigned int)pow(10, p2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d1), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d*(unsigned int)pow(10, p2)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_constant_iterator(d), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
}
else {
if(!p1)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d) + mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
};
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
int_type* CudaSet::op(int_type* column1, int_type* column2, string op_type, bool reverse, unsigned int p1, unsigned int p2)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<int_type> temp((int_type*)alloced_mem.back());
thrust::device_ptr<int_type> dev_ptr1(column1);
thrust::device_ptr<int_type> dev_ptr2(column2);
//cout << "OP " << op_type << " " << p1 << " " << p2 << " " << reverse << endl;
if(reverse == 0) {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<int_type>());
else
if(p1 && p2) {
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::plus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::minus<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), thrust::make_transform_iterator(dev_ptr1+mRecCount, power_functor<int_type>(p1)), dev_ptr2, temp, thrust::divides<int_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), temp, thrust::divides<int_type>());
}
}
else {
if (op_type.compare("MUL") == 0) {
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<int_type>());
}
else
if (op_type.compare("ADD") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::plus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::plus<int_type>());
}
else
if (op_type.compare("MINUS") == 0) {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::minus<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::minus<int_type>());
}
else {
if(!p1 && !p2)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<int_type>());
else
if(p1 && p2)
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
if (p1)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, thrust::make_transform_iterator(dev_ptr1, power_functor<int_type>(p1)), temp, thrust::divides<int_type>());
else
thrust::transform(thrust::make_transform_iterator(dev_ptr2, power_functor<int_type>(p2)), thrust::make_transform_iterator(dev_ptr2+mRecCount, power_functor<int_type>(p2)), dev_ptr1, temp, thrust::divides<int_type>());
}
}
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type* column2, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
thrust::device_ptr<float_type> dev_ptr2(column2);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, dev_ptr2, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr2, dev_ptr2+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(int_type* column1, float_type d, string op_type, bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::fill(temp, temp+mRecCount, d);
thrust::device_ptr<int_type> dev_ptr(column1);
thrust::device_ptr<float_type> dev_ptr1 = thrust::device_malloc<float_type>(mRecCount);
thrust::transform(dev_ptr, dev_ptr + mRecCount, dev_ptr1, long_to_float_type());
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, temp, temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(temp, temp+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
thrust::device_free(dev_ptr1);
alloced_mem.pop_back();
return thrust::raw_pointer_cast(temp);
}
float_type* CudaSet::op(float_type* column1, float_type d, string op_type,bool reverse)
{
if(alloced_mem.empty()) {
alloc_pool(maxRecs);
};
thrust::device_ptr<float_type> temp((float_type*)alloced_mem.back());
thrust::device_ptr<float_type> dev_ptr1(column1);
if(reverse == 0) {
if (op_type.compare("MUL") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::minus<float_type>());
else
thrust::transform(dev_ptr1, dev_ptr1+mRecCount, thrust::make_constant_iterator(d), temp, thrust::divides<float_type>());
}
else {
if (op_type.compare("MUL") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::multiplies<float_type>());
else
if (op_type.compare("ADD") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::plus<float_type>());
else
if (op_type.compare("MINUS") == 0)
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::minus<float_type>());
else
thrust::transform(thrust::make_constant_iterator(d), thrust::make_constant_iterator(d)+mRecCount, dev_ptr1, temp, thrust::divides<float_type>());
};
alloced_mem.pop_back();
return (float_type*)thrust::raw_pointer_cast(temp);
}
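// Reads an index segment <index_name>.<segment>: a 4-byte dictionary size and the
// int_type dictionary keys, followed by fit_count, bits_encoded, vals_count,
// real_count and the compressed value block, which (together with its leading
// counters) is copied to the GPU into idx_vals. In interactive mode the whole file
// is cached in pinned host memory (index_buffers).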
char CudaSet::loadIndex(const string index_name, const unsigned int segment)
{
FILE* f;
unsigned int bits_encoded, fit_count, vals_count, sz, real_count;
void* d_str;
string f1 = index_name + "." + to_string(segment);
char res;
//interactive = 0;
if(interactive) {
if(index_buffers.find(f1) == index_buffers.end()) {
f = fopen (f1.c_str(), "rb" );
fseek(f, 0, SEEK_END);
long fileSize = ftell(f);
char* buff;
cudaHostAlloc(&buff, fileSize, cudaHostAllocDefault);
fseek(f, 0, SEEK_SET);
fread(buff, fileSize, 1, f);
fclose(f);
index_buffers[f1] = buff;
};
sz = ((unsigned int*)index_buffers[f1])[0];
idx_dictionary_int[index_name].clear();
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][((int_type*)(index_buffers[f1]+4+8*i))[0]] = i;
};
vals_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[2];
real_count = ((unsigned int*)(index_buffers[f1]+4 +8*sz))[3];
mRecCount = real_count;
if(idx_vals.count(index_name) == 0) {
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) &((index_buffers[f1]+4 +8*sz)[0]), (vals_count+2)*int_size, cudaMemcpyHostToDevice);
idx_vals[index_name] = (unsigned long long int*)d_str;
};
}
else {
f = fopen (f1.c_str(), "rb" );
fread(&sz, 4, 1, f);
int_type* d_array = new int_type[sz];
idx_dictionary_int[index_name].clear();
fread((void*)d_array, sz*int_size, 1, f);
for(unsigned int i = 0; i < sz; i++) {
idx_dictionary_int[index_name][d_array[i]] = i;
};
delete [] d_array;
fread(&fit_count, 4, 1, f);
fread(&bits_encoded, 4, 1, f);
fread(&vals_count, 4, 1, f);
fread(&real_count, 4, 1, f);
mRecCount = real_count;
unsigned long long int* int_array = new unsigned long long int[vals_count+2];
fseek ( f , -16 , SEEK_CUR );
fread((void*)int_array, 1, vals_count*8 + 16, f);
fread(&res, 1, 1, f);
fclose(f);
void* d_str;
cudaMalloc((void **) &d_str, (vals_count+2)*int_size);
cudaMemcpy( d_str, (void *) int_array, (vals_count+2)*int_size, cudaMemcpyHostToDevice);
if(idx_vals.count(index_name))
cudaFree(idx_vals[index_name]);
idx_vals[index_name] = (unsigned long long int*)d_str;
}
return res;
}
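// Builds a CudaSet from saved table metadata: restores the .sort/.presort column
// lists, reads the compression type from segment 0 of each int/decimal column, and
// sets up host/device vectors per type (0 = int/timestamp/decimal, 1 = float,
// 2 = char, with string_map pointing at the column's dictionary file).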
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs, string file_name) // compressed data for DIM tables
{
mColumnCount = (unsigned int)nameRef.size();
FILE* f;
string f1;
unsigned int cnt;
char buffer[4000];
string str;
not_compressed = 0;
mRecCount = Recs;
hostRecCount = Recs;
totalRecs = Recs;
load_file_name = file_name;
f1 = file_name + ".sort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
sorted_fields.push(str);
if(verbose)
cout << "segment sorted on " << str << endl;
};
fclose(f);
};
f1 = file_name + ".presort";
f = fopen (f1.c_str() , "rb" );
if(f) {
unsigned int sz, idx;
fread((char *)&sz, 4, 1, f);
for(unsigned int j = 0; j < sz; j++) {
fread((char *)&idx, 4, 1, f);
fread(buffer, idx, 1, f);
str.assign(buffer, idx);
presorted_fields.push(str);
if(verbose)
cout << "presorted on " << str << endl;
};
fclose(f);
};
tmp_table = 0;
filtered = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
//f1 = file_name + "." + nameRef.front() + ".0";
//f = fopen (f1.c_str() , "rb" );
//fread((char *)&bytes, 4, 1, f); //need to read metadata such as type and length
//fclose(f);
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if (((typeRef.front()).compare("decimal") == 0) || ((typeRef.front()).compare("int") == 0)) {
f1 = file_name + "." + nameRef.front() + ".0";
f = fopen (f1.c_str() , "rb" );
if(!f) {
cout << "Couldn't find field " << nameRef.front() << endl;
exit(0);
};
for(unsigned int j = 0; j < 6; j++)
fread((char *)&cnt, 4, 1, f);
fclose(f);
compTypes[nameRef.front()] = cnt;
};
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type >();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
string_map[nameRef.front()] = file_name + "." + nameRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(queue<string> &nameRef, queue<string> &typeRef, queue<int> &sizeRef, queue<int> &colsRef, size_t Recs)
{
mColumnCount = (unsigned int)nameRef.size();
tmp_table = 0;
filtered = 0;
mRecCount = 0;
hostRecCount = Recs;
segCount = 0;
for(unsigned int i=0; i < mColumnCount; i++) {
columnNames.push_back(nameRef.front());
cols[colsRef.front()] = nameRef.front();
if((typeRef.front()).compare("timestamp") == 0)
ts_cols[nameRef.front()] = 1;
else
ts_cols[nameRef.front()] = 0;
if ((typeRef.front()).compare("int") == 0 || (typeRef.front()).compare("timestamp") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 0;
decimal_zeroes[nameRef.front()] = 0;
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else
if ((typeRef.front()).compare("float") == 0) {
type[nameRef.front()] = 1;
decimal[nameRef.front()] = 0;
h_columns_float[nameRef.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
d_columns_float[nameRef.front()] = thrust::device_vector<float_type>();
}
else
if ((typeRef.front()).compare("decimal") == 0) {
type[nameRef.front()] = 0;
decimal[nameRef.front()] = 1;
decimal_zeroes[nameRef.front()] = sizeRef.front();
h_columns_int[nameRef.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
d_columns_int[nameRef.front()] = thrust::device_vector<int_type>();
}
else {
type[nameRef.front()] = 2;
decimal[nameRef.front()] = 0;
h_columns_char[nameRef.front()] = nullptr;
d_columns_char[nameRef.front()] = nullptr;
char_size[nameRef.front()] = sizeRef.front();
};
nameRef.pop();
typeRef.pop();
sizeRef.pop();
colsRef.pop();
};
};
void CudaSet::initialize(const size_t RecordCount, const unsigned int ColumnCount)
{
mRecCount = RecordCount;
hostRecCount = RecordCount;
mColumnCount = ColumnCount;
filtered = 0;
};
void CudaSet::initialize(queue<string> op_sel, const queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = (unsigned int)op_sel.size();
segCount = 1;
not_compressed = 1;
filtered = 0;
col_aliases = op_sel_as;
unsigned int i = 0;
CudaSet *a;
while(!op_sel.empty()) {
for(auto it = varNames.begin(); it != varNames.end(); it++) {
a = it->second;
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end())
break;
};
type[op_sel.front()] = a->type[op_sel.front()];
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
columnNames.push_back(op_sel.front());
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
//h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type>();
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
//h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type>();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
};
i++;
op_sel.pop();
};
}
void CudaSet::initialize(CudaSet* a, CudaSet* b, queue<string> op_sel, queue<string> op_sel_as)
{
mRecCount = 0;
mColumnCount = 0;
queue<string> q_cnt(op_sel);
unsigned int i = 0;
set<string> field_names;
while(!q_cnt.empty()) {
if( std::find(a->columnNames.begin(), a->columnNames.end(), q_cnt.front()) != a->columnNames.end() ||
std::find(b->columnNames.begin(), b->columnNames.end(), q_cnt.front()) != b->columnNames.end()) {
field_names.insert(q_cnt.front());
};
q_cnt.pop();
}
mColumnCount = (unsigned int)field_names.size();
maxRecs = b->maxRecs;
segCount = 1;
filtered = 0;
not_compressed = 1;
col_aliases = op_sel_as;
i = 0;
while(!op_sel.empty()) {
if(std::find(columnNames.begin(), columnNames.end(), op_sel.front()) == columnNames.end()) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), op_sel.front()) != a->columnNames.end()) {
cols[i] = op_sel.front();
decimal[op_sel.front()] = a->decimal[op_sel.front()];
columnNames.push_back(op_sel.front());
type[op_sel.front()] = a->type[op_sel.front()];
ts_cols[op_sel.front()] = a->ts_cols[op_sel.front()];
if (a->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(a->string_map.find(op_sel.front()) != a->string_map.end()) {
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
decimal[op_sel.front()] = a->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = a->decimal_zeroes[op_sel.front()];
}
else
if (a->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = a->char_size[op_sel.front()];
string_map[op_sel.front()] = a->string_map[op_sel.front()];
};
i++;
}
else
if(std::find(b->columnNames.begin(), b->columnNames.end(), op_sel.front()) != b->columnNames.end()) {
columnNames.push_back(op_sel.front());
cols[i] = op_sel.front();
decimal[op_sel.front()] = b->decimal[op_sel.front()];
type[op_sel.front()] = b->type[op_sel.front()];
ts_cols[op_sel.front()] = b->ts_cols[op_sel.front()];
if (b->type[op_sel.front()] == 0) {
d_columns_int[op_sel.front()] = thrust::device_vector<int_type>();
h_columns_int[op_sel.front()] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
if(b->string_map.find(op_sel.front()) != b->string_map.end()) {
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
decimal[op_sel.front()] = b->decimal[op_sel.front()];
decimal_zeroes[op_sel.front()] = b->decimal_zeroes[op_sel.front()];
}
else
if (b->type[op_sel.front()] == 1) {
d_columns_float[op_sel.front()] = thrust::device_vector<float_type>();
h_columns_float[op_sel.front()] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
}
else {
h_columns_char[op_sel.front()] = nullptr;
d_columns_char[op_sel.front()] = nullptr;
char_size[op_sel.front()] = b->char_size[op_sel.front()];
string_map[op_sel.front()] = b->string_map[op_sel.front()];
};
i++;
}
}
op_sel.pop();
};
};
int_type reverse_op(int_type op_type)
{
if (op_type == 2) // >
return 1;
else
if (op_type == 1) // <
return 2;
else
if (op_type == 6) // >=
return 5;
else
if (op_type == 5) // <=
return 6;
else
return op_type;
}
size_t getFreeMem()
{
size_t available, total;
cudaMemGetInfo(&available, &total);
return available;
} ;
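// allocColumns: for filtered sets, (re)allocates the shared staging buffer alloced_tmp so it can hold the
// source table's largest segment; for unfiltered sets it allocates any missing device columns of a at maxRecs rows.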
void allocColumns(CudaSet* a, queue<string> fields)
{
if(a->filtered) {
CudaSet* t;
if(a->filtered)
t = varNames[a->source_name];
else
t = a;
if(int_size*t->maxRecs > alloced_sz) {
if(alloced_sz) {
cudaFree(alloced_tmp);
};
cudaMalloc((void **) &alloced_tmp, int_size*t->maxRecs);
alloced_sz = int_size*t->maxRecs;
}
}
else {
while(!fields.empty()) {
if(var_exists(a, fields.front()) && !a->onDevice(fields.front())) {
a->allocColumnOnDevice(fields.front(), a->maxRecs);
}
fields.pop();
};
};
}
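// gatherColumns: stages one column of a segment from source table t into a; when a row permutation is active
// (prm_index == 'R') the selected rows are gathered via mygather, otherwise the whole segment is copied via mycopy.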
void gatherColumns(CudaSet* a, CudaSet* t, string field, unsigned int segment, size_t& count)
{
if(!a->onDevice(field)) {
a->allocColumnOnDevice(field, a->maxRecs);
};
if(a->prm_index == 'R') {
mygather(field, a, t, count, a->mRecCount);
}
else {
mycopy(field, a, t, count, t->mRecCount);
a->mRecCount = t->mRecCount;
};
}
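// copyFinalize: expands columns that were copied in packed 8/16/32-bit form (recorded in cpy_bits) back to full
// 64-bit values and adds back the per-segment base value (cpy_init_val); float columns are converted back from
// their scaled integer representation via long_to_float.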
void copyFinalize(CudaSet* a, queue<string> fields, bool ts)
{
set<string> uniques;
if(scratch.size() < a->mRecCount*8)
scratch.resize(a->mRecCount*8);
thrust::device_ptr<int_type> tmp((int_type*)thrust::raw_pointer_cast(scratch.data()));
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front()) && cpy_bits.find(fields.front()) != cpy_bits.end() && (!a->ts_cols[fields.front()] || ts)) {
if(cpy_bits[fields.front()] == 8) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
}
else {
thrust::device_ptr<unsigned char> src((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned char>());
};
}
else
if(cpy_bits[fields.front()] == 16) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
}
else {
thrust::device_ptr<unsigned short int> src((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned short>());
};
}
else
if(cpy_bits[fields.front()] == 32) {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
}
else {
thrust::device_ptr<unsigned int> src((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(src, src+a->mRecCount, tmp, to_int64<unsigned int>());
};
}
else {
if(a->type[fields.front()] != 1) {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_int[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
}
else {
thrust::device_ptr<int_type> src((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::copy(src, src+a->mRecCount, tmp);
};
};
thrust::constant_iterator<int_type> iter(cpy_init_val[fields.front()]);
if(a->type[fields.front()] != 1) {
thrust::transform(tmp, tmp + a->mRecCount, iter, a->d_columns_int[fields.front()].begin(), thrust::plus<int_type>());
}
else {
thrust::device_ptr<int_type> dest((int_type*)thrust::raw_pointer_cast(a->d_columns_float[fields.front()].data()));
thrust::transform(tmp, tmp + a->mRecCount, iter, dest, thrust::plus<int_type>());
thrust::transform(dest, dest+a->mRecCount, a->d_columns_float[fields.front()].begin(), long_to_float());
};
};
uniques.insert(fields.front());
fields.pop();
};
}
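// copyColumns: copies the requested columns of one segment to the device; for filtered sets the filter is
// (optionally) applied first and only the matching rows are gathered through gatherColumns, resizing the
// device columns when requested.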
void copyColumns(CudaSet* a, queue<string> fields, unsigned int segment, size_t& count, bool rsz, bool flt)
{
//std::clock_t start1 = std::clock();
set<string> uniques;
if(a->filtered) { //filter the segment
if(flt) {
filter_op(a->fil_s, a->fil_f, segment);
};
if(rsz && a->mRecCount) {
queue<string> fields1(fields);
while(!fields1.empty()) {
a->resizeDeviceColumn(a->devRecCount + a->mRecCount, fields1.front());
fields1.pop();
};
a->devRecCount = a->devRecCount + a->mRecCount;
};
};
cpy_bits.clear();
cpy_init_val.clear();
auto f(fields);
while(!fields.empty()) {
if (uniques.count(fields.front()) == 0 && var_exists(a, fields.front())) {
if(a->filtered) {
if(a->mRecCount) {
CudaSet *t = varNames[a->source_name];
alloced_switch = 1;
t->CopyColumnToGpu(fields.front(), segment);
gatherColumns(a, t, fields.front(), segment, count);
alloced_switch = 0;
};
}
else {
if(a->mRecCount) {
a->CopyColumnToGpu(fields.front(), segment, count);
};
};
uniques.insert(fields.front());
};
fields.pop();
};
//std::cout<< "copy time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) <<'\n';
}
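// mygather: gathers the rows selected by a->prm_d from the staging buffer alloced_tmp into a's device column at
// position offset, handling the 8/16/32/64-bit packed layouts used for non-delta-compressed segments.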
void mygather(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1 ) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col_source, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + g_size, d_col, a->d_columns_float[colname].begin() + offset);
};
}
};
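// mycopy: copies a whole decompressed segment from the staging buffer into a's device column at position offset,
// again honouring the packed bit width recorded in cpy_bits.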
void mycopy(string colname, CudaSet* a, CudaSet* t, size_t offset, size_t g_size)
{
if(t->type[colname] != 1) {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_int[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_int[colname].begin() + offset);
};
}
else {
if(cpy_bits.find(colname) != cpy_bits.end()) { // non-delta compression
if(cpy_bits[colname] == 8) {
thrust::device_ptr<unsigned char> d_col_source((unsigned char*)alloced_tmp);
thrust::device_ptr<unsigned char> d_col_dest((unsigned char*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 16) {
thrust::device_ptr<unsigned short int> d_col_source((unsigned short int*)alloced_tmp);
thrust::device_ptr<unsigned short int> d_col_dest((unsigned short int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 32) {
thrust::device_ptr<unsigned int> d_col_source((unsigned int*)alloced_tmp);
thrust::device_ptr<unsigned int> d_col_dest((unsigned int*)thrust::raw_pointer_cast(a->d_columns_float[colname].data()));
thrust::copy(d_col_source, d_col_source + g_size, d_col_dest + offset);
}
else
if(cpy_bits[colname] == 64) {
thrust::device_ptr<int_type> d_col_source((int_type*)alloced_tmp);
thrust::copy(d_col_source, d_col_source + g_size, a->d_columns_float[colname].begin() + offset);
};
}
else {
thrust::device_ptr<float_type> d_col((float_type*)alloced_tmp);
thrust::copy(d_col, d_col + g_size, a->d_columns_float[colname].begin() + offset);
};
};
};
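// load_queue: loads the listed columns of the right table (plus the join column f2) for segments
// [start_segment, end_segment) onto the device and returns the total number of rows loaded.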
size_t load_queue(queue<string> c1, CudaSet* right, string f2, size_t &rcount,
unsigned int start_segment, unsigned int end_segment, bool rsz, bool flt)
{
queue<string> cc;
while(!c1.empty()) {
if(std::find(right->columnNames.begin(), right->columnNames.end(), c1.front()) != right->columnNames.end()) {
if(f2 != c1.front() ) {
cc.push(c1.front());
};
};
c1.pop();
};
if(std::find(right->columnNames.begin(), right->columnNames.end(), f2) != right->columnNames.end()) {
cc.push(f2);
};
if(right->filtered) {
allocColumns(right, cc);
};
rcount = right->maxRecs;
queue<string> ct(cc);
while(!ct.empty()) {
if(right->filtered && rsz) {
right->mRecCount = 0;
}
else {
right->allocColumnOnDevice(ct.front(), rcount*right->segCount);
};
ct.pop();
};
size_t cnt_r = 0;
right->devRecCount = 0;
for(unsigned int i = start_segment; i < end_segment; i++) {
if(!right->filtered)
copyColumns(right, cc, i, cnt_r, rsz, 0);
else
copyColumns(right, cc, i, cnt_r, rsz, flt);
cnt_r = cnt_r + right->mRecCount;
};
right->mRecCount = cnt_r;
return cnt_r;
}
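// max_char: returns the widest character column of the set in bytes (dictionary-encoded string columns are
// measured by their dictionary entry length), with a minimum of 8.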
size_t max_char(CudaSet* a)
{
size_t max_char1 = 8;
for(unsigned int i = 0; i < a->columnNames.size(); i++) {
if(a->type[a->columnNames[i]] == 2) {
if (a->char_size[a->columnNames[i]] > max_char1)
max_char1 = a->char_size[a->columnNames[i]];
}
else
if(a->type[a->columnNames[i]] == 0 && a->string_map.find(a->columnNames[i]) != a->string_map.end()) {
auto s = a->string_map[a->columnNames[i]];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
if (len > max_char1)
max_char1 = len;
};
};
return max_char1;
};
size_t max_char(CudaSet* a, queue<string> field_names)
{
size_t max_char = 8;
while (!field_names.empty()) {
if (a->type[field_names.front()] == 2) {
if (a->char_size[field_names.front()] > max_char)
max_char = a->char_size[field_names.front()];
};
field_names.pop();
};
return max_char;
};
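// setSegments: splits the table into segments when the listed columns would occupy more than a third of free
// GPU memory; each segment is then sized to roughly a fifth of free memory.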
void setSegments(CudaSet* a, queue<string> cols)
{
size_t mem_available = getFreeMem();
size_t tot_sz = 0;
while(!cols.empty()) {
if(a->type[cols.front()] != 2)
tot_sz = tot_sz + int_size;
else
tot_sz = tot_sz + a->char_size[cols.front()];
cols.pop();
};
if(a->mRecCount*tot_sz > mem_available/3) { //default is 3
a->segCount = (a->mRecCount*tot_sz)/(mem_available/5) + 1;
a->maxRecs = (a->mRecCount/a->segCount)+1;
};
};
void update_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, string SortType, char* tmp, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)tmp, len);
if (SortType.compare("DESC") == 0 )
str_sort_host(tmp, RecCount, permutation, 1, len);
else
str_sort_host(tmp, RecCount, permutation, 0, len);
}
void apply_permutation_char(char* key, unsigned int* permutation, size_t RecCount, char* tmp, unsigned int len)
{
// copy keys to temporary vector
cudaMemcpy( (void*)tmp, (void*) key, RecCount*len, cudaMemcpyDeviceToDevice);
// permute the keys
str_gather((void*)permutation, RecCount, (void*)tmp, (void*)key, len);
}
void apply_permutation_char_host(char* key, unsigned int* permutation, size_t RecCount, char* res, unsigned int len)
{
str_gather_host(permutation, RecCount, (void*)key, (void*)res, len);
}
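// filter_op: applies the parsed filter of set s to segment `segment` of table f; zone maps may resolve the
// segment wholesale ('A' = all rows pass, 'N' = none), otherwise the segment is copied to the GPU, the filter
// kernel is run and the indices of surviving rows are written to b->prm_d.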
void filter_op(const char *s, const char *f, unsigned int segment)
{
CudaSet *a, *b;
a = varNames.find(f)->second;
a->name = f;
//std::clock_t start1 = std::clock();
if(a->mRecCount == 0 && !a->filtered) {
b = new CudaSet(0,1);
}
else {
if(verbose)
cout << "FILTER " << s << " " << f << " " << getFreeMem() << '\xd';
b = varNames[s];
b->name = s;
b->string_map = a->string_map;
size_t cnt = 0;
b->sorted_fields = a->sorted_fields;
b->ts_cols = a->ts_cols;
allocColumns(a, b->fil_value);
if (b->prm_d.size() == 0) {
b->prm_d.resize(a->maxRecs);
};
cout << endl << "MAP CHECK start " << segment << endl;
char map_check = zone_map_check(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
cout << endl << "MAP CHECK segment " << segment << " " << map_check << endl;
if(map_check == 'R') {
auto old_ph = phase_copy;
phase_copy = 0;
copyColumns(a, b->fil_value, segment, cnt);
phase_copy = old_ph;
bool* res = filter(b->fil_type,b->fil_value,b->fil_nums, b->fil_nums_f, b->fil_nums_precision, a, segment);
thrust::device_ptr<bool> bp((bool*)res);
b->prm_index = 'R';
b->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 1);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, b->prm_d.begin(), thrust::identity<bool>());
cudaFree(res);
}
else {
b->prm_index = map_check;
if(map_check == 'A')
b->mRecCount = a->mRecCount;
else
b->mRecCount = 0;
};
if(segment == a->segCount-1)
a->deAllocOnDevice();
}
if(verbose)
cout << endl << "filter result " << b->mRecCount << endl;
}
size_t load_right(CudaSet* right, string f2, queue<string> op_g, queue<string> op_alt, size_t& rcount, unsigned int start_seg, unsigned int end_seg) {
size_t cnt_r = 0;
//if join is on strings then add integer columns to left and right tables and modify colInd1 and colInd2
// need to allocate all right columns
if(right->not_compressed) {
queue<string> op_alt1;
op_alt1.push(f2);
cnt_r = load_queue(op_alt1, right, "", rcount, start_seg, end_seg, 1, 1);
queue<string> op_alt2;
while(!op_alt.empty()) {
if(f2.compare(op_alt.front())) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), op_alt.front()) != right->columnNames.end()) {
op_alt2.push(op_alt.front());
};
};
op_alt.pop();
};
if(!op_alt2.empty())
cnt_r = load_queue(op_alt2, right, "", rcount, start_seg, end_seg, 0, 0);
}
else {
cnt_r = load_queue(op_alt, right, f2, rcount, start_seg, end_seg, 1, 1);
};
return cnt_r;
};
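// insert_records: appends the records of set s to set f. For disk-based sets the segment files are copied over
// and string dictionaries merged; for in-memory sets the host columns are concatenated; an in-memory source can
// also be compressed directly into an on-disk destination.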
void insert_records(const char* f, const char* s) {
char buf[4096];
size_t size, maxRecs, cnt = 0;
string str_s, str_d;
if(varNames.find(s) == varNames.end()) {
process_error(3, "couldn't find " + string(s) );
};
CudaSet *a;
a = varNames.find(s)->second;
a->name = s;
if(varNames.find(f) == varNames.end()) {
process_error(3, "couldn't find " + string(f) );
};
CudaSet *b;
b = varNames.find(f)->second;
b->name = f;
// if both source and destination are on disk
cout << "SOURCES " << a->source << ":" << b->source << endl;
if(a->source && b->source) {
for(unsigned int i = 0; i < a->segCount; i++) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
if(a->type[a->columnNames[z]] != 2) {
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str_d = b->load_file_name + "." + a->columnNames[z] + "." + to_string(b->segCount + i);
cout << str_s << " " << str_d << endl;
FILE* source = fopen(str_s.c_str(), "rb");
FILE* dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes; BUFSIZ may exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
}
else { //merge strings
//read b's strings
str_s = b->load_file_name + "." + b->columnNames[z];
FILE* dest = fopen(str_s.c_str(), "rb");
auto len = b->char_size[b->columnNames[z]];
map<string, unsigned long long int> map_d;
buf[len] = 0;
unsigned long long cnt = 0;
while (fread(buf, len, 1, dest)) {
map_d[buf] = cnt;
cnt++;
};
fclose(dest);
unsigned long long int cct = cnt;
str_s = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i) + ".hash";
str_d = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".hash";
FILE* source = fopen(str_s.c_str(), "rb");
dest = fopen(str_d.c_str(), "wb");
while ((size = fread(buf, 1, sizeof(buf), source)) > 0) { // read at most sizeof(buf) bytes; BUFSIZ may exceed the 4096-byte buffer
fwrite(buf, 1, size, dest);
}
fclose(source);
fclose(dest);
str_s = a->load_file_name + "." + a->columnNames[z];
source = fopen(str_s.c_str(), "rb");
map<unsigned long long int, string> map_s;
buf[len] = 0;
cnt = 0;
while (fread(buf, len, 1, source)) {
map_s[cnt] = buf;
cnt++;
};
fclose(source);
queue<string> op_vx;
op_vx.push(a->columnNames[z]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->CopyColumnToGpu(a->columnNames[z], z, 0);
a->CopyColumnToHost(a->columnNames[z]);
str_d = b->load_file_name + "." + b->columnNames[z];
fstream f_file;
f_file.open(str_d.c_str(), ios::out|ios::app|ios::binary);
for(auto j = 0; j < a->mRecCount; j++) {
auto ss = map_s[a->h_columns_int[a->columnNames[z]][j]];
if(map_d.find(ss) == map_d.end()) { //add
f_file.write((char *)ss.c_str(), len);
a->h_columns_int[a->columnNames[z]][j] = cct;
cct++;
}
else {
a->h_columns_int[a->columnNames[z]][j] = map_d[ss];
};
};
f_file.close();
thrust::device_vector<int_type> d_col(a->mRecCount);
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, d_col.begin());
auto i_name = b->load_file_name + "." + b->columnNames[z] + "." + to_string(b->segCount + i) + ".idx";
pfor_compress(thrust::raw_pointer_cast(d_col.data()), a->mRecCount*int_size, i_name, a->h_columns_int[a->columnNames[z]], 0);
};
};
};
if(a->maxRecs > b->maxRecs)
maxRecs = a->maxRecs;
else
maxRecs = b->maxRecs;
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->reWriteHeader(b->load_file_name, b->columnNames[i], a->segCount + b->segCount, a->totalRecs + b->totalRecs, maxRecs);
};
}
else
if(!a->source && !b->source) { //if both source and destination are in memory
size_t oldCount = b->mRecCount;
b->resize(a->mRecCount);
for(unsigned int z = 0; z< b->mColumnCount; z++) {
if(b->type[a->columnNames[z]] == 0) {
thrust::copy(a->h_columns_int[a->columnNames[z]].begin(), a->h_columns_int[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_int[b->columnNames[z]].begin() + oldCount);
}
else
if(b->type[a->columnNames[z]] == 1) {
thrust::copy(a->h_columns_float[a->columnNames[z]].begin(), a->h_columns_float[a->columnNames[z]].begin() + a->mRecCount, b->h_columns_float[b->columnNames[z]].begin() + oldCount);
}
else {
cudaMemcpy(b->h_columns_char[b->columnNames[z]] + b->char_size[b->columnNames[z]]*oldCount, a->h_columns_char[a->columnNames[z]], a->char_size[a->columnNames[z]]*a->mRecCount, cudaMemcpyHostToHost);
};
};
}
else
if(!a->source && b->source) {
total_segments = b->segCount;
total_count = b->mRecCount;
total_max = b->maxRecs;
queue<string> op_vx;
for(unsigned int i=0; i < a->columnNames.size(); i++)
op_vx.push(a->columnNames[i]);
allocColumns(a, op_vx);
a->resize(a->maxRecs);
for(unsigned int i = 0; i < a->segCount; i++) {
if (a->filtered) {
copyColumns(a, op_vx, i, cnt);
a->CopyToHost(0, a->mRecCount);
};
a->compress(b->load_file_name, 0, 1, i - (a->segCount-1), a->mRecCount, 0);
};
for(unsigned int i = 0; i < b->columnNames.size(); i++) {
b->writeHeader(b->load_file_name, b->columnNames[i], total_segments);
};
};
};
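// delete_records: removes the rows matching the current filter from a disk-based table. Each segment is filtered
// on the GPU, surviving rows are recompressed and written back, untouched segments are renamed, trailing segment
// files are deleted and the column headers are rewritten.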
void delete_records(const char* f) {
CudaSet *a;
a = varNames.find(f)->second;
a->name = f;
size_t totalRemoved = 0;
size_t maxRecs = 0;
if(!a->keep) { // temporary variable
process_error(2, "Delete operator is only applicable to disk based sets\nfor deleting records from derived sets please use filter operator ");
}
else { // read matching segments, delete, compress and write on a disk replacing the original segments
string str, str_old;
queue<string> op_vx;
size_t cnt;
for ( auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
op_vx.push((*it).first);
if (std::find(a->columnNames.begin(), a->columnNames.end(), (*it).first) == a->columnNames.end()) {
if ((*it).second.col_type == 0) {
a->type[(*it).first] = 0;
a->decimal[(*it).first] = 0;
//a->h_columns_int[(*it).first] = thrust::host_vector<int_type, pinned_allocator<int_type> >();
a->h_columns_int[(*it).first] = thrust::host_vector<int_type>();
a->d_columns_int[(*it).first] = thrust::device_vector<int_type>();
}
else
if((*it).second.col_type == 1) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 0;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else
if ((*it).second.col_type == 3) {
a->type[(*it).first] = 1;
a->decimal[(*it).first] = 1;
//a->h_columns_float[(*it).first] = thrust::host_vector<float_type, pinned_allocator<float_type> >();
a->h_columns_float[(*it).first] = thrust::host_vector<float_type>();
a->d_columns_float[(*it).first] = thrust::device_vector<float_type>();
}
else {
a->type[(*it).first] = 2;
a->decimal[(*it).first] = 0;
a->h_columns_char[(*it).first] = nullptr;
a->d_columns_char[(*it).first] = nullptr;
a->char_size[(*it).first] = (*it).second.col_length;
};
a->columnNames.push_back((*it).first);
}
};
allocColumns(a, op_vx);
a->resize(a->maxRecs);
a->prm_d.resize(a->maxRecs);
size_t cc = a->mRecCount;
size_t tmp;
void* d;
CUDA_SAFE_CALL(cudaMalloc((void **) &d, a->maxRecs*float_size));
unsigned int new_seg_count = 0;
char map_check;
for(unsigned int i = 0; i < a->segCount; i++) {
map_check = zone_map_check(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
if(verbose)
cout << "MAP CHECK segment " << i << " " << map_check << endl;
if(map_check != 'N') {
cnt = 0;
copyColumns(a, op_vx, i, cnt);
tmp = a->mRecCount;
if(a->mRecCount) {
bool* res = filter(op_type,op_value,op_nums, op_nums_f, op_nums_precision, a, i);
thrust::device_ptr<bool> bp((bool*)res);
thrust::copy_if(thrust::make_counting_iterator((unsigned int)0), thrust::make_counting_iterator((unsigned int)a->mRecCount),
bp, a->prm_d.begin(), thrust::logical_not<bool>());
a->mRecCount = thrust::count(bp, bp + (unsigned int)a->mRecCount, 0);
cudaFree(res);
// cout << "Remained recs count " << a->mRecCount << endl;
if(a->mRecCount > maxRecs)
maxRecs = a->mRecCount;
if (a->mRecCount) {
totalRemoved = totalRemoved + (tmp - a->mRecCount);
if (a->mRecCount == tmp) { //none deleted
if(new_seg_count != i) {
for (auto it=data_dict[a->load_file_name].begin() ; it != data_dict[a->load_file_name].end(); ++it ) {
auto colname = (*it).first;
str_old = a->load_file_name + "." + colname + "." + to_string(i);
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
}
else { //some deleted
//cout << "writing segment " << new_seg_count << endl;
map<string, col_data> s = data_dict[a->load_file_name];
for ( map<string, col_data>::iterator it=s.begin() ; it != s.end(); ++it ) {
string colname = (*it).first;
str = a->load_file_name + "." + colname + "." + to_string(new_seg_count);
if(a->type[colname] == 0) {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str, a->h_columns_int[colname], 0);
}
else
if(a->type[colname] == 1) {
thrust::device_ptr<float_type> d_col((float_type*)d);
if(a->decimal[colname]) {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::device_ptr<long long int> d_col_dec((long long int*)d);
thrust::transform(d_col,d_col+a->mRecCount, d_col_dec, float_to_long());
pfor_compress( d, a->mRecCount*float_size, str, a->h_columns_float[colname], 1);
}
else {
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_float[colname].begin(), d_col);
thrust::copy(d_col, d_col + a->mRecCount, a->h_columns_float[colname].begin());
fstream binary_file(str.c_str(),ios::out|ios::binary);
binary_file.write((char *)&a->mRecCount, 4);
binary_file.write((char *)(a->h_columns_float[colname].data()),a->mRecCount*float_size);
unsigned int comp_type = 3;
binary_file.write((char *)&comp_type, 4);
binary_file.close();
};
}
else {
thrust::device_ptr<int_type> d_col((int_type*)d);
thrust::gather(a->prm_d.begin(), a->prm_d.begin() + a->mRecCount, a->d_columns_int[colname].begin(), d_col);
pfor_compress( d, a->mRecCount*int_size, str + ".hash", a->h_columns_int[colname], 0);
};
};
new_seg_count++;
};
}
else {
totalRemoved = totalRemoved + tmp;
};
}
}
else {
if(new_seg_count != i) {
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str_old = a->load_file_name + "." + a->columnNames[z] + "." + to_string(i);
str = a->load_file_name + "." + a->columnNames[z] + "." + to_string(new_seg_count);
remove(str.c_str());
rename(str_old.c_str(), str.c_str());
};
};
new_seg_count++;
maxRecs = a->maxRecs;
};
};
if (new_seg_count < a->segCount) {
for(unsigned int i = new_seg_count; i < a->segCount; i++) {
//cout << "delete segment " << i << endl;
for(unsigned int z = 0; z < a->columnNames.size(); z++) {
str = a->load_file_name + "." + a->columnNames[z];
str += "." + to_string(i);
remove(str.c_str());
};
};
};
for(unsigned int i = 0; i < a->columnNames.size(); i++) { // rewrite every column header with the new segment count and record total
a->reWriteHeader(a->load_file_name, a->columnNames[i], new_seg_count, a->totalRecs-totalRemoved, maxRecs);
};
a->mRecCount = cc;
a->prm_d.resize(0);
a->segCount = new_seg_count;
a->deAllocOnDevice();
cudaFree(d);
};
};
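// save_col_data: persists the global data dictionary (table name -> column name -> type/length) to a binary
// file; load_col_data below reads it back.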
void save_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len;
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
size_t len = data_dict.size();
binary_file.write((char *)&len, 8);
for (auto it=data_dict.begin() ; it != data_dict.end(); ++it ) {
str_len = (*it).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*it).first.data(), str_len);
map<string, col_data> s = (*it).second;
size_t len1 = s.size();
binary_file.write((char *)&len1, 8);
for (auto sit=s.begin() ; sit != s.end(); ++sit ) {
str_len = (*sit).first.size();
binary_file.write((char *)&str_len, 8);
binary_file.write((char *)(*sit).first.data(), str_len);
binary_file.write((char *)&(*sit).second.col_type, 4);
binary_file.write((char *)&(*sit).second.col_length, 4);
};
};
binary_file.close();
}
void load_col_data(map<string, map<string, col_data> >& data_dict, string file_name)
{
size_t str_len, recs, len1;
string str1, str2;
char buffer[4000];
unsigned int col_type, col_length;
fstream binary_file;
binary_file.open(file_name.c_str(),ios::in|ios::binary);
if(binary_file.is_open()) {
binary_file.read((char*)&recs, 8);
for(unsigned int i = 0; i < recs; i++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str1.assign(buffer, str_len);
binary_file.read((char*)&len1, 8);
for(unsigned int j = 0; j < len1; j++) {
binary_file.read((char*)&str_len, 8);
binary_file.read(buffer, str_len);
str2.assign(buffer, str_len);
binary_file.read((char*)&col_type, 4);
binary_file.read((char*)&col_length, 4);
data_dict[str1][str2].col_type = col_type;
data_dict[str1][str2].col_length = col_length;
//cout << "data DICT " << str1 << " " << str2 << " " << col_type << " " << col_length << endl;
};
};
binary_file.close();
}
else {
cout << "Couldn't open data dictionary" << endl;
};
}
bool var_exists(CudaSet* a, string name) {
if(std::find(a->columnNames.begin(), a->columnNames.end(), name) != a->columnNames.end())
return 1;
else
return 0;
}
int file_exist (const char *filename)
{
std::ifstream infile(filename);
return infile.good();
}
bool check_bitmap_file_exist(CudaSet* left, CudaSet* right)
{
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 0;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
return bitmaps_exist;
}
bool check_bitmaps_exist(CudaSet* left, CudaSet* right)
{
//check if there are join bitmap indexes
queue<string> cols(right->fil_value);
bool bitmaps_exist = 1;
if(cols.size() == 0) {
bitmaps_exist = 1;
return 1;
};
while(cols.size() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), cols.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + cols.front() + ".0";
if( !file_exist(fname.c_str())) {
bitmaps_exist = 0;
};
};
cols.pop();
};
if(bitmaps_exist) {
while(!right->fil_nums.empty() ) {
left->fil_nums.push(right->fil_nums.front());
right->fil_nums.pop();
};
while(!right->fil_nums_precision.empty() ) {
left->fil_nums_precision.push(right->fil_nums_precision.front());
right->fil_nums_precision.pop();
};
while(!right->fil_nums_f.empty() ) {
left->fil_nums_f.push(right->fil_nums_f.front());
right->fil_nums_f.pop();
};
while(!right->fil_value.empty() ) {
if (std::find(right->columnNames.begin(), right->columnNames.end(), right->fil_value.front()) != right->columnNames.end()) {
string fname = left->load_file_name + "." + right->load_file_name + "." + right->fil_value.front();
left->fil_value.push(fname);
}
else
left->fil_value.push(right->fil_value.front());
right->fil_value.pop();
};
bool add_and = 1;
if(left->fil_type.empty())
add_and = 0;
while(!right->fil_type.empty() ) {
left->fil_type.push(right->fil_type.front());
right->fil_type.pop();
};
if(add_and) {
left->fil_type.push("AND");
};
return 1;
}
else {
return 0;
};
}
void check_sort(const string str, const char* rtable, const char* rid)
{
CudaSet* right = varNames.find(rtable)->second;
fstream binary_file(str.c_str(),ios::out|ios::binary|ios::app);
binary_file.write((char *)&right->sort_check, 1);
binary_file.close();
}
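// update_char_permutation: materializes the actual strings of a dictionary-encoded column in the order given by
// raw_ptr, reading them from the on-disk dictionary, then string-sorts them (on device or host) to refine the
// permutation used for ORDER BY.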
void update_char_permutation(CudaSet* a, string colname, unsigned int* raw_ptr, string ord, void* temp, bool host)
{
auto s = a->string_map[colname];
auto pos = s.find_first_of(".");
auto len = data_dict[s.substr(0, pos)][s.substr(pos+1)].col_length;
a->h_columns_char[colname] = new char[a->mRecCount*len];
memset(a->h_columns_char[colname], 0, a->mRecCount*len);
thrust::device_ptr<unsigned int> perm(raw_ptr);
thrust::device_ptr<int_type> temp_int((int_type*)temp);
thrust::gather(perm, perm+a->mRecCount, a->d_columns_int[colname].begin(), temp_int);
//for(int z = 0 ; z < a->mRecCount; z++) {
//cout << "Init vals " << a->d_columns_int[colname][z] << " " << perm[z] << " " << temp_int[z] << endl;
//};
//cout << "sz " << a->h_columns_int[colname].size() << " " << a->d_columns_int[colname].size() << " " << len << endl;
cudaMemcpy(thrust::raw_pointer_cast(a->h_columns_int[colname].data()), temp, 8*a->mRecCount, cudaMemcpyDeviceToHost);
FILE *f;
f = fopen(a->string_map[colname].c_str(), "rb");
for(int z = 0 ; z < a->mRecCount; z++) {
fseek(f, a->h_columns_int[colname][z] * len, SEEK_SET);
fread(a->h_columns_char[colname] + z*len, 1, len, f);
};
fclose(f);
if(!host) {
void *d;
cudaMalloc((void **) &d, a->mRecCount*len);
a->d_columns_char[colname] = (char*)d;
cudaMemcpy(a->d_columns_char[colname], a->h_columns_char[colname], len*a->mRecCount, cudaMemcpyHostToDevice);
if (ord.compare("DESC") == 0 )
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort(a->d_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
cudaFree(d);
}
else {
if (ord.compare("DESC") == 0 )
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 1, len);
else
str_sort_host(a->h_columns_char[colname], a->mRecCount, raw_ptr, 0, len);
};
}
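// compress_int: dictionary-encodes the values of res, bit-packs the dictionary indices into 64-bit words
// (fit_count codes per word) and writes the dictionary, packing parameters and packed data to file_name.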
void compress_int(const string file_name, const thrust::host_vector<int_type>& res)
{
std::vector<unsigned int> dict_val;
unsigned int bits_encoded;
set<int_type> dict_s;
map<int_type, unsigned int> d_ordered;
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_s.insert(f);
};
unsigned int i = 0;
for (auto it = dict_s.begin(); it != dict_s.end(); it++) {
d_ordered[*it] = i++;
};
for (unsigned int i = 0 ; i < res.size(); i++) {
int_type f = res[i];
dict_val.push_back(d_ordered[f]);
};
bits_encoded = (unsigned int)ceil(log2(double(d_ordered.size()+1)));
//cout << "bits " << bits_encoded << endl;
unsigned int sz = (unsigned int)d_ordered.size();
// write to a file
fstream binary_file(file_name.c_str(),ios::out|ios::binary|ios::trunc);
binary_file.write((char *)&sz, 4);
for (auto it = d_ordered.begin(); it != d_ordered.end(); it++) {
binary_file.write((char*)(&(it->first)), int_size);
};
unsigned int fit_count = 64/bits_encoded;
unsigned long long int val = 0;
binary_file.write((char *)&fit_count, 4);
binary_file.write((char *)&bits_encoded, 4);
unsigned int curr_cnt = 1;
unsigned int vals_count = (unsigned int)dict_val.size()/fit_count;
if(!vals_count || dict_val.size()%fit_count)
vals_count++;
binary_file.write((char *)&vals_count, 4);
unsigned int real_count = (unsigned int)dict_val.size();
binary_file.write((char *)&real_count, 4);
for(unsigned int i = 0; i < dict_val.size(); i++) {
val = val | dict_val[i];
if(curr_cnt < fit_count)
val = val << bits_encoded;
if( (curr_cnt == fit_count) || (i == (dict_val.size() - 1)) ) {
if (curr_cnt < fit_count) {
val = val << ((fit_count-curr_cnt)-1)*bits_encoded;
};
curr_cnt = 1;
binary_file.write((char *)&val, int_size);
val = 0;
}
else
curr_cnt = curr_cnt + 1;
};
binary_file.close();
};
int_type* get_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors, bool& free_mem) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_int_by_name(s1_val);
free_mem = 0;
}
else {
t = exe_vectors.top();
exe_vectors.pop();
free_mem = 1;
}
return t;
};
int_type* get_host_vec(CudaSet* a, string s1_val, stack<int_type*>& exe_vectors) {
int_type* t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end()) {
t = a->get_host_int_by_name(s1_val);
}
else {
t = exe_vectors.top();
thrust::device_ptr<int_type> st1((int_type*)t);
for(int z = 0; z < 10; z++)
cout << "RESVEC " << st1[z] << endl;
exe_vectors.pop();
}
return t;
};
unsigned int get_decimals(CudaSet* a, string s1_val, stack<unsigned int>& exe_precision) {
unsigned int t;
if(std::find(a->columnNames.begin(), a->columnNames.end(), s1_val) != a->columnNames.end())
t = a->decimal_zeroes[s1_val];
else {
t = exe_precision.top();
exe_precision.pop();
}
return t;
};
#ifdef _WIN64
size_t getTotalSystemMemory()
{
MEMORYSTATUSEX status;
status.dwLength = sizeof(status);
GlobalMemoryStatusEx(&status);
return status.ullTotalPhys;
}
#else
size_t getTotalSystemMemory()
{
long pages = sysconf(_SC_PHYS_PAGES);
long page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
#endif
|
dd0fdc147032ecf99cf3be8b4cf29bec10469209.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
//#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y) { x = _x, y = _y; }
__device__ void set(float _x, float _y) {
x = _x;
y = _y;
}
__device__ Point operator+(const Point &b) const {
return Point(x + b.x, y + b.y);
}
__device__ Point operator-(const Point &b) const {
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b) {
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2,
const Point &p0) {
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2,
const Point &q1, const Point &q2) {
int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
min(q1.x, q2.x) <= max(p1.x, p2.x) &&
min(p1.y, p2.y) <= max(q1.y, q2.y) &&
min(q1.y, q2.y) <= max(p1.y, p2.y);
return ret;
}
__device__ inline int check_in_box2d(const float *box, const Point &p) {
// params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(-box[4]),
angle_sin =
sin(-box[4]); // rotate the point in the opposite direction of box
float rot_x =
(p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
float rot_y =
-(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
box[3], box[4]);
printf(
"center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, "
"%.3f)\n",
center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN &&
rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0,
const Point &q1, const Point &q0,
Point &ans) {
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if (fabs(s5 - s1) > EPS) {
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
} else {
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
__device__ inline void rotate_around_center(const Point ¢er,
const float angle_cos,
const float angle_sin, Point &p) {
float new_x =
(p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
float new_y =
-(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b,
const Point ¢er) {
return atan2(a.y - center.y, a.x - center.x) >
atan2(b.y - center.y, b.x - center.x);
}
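// box_overlap: clips the two rotated rectangles against each other - rotates the corners, collects edge
// intersections and mutually contained corners, orders them around their centroid and applies the shoelace
// formula to obtain the overlap area.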
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3],
a_angle = box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3],
b_angle = box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
#ifdef DEBUG
printf(
"a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++) {
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
flag = intersection(box_a_corners[i + 1], box_a_corners[i],
box_b_corners[j + 1], box_b_corners[j],
cross_points[cnt]);
if (flag) {
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++) {
if (check_in_box2d(box_a, box_b_corners[k])) {
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])) {
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++) {
for (int i = 0; i < cnt - j - 1; i++) {
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++) {
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++) {
area += cross(cross_points[k] - cross_points[0],
cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_overlap) {
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b) {
return;
}
const float *cur_box_a = boxes_a + a_idx * 5;
const float *cur_box_b = boxes_b + b_idx * 5;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_iou) {
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b) {
return;
}
const float *cur_box_a = boxes_a + a_idx * 5;
const float *cur_box_b = boxes_b + b_idx * 5;
float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
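// nms_kernel: tiled rotated NMS. Each block loads up to THREADS_PER_BLOCK_NMS candidate boxes into shared memory
// and every thread compares its own box against them; bit i of mask[box][col_block] is set when box i of that
// column tile overlaps the thread's box above nms_overlap_thresh.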
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
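// iou_normal: axis-aligned IoU (rotation ignored), used by nms_normal_kernel below.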
__device__ inline float iou_normal(float const *const a, float const *const b) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0]) * (a[3] - a[1]);
float Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num,
const float nms_overlap_thresh,
const float *boxes,
unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesoverlapLauncher(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_overlap) {
dim3 blocks(
DIVUP(num_b, THREADS_PER_BLOCK),
DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
hipLaunchKernelGGL(( boxes_overlap_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b,
ans_overlap);
#ifdef DEBUG
hipDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_iou) {
dim3 blocks(
DIVUP(num_b, THREADS_PER_BLOCK),
DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
hipLaunchKernelGGL(( boxes_iou_bev_kernel), dim3(blocks), dim3(threads), 0, 0, num_a, boxes_a, num_b, boxes_b,
ans_iou);
}
void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes, mask);
}
void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
int boxes_num, float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
hipLaunchKernelGGL(( nms_normal_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes,
mask);
} | dd0fdc147032ecf99cf3be8b4cf29bec10469209.cu | // Modified from
// https://github.com/open-mmlab/OpenPCDet/blob/master/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
/*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2019-2020.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
//#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
struct Point {
float x, y;
__device__ Point() {}
__device__ Point(double _x, double _y) { x = _x, y = _y; }
__device__ void set(float _x, float _y) {
x = _x;
y = _y;
}
__device__ Point operator+(const Point &b) const {
return Point(x + b.x, y + b.y);
}
__device__ Point operator-(const Point &b) const {
return Point(x - b.x, y - b.y);
}
};
__device__ inline float cross(const Point &a, const Point &b) {
return a.x * b.y - a.y * b.x;
}
__device__ inline float cross(const Point &p1, const Point &p2,
const Point &p0) {
return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
}
__device__ int check_rect_cross(const Point &p1, const Point &p2,
const Point &q1, const Point &q2) {
int ret = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
min(q1.x, q2.x) <= max(p1.x, p2.x) &&
min(p1.y, p2.y) <= max(q1.y, q2.y) &&
min(q1.y, q2.y) <= max(p1.y, p2.y);
return ret;
}
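// check_rect_cross: cheap axis-aligned bounding-interval rejection test. If
// the x- or y-intervals of the two segments do not overlap, the segments
// cannot intersect and the exact test below can be skipped.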
__device__ inline int check_in_box2d(const float *box, const Point &p) {
// params: box (5) [x1, y1, x2, y2, angle]
const float MARGIN = 1e-5;
float center_x = (box[0] + box[2]) / 2;
float center_y = (box[1] + box[3]) / 2;
float angle_cos = cos(-box[4]),
angle_sin =
sin(-box[4]); // rotate the point in the opposite direction of box
float rot_x =
(p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
float rot_y =
-(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
#ifdef DEBUG
printf("box: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", box[0], box[1], box[2],
box[3], box[4]);
printf(
"center: (%.3f, %.3f), cossin(%.3f, %.3f), src(%.3f, %.3f), rot(%.3f, "
"%.3f)\n",
center_x, center_y, angle_cos, angle_sin, p.x, p.y, rot_x, rot_y);
#endif
return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN &&
rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
__device__ inline int intersection(const Point &p1, const Point &p0,
const Point &q1, const Point &q0,
Point &ans) {
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if (fabs(s5 - s1) > EPS) {
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
} else {
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
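// intersection(): the sign tests on s1..s4 require each segment's endpoints
// to lie on opposite sides of the other segment (a proper crossing). The
// crossing point is then recovered from the ratio of cross products, or, when
// s5 is too close to s1 for that to be stable, from the determinant form of
// the two line equations a*x + b*y + c = 0.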
__device__ inline void rotate_around_center(const Point ¢er,
const float angle_cos,
const float angle_sin, Point &p) {
float new_x =
(p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
float new_y =
-(p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
p.set(new_x, new_y);
}
__device__ inline int point_cmp(const Point &a, const Point &b,
const Point ¢er) {
return atan2(a.y - center.y, a.x - center.x) >
atan2(b.y - center.y, b.x - center.x);
}
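// point_cmp orders two polygon vertices by their polar angle around `center`;
// box_overlap uses it to sort the collected intersection points by angle
// before computing the polygon area.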
__device__ inline float box_overlap(const float *box_a, const float *box_b) {
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3],
a_angle = box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3],
b_angle = box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
#ifdef DEBUG
printf(
"a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n",
a_x1, a_y1, a_x2, a_y2, a_angle, b_x1, b_y1, b_x2, b_y2, b_angle);
printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y,
center_b.x, center_b.y);
#endif
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++) {
#ifdef DEBUG
printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k,
box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x,
box_b_corners[k].y);
#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
#ifdef DEBUG
printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x,
box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
#endif
}
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
flag = intersection(box_a_corners[i + 1], box_a_corners[i],
box_b_corners[j + 1], box_b_corners[j],
cross_points[cnt]);
if (flag) {
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
for (int k = 0; k < 4; k++) {
if (check_in_box2d(box_a, box_b_corners[k])) {
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])) {
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon
Point temp;
for (int j = 0; j < cnt - 1; j++) {
for (int i = 0; i < cnt - j - 1; i++) {
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)) {
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
#ifdef DEBUG
printf("cnt=%d\n", cnt);
for (int i = 0; i < cnt; i++) {
printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x,
cross_points[i].y);
}
#endif
// get the overlap areas
float area = 0;
for (int k = 0; k < cnt - 1; k++) {
area += cross(cross_points[k] - cross_points[0],
cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
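// box_overlap() returns the area of the intersection polygon of two rotated
// boxes: corners are rotated into place, segment intersections and contained
// corners are collected, sorted by angle around their centroid, and the area
// is accumulated as a fan of signed triangles (shoelace formula).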
__device__ inline float iou_bev(const float *box_a, const float *box_b) {
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
}
__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_overlap) {
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b) {
return;
}
const float *cur_box_a = boxes_a + a_idx * 5;
const float *cur_box_b = boxes_b + b_idx * 5;
float s_overlap = box_overlap(cur_box_a, cur_box_b);
ans_overlap[a_idx * num_b + b_idx] = s_overlap;
}
__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_iou) {
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
if (a_idx >= num_a || b_idx >= num_b) {
return;
}
const float *cur_box_a = boxes_a + a_idx * 5;
const float *cur_box_b = boxes_b + b_idx * 5;
float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
}
__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_bev(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
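// Illustrative only (not part of this file): with boxes pre-sorted by score,
// the host typically reduces `mask` to the kept indices roughly like this:
//
//   std::vector<unsigned long long> remv(col_blocks, 0);
//   for (int i = 0; i < boxes_num; i++) {
//     int nblock = i / THREADS_PER_BLOCK_NMS, inblock = i % THREADS_PER_BLOCK_NMS;
//     if (!(remv[nblock] & (1ULL << inblock))) {
//       keep.push_back(i);
//       const unsigned long long *p = mask_host + i * col_blocks;
//       for (int j = nblock; j < col_blocks; j++) remv[j] |= p[j];
//     }
//   }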
__device__ inline float iou_normal(float const *const a, float const *const b) {
float left = fmaxf(a[0], b[0]), right = fminf(a[2], b[2]);
float top = fmaxf(a[1], b[1]), bottom = fminf(a[3], b[3]);
float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0]) * (a[3] - a[1]);
float Sb = (b[2] - b[0]) * (b[3] - b[1]);
return interS / fmaxf(Sa + Sb - interS, EPS);
}
__global__ void nms_normal_kernel(const int boxes_num,
const float nms_overlap_thresh,
const float *boxes,
unsigned long long *mask) {
// params: boxes (N, 5) [x1, y1, x2, y2, ry]
// params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS,
THREADS_PER_BLOCK_NMS);
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (iou_normal(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void boxesoverlapLauncher(const int num_a, const float *boxes_a,
const int num_b, const float *boxes_b,
float *ans_overlap) {
dim3 blocks(
DIVUP(num_b, THREADS_PER_BLOCK),
DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
ans_overlap);
#ifdef DEBUG
cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b,
const float *boxes_b, float *ans_iou) {
dim3 blocks(
DIVUP(num_b, THREADS_PER_BLOCK),
DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b,
ans_iou);
}
void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
int boxes_num, float nms_overlap_thresh) {
dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
dim3 threads(THREADS_PER_BLOCK_NMS);
nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes,
mask);
} |
a1eda7653f7d7c3ca01b9375662ccb09bdee5291.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <fcntl.h>
#include <unistd.h>
// #define LIST_SIZE 1610612736 //6 GB of ints
//#define LIST_SIZE 209715200 //500 MB of ints
// #define LIST_SIZE 1048576 // 1MB of ints
// #define LIST_SIZE (2 * 65536)
// #define LIST_SIZE 65536
// #define LIST_SIZE 49152
// #define LIST_SIZE 49152
#define LIST_SIZE 16384
// #define LIST_SIZE (16384 + 8192)
// #define LIST_SIZE (8192 + 4096 + 2048)
// #define LIST_SIZE (8192 + 4096)
// #define LIST_SIZE (3*(8192 + 4096))
// #define LIST_SIZE 8192
// #define LIST_SIZE 4096
#define BLOCK_SIZE 1024
#define CUDA_CALL(x) {hipError_t cuda_error__ = (x); if (cuda_error__) printf("CUDA error: " #x " returned \"%s\"\n", hipGetErrorString(cuda_error__));}
__device__
inline void SWAP(int8_t *_a,int8_t *_b){int8_t __aux; __aux = *_a; *_a = *_b; *_b = __aux;}
void odd_even_bubble_sort_global(int8_t * list, int32_t list_size);
void odd_even_bubble_sort_shared(int8_t * list, int32_t list_size);
int assert_sorted (int8_t * list, int list_size);
int list_to_file(const char *fname, int8_t *buffer, size_t size);
// __global__
// void shared_koronel_64(int32_t * list, int32_t list_size, int8_t even)
// {
// __shared__ int32_t slist[2*BLOCK_SIZE+1];
// int32_t *win = (list + 2*(blockDim.x * blockIdx.x));
// int32_t win_size;
// if (((blockIdx.x+1)*2*blockDim.x) > list_size)
// win_size = list_size % (2*blockDim.x);
// else
// win_size = 2*blockDim.x;
// if (2*threadIdx.x < win_size - 1) {
// if (even) {
// *((int64_t *)slist + threadIdx.x) = *((int64_t *)win + threadIdx.x);
// } else {
// if (threadIdx.x == 0){
// // printf("%d %d\n", even, win);
// slist[1] = win[0];
// } else {
// *((int64_t *)slist + threadIdx.x) = *((int64_t *)(win-1) + threadIdx.x);
// }
// }
// }
// for (int32_t i = 0; i<win_size; i++){
// int32_t pos_oddeven = 2*threadIdx.x + (i&1);
// if (pos_oddeven < win_size - 1)
// if(slist[pos_oddeven]>slist[pos_oddeven+1])
// SWAP(&slist[pos_oddeven], &slist[pos_oddeven+1]);
// __syncthreads();
// }
// if (2*threadIdx.x < win_size - 1) {
// if (even) {
// *((int64_t *)win + threadIdx.x) = *((int64_t *)slist + threadIdx.x);
// } else {
// if (threadIdx.x == 0){
// win[0] = slist[1];
// } else {
// *((int64_t *)(win-1) + threadIdx.x) = *((int64_t *)slist + threadIdx.x);
// }
// }
// }
// }
__global__
void shared_koronel(int8_t * list, int32_t list_size)
{
// if (threadIdx.x == 0 && list_size < 10000) {
// printf("Primer elemento: %d\n", *list);
// }
__shared__ int8_t slist[2*BLOCK_SIZE];
int8_t *win = (list + 2*(blockDim.x * blockIdx.x));
int32_t win_size = 2*blockDim.x - (2*blockDim.x - list_size%(2*blockDim.x))*(((blockIdx.x+1)*2*blockDim.x) > list_size);
if (2*threadIdx.x < win_size - 1) {
slist[2*threadIdx.x] = win[2*threadIdx.x];
slist[2*threadIdx.x + 1] = win[2*threadIdx.x+1];
// slist[threadIdx.x] = win[threadIdx.x];
// slist[threadIdx.x + blockDim.x] = win[threadIdx.x+blockDim.x];
}
for (int32_t i = 0; i<win_size; i++){
int32_t pos_oddeven = 2*threadIdx.x + (i&1);
if (pos_oddeven < win_size - 1)
if(slist[pos_oddeven]>slist[pos_oddeven+1])
SWAP(&slist[pos_oddeven], &slist[pos_oddeven+1]);
__syncthreads();
}
if (2*threadIdx.x < win_size - 1) {
win[2*threadIdx.x] = slist[2*threadIdx.x];
win[2*threadIdx.x+1] = slist[2*threadIdx.x+1];
// win[threadIdx.x] = slist[threadIdx.x];
// win[threadIdx.x + blockDim.x] = slist[threadIdx.x+blockDim.x];
}
}
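// shared_koronel stages each block's 2*BLOCK_SIZE-element window into shared
// memory, runs the full odd-even pass there, and writes the sorted window
// back, so the repeated compare-and-swap traffic never touches global memory.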
__global__
void global_koronel(int8_t * list, int32_t list_size)
{
int8_t *win = (list + 2*(blockDim.x * blockIdx.x));
int32_t win_size = 2*blockDim.x - (2*blockDim.x - list_size%(2*blockDim.x))*(((blockIdx.x+1)*2*blockDim.x) > list_size);
for (int32_t i = 0; i<win_size; i++){
int32_t pos_oddeven = 2*threadIdx.x + (i&1);
if (pos_oddeven < win_size - 1)
if(win[pos_oddeven]>win[pos_oddeven+1])
SWAP(&win[pos_oddeven], &win[pos_oddeven+1]);
__syncthreads();
}
}
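// Both kernels implement odd-even transposition sort on a window: phase i
// compares the pairs starting at offset (i & 1), and win_size phases are
// enough to sort the window. The host loop below also alternates the window
// origin by (i & 1) between launches so elements can cross window boundaries
// over successive passes.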
int main (){
srand(time(NULL));
int8_t * random_numbers_global = (int8_t *) malloc(sizeof(int8_t)*LIST_SIZE);
int8_t * random_numbers_shared = (int8_t *) malloc(sizeof(int8_t)*LIST_SIZE);
// printf("Generando lista aleatoria de %i elementos\n", LIST_SIZE);
for (int i = 0; i<LIST_SIZE; i++){
// random_numbers_global[i] = rand()%20;
random_numbers_global[i] = (int8_t) LIST_SIZE - i;
}
list_to_file("sin_ordenar", random_numbers_global, sizeof(int8_t)*LIST_SIZE);
memcpy(random_numbers_shared, random_numbers_global, sizeof(int8_t)*LIST_SIZE);
int start_print = 0;
// int n_prints = 4096;
int n_prints = LIST_SIZE;
int elem;
// printf("Lista antes de gpu: Elementos desde %i hasta %i \n", start_print, start_print+n_prints);
// for (int i=start_print; i< start_print+n_prints; i++){
// printf("%i ", random_numbers_global[i]);
// }
// printf("\n");
//*************************************
// ODD-EVEN BUBBLE SORT WITH GLOBAL MEM
//*************************************
// printf("Odd-even bubble sort with global memory \n");
odd_even_bubble_sort_global(random_numbers_global, LIST_SIZE);
//printf("Despues de gpu (global): Elementos desde %i hasta %i\n", start_print, start_print+n_prints);
//for (int i=start_print; i< start_print+n_prints; i++){
//printf("%i ", random_numbers_global[i]);
//}
//printf("\n");
//printf("Chequeando si la lista con global mem esta ordenada... \n");
//if (elem = assert_sorted(random_numbers_global, LIST_SIZE)) {
// printf("LISTA MAL ORDENADA EN ELEM N %i \n", elem);
// for (int i=((elem-100) > 0)*(elem-100); i < (((elem+100) < LIST_SIZE)*(elem+100) + ((elem+100) >= LIST_SIZE)*LIST_SIZE); i++)
// printf("%i ", random_numbers_global[i]);
// printf("\n");
//} else
// printf("LISTA CON GLOBAL MEM BIEN ORDENADA \n");
//printf("Finalizado sort con memoria global \n");
//*************************************
// ODD-EVEN BUBBLE SORT WITH SHARED MEM
//*************************************
// printf("Odd-even bubble sort with shared memory \n");
odd_even_bubble_sort_shared(random_numbers_shared, LIST_SIZE);
// printf("Despues de gpu (shared): Elementos desde %i hasta %i\n", start_print, start_print+n_prints);
// for (int i=start_print; i< start_print+n_prints; i++){
// printf("%i ", random_numbers_shared[i]);
// }
// printf("\n");
// printf("Chequeando si la lista con shared mem esta ordenada... \n");
// if (elem = assert_sorted(random_numbers_shared, LIST_SIZE)) {
// printf("LISTA MAL ORDENADA EN ELEM N %i \n", elem);
// for (int i=((elem-100) > 0)*(elem-100); i < (((elem+100) < LIST_SIZE)*(elem+100) + ((elem+100) >= LIST_SIZE)*LIST_SIZE); i++)
// printf("%i ", random_numbers_shared[i]);
// printf("\n");
// } else
// printf("LISTA CON SHARED MEM BIEN ORDENADA \n");
list_to_file("ordenada", random_numbers_shared, sizeof(int8_t)*LIST_SIZE);
return 0;
}
__host__
void odd_even_bubble_sort_global (int8_t * list, int32_t list_size)
{
int8_t * device_list_ref;
hipEvent_t start, stop;
CUDA_CALL(hipEventCreate(&start));
CUDA_CALL(hipEventCreate(&stop));
dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// dim3 dimGrid (1, 1, 1); //TODO: use ceil
dim3 dimBlock (BLOCK_SIZE, 1, 1);
CUDA_CALL(hipMalloc((void **) &device_list_ref, list_size*sizeof(int8_t)));
CUDA_CALL(hipMemcpy(device_list_ref, list, list_size*sizeof(int8_t), hipMemcpyHostToDevice));
// printf("Llamando al kernel con global memory... \n");
CUDA_CALL(hipEventRecord(start));
for (int i = 0; i < LIST_SIZE; i++){
// if (i%(LIST_SIZE/10)==0)
// printf("%d/100...\n", 10*i/(LIST_SIZE/10));
hipLaunchKernelGGL(( global_koronel), dim3(dimGrid), dim3(dimBlock), 0, 0,
device_list_ref + (i&1),
((LIST_SIZE - (i&1)) >> 1) << 1
);
}
CUDA_CALL(hipEventRecord(stop));
CUDA_CALL(hipEventSynchronize(stop));
float milliseconds = 0;
CUDA_CALL(hipEventElapsedTime(&milliseconds, start, stop));
printf("Tiempo en kernel de global (ms): %f\n", milliseconds/1000);
CUDA_CALL(hipMemcpy(list, device_list_ref, list_size*sizeof(int8_t), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(device_list_ref));
}
__host__
void odd_even_bubble_sort_shared (int8_t * list, int32_t list_size)
{
int8_t * device_list_ref;
hipEvent_t start, stop;
CUDA_CALL(hipEventCreate(&start));
CUDA_CALL(hipEventCreate(&stop));
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
uint blocks_needed = (uint) ceil((double)LIST_SIZE/(2*BLOCK_SIZE));
// dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// if (LIST_SIZE/(2*BLOCK_SIZE) > maxnblocks) {
// dim3 dimGrid (maxnblocks, 1, 1); //TODO: use ceil
// }else{
// dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// }
dim3 dimGrid(blocks_needed, 1, 1); // blocks_needed already uses ceil
dim3 dimBlock (BLOCK_SIZE, 1, 1);
CUDA_CALL(hipMalloc((void **) &device_list_ref, list_size*sizeof(int8_t)));
CUDA_CALL(hipMemcpy(device_list_ref, list, list_size*sizeof(int8_t), hipMemcpyHostToDevice));
// printf("Llamando al kernel con shared memory... \n");
CUDA_CALL(hipEventRecord(start));
for (int i = 0; i < LIST_SIZE; i++){
// if (i%(LIST_SIZE/10)==0)
// printf("%d/100...\n", 10*i/(LIST_SIZE/10));
hipLaunchKernelGGL(( shared_koronel), dim3(dimGrid), dim3(dimBlock), 0, 0,
device_list_ref + (i&1),
((LIST_SIZE - (i&1)) >> 1) << 1
);
}
CUDA_CALL(hipEventRecord(stop));
CUDA_CALL(hipEventSynchronize(stop));
float milliseconds = 0;
CUDA_CALL(hipEventElapsedTime(&milliseconds, start, stop));
printf("Tiempo en kernel de shared (ms): %f\n", milliseconds/1000);
CUDA_CALL(hipMemcpy(list, device_list_ref, list_size*sizeof(int8_t), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(device_list_ref));
}
__host__
int assert_sorted (int8_t * list, int list_size)
{
for (int i=0; i<list_size-1; i++){
if (list[i] > list[i+1])
return i+1;
}
return 0;
}
int list_to_file(const char *fname, int8_t *buffer, size_t size)
{
int fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
write(fd, buffer, size);
return 0;
} | a1eda7653f7d7c3ca01b9375662ccb09bdee5291.cu | #include <stdio.h>
#include <math.h>
#include <fcntl.h>
#include <unistd.h>
// #define LIST_SIZE 1610612736 //6 GB of ints
//#define LIST_SIZE 209715200 //500 MB of ints
// #define LIST_SIZE 1048576 // 1MB of ints
// #define LIST_SIZE (2 * 65536)
// #define LIST_SIZE 65536
// #define LIST_SIZE 49152
// #define LIST_SIZE 49152
#define LIST_SIZE 16384
// #define LIST_SIZE (16384 + 8192)
// #define LIST_SIZE (8192 + 4096 + 2048)
// #define LIST_SIZE (8192 + 4096)
// #define LIST_SIZE (3*(8192 + 4096))
// #define LIST_SIZE 8192
// #define LIST_SIZE 4096
#define BLOCK_SIZE 1024
#define CUDA_CALL(x) {cudaError_t cuda_error__ = (x); if (cuda_error__) printf("CUDA error: " #x " returned \"%s\"\n", cudaGetErrorString(cuda_error__));}
__device__
inline void SWAP(int8_t *_a,int8_t *_b){int8_t __aux; __aux = *_a; *_a = *_b; *_b = __aux;}
void odd_even_bubble_sort_global(int8_t * list, int32_t list_size);
void odd_even_bubble_sort_shared(int8_t * list, int32_t list_size);
int assert_sorted (int8_t * list, int list_size);
int list_to_file(const char *fname, int8_t *buffer, size_t size);
// __global__
// void shared_koronel_64(int32_t * list, int32_t list_size, int8_t even)
// {
// __shared__ int32_t slist[2*BLOCK_SIZE+1];
// int32_t *win = (list + 2*(blockDim.x * blockIdx.x));
// int32_t win_size;
// if (((blockIdx.x+1)*2*blockDim.x) > list_size)
// win_size = list_size % (2*blockDim.x);
// else
// win_size = 2*blockDim.x;
// if (2*threadIdx.x < win_size - 1) {
// if (even) {
// *((int64_t *)slist + threadIdx.x) = *((int64_t *)win + threadIdx.x);
// } else {
// if (threadIdx.x == 0){
// // printf("%d %d\n", even, win);
// slist[1] = win[0];
// } else {
// *((int64_t *)slist + threadIdx.x) = *((int64_t *)(win-1) + threadIdx.x);
// }
// }
// }
// for (int32_t i = 0; i<win_size; i++){
// int32_t pos_oddeven = 2*threadIdx.x + (i&1);
// if (pos_oddeven < win_size - 1)
// if(slist[pos_oddeven]>slist[pos_oddeven+1])
// SWAP(&slist[pos_oddeven], &slist[pos_oddeven+1]);
// __syncthreads();
// }
// if (2*threadIdx.x < win_size - 1) {
// if (even) {
// *((int64_t *)win + threadIdx.x) = *((int64_t *)slist + threadIdx.x);
// } else {
// if (threadIdx.x == 0){
// win[0] = slist[1];
// } else {
// *((int64_t *)(win-1) + threadIdx.x) = *((int64_t *)slist + threadIdx.x);
// }
// }
// }
// }
__global__
void shared_koronel(int8_t * list, int32_t list_size)
{
// if (threadIdx.x == 0 && list_size < 10000) {
// printf("Primer elemento: %d\n", *list);
// }
__shared__ int8_t slist[2*BLOCK_SIZE];
int8_t *win = (list + 2*(blockDim.x * blockIdx.x));
int32_t win_size = 2*blockDim.x - (2*blockDim.x - list_size%(2*blockDim.x))*(((blockIdx.x+1)*2*blockDim.x) > list_size);
if (2*threadIdx.x < win_size - 1) {
slist[2*threadIdx.x] = win[2*threadIdx.x];
slist[2*threadIdx.x + 1] = win[2*threadIdx.x+1];
// slist[threadIdx.x] = win[threadIdx.x];
// slist[threadIdx.x + blockDim.x] = win[threadIdx.x+blockDim.x];
}
for (int32_t i = 0; i<win_size; i++){
int32_t pos_oddeven = 2*threadIdx.x + (i&1);
if (pos_oddeven < win_size - 1)
if(slist[pos_oddeven]>slist[pos_oddeven+1])
SWAP(&slist[pos_oddeven], &slist[pos_oddeven+1]);
__syncthreads();
}
if (2*threadIdx.x < win_size - 1) {
win[2*threadIdx.x] = slist[2*threadIdx.x];
win[2*threadIdx.x+1] = slist[2*threadIdx.x+1];
// win[threadIdx.x] = slist[threadIdx.x];
// win[threadIdx.x + blockDim.x] = slist[threadIdx.x+blockDim.x];
}
}
__global__
void global_koronel(int8_t * list, int32_t list_size)
{
int8_t *win = (list + 2*(blockDim.x * blockIdx.x));
int32_t win_size = 2*blockDim.x - (2*blockDim.x - list_size%(2*blockDim.x))*(((blockIdx.x+1)*2*blockDim.x) > list_size);
for (int32_t i = 0; i<win_size; i++){
int32_t pos_oddeven = 2*threadIdx.x + (i&1);
if (pos_oddeven < win_size - 1)
if(win[pos_oddeven]>win[pos_oddeven+1])
SWAP(&win[pos_oddeven], &win[pos_oddeven+1]);
__syncthreads();
}
}
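// win_size is computed branchlessly: it equals 2*blockDim.x for full windows
// and list_size % (2*blockDim.x) for the trailing partial window, selected by
// the boolean factor (((blockIdx.x+1)*2*blockDim.x) > list_size).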
int main (){
srand(time(NULL));
int8_t * random_numbers_global = (int8_t *) malloc(sizeof(int8_t)*LIST_SIZE);
int8_t * random_numbers_shared = (int8_t *) malloc(sizeof(int8_t)*LIST_SIZE);
// printf("Generando lista aleatoria de %i elementos\n", LIST_SIZE);
for (int i = 0; i<LIST_SIZE; i++){
// random_numbers_global[i] = rand()%20;
random_numbers_global[i] = (int8_t) LIST_SIZE - i;
}
list_to_file("sin_ordenar", random_numbers_global, sizeof(int8_t)*LIST_SIZE);
memcpy(random_numbers_shared, random_numbers_global, sizeof(int8_t)*LIST_SIZE);
int start_print = 0;
// int n_prints = 4096;
int n_prints = LIST_SIZE;
int elem;
// printf("Lista antes de gpu: Elementos desde %i hasta %i \n", start_print, start_print+n_prints);
// for (int i=start_print; i< start_print+n_prints; i++){
// printf("%i ", random_numbers_global[i]);
// }
// printf("\n");
//*************************************
// ODD-EVEN BUBBLE SORT WITH GLOBAL MEM
//*************************************
// printf("Odd-even bubble sort with global memory \n");
odd_even_bubble_sort_global(random_numbers_global, LIST_SIZE);
//printf("Despues de gpu (global): Elementos desde %i hasta %i\n", start_print, start_print+n_prints);
//for (int i=start_print; i< start_print+n_prints; i++){
//printf("%i ", random_numbers_global[i]);
//}
//printf("\n");
//printf("Chequeando si la lista con global mem esta ordenada... \n");
//if (elem = assert_sorted(random_numbers_global, LIST_SIZE)) {
// printf("LISTA MAL ORDENADA EN ELEM N %i \n", elem);
// for (int i=((elem-100) > 0)*(elem-100); i < (((elem+100) < LIST_SIZE)*(elem+100) + ((elem+100) >= LIST_SIZE)*LIST_SIZE); i++)
// printf("%i ", random_numbers_global[i]);
// printf("\n");
//} else
// printf("LISTA CON GLOBAL MEM BIEN ORDENADA \n");
//printf("Finalizado sort con memoria global \n");
//*************************************
// ODD-EVEN BUBBLE SORT WITH SHARED MEM
//*************************************
// printf("Odd-even bubble sort with shared memory \n");
odd_even_bubble_sort_shared(random_numbers_shared, LIST_SIZE);
// printf("Despues de gpu (shared): Elementos desde %i hasta %i\n", start_print, start_print+n_prints);
// for (int i=start_print; i< start_print+n_prints; i++){
// printf("%i ", random_numbers_shared[i]);
// }
// printf("\n");
// printf("Chequeando si la lista con shared mem esta ordenada... \n");
// if (elem = assert_sorted(random_numbers_shared, LIST_SIZE)) {
// printf("LISTA MAL ORDENADA EN ELEM N %i \n", elem);
// for (int i=((elem-100) > 0)*(elem-100); i < (((elem+100) < LIST_SIZE)*(elem+100) + ((elem+100) >= LIST_SIZE)*LIST_SIZE); i++)
// printf("%i ", random_numbers_shared[i]);
// printf("\n");
// } else
// printf("LISTA CON SHARED MEM BIEN ORDENADA \n");
list_to_file("ordenada", random_numbers_shared, sizeof(int8_t)*LIST_SIZE);
return 0;
}
__host__
void odd_even_bubble_sort_global (int8_t * list, int32_t list_size)
{
int8_t * device_list_ref;
cudaEvent_t start, stop;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// dim3 dimGrid (1, 1, 1); //TODO: use ceil
dim3 dimBlock (BLOCK_SIZE, 1, 1);
CUDA_CALL(cudaMalloc((void **) &device_list_ref, list_size*sizeof(int8_t)));
CUDA_CALL(cudaMemcpy(device_list_ref, list, list_size*sizeof(int8_t), cudaMemcpyHostToDevice));
// printf("Llamando al kernel con global memory... \n");
CUDA_CALL(cudaEventRecord(start));
for (int i = 0; i < LIST_SIZE; i++){
// if (i%(LIST_SIZE/10)==0)
// printf("%d/100...\n", 10*i/(LIST_SIZE/10));
global_koronel<<<dimGrid, dimBlock>>>
(
device_list_ref + (i&1),
((LIST_SIZE - (i&1)) >> 1) << 1
);
}
CUDA_CALL(cudaEventRecord(stop));
CUDA_CALL(cudaEventSynchronize(stop));
float milliseconds = 0;
CUDA_CALL(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Tiempo en kernel de global (ms): %f\n", milliseconds/1000);
CUDA_CALL(cudaMemcpy(list, device_list_ref, list_size*sizeof(int8_t), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(device_list_ref));
}
__host__
void odd_even_bubble_sort_shared (int8_t * list, int32_t list_size)
{
int8_t * device_list_ref;
cudaEvent_t start, stop;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
uint blocks_needed = (uint) ceil((double)LIST_SIZE/(2*BLOCK_SIZE));
// dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// if (LIST_SIZE/(2*BLOCK_SIZE) > maxnblocks) {
// dim3 dimGrid (maxnblocks, 1, 1); //TODO: use ceil
// }else{
// dim3 dimGrid ((uint)(LIST_SIZE/(2*BLOCK_SIZE)), 1, 1); //TODO: use ceil
// }
dim3 dimGrid(blocks_needed, 1, 1); // blocks_needed already uses ceil
dim3 dimBlock (BLOCK_SIZE, 1, 1);
CUDA_CALL(cudaMalloc((void **) &device_list_ref, list_size*sizeof(int8_t)));
CUDA_CALL(cudaMemcpy(device_list_ref, list, list_size*sizeof(int8_t), cudaMemcpyHostToDevice));
// printf("Llamando al kernel con shared memory... \n");
CUDA_CALL(cudaEventRecord(start));
for (int i = 0; i < LIST_SIZE; i++){
// if (i%(LIST_SIZE/10)==0)
// printf("%d/100...\n", 10*i/(LIST_SIZE/10));
shared_koronel<<<dimGrid, dimBlock>>>
(
device_list_ref + (i&1),
((LIST_SIZE - (i&1)) >> 1) << 1
);
}
CUDA_CALL(cudaEventRecord(stop));
CUDA_CALL(cudaEventSynchronize(stop));
float milliseconds = 0;
CUDA_CALL(cudaEventElapsedTime(&milliseconds, start, stop));
printf("Tiempo en kernel de shared (ms): %f\n", milliseconds/1000);
CUDA_CALL(cudaMemcpy(list, device_list_ref, list_size*sizeof(int8_t), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(device_list_ref));
}
__host__
int assert_sorted (int8_t * list, int list_size)
{
for (int i=0; i<list_size-1; i++){
if (list[i] > list[i+1])
return i+1;
}
return 0;
}
int list_to_file(const char *fname, int8_t *buffer, size_t size)
{
int fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU);
write(fd, buffer, size);
return 0;
} |
80ee86bb2f8936a6b9ad0fc6d85ec72d9c2fff8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
__global__ void vector_add(double *C, const double *A, const double *B, int N)
{
// Add the kernel code
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Do not try to access past the allocated memory
if (idx < N) {
C[idx] = A[idx] + B[idx];
}
}
int main(void)
{
const int N = 20;
const int ThreadsInBlock = 128;
double *dA, *dB, *dC;
double hA[N], hB[N], hC[N];
for(int i = 0; i < N; ++i) {
hA[i] = (double) i;
hB[i] = (double) i * i;
}
/*
Add memory allocations and copies. Wrap your runtime function
calls with CUDA_CHECK( ) macro
*/
CUDA_CHECK( hipMalloc((void**)&dA, sizeof(double)*N) );
// #error Add the remaining memory allocations and copies
CUDA_CHECK( hipMalloc((void**)&dB, sizeof(double)*N) );
CUDA_CHECK( hipMalloc((void**)&dC, sizeof(double)*N) );
CUDA_CHECK( hipMemcpy((void*)dA, (void*)hA, sizeof(double)*N, hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy((void*)dB, (void*)hB, sizeof(double)*N, hipMemcpyHostToDevice) );
// Note the maximum size of threads in a block
dim3 threads(ThreadsInBlock), grid((N + threads.x - 1) / threads.x);
//// Add the kernel call here
// #error Add the CUDA kernel call
// vector_add(double *C, const double *A, const double *B, int N);
// // dereference host pointer hA, hB.
//hipLaunchKernelGGL(( vector_add) , dim3(grid), dim3(threads), 0, 0, dC, hA, hB, N);
hipLaunchKernelGGL(( vector_add) , dim3(grid), dim3(threads), 0, 0, dC, dA, dB, N);
// Here we add an explicit synchronization so that we catch errors
// as early as possible. Don't do this in production code!
hipDeviceSynchronize();
CHECK_ERROR_MSG("vector_add kernel");
//// Copy back the results and free the device memory
// #error Copy back the results and free the allocated memory
CUDA_CHECK( hipMemcpy((void*)hC, (void*)dC, sizeof(double)*N, hipMemcpyDeviceToHost) );
// // dereference device pointer dC[i]
// for (int i = 0; i < N; i++)
// printf("%5.1f\n", dC[i]);
for (int i = 0; i < N; i++)
printf("%5.1f\n", hC[i]);
return 0;
} | 80ee86bb2f8936a6b9ad0fc6d85ec72d9c2fff8e.cu | #include <cstdio>
#include <cmath>
#include "error_checks.h" // Macros CUDA_CHECK and CHECK_ERROR_MSG
__global__ void vector_add(double *C, const double *A, const double *B, int N)
{
// Add the kernel code
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Do not try to access past the allocated memory
if (idx < N) {
C[idx] = A[idx] + B[idx];
}
}
int main(void)
{
const int N = 20;
const int ThreadsInBlock = 128;
double *dA, *dB, *dC;
double hA[N], hB[N], hC[N];
for(int i = 0; i < N; ++i) {
hA[i] = (double) i;
hB[i] = (double) i * i;
}
/*
Add memory allocations and copies. Wrap your runtime function
calls with CUDA_CHECK( ) macro
*/
CUDA_CHECK( cudaMalloc((void**)&dA, sizeof(double)*N) );
// #error Add the remaining memory allocations and copies
CUDA_CHECK( cudaMalloc((void**)&dB, sizeof(double)*N) );
CUDA_CHECK( cudaMalloc((void**)&dC, sizeof(double)*N) );
CUDA_CHECK( cudaMemcpy((void*)dA, (void*)hA, sizeof(double)*N, cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy((void*)dB, (void*)hB, sizeof(double)*N, cudaMemcpyHostToDevice) );
// Note the maximum size of threads in a block
dim3 threads(ThreadsInBlock), grid((N + threads.x - 1) / threads.x);
//// Add the kernel call here
// #error Add the CUDA kernel call
// vector_add(double *C, const double *A, const double *B, int N);
// // dereference host pointer hA, hB.
// vector_add <<<grid, threads>>> (dC, hA, hB, N);
vector_add <<<grid, threads>>> (dC, dA, dB, N);
// Here we add an explicit synchronization so that we catch errors
// as early as possible. Don't do this in production code!
cudaDeviceSynchronize();
CHECK_ERROR_MSG("vector_add kernel");
//// Copy back the results and free the device memory
// #error Copy back the results and free the allocated memory
CUDA_CHECK( cudaMemcpy((void*)hC, (void*)dC, sizeof(double)*N, cudaMemcpyDeviceToHost) );
// // dereference device pointer dC[i]
// for (int i = 0; i < N; i++)
// printf("%5.1f\n", dC[i]);
for (int i = 0; i < N; i++)
printf("%5.1f\n", hC[i]);
return 0;
} |
727ceefc197727d9315ed02fb7af29a277d51cc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
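// The grid is capped at kMaxGridNum blocks; CUDA_KERNEL_LOOP is a grid-stride
// loop, so each thread picks up additional work items when N exceeds
// kMaxGridNum * CUDA_NUM_THREADS.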
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
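// Bilinear sampling at a fractional location (h, w): the four neighbouring
// pixels are blended as
//   val = (1-lh)(1-lw)*v1 + (1-lh)*lw*v2 + lh*(1-lw)*v3 + lh*lw*v4,
// with out-of-bounds neighbours treated as zero.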
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
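// get_coordinate_weight returns d(sampled value)/d(coordinate): bp_dir == 0
// differentiates the bilinear sample w.r.t. the vertical coordinate h, and
// bp_dir == 1 w.r.t. the horizontal coordinate w. It is used to backpropagate
// into the predicted offsets.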
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
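// Layout of data_col written above: channel-major over c_im * kernel_h *
// kernel_w, then batch, then output row, then output column, i.e. index
// ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col.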
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
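// After deformable_im2col, the surrounding op (not shown in this file)
// typically multiplies the weight matrix, reshaped to
// (out_channels, in_channels * kernel_h * kernel_w), with data_col to obtain
// the convolution output, as in the classic im2col + GEMM scheme.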
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
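// Because several columns can map to the same input pixel, gradients are
// scattered into grad_im with atomicAdd. The dy/dx loop over [-2, 2] is a
// conservative search window; get_gradient_weight returns a nonzero weight
// only for the four pixels surrounding the sampling point.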
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
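// Bilinear weight that integer pixel (h, w) receives from the fractional sampling point (argmax_h, argmax_w); used when scattering column gradients back to grad_im.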
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
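// Derivative of the bilinear sample with respect to the sampling coordinate: bp_dir == 0 differentiates along h, bp_dir == 1 along w.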
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
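// Modulated (DCNv2) im2col: one thread per (channel, batch, h_col, w_col) output position; each thread reads its learned offsets and modulation mask, bilinearly samples the shifted locations and writes kernel_h * kernel_w masked values into the column buffer.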
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: index of the output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
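// Backward pass w.r.t. the input image: each thread takes one masked column gradient, recovers its fractional sampling location from the offsets, and scatters it to the input pixels with non-zero bilinear weight via atomicAdd.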
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
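// Backward pass w.r.t. the offsets and the modulation mask: each thread owns one offset channel at one output location, accumulates the offset gradient through dmcn_get_coordinate_weight, and for even offset channels also writes the mask gradient from the bilinearly sampled values.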
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
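// Host-side launcher for the modulated im2col kernel.
// Tensor layouts inferred from the kernel indexing above: data_im is [batch, channels, H, W],
// data_offset is [batch, deformable_group * 2 * kernel_h * kernel_w, H_col, W_col],
// data_mask is [batch, deformable_group * kernel_h * kernel_w, H_col, W_col],
// and data_col is written as [channels * kernel_h * kernel_w, batch, H_col, W_col].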
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
}
| 727ceefc197727d9315ed02fb7af29a277d51cc8.cu | /*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
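// Deformable (DCNv1) im2col: like standard im2col, but every kernel tap is shifted by a learned per-position offset and read with bilinear interpolation; there is no modulation mask in this variant.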
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: index of the output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
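// Backward pass w.r.t. the input image: each column gradient is scattered back to the input pixels around its offset sampling location via atomicAdd, weighted by get_gradient_weight.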
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
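// Backward pass w.r.t. the offsets: each thread accumulates the gradient of one offset channel at one output location using get_coordinate_weight over the channels of its deformable group.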
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
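// Surface launch failures the same way as the other wrappers in this file.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
}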
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: index of the output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
|
55579b1d3b19c8b6deedf6fe9482bc19aafd3bd2.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by kier on 2019/3/6.
//
#include "kernels/gpu/gatherv2.h"
#include "global/operator_factory.h"
#include "backend/name.h"
#include "kernels/gpu/gpu_kernel.h"
#include <numeric>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
namespace ts {
namespace gpu {
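// One thread per gathered slice: decode the leading indices from indices_data, turn them into a flat source offset using the precomputed shape weights, then copy width_bytes of contiguous data.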
static __global__ void gpu_gatherv2_kernel(int count, const char * x_data, const int * indices_data, char * out_data,
int axis, int bytes, int width_bytes, GpuHypeShape c_shape) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= count) return;
int in_index = 0;
auto in_weight_it = c_shape.weights + 1;
int curindex = 0;
for (int k=0; k < axis; k++) {
curindex = indices_data[index * axis + k];
if(k >= c_shape.dims -1) {
in_index += curindex;
}else {
in_index += *in_weight_it * curindex;
in_weight_it++;
}
}
auto src_ptr = x_data + in_index * bytes;
auto dst_ptr = out_data + index * width_bytes;
::memcpy((void *)dst_ptr, (void *)src_ptr, width_bytes);
}
void GatherV2::gather(const Tensor &x, const Tensor &indices, Tensor &out) {
auto memcpy_handler = HardConverter::Query(out.device().type(), x.device().type());
TS_AUTO_CHECK(memcpy_handler != nullptr);
auto device_id = out.device().id();
auto &x_shape = x.sizes();
auto &i_shape = indices.sizes();
int axis = i_shape[i_shape.size() - 1];
auto number = std::accumulate(i_shape.begin(), i_shape.end() - 1, 1, std::multiplies<int>());
auto width = std::accumulate(x_shape.begin() + axis, x_shape.end(), 1, std::multiplies<int>());
auto gpu_hype_shape = MakeGPUHypeShape(x.device(), {x_shape});
auto &x_hype_shape = gpu_hype_shape.second[0];
auto bytes = x.proto().type_bytes();
auto width_bytes = width * bytes;
auto x_data = x.data<char>();
auto out_data = out.data<char>();
auto indices_data = indices.data<int32_t>();
RUN_KERNEL(gpu_gatherv2_kernel, CUDA_BLOCK(number, CUDA_THREAD_NUM), CUDA_THREAD_NUM,
number, x_data, indices_data, out_data, axis, bytes, width_bytes, x_hype_shape);
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(GatherV2, GPU, name::layer::gatherv2())
| 55579b1d3b19c8b6deedf6fe9482bc19aafd3bd2.cu | //
// Created by kier on 2019/3/6.
//
#include "kernels/gpu/gatherv2.h"
#include "global/operator_factory.h"
#include "backend/name.h"
#include "kernels/gpu/gpu_kernel.h"
#include <numeric>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
namespace ts {
namespace gpu {
static __global__ void gpu_gatherv2_kernel(int count, const char * x_data, const int * indices_data, char * out_data,
int axis, int bytes, int width_bytes, GpuHypeShape c_shape) {
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= count) return;
int in_index = 0;
auto in_weight_it = c_shape.weights + 1;
int curindex = 0;
for (int k=0; k < axis; k++) {
curindex = indices_data[index * axis + k];
if(k >= c_shape.dims -1) {
in_index += curindex;
}else {
in_index += *in_weight_it * curindex;
in_weight_it++;
}
}
auto src_ptr = x_data + in_index * bytes;
auto dst_ptr = out_data + index * width_bytes;
::memcpy((void *)dst_ptr, (void *)src_ptr, width_bytes);
}
void GatherV2::gather(const Tensor &x, const Tensor &indices, Tensor &out) {
auto memcpy_handler = HardConverter::Query(out.device().type(), x.device().type());
TS_AUTO_CHECK(memcpy_handler != nullptr);
auto device_id = out.device().id();
auto &x_shape = x.sizes();
auto &i_shape = indices.sizes();
int axis = i_shape[i_shape.size() - 1];
auto number = std::accumulate(i_shape.begin(), i_shape.end() - 1, 1, std::multiplies<int>());
auto width = std::accumulate(x_shape.begin() + axis, x_shape.end(), 1, std::multiplies<int>());
auto gpu_hype_shape = MakeGPUHypeShape(x.device(), {x_shape});
auto &x_hype_shape = gpu_hype_shape.second[0];
auto bytes = x.proto().type_bytes();
auto width_bytes = width * bytes;
auto x_data = x.data<char>();
auto out_data = out.data<char>();
auto indices_data = indices.data<int32_t>();
RUN_KERNEL(gpu_gatherv2_kernel, CUDA_BLOCK(number, CUDA_THREAD_NUM), CUDA_THREAD_NUM,
number, x_data, indices_data, out_data, axis, bytes, width_bytes, x_hype_shape);
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(GatherV2, GPU, name::layer::gatherv2())
|
5f25a6485f2d29961cc9c3f6d7a22ae270da9218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void simulation_1_gpu( float *out, const float *in) {
out[0] = in[0];
out[1] = in[1];
out[2] = in[2];
out[3] = in[3];
}
// CUDA kernel function
__global__ void op_cuda_simulation_1(
float *arg0,
const float *__restrict arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
simulation_1_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_simulation_1(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: simulation_1");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
hipLaunchKernelGGL(( op_cuda_simulation_1), dim3(nblocks),dim3(nthread), 0, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[3].transfer += (float)set->size * arg1.size;
}
| 5f25a6485f2d29961cc9c3f6d7a22ae270da9218.cu | //
// auto-generated by op2.py
//
//user function
__device__ void simulation_1_gpu( float *out, const float *in) {
out[0] = in[0];
out[1] = in[1];
out[2] = in[2];
out[3] = in[3];
}
// CUDA kernel function
__global__ void op_cuda_simulation_1(
float *arg0,
const float *__restrict arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
simulation_1_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_simulation_1(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(3);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: simulation_1");
}
int set_size = op_mpi_halo_exchanges_grouped(set, nargs, args, 2);
if (set_size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
op_cuda_simulation_1<<<nblocks,nthread>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
if (OP_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += (float)set->size * arg0.size * 2.0f;
OP_kernels[3].transfer += (float)set->size * arg1.size;
}
|
4c42e9d653841600c9bf3e8ba29aa4de59049e46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
const int N = 512;
//const float a[N] = {10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
//const float b[N] = {10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
//const float xx[N] = { -4,-3.5,-3,-2.5,-2,-1.5,-1,-0.5,0,0.5,1,1.5,2,2.5,3,3.5 };
__global__ void reduce1(int *a)
{
int tid = threadIdx.x;
for (int i = 1; i < blockDim.x; i *= 2)
{
		if (tid % (2 * i) == 0)
			a[tid] += a[tid + i];
		__syncthreads();   // all threads must finish this stride before the next one reads the results
}
}
__global__ void reduce2(int *a)
{
int tid = threadIdx.x;
for (int i = 1; i < blockDim.x; i *= 2)
{
int idx = 2 * i * tid;
		if (idx < blockDim.x)
			a[idx] += a[idx + i];
		__syncthreads();
}
}
__global__ void reduce3(float *x)
{
	int tid = threadIdx.x;
	// read the grid points into registers before any thread overwrites them
	float xi = x[tid];
	float xi1 = (tid < N - 1) ? x[tid + 1] : xi;
	__syncthreads();
	// rectangle-rule term f(x_i) * (x_{i+1} - x_i) with f(x) = x^2; the last point contributes 0
	x[tid] = (tid < N - 1) ? xi * xi * (xi1 - xi) : 0.0f;
	__syncthreads();
	for (unsigned int i = blockDim.x / 2; i > 0; i = i / 2)
	{
		if (tid < i)
			x[tid] += x[tid + i];
		__syncthreads();
	}
}
float* integration(float low, float high)
{
float x[N] = {0};
float delta = (high - low) / (N - 1);
for (size_t i = 0; i < N; i++)
{
x[i] = low + i * delta;
/*printf("%.1f\n", x[i]);*/
}
float c[N] = { 0 };
float *dev_a = 0;
hipMalloc((void**)&dev_a, N * sizeof(float));
hipMemcpy(dev_a, x, N * sizeof(float), hipMemcpyHostToDevice);
reduce3 << <1, N >> >(dev_a);
hipMemcpy(c, dev_a, N * sizeof(float), hipMemcpyDeviceToHost);
printf("%.1f\n", c[0]);
return x;
}
__global__ void dot(int *a, int *b)
{
int tid = threadIdx.x;
	a[tid] *= b[tid];          // elementwise product
	__syncthreads();           // all products must be written before the reduction reads them
	for (int i = 1; i < blockDim.x; i *= 2)
	{
		int idx = 2 * i * tid;
		if (idx < blockDim.x)
			a[idx] += a[idx + i];
		__syncthreads();
}
}
__global__ void reduce(int* input, int* output)
{
	__shared__ int data[N];    // shared storage must be a real array, not an uninitialized pointer
	int tid = threadIdx.x;
	data[tid] = input[blockIdx.x * blockDim.x + tid];   // each block loads its own slice of the input
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i = i / 2)
{
if (tid < i)
{
data[tid] += data[tid + i];
}
__syncthreads();
}
if (tid == 0) output[blockIdx.x] = data[0];
}
int main()
{
//float c[N] = { 0 };
//float *dev_a = 0;
//float *dev_b = 0;
//hipMalloc((void**)&dev_a, N * sizeof(float));
//hipMalloc((void**)&dev_b, N * sizeof(float));
//hipMemcpy(dev_a, a, N * sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(dev_b, b, N * sizeof(float), hipMemcpyHostToDevice);
//dot <<<1, N>>>(dev_a, dev_b);
//hipMemcpy(c, dev_a, N * sizeof(int), hipMemcpyDeviceToHost);
//printf("%d\n", c[0]);//395 expected
//hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
//reduce1 <<<1, N >>>(dev_a);
//hipMemcpy(c, dev_a, N * sizeof(int), hipMemcpyDeviceToHost);
//printf("%d\n", c[0]);//41 expected
//hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
//reduce2 <<<1, N >>>(dev_a);
//hipMemcpy(c, dev_a, N * sizeof(int), hipMemcpyDeviceToHost);
//printf("%d\n", c[0]);//41 expected
integration(-4, 4);
//hipMemcpy(dev_a, xx, N * sizeof(float), hipMemcpyHostToDevice);
//reduce3 <<<1, N >>>(dev_a);
//hipMemcpy(c, dev_a, N * sizeof(float), hipMemcpyDeviceToHost);
//printf("%.1f\n", c[0]);
//hipMemcpy(dev_b, xx, N * sizeof(float), hipMemcpyHostToDevice);
//integration << <1, N >> > (dev_b);
//hipMemcpy(c, dev_b, N * sizeof(float), hipMemcpyDeviceToHost);
//hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
//hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
//reduce <<<2, N / 2>>>(dev_a, dev_b);
//hipMemcpy(c, dev_b, N * sizeof(int), hipMemcpyDeviceToHost);
//printf("%d\n", c[0]); //41 expected
return 0;
}
| 4c42e9d653841600c9bf3e8ba29aa4de59049e46.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
const int N = 512;
//const float a[N] = {10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
//const float b[N] = {10,1,8,-1,0,-2,3,5,-2,-3,2,7,0,11,0,2};
//const float xx[N] = { -4,-3.5,-3,-2.5,-2,-1.5,-1,-0.5,0,0.5,1,1.5,2,2.5,3,3.5 };
__global__ void reduce1(int *a)
{
int tid = threadIdx.x;
for (int i = 1; i < blockDim.x; i *= 2)
{
		if (tid % (2 * i) == 0)
			a[tid] += a[tid + i];
		__syncthreads();   // all threads must finish this stride before the next one reads the results
}
}
__global__ void reduce2(int *a)
{
int tid = threadIdx.x;
for (int i = 1; i < blockDim.x; i *= 2)
{
int idx = 2 * i * tid;
		if (idx < blockDim.x)
			a[idx] += a[idx + i];
		__syncthreads();
}
}
__global__ void reduce3(float *x)
{
	int tid = threadIdx.x;
	// read the grid points into registers before any thread overwrites them
	float xi = x[tid];
	float xi1 = (tid < N - 1) ? x[tid + 1] : xi;
	__syncthreads();
	// rectangle-rule term f(x_i) * (x_{i+1} - x_i) with f(x) = x^2; the last point contributes 0
	x[tid] = (tid < N - 1) ? xi * xi * (xi1 - xi) : 0.0f;
	__syncthreads();
	for (unsigned int i = blockDim.x / 2; i > 0; i = i / 2)
	{
		if (tid < i)
			x[tid] += x[tid + i];
		__syncthreads();
	}
}
float* integration(float low, float high)
{
float x[N] = {0};
float delta = (high - low) / (N - 1);
for (size_t i = 0; i < N; i++)
{
x[i] = low + i * delta;
/*printf("%.1f\n", x[i]);*/
}
float c[N] = { 0 };
float *dev_a = 0;
cudaMalloc((void**)&dev_a, N * sizeof(float));
cudaMemcpy(dev_a, x, N * sizeof(float), cudaMemcpyHostToDevice);
reduce3 << <1, N >> >(dev_a);
cudaMemcpy(c, dev_a, N * sizeof(float), cudaMemcpyDeviceToHost);
printf("%.1f\n", c[0]);
return x;
}
__global__ void dot(int *a, int *b)
{
int tid = threadIdx.x;
	a[tid] *= b[tid];          // elementwise product
	__syncthreads();           // all products must be written before the reduction reads them
	for (int i = 1; i < blockDim.x; i *= 2)
	{
		int idx = 2 * i * tid;
		if (idx < blockDim.x)
			a[idx] += a[idx + i];
		__syncthreads();
}
}
__global__ void reduce(int* input, int* output)
{
	__shared__ int data[N];    // shared storage must be a real array, not an uninitialized pointer
	int tid = threadIdx.x;
	data[tid] = input[blockIdx.x * blockDim.x + tid];   // each block loads its own slice of the input
__syncthreads();
for (int i = blockDim.x / 2; i > 0; i = i / 2)
{
if (tid < i)
{
data[tid] += data[tid + i];
}
__syncthreads();
}
if (tid == 0) output[blockIdx.x] = data[0];
}
int main()
{
//float c[N] = { 0 };
//float *dev_a = 0;
//float *dev_b = 0;
//cudaMalloc((void**)&dev_a, N * sizeof(float));
//cudaMalloc((void**)&dev_b, N * sizeof(float));
//cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
//dot <<<1, N>>>(dev_a, dev_b);
//cudaMemcpy(c, dev_a, N * sizeof(int), cudaMemcpyDeviceToHost);
//printf("%d\n", c[0]);//395 expected
//cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
//reduce1 <<<1, N >>>(dev_a);
//cudaMemcpy(c, dev_a, N * sizeof(int), cudaMemcpyDeviceToHost);
//printf("%d\n", c[0]);//41 expected
//cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
//reduce2 <<<1, N >>>(dev_a);
//cudaMemcpy(c, dev_a, N * sizeof(int), cudaMemcpyDeviceToHost);
//printf("%d\n", c[0]);//41 expected
integration(-4, 4);
//cudaMemcpy(dev_a, xx, N * sizeof(float), cudaMemcpyHostToDevice);
//reduce3 <<<1, N >>>(dev_a);
//cudaMemcpy(c, dev_a, N * sizeof(float), cudaMemcpyDeviceToHost);
//printf("%.1f\n", c[0]);
//cudaMemcpy(dev_b, xx, N * sizeof(float), cudaMemcpyHostToDevice);
//integration << <1, N >> > (dev_b);
//cudaMemcpy(c, dev_b, N * sizeof(float), cudaMemcpyDeviceToHost);
//cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
//cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
//reduce <<<2, N / 2>>>(dev_a, dev_b);
//cudaMemcpy(c, dev_b, N * sizeof(int), cudaMemcpyDeviceToHost);
//printf("%d\n", c[0]); //41 expected
return 0;
}
|
9d0839f9ef38ad7ed3258dc371ba7bc8159a3b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Simulated Annealing algorithm for Traveling Salesman Problem
@@ CUDA version: no parallel optimization, single thread
Input: xxx.tsp file
Output: optimal value (total distance)
& solution route: permutation of {1, 2, ..., N}
*/
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <sys/time.h>
#include <pthread.h>
#include <hiprand/hiprand_kernel.h>
#define MAXITER 20 // Proposal 20 routes and then select the best one
#define THRESH1 0.1 // Threshold 1 for the strategy
#define THRESH2 0.89 // Threshold 2 for the strategy
#define RELAX 400 // The times of relaxation of the same temperature
#define ALPHA 0.999 // Cooling rate
#define INITEMP 99.0 // Initial temperature
#define STOPTEMP 0.001 // Termination temperature
#define MAXLAST 3 // Stop if the tour length keeps unchanged for MAXLAST consecutive temperature
#define MAXN 250 // only support N <= 250
#define THREADITER 200
using namespace std;
float minTourDist = -1; // The distance of shortest path
int *minTour = NULL; // The shortest path
int N = 0; // Number of cities
float *dist = NULL; // The distance matrix, use (i-1) instead of i
int *currTour = NULL;
int blockNum = 1; // block number
int threadNum = 1; // thread number
int globalIter = -1; // global iteration count
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
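// Bounded random-number helper built on rand_r; like the pthread mutex above it
// appears to be left over from a CPU-threaded version and is unused in the CUDA path below.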
class rand_x {
unsigned int seed;
public:
rand_x(int init) : seed(init) {}
int operator()(int limit) {
int divisor = RAND_MAX/(limit+1);
int retval;
do {
retval = rand_r(&seed) / divisor;
} while (retval > limit);
return retval;
}
};
/* load the data */
void loadFile(char* filename) {
FILE *pf;
pf = fopen(filename, "r");
if (pf == NULL) {
printf("Cannot open the file!\n");
exit(1);
}
char buff[200];
fscanf(pf, "NAME: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nTYPE: TSP%[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nCOMMENT: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nDIMENSION: %d", &N);
printf("The N is: %d\n", N);
fscanf(pf, "\nEDGE_WEIGHT_TYPE: %[^\n]s", buff);
printf("the type is: %s\n", buff);
dist = (float *)malloc(sizeof(float) * N * N);
memset(dist, 0, sizeof(float) * N * N);
if (strcmp(buff, "EUC_2D") == 0) {
fscanf(pf, "\nNODE_COORD_SECTION");
float nodeCoord[MAXN][2] = {};
int nid;
float xx, yy;
for (int i = 0; i < N; ++i) {
fscanf(pf, "\n%d %f %f", &nid, &xx, &yy);
nodeCoord[i][0] = xx;
nodeCoord[i][1] = yy;
}
float xi, yi, xj, yj;
for (int i = 0; i < N; ++i) {
for (int j = i + 1; j < N; ++j) {
xi = nodeCoord[i][0];
yi = nodeCoord[i][1];
xj = nodeCoord[j][0];
yj = nodeCoord[j][1];
dist[i*N + j] = (float)sqrt((xi - xj) * (xi - xj) + (yi - yj) * (yi - yj));
dist[j*N + i] = dist[i*N + j];
}
}
}
else if (strcmp(buff, "EXPLICIT") == 0) {
fscanf(pf, "\nEDGE_WEIGHT_FORMAT: %[^\n]s", buff);
fscanf(pf, "\n%[^\n]s", buff);
char *disps = strstr(buff, "DISPLAY_DATA_TYPE");
if (disps != NULL) {
fscanf(pf, "\nEDGE_WEIGHT_SECTION");
}
float weight;
for (int i = 0; i < N; ++i) {
for (int j = 0; j <= i; ++j) {
fscanf(pf, "%f", &weight);
dist[i*N + j] = weight;
dist[j*N + i] = weight;
}
}
}
return;
}
/* Calculate the length of the tour */
float tourLen(int *tour) {
if (tour == NULL) {
printf("tour not exist!\n");
return -1;
}
float cnt = 0;
for (int i = 0; i < N - 1; ++i) {
cnt += dist[tour[i]*N + tour[i+1]];
}
cnt += dist[tour[N-1]*N + tour[0]];
return cnt;
}
/* the main simulated annealing function */
__global__ void saTSP(int cityCnt, int* globalTour, hiprandState_t *randStates, float *dev_dist, float temperature, int relaxiter) {
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
int *tour = &globalTour[thid * cityCnt];
float currLen = 0;
for (int i = 0; i < cityCnt - 1; ++i) {
currLen += dev_dist[tour[i]*cityCnt + tour[i+1]];
}
currLen += dev_dist[tour[cityCnt-1]*cityCnt + tour[0]];
//float temperature = INITEMP;
//float lastLen = currLen;
//int contCnt = 0; // the continuous same length times
int iterCnt = 0;
while (temperature > STOPTEMP) {
temperature *= ALPHA;
iterCnt += 1;
/* stay in the same temperature for RELAX times */
for (int i = 0; i < relaxiter; ++i) {
/* Proposal 1: Block Reverse between p and q */
int p = (int)(hiprand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
int q = (int)(hiprand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
            // An error would occur if p == 0 and q == N-1 (the whole tour would be reversed), so resample p and q
if (abs(p - q) == cityCnt - 1) {
p = (int)(hiprand_uniform(&(randStates[thid])) * (float)(cityCnt - 3));
q = (int)(hiprand_uniform(&(randStates[thid])) * (float)(cityCnt - 2));
}
if (p == q) {
q = (q + 2) % cityCnt;
}
if (p > q) {
int tmp = p;
p = q;
q = tmp;
}
int p1 = (p - 1 + cityCnt) % cityCnt;
int q1 = (q + 1) % cityCnt;
int tp = tour[p], tq = tour[q], tp1 = tour[p1], tq1 = tour[q1];
float delta = dev_dist[tp*cityCnt + tq1] + dev_dist[tp1*cityCnt + tq] - dev_dist[tp*cityCnt + tp1] - dev_dist[tq*cityCnt + tq1];
/* whether to accept the change */
if ((delta < 0) || ((delta > 0) &&
(expf(-delta/temperature) > hiprand_uniform(&(randStates[thid]))))) {
currLen = currLen + delta;
int mid = (q - p) >> 1;
int tmp;
for (int k = 0; k <= mid; ++k) {
tmp = tour[p+k];
tour[p+k] = tour[q-k];
tour[q-k] = tmp;
}
//currLen = tourLen(tour);
}
}
/*
if ((currLen - lastLen < 1e-2) && (currLen - lastLen > -1e-2)) {
contCnt += 1;
if (contCnt >= MAXLAST) {
//printf("unchanged for %d times1!\n", contCnt);
break;
}
}
else
contCnt = 0;
lastLen = currLen;
*/
}
return;
}
__global__ void setup_kernel_randomness(hiprandState_t * state, unsigned long seed)
{
int s_id = (blockIdx.x*blockDim.x) + threadIdx.x;
hiprand_init(seed*s_id, s_id, 0, &state[s_id]);
}
int main(int argc, char **argv) {
hipError_t err = hipSuccess;
float *dev_dist;
if (argc < 2) {
printf("Usage: ./cuda_tsp <filename> <blockNum> <threadNum>\n");
return 0;
}
else {
loadFile(argv[1]);
err = hipMalloc((void **)&dev_dist, sizeof(float) * N * N);
if (err != hipSuccess) {
fprintf(stderr, "hipMalloc() failed\n");
exit(1);
}
hipMemcpy((void *)dev_dist, dist, sizeof(float) * N * N, hipMemcpyHostToDevice);
}
if (argc == 4) {
blockNum = atoi(argv[2]);
threadNum = atoi(argv[3]);
}
printf("blockNum is: %d, threadNum is: %d\n", blockNum, threadNum);
struct timeval start, stop;
gettimeofday(&start, NULL);
srandom(time(0));
int *dev_currTour; // currTour on device;
int itersCnt = blockNum * threadNum; // total iterations
err = hipMalloc((void **)&dev_currTour, sizeof(int)*N*itersCnt);
if (err != hipSuccess) {
fprintf(stderr, "hipMalloc() failed\n");
exit(1);
}
srand(time(0));
currTour = (int *)malloc(sizeof(int) * N * itersCnt);
for (int i = 0; i < itersCnt; ++i) {
for (int j = 0; j < N; ++j) {
currTour[i*N + j] = j;
}
random_shuffle(currTour+i*N, currTour+(i+1)*N);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d before: %f\n", i, tourLen(currTour + i*N));*/
}
err = hipMemcpy(dev_currTour, currTour, itersCnt * N * sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "hipMalloc() for dev_currTour failed\n");
exit(1);
}
// allocate random seed for each thread
hiprandState_t *devStates;
hipMalloc((void **)&devStates, itersCnt * sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_kernel_randomness), dim3(blockNum), dim3(threadNum), 0, 0, devStates, time(0));
hipDeviceSynchronize();
float currLen = 0;
float temperature = INITEMP;
int contCnt = 0;
float tempstep = pow(ALPHA, THREADITER);
//while (temperature > STOPTEMP) {
//printf("%.06f \n", temperature);
hipLaunchKernelGGL(( saTSP), dim3(blockNum), dim3(threadNum), 0, 0, N, dev_currTour, devStates, dev_dist, temperature, RELAX);
hipDeviceSynchronize();
// temperature *= tempstep;
//}
minTour = (int *)malloc(sizeof(int) * N);
memset(currTour, 0, itersCnt * N * sizeof(int));
err = hipMemcpy(currTour, dev_currTour, itersCnt * N * sizeof(int), hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "cudaMemcpyc(Device to Host) failed with %d\n", err);
exit(1);
}
/* find the minimal answer */
int minidx = 0;
for (int i = 0; i < itersCnt; ++i) {
currLen = tourLen(&currTour[i * N]);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d after: %f\n", i, currLen);*/
if ((currLen < minTourDist) || (minTourDist < 0)) {
minTourDist = currLen;
minidx = i;
}
}
for (int i = 0; i < N; ++i) {
minTour[i] = currTour[minidx * N + i];
}
gettimeofday(&stop, NULL);
// ------------- Print the result! -----------------
int tottime = stop.tv_sec - start.tv_sec;
int timemin = tottime / 60;
int timesec = tottime % 60;
printf("Total time usage: %d min %d sec. \n", timemin, timesec);
printf("N is %d, The shortest length is: %f\n And the tour is: \n", N, minTourDist);
for (int i = 0; i < N; ++i) {
printf("%d \n", minTour[i]+1);
}
free(dist);
free(minTour);
free(currTour);
return 0;
}
| 9d0839f9ef38ad7ed3258dc371ba7bc8159a3b50.cu | /*
Simulated Annealing algorithm for Traveling Salesman Problem
@@ CUDA version: no parallel optimization, single thread
Input: xxx.tsp file
Output: optimal value (total distance)
& solution route: permutation of {1, 2, ..., N}
*/
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <algorithm>
#include <sys/time.h>
#include <pthread.h>
#include <curand_kernel.h>
#define MAXITER 20 // Proposal 20 routes and then select the best one
#define THRESH1 0.1 // Threshold 1 for the strategy
#define THRESH2 0.89 // Threshold 2 for the strategy
#define RELAX 400 // The times of relaxation of the same temperature
#define ALPHA 0.999 // Cooling rate
#define INITEMP 99.0 // Initial temperature
#define STOPTEMP 0.001 // Termination temperature
#define MAXLAST 3 // Stop if the tour length keeps unchanged for MAXLAST consecutive temperature
#define MAXN 250 // only support N <= 250
#define THREADITER 200
using namespace std;
float minTourDist = -1; // The distance of shortest path
int *minTour = NULL; // The shortest path
int N = 0; // Number of cities
float *dist = NULL; // The distance matrix, use (i-1) instead of i
int *currTour = NULL;
int blockNum = 1; // block number
int threadNum = 1; // thread number
int globalIter = -1; // global iteration count
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
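// Bounded random-number helper built on rand_r; like the pthread mutex above it
// appears to be left over from a CPU-threaded version and is unused in the CUDA path below.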
class rand_x {
unsigned int seed;
public:
rand_x(int init) : seed(init) {}
int operator()(int limit) {
int divisor = RAND_MAX/(limit+1);
int retval;
do {
retval = rand_r(&seed) / divisor;
} while (retval > limit);
return retval;
}
};
/* load the data */
void loadFile(char* filename) {
FILE *pf;
pf = fopen(filename, "r");
if (pf == NULL) {
printf("Cannot open the file!\n");
exit(1);
}
char buff[200];
fscanf(pf, "NAME: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nTYPE: TSP%[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nCOMMENT: %[^\n]s", buff);
printf("%s\n", buff);
fscanf(pf, "\nDIMENSION: %d", &N);
printf("The N is: %d\n", N);
fscanf(pf, "\nEDGE_WEIGHT_TYPE: %[^\n]s", buff);
printf("the type is: %s\n", buff);
dist = (float *)malloc(sizeof(float) * N * N);
memset(dist, 0, sizeof(float) * N * N);
if (strcmp(buff, "EUC_2D") == 0) {
fscanf(pf, "\nNODE_COORD_SECTION");
float nodeCoord[MAXN][2] = {};
int nid;
float xx, yy;
for (int i = 0; i < N; ++i) {
fscanf(pf, "\n%d %f %f", &nid, &xx, &yy);
nodeCoord[i][0] = xx;
nodeCoord[i][1] = yy;
}
float xi, yi, xj, yj;
for (int i = 0; i < N; ++i) {
for (int j = i + 1; j < N; ++j) {
xi = nodeCoord[i][0];
yi = nodeCoord[i][1];
xj = nodeCoord[j][0];
yj = nodeCoord[j][1];
dist[i*N + j] = (float)sqrt((xi - xj) * (xi - xj) + (yi - yj) * (yi - yj));
dist[j*N + i] = dist[i*N + j];
}
}
}
else if (strcmp(buff, "EXPLICIT") == 0) {
fscanf(pf, "\nEDGE_WEIGHT_FORMAT: %[^\n]s", buff);
fscanf(pf, "\n%[^\n]s", buff);
char *disps = strstr(buff, "DISPLAY_DATA_TYPE");
if (disps != NULL) {
fscanf(pf, "\nEDGE_WEIGHT_SECTION");
}
float weight;
for (int i = 0; i < N; ++i) {
for (int j = 0; j <= i; ++j) {
fscanf(pf, "%f", &weight);
dist[i*N + j] = weight;
dist[j*N + i] = weight;
}
}
}
return;
}
/* Calculate the length of the tour */
float tourLen(int *tour) {
if (tour == NULL) {
printf("tour not exist!\n");
return -1;
}
float cnt = 0;
for (int i = 0; i < N - 1; ++i) {
cnt += dist[tour[i]*N + tour[i+1]];
}
cnt += dist[tour[N-1]*N + tour[0]];
return cnt;
}
/* the main simulated annealing function */
__global__ void saTSP(int cityCnt, int* globalTour, curandState *randStates, float *dev_dist, float temperature, int relaxiter) {
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
int *tour = &globalTour[thid * cityCnt];
float currLen = 0;
for (int i = 0; i < cityCnt - 1; ++i) {
currLen += dev_dist[tour[i]*cityCnt + tour[i+1]];
}
currLen += dev_dist[tour[cityCnt-1]*cityCnt + tour[0]];
//float temperature = INITEMP;
//float lastLen = currLen;
//int contCnt = 0; // the continuous same length times
int iterCnt = 0;
while (temperature > STOPTEMP) {
temperature *= ALPHA;
iterCnt += 1;
/* stay in the same temperature for RELAX times */
for (int i = 0; i < relaxiter; ++i) {
/* Proposal 1: Block Reverse between p and q */
int p = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
int q = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt + 10)) % cityCnt;
            // An error would occur if p == 0 and q == N-1 (the whole tour would be reversed), so resample p and q
if (abs(p - q) == cityCnt - 1) {
p = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt - 3));
q = (int)(curand_uniform(&(randStates[thid])) * (float)(cityCnt - 2));
}
if (p == q) {
q = (q + 2) % cityCnt;
}
if (p > q) {
int tmp = p;
p = q;
q = tmp;
}
int p1 = (p - 1 + cityCnt) % cityCnt;
int q1 = (q + 1) % cityCnt;
int tp = tour[p], tq = tour[q], tp1 = tour[p1], tq1 = tour[q1];
float delta = dev_dist[tp*cityCnt + tq1] + dev_dist[tp1*cityCnt + tq] - dev_dist[tp*cityCnt + tp1] - dev_dist[tq*cityCnt + tq1];
/* whether to accept the change */
if ((delta < 0) || ((delta > 0) &&
(expf(-delta/temperature) > curand_uniform(&(randStates[thid]))))) {
currLen = currLen + delta;
int mid = (q - p) >> 1;
int tmp;
for (int k = 0; k <= mid; ++k) {
tmp = tour[p+k];
tour[p+k] = tour[q-k];
tour[q-k] = tmp;
}
//currLen = tourLen(tour);
}
}
/*
if ((currLen - lastLen < 1e-2) && (currLen - lastLen > -1e-2)) {
contCnt += 1;
if (contCnt >= MAXLAST) {
//printf("unchanged for %d times1!\n", contCnt);
break;
}
}
else
contCnt = 0;
lastLen = currLen;
*/
}
return;
}
__global__ void setup_kernel_randomness(curandState * state, unsigned long seed)
{
int s_id = (blockIdx.x*blockDim.x) + threadIdx.x;
curand_init(seed*s_id, s_id, 0, &state[s_id]);
}
int main(int argc, char **argv) {
cudaError_t err = cudaSuccess;
float *dev_dist;
if (argc < 2) {
printf("Usage: ./cuda_tsp <filename> <blockNum> <threadNum>\n");
return 0;
}
else {
loadFile(argv[1]);
err = cudaMalloc((void **)&dev_dist, sizeof(float) * N * N);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed\n");
exit(1);
}
cudaMemcpy((void *)dev_dist, dist, sizeof(float) * N * N, cudaMemcpyHostToDevice);
}
if (argc == 4) {
blockNum = atoi(argv[2]);
threadNum = atoi(argv[3]);
}
printf("blockNum is: %d, threadNum is: %d\n", blockNum, threadNum);
struct timeval start, stop;
gettimeofday(&start, NULL);
srandom(time(0));
int *dev_currTour; // currTour on device;
int itersCnt = blockNum * threadNum; // total iterations
err = cudaMalloc((void **)&dev_currTour, sizeof(int)*N*itersCnt);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() failed\n");
exit(1);
}
srand(time(0));
currTour = (int *)malloc(sizeof(int) * N * itersCnt);
for (int i = 0; i < itersCnt; ++i) {
for (int j = 0; j < N; ++j) {
currTour[i*N + j] = j;
}
random_shuffle(currTour+i*N, currTour+(i+1)*N);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d before: %f\n", i, tourLen(currTour + i*N));*/
}
err = cudaMemcpy(dev_currTour, currTour, itersCnt * N * sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc() for dev_currTour failed\n");
exit(1);
}
// allocate random seed for each thread
curandState *devStates;
cudaMalloc((void **)&devStates, itersCnt * sizeof(curandState));
setup_kernel_randomness<<<blockNum, threadNum>>>(devStates, time(0));
cudaDeviceSynchronize();
float currLen = 0;
float temperature = INITEMP;
int contCnt = 0;
float tempstep = pow(ALPHA, THREADITER);
//while (temperature > STOPTEMP) {
//printf("%.06f \n", temperature);
saTSP<<<blockNum, threadNum>>>(N, dev_currTour, devStates, dev_dist, temperature, RELAX);
cudaDeviceSynchronize();
// temperature *= tempstep;
//}
minTour = (int *)malloc(sizeof(int) * N);
memset(currTour, 0, itersCnt * N * sizeof(int));
err = cudaMemcpy(currTour, dev_currTour, itersCnt * N * sizeof(int), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMemcpyc(Device to Host) failed with %d\n", err);
exit(1);
}
/* find the minimal answer */
int minidx = 0;
for (int i = 0; i < itersCnt; ++i) {
currLen = tourLen(&currTour[i * N]);
/*for (int j = 0; j < N; ++j) {
printf("%d ", currTour[i*N + j]);
}
printf("%d after: %f\n", i, currLen);*/
if ((currLen < minTourDist) || (minTourDist < 0)) {
minTourDist = currLen;
minidx = i;
}
}
for (int i = 0; i < N; ++i) {
minTour[i] = currTour[minidx * N + i];
}
gettimeofday(&stop, NULL);
// ------------- Print the result! -----------------
int tottime = stop.tv_sec - start.tv_sec;
int timemin = tottime / 60;
int timesec = tottime % 60;
printf("Total time usage: %d min %d sec. \n", timemin, timesec);
printf("N is %d, The shortest length is: %f\n And the tour is: \n", N, minTourDist);
for (int i = 0; i < N; ++i) {
printf("%d \n", minTour[i]+1);
}
free(dist);
free(minTour);
free(currTour);
return 0;
}
|
6ed914a66d2d461d27f20fd897c5772fe6da26d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/copy.h>
#include <list>
#include <iterator>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
void TestCopyFromConstIterator(void)
{
typedef int T;
std::vector<T> v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
std::vector<int>::const_iterator begin = v.begin();
std::vector<int>::const_iterator end = v.end();
// copy to host_vector
thrust::host_vector<T> h(5, (T) 10);
thrust::host_vector<T>::iterator h_result = thrust::copy(begin, end, h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
thrust::device_vector<T>::iterator d_result = thrust::copy(begin, end, d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_UNITTEST(TestCopyFromConstIterator);
template <class Vector>
void TestCopyMatchingTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector
thrust::host_vector<T> h(5, (T) 10);
typename thrust::host_vector<T>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
typename thrust::device_vector<T>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMatchingTypes);
template <class Vector>
void TestCopyMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector with different type
thrust::host_vector<float> h(5, (float) 10);
typename thrust::host_vector<float>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector with different type
thrust::device_vector<float> d(5, (float) 10);
typename thrust::device_vector<float>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMixedTypes);
void TestCopyVectorBool(void)
{
std::vector<bool> v(3);
v[0] = true; v[1] = false; v[2] = true;
thrust::host_vector<bool> h(3);
thrust::device_vector<bool> d(3);
thrust::copy(v.begin(), v.end(), h.begin());
thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(h[0], true);
ASSERT_EQUAL(h[1], false);
ASSERT_EQUAL(h[2], true);
ASSERT_EQUAL(d[0], true);
ASSERT_EQUAL(d[1], false);
ASSERT_EQUAL(d[2], true);
}
DECLARE_UNITTEST(TestCopyVectorBool);
template <class Vector>
void TestCopyListTo(void)
{
typedef typename Vector::value_type T;
// copy from list to Vector
std::list<T> l;
l.push_back(0);
l.push_back(1);
l.push_back(2);
l.push_back(3);
l.push_back(4);
Vector v(l.size());
typename Vector::iterator v_result = thrust::copy(l.begin(), l.end(), v.begin());
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v_result, v.end());
l.clear();
std::back_insert_iterator< std::list<T> > l_result = thrust::copy(v.begin(), v.end(), std::back_insert_iterator< std::list<T> >(l));
ASSERT_EQUAL(l.size(), 5);
typename std::list<T>::const_iterator iter = l.begin();
ASSERT_EQUAL(*iter, 0); iter++;
ASSERT_EQUAL(*iter, 1); iter++;
ASSERT_EQUAL(*iter, 2); iter++;
ASSERT_EQUAL(*iter, 3); iter++;
ASSERT_EQUAL(*iter, 4); iter++;
}
DECLARE_VECTOR_UNITTEST(TestCopyListTo);
template<typename T>
struct is_even
{
__host__ __device__
bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; }
};
template<typename T>
struct is_true
{
__host__ __device__
bool operator()(T x) { return x ? true : false; }
};
template<typename T>
struct mod_3
{
__host__ __device__
unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; }
};
template <class Vector>
void TestCopyIfSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), dest.begin(), is_even<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(2, dest[1]);
ASSERT_EQUAL(4, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfSimple);
template <typename T>
void TestCopyIf(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIf);
template <class Vector>
void TestCopyIfStencilSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector s(5);
s[0] = 1; s[1] = 1; s[2] = 0; s[3] = 1; s[4] = 0;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), s.begin(), dest.begin(), is_true<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(1, dest[1]);
ASSERT_EQUAL(3, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfStencilSimple);
template <typename T>
void TestCopyIfStencil(const size_t n)
{
thrust::host_vector<T> h_data(n); thrust::sequence(h_data.begin(), h_data.end());
thrust::device_vector<T> d_data(n); thrust::sequence(d_data.begin(), d_data.end());
thrust::host_vector<T> h_stencil = unittest::random_integers<T>(n);
thrust::device_vector<T> d_stencil = unittest::random_integers<T>(n);
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
    h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_even<T>());
    d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
    h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), mod_3<T>());
    d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIfStencil);
#if THRUST_DEVICE_BACKEND == THRUST_DEVICE_BACKEND_CUDA
// do we really want to test this ever?
void TestCopyDeviceThrow(void)
{
typedef int T;
thrust::device_ptr<T> null_device_ptr((int*)0);
bool caught_exception = false;
try
{
thrust::copy(null_device_ptr, null_device_ptr + 1, null_device_ptr);
} // end try
catch(std::runtime_error)
{
caught_exception = true;
// kill the context so it can revive later
hipDeviceReset();
} // end catch
ASSERT_EQUAL(true, caught_exception);
}
DECLARE_UNITTEST(TestCopyDeviceThrow);
#endif
template <typename Vector>
void TestCopyCountingIterator(void)
{
typedef typename Vector::value_type T;
thrust::counting_iterator<T> iter(1);
Vector vec(4);
thrust::copy(iter, iter + 4, vec.begin());
ASSERT_EQUAL(vec[0], 1);
ASSERT_EQUAL(vec[1], 2);
ASSERT_EQUAL(vec[2], 3);
ASSERT_EQUAL(vec[3], 4);
}
DECLARE_VECTOR_UNITTEST(TestCopyCountingIterator);
template <typename Vector>
void TestCopyZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3); v1[0] = 1; v1[1] = 2; v1[2] = 3;
Vector v2(3); v2[0] = 4; v2[1] = 5; v2[2] = 6;
Vector v3(3, T(0));
Vector v4(3, T(0));
thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())),
thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end())),
thrust::make_zip_iterator(thrust::make_tuple(v3.begin(),v4.begin())));
ASSERT_EQUAL(v1, v3);
ASSERT_EQUAL(v2, v4);
};
DECLARE_VECTOR_UNITTEST(TestCopyZipIterator);
template <typename Vector>
void TestCopyConstantIteratorToZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3,T(0));
Vector v2(3,T(0));
thrust::copy(thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)),
thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)) + v1.size(),
thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())));
ASSERT_EQUAL(v1[0], 4);
ASSERT_EQUAL(v1[1], 4);
ASSERT_EQUAL(v1[2], 4);
ASSERT_EQUAL(v2[0], 7);
ASSERT_EQUAL(v2[1], 7);
ASSERT_EQUAL(v2[2], 7);
};
DECLARE_VECTOR_UNITTEST(TestCopyConstantIteratorToZipIterator);
| 6ed914a66d2d461d27f20fd897c5772fe6da26d3.cu | #include <unittest/unittest.h>
#include <thrust/copy.h>
#include <list>
#include <iterator>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/constant_iterator.h>
void TestCopyFromConstIterator(void)
{
typedef int T;
std::vector<T> v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
std::vector<int>::const_iterator begin = v.begin();
std::vector<int>::const_iterator end = v.end();
// copy to host_vector
thrust::host_vector<T> h(5, (T) 10);
thrust::host_vector<T>::iterator h_result = thrust::copy(begin, end, h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
thrust::device_vector<T>::iterator d_result = thrust::copy(begin, end, d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_UNITTEST(TestCopyFromConstIterator);
template <class Vector>
void TestCopyMatchingTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector
thrust::host_vector<T> h(5, (T) 10);
typename thrust::host_vector<T>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector
thrust::device_vector<T> d(5, (T) 10);
typename thrust::device_vector<T>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMatchingTypes);
template <class Vector>
void TestCopyMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
// copy to host_vector with different type
thrust::host_vector<float> h(5, (float) 10);
typename thrust::host_vector<float>::iterator h_result = thrust::copy(v.begin(), v.end(), h.begin());
ASSERT_EQUAL(h[0], 0);
ASSERT_EQUAL(h[1], 1);
ASSERT_EQUAL(h[2], 2);
ASSERT_EQUAL(h[3], 3);
ASSERT_EQUAL(h[4], 4);
ASSERT_EQUAL_QUIET(h_result, h.end());
// copy to device_vector with different type
thrust::device_vector<float> d(5, (float) 10);
typename thrust::device_vector<float>::iterator d_result = thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(d[0], 0);
ASSERT_EQUAL(d[1], 1);
ASSERT_EQUAL(d[2], 2);
ASSERT_EQUAL(d[3], 3);
ASSERT_EQUAL(d[4], 4);
ASSERT_EQUAL_QUIET(d_result, d.end());
}
DECLARE_VECTOR_UNITTEST(TestCopyMixedTypes);
void TestCopyVectorBool(void)
{
std::vector<bool> v(3);
v[0] = true; v[1] = false; v[2] = true;
thrust::host_vector<bool> h(3);
thrust::device_vector<bool> d(3);
thrust::copy(v.begin(), v.end(), h.begin());
thrust::copy(v.begin(), v.end(), d.begin());
ASSERT_EQUAL(h[0], true);
ASSERT_EQUAL(h[1], false);
ASSERT_EQUAL(h[2], true);
ASSERT_EQUAL(d[0], true);
ASSERT_EQUAL(d[1], false);
ASSERT_EQUAL(d[2], true);
}
DECLARE_UNITTEST(TestCopyVectorBool);
template <class Vector>
void TestCopyListTo(void)
{
typedef typename Vector::value_type T;
// copy from list to Vector
std::list<T> l;
l.push_back(0);
l.push_back(1);
l.push_back(2);
l.push_back(3);
l.push_back(4);
Vector v(l.size());
typename Vector::iterator v_result = thrust::copy(l.begin(), l.end(), v.begin());
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 2);
ASSERT_EQUAL(v[3], 3);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v_result, v.end());
l.clear();
std::back_insert_iterator< std::list<T> > l_result = thrust::copy(v.begin(), v.end(), std::back_insert_iterator< std::list<T> >(l));
ASSERT_EQUAL(l.size(), 5);
typename std::list<T>::const_iterator iter = l.begin();
ASSERT_EQUAL(*iter, 0); iter++;
ASSERT_EQUAL(*iter, 1); iter++;
ASSERT_EQUAL(*iter, 2); iter++;
ASSERT_EQUAL(*iter, 3); iter++;
ASSERT_EQUAL(*iter, 4); iter++;
}
DECLARE_VECTOR_UNITTEST(TestCopyListTo);
template<typename T>
struct is_even
{
__host__ __device__
bool operator()(T x) { return (static_cast<unsigned int>(x) & 1) == 0; }
};
template<typename T>
struct is_true
{
__host__ __device__
bool operator()(T x) { return x ? true : false; }
};
template<typename T>
struct mod_3
{
__host__ __device__
unsigned int operator()(T x) { return static_cast<unsigned int>(x) % 3; }
};
template <class Vector>
void TestCopyIfSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), dest.begin(), is_even<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(2, dest[1]);
ASSERT_EQUAL(4, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfSimple);
template <typename T>
void TestCopyIf(const size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), is_even<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_result.begin(), mod_3<T>());
d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIf);
template <class Vector>
void TestCopyIfStencilSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
Vector s(5);
s[0] = 1; s[1] = 1; s[2] = 0; s[3] = 1; s[4] = 0;
Vector dest(3);
typename Vector::iterator dest_end = thrust::copy_if(v.begin(), v.end(), s.begin(), dest.begin(), is_true<T>());
ASSERT_EQUAL(0, dest[0]);
ASSERT_EQUAL(1, dest[1]);
ASSERT_EQUAL(3, dest[2]);
ASSERT_EQUAL_QUIET(dest.end(), dest_end);
}
DECLARE_VECTOR_UNITTEST(TestCopyIfStencilSimple);
template <typename T>
void TestCopyIfStencil(const size_t n)
{
thrust::host_vector<T> h_data(n); thrust::sequence(h_data.begin(), h_data.end());
thrust::device_vector<T> d_data(n); thrust::sequence(d_data.begin(), d_data.end());
thrust::host_vector<T> h_stencil = unittest::random_integers<T>(n);
thrust::device_vector<T> d_stencil = unittest::random_integers<T>(n);
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
typename thrust::host_vector<T>::iterator h_new_end;
typename thrust::device_vector<T>::iterator d_new_end;
// test with Predicate that returns a bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
    h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), is_even<T>());
    d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), is_even<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
// test with Predicate that returns a non-bool
{
thrust::host_vector<T> h_result(n);
thrust::device_vector<T> d_result(n);
    h_new_end = thrust::copy_if(h_data.begin(), h_data.end(), h_stencil.begin(), h_result.begin(), mod_3<T>());
    d_new_end = thrust::copy_if(d_data.begin(), d_data.end(), d_stencil.begin(), d_result.begin(), mod_3<T>());
h_result.resize(h_new_end - h_result.begin());
d_result.resize(d_new_end - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
DECLARE_VARIABLE_UNITTEST(TestCopyIfStencil);
#if THRUST_DEVICE_BACKEND == THRUST_DEVICE_BACKEND_CUDA
// do we really want to test this ever?
void TestCopyDeviceThrow(void)
{
typedef int T;
thrust::device_ptr<T> null_device_ptr((int*)0);
bool caught_exception = false;
try
{
thrust::copy(null_device_ptr, null_device_ptr + 1, null_device_ptr);
} // end try
catch(std::runtime_error)
{
caught_exception = true;
// kill the context so it can revive later
cudaThreadExit();
} // end catch
ASSERT_EQUAL(true, caught_exception);
}
DECLARE_UNITTEST(TestCopyDeviceThrow);
#endif
template <typename Vector>
void TestCopyCountingIterator(void)
{
typedef typename Vector::value_type T;
thrust::counting_iterator<T> iter(1);
Vector vec(4);
thrust::copy(iter, iter + 4, vec.begin());
ASSERT_EQUAL(vec[0], 1);
ASSERT_EQUAL(vec[1], 2);
ASSERT_EQUAL(vec[2], 3);
ASSERT_EQUAL(vec[3], 4);
}
DECLARE_VECTOR_UNITTEST(TestCopyCountingIterator);
template <typename Vector>
void TestCopyZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3); v1[0] = 1; v1[1] = 2; v1[2] = 3;
Vector v2(3); v2[0] = 4; v2[1] = 5; v2[2] = 6;
Vector v3(3, T(0));
Vector v4(3, T(0));
thrust::copy(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())),
thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end())),
thrust::make_zip_iterator(thrust::make_tuple(v3.begin(),v4.begin())));
ASSERT_EQUAL(v1, v3);
ASSERT_EQUAL(v2, v4);
};
DECLARE_VECTOR_UNITTEST(TestCopyZipIterator);
template <typename Vector>
void TestCopyConstantIteratorToZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3,T(0));
Vector v2(3,T(0));
thrust::copy(thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)),
thrust::make_constant_iterator(thrust::tuple<T,T>(4,7)) + v1.size(),
thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin())));
ASSERT_EQUAL(v1[0], 4);
ASSERT_EQUAL(v1[1], 4);
ASSERT_EQUAL(v1[2], 4);
ASSERT_EQUAL(v2[0], 7);
ASSERT_EQUAL(v2[1], 7);
ASSERT_EQUAL(v2[2], 7);
};
DECLARE_VECTOR_UNITTEST(TestCopyConstantIteratorToZipIterator);
|
6eb80874e42c8ecccaccf294a701c54b924e0e35.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate graident solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>   // gettimeofday() used by mclock() below
/* Using updated (v2) interfaces to cublas and cusparse */
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include <rocblas.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper function CUDA error checking and intialization
__global__ void
vectorAXPY(float *A, float *B, float *C, int numElements, float alpham1x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + alpham1x*B[i];
}
}
__global__ void
vectorSCAL(float *A, float x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
A[i] = A[i]*x;
}
}
__global__ void
vectorCOPY(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
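// CSR sparse matrix-vector product y = A*x: one thread per row accumulates
// values[j] * x[colind[j]] over that row's nonzeros (rowptr[i] .. rowptr[i+1]).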
__global__ void
vectorMatVec(int *rowptr, int *colind, float *y, float *values, float *x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i+1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}
}
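// --- Illustrative sketch, not part of the original sample ------------------
// The hipblasSdot calls below (marked "REPLACE THIS FUNCTION (II)") compute dot
// products; one possible hand-written replacement is a block-level tree
// reduction combined with atomicAdd, as sketched here. The name vectorDOT and
// the zero-initialised device accumulator `result` are assumptions introduced
// for illustration (blockDim.x is assumed to be a power of two, at most 256).
__global__ void
vectorDOT(const float *A, const float *B, float *result, int numElements)
{
    __shared__ float cache[256];                 // one partial product per thread
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int tid = threadIdx.x;

    cache[tid] = (i < numElements) ? A[i] * B[i] : 0.0f;
    __syncthreads();

    for (int s = blockDim.x / 2; s > 0; s >>= 1) // tree reduction within the block
    {
        if (tid < s)
            cache[tid] += cache[tid + s];
        __syncthreads();
    }

    if (tid == 0)
        atomicAdd(result, cache[0]);             // combine the per-block partial sums
}
// Usage sketch: zero the accumulator with hipMemset(result, 0, sizeof(float)),
// launch vectorDOT, then copy the single float back where the CG loop needs it.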
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
    hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
    cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
            cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THIS FUNCTION (I)
            cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
else
{
            cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THIS FUNCTION (I)
}
        hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THIS FUNCTION (III)
        cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THIS FUNCTION (II)
        a = r1 / dot;
        cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THIS FUNCTION (I)
        na = -a;
        cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THIS FUNCTION (I)
        r0 = r1;
        cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THIS FUNCTION (II)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, alpham1; //beta
// This will pick the best possible CUDA capable device
hipDeviceProp_t deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
hipDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
hipblasHandle_t cublasHandle = 0;
hipblasStatus_t cublasStatus;
cublasStatus = hipblasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(hipMalloc((void **)&d_Ax, N*sizeof(float)));
hipMemcpy(d_col, J, nz*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_row, I, (N+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_val, val, nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_r, rhs, N*sizeof(float), hipMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
//beta = 0.0;
r0 = 0.;
double t_start = mclock();
int threadsPerBlock = 256;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// sparse matrix vector product: d_Ax = A * d_x
//hipsparseScsrmv(cusparseHandle,HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THE FUNCTION (TASK-II)
hipLaunchKernelGGL(( vectorMatVec), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_row, d_col, d_Ax, d_val, d_x, N);
//axpy: d_r = d_r + alpham1 * d_Ax
//hipblasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK-I)
hipLaunchKernelGGL(( vectorAXPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, d_r, N, alpham1);
//err = hipGetLastError();
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK-III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = hipblasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
hipLaunchKernelGGL(( vectorSCAL), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, b, N);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = hipblasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
hipLaunchKernelGGL(( vectorAXPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_p, d_r, d_p, N, alpha);
}
else
{
//cpy: d_p = d_r
//cublasStatus = hipblasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
//threadsPerBlock = 256;
//blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( vectorCOPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_p, N);
}
//sparse matrix-vector product: d_Ax = A * d_p
//hipsparseScsrmv(cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THE FUNCTION (TASK-II)
hipLaunchKernelGGL(( vectorMatVec), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_row, d_col, d_Ax, d_val, d_p, N);
cublasStatus = hipblasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THE FUNCTION (TASK-III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = hipblasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THE FUNCTION (TASK-I)
hipLaunchKernelGGL(( vectorAXPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_x, d_p, d_x, N, a);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = hipblasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK-I)
hipLaunchKernelGGL(( vectorAXPY), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_r, d_Ax, d_r, N, na);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = hipblasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK-III)
hipDeviceSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
hipMemcpy(x, d_x, N*sizeof(float), hipMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
hipsparseDestroy(cusparseHandle);
hipblasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
hipFree(d_col);
hipFree(d_row);
hipFree(d_val);
hipFree(d_x);
hipFree(d_r);
hipFree(d_p);
hipFree(d_Ax);
hipDeviceReset();
printf("TIME OF CGS_TODO = %f\n", mclock() - t_start);
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
| 6eb80874e42c8ecccaccf294a701c54b924e0e35.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample implements a conjugate gradient solver on GPU
* using CUBLAS and CUSPARSE
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
/* Using updated (v2) interfaces to cublas and cusparse */
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include <cublas_v2.h>
// Utilities and system includes
#include <helper_functions.h> // helper for shared functions common to CUDA SDK samples
#include <helper_cuda.h> // helper functions for CUDA error checking and initialization
__global__ void
vectorAXPY(float *A, float *B, float *C, int numElements, float alpham1x)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + alpham1x*B[i];
}
}
__global__ void
vectorSCAL(float *A, float x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
A[i] = A[i]*x;
}
}
__global__ void
vectorCOPY(float *A, float *B, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
B[i] = A[i];
}
}
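// CSR sparse matrix-vector product y = A*x: one thread per row of A.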
__global__ void
vectorMatVec(int *rowptr, int *colind, float *y, float *values, float *x, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
float sub = 0.0;
for (int j = rowptr[i]; j < rowptr[i+1]; j++)
sub += values[j] * x[colind[j]];
y[i] = sub;
}
}
const char *sSDKname = "conjugateGradient";
double mclock(){
struct timeval tp;
double sec,usec;
gettimeofday( &tp, NULL );
sec = double( tp.tv_sec );
usec = double( tp.tv_usec )/1E6;
return sec + usec;
}
#define dot_BS 32
#define kernel_BS 32
/* genTridiag: generate a random tridiagonal symmetric matrix */
void genTridiag(int *I, int *J, float *val, int N, int nz)
{
double RAND_MAXi = 1e6;
double val_r = 12.345 * 1e5;
I[0] = 0, J[0] = 0, J[1] = 1;
val[0] = (float)val_r/RAND_MAXi + 10.0f;
val[1] = (float)val_r/RAND_MAXi;
int start;
for (int i = 1; i < N; i++)
{
if (i > 1)
{
I[i] = I[i-1]+3;
}
else
{
I[1] = 2;
}
start = (i-1)*3 + 2;
J[start] = i - 1;
J[start+1] = i;
if (i < N-1)
{
J[start+2] = i + 1;
}
val[start] = val[start-1];
val[start+1] = (float)val_r/RAND_MAXi + 10.0f;
if (i < N-1)
{
val[start+2] = (float)val_r/RAND_MAXi;
}
}
I[N] = nz;
}
void cgs_basic(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, beta, alpham1;
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
beta = 0.0;
r0 = 0.;
double t_start = mclock();
cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax);
cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (I)
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (II)
k = 1;
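// Conjugate gradient iteration (descriptive note): b = r1/r0 updates the
// search direction p = r + b*p, d_Ax = A*p, a = r1/(p.Ap) is the step length,
// x += a*p advances the solution, r -= a*A*p updates the residual, and
// r1 = r.r is the squared residual norm compared against tol*tol.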
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THE FUNCTION (I)
cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (I)
}
else
{
cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (I)
}
cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THE FUNCTION (III)
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THE FUNCTION (II)
a = r1 / dot;
cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THE FUNCTION (I)
na = -a;
cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (I)
r0 = r1;
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (II)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
printf("TIME OF CGS_BASIC = %f\n", mclock() - t_start);
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
void cgs_TODO(int argc, char **argv, int N, int M){
//int M = 0, N = 0,
int nz = 0, *I = NULL, *J = NULL;
float *val = NULL;
const float tol = 1e-10f;
const int max_iter = 1000;
float *x;
float *rhs;
float a, b, na, r0, r1;
int *d_col, *d_row;
float *d_val, *d_x, dot;
float *d_r, *d_p, *d_Ax;
int k;
float alpha, alpham1; //beta
// This will pick the best possible CUDA capable device
cudaDeviceProp deviceProp;
int devID = findCudaDevice(argc, (const char **)argv);
if (devID < 0)
{
printf("exiting...\n");
exit(EXIT_SUCCESS);
}
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// Statistics about the GPU device
printf("> GPU device has %d Multi-Processors, SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
int version = (deviceProp.major * 0x10 + deviceProp.minor);
if (version < 0x11)
{
printf("%s: requires a minimum CUDA compute 1.1 capability\n", sSDKname);
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
/* Generate a random tridiagonal symmetric matrix in CSR format */
//M = N = 32*64;//10; //1048576;
printf("M = %d, N = %d\n", M, N);
nz = (N-2)*3 + 4;
I = (int *)malloc(sizeof(int)*(N+1));
J = (int *)malloc(sizeof(int)*nz);
val = (float *)malloc(sizeof(float)*nz);
genTridiag(I, J, val, N, nz);
/*
for (int i = 0; i < nz; i++){
printf("%d\t", J[i]);
}
printf("\n");
for (int i = 0; i < nz; i++){
printf("%2f\t", val[i]);
}
*/
x = (float *)malloc(sizeof(float)*N);
rhs = (float *)malloc(sizeof(float)*N);
for (int i = 0; i < N; i++)
{
rhs[i] = 1.0;
x[i] = 0.0;
}
/* Get handle to the CUBLAS context */
cublasHandle_t cublasHandle = 0;
cublasStatus_t cublasStatus;
cublasStatus = cublasCreate(&cublasHandle);
checkCudaErrors(cublasStatus);
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
checkCudaErrors(cusparseStatus);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
checkCudaErrors(cusparseStatus);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (N+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_x, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_r, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_p, N*sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&d_Ax, N*sizeof(float)));
cudaMemcpy(d_col, J, nz*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, I, (N+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_val, val, nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_r, rhs, N*sizeof(float), cudaMemcpyHostToDevice);
alpha = 1.0;
alpham1 = -1.0;
//beta = 0.0;
r0 = 0.;
double t_start = mclock();
int threadsPerBlock = 256;
int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
// sparse matrix vector product: d_Ax = A * d_x
//cusparseScsrmv(cusparseHandle,CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_x, &beta, d_Ax); // REPLACE THE FUNCTION (TASK-II)
vectorMatVec<<<blocksPerGrid, threadsPerBlock>>>(d_row, d_col, d_Ax, d_val, d_x, N);
//axpy: d_r = d_r + alpham1 * d_Ax
//cublasSaxpy(cublasHandle, N, &alpham1, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK-I)
vectorAXPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, d_r, N, alpham1);
//err = cudaGetLastError();
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK-III)
k = 1;
while (r1 > tol*tol && k <= max_iter)
{
if (k > 1)
{
b = r1 / r0;
//scal: d_p = b * d_p
//cublasStatus = cublasSscal(cublasHandle, N, &b, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
vectorSCAL<<<blocksPerGrid, threadsPerBlock>>>(d_p, b, N);
//axpy: d_p = d_p + alpha * d_r
//cublasStatus = cublasSaxpy(cublasHandle, N, &alpha, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
vectorAXPY<<<blocksPerGrid, threadsPerBlock>>>(d_p, d_r, d_p, N, alpha);
}
else
{
//cpy: d_p = d_r
//cublasStatus = cublasScopy(cublasHandle, N, d_r, 1, d_p, 1); // REPLACE THE FUNCTION (TASK-I)
//threadsPerBlock = 256;
//blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
vectorCOPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_p, N);
}
//sparse matrix-vector product: d_Ax = A * d_p
//cusparseScsrmv(cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nz, &alpha, descr, d_val, d_row, d_col, d_p, &beta, d_Ax); // REPLACE THE FUNCTION (TASK-II)
vectorMatVec<<<blocksPerGrid, threadsPerBlock>>>(d_row, d_col, d_Ax, d_val, d_p, N);
cublasStatus = cublasSdot(cublasHandle, N, d_p, 1, d_Ax, 1, &dot); // REPLACE THE FUNCTION (TASK-III)
a = r1 / dot;
//axpy: d_x = d_x + a*d_p
//cublasStatus = cublasSaxpy(cublasHandle, N, &a, d_p, 1, d_x, 1); // REPLACE THE FUNCTION (TASK-I)
vectorAXPY<<<blocksPerGrid, threadsPerBlock>>>(d_x, d_p, d_x, N, a);
na = -a;
//axpy: d_r = d_r + na * d_Ax
//cublasStatus = cublasSaxpy(cublasHandle, N, &na, d_Ax, 1, d_r, 1); // REPLACE THE FUNCTION (TASK-I)
vectorAXPY<<<blocksPerGrid, threadsPerBlock>>>(d_r, d_Ax, d_r, N, na);
r0 = r1;
//dot: r1 = d_r * d_r
cublasStatus = cublasSdot(cublasHandle, N, d_r, 1, d_r, 1, &r1); // REPLACE THE FUNCTION (TASK-III)
cudaThreadSynchronize();
printf("iteration = %3d, residual = %e\n", k, sqrt(r1));
k++;
}
cudaMemcpy(x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
float rsum, diff, err = 0.0;
for (int i = 0; i < N; i++)
{
rsum = 0.0;
for (int j = I[i]; j < I[i+1]; j++)
{
rsum += val[j]*x[J[j]];
}
diff = fabs(rsum - rhs[i]);
if (diff > err)
{
err = diff;
}
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
free(I);
free(J);
free(val);
free(x);
free(rhs);
cudaFree(d_col);
cudaFree(d_row);
cudaFree(d_val);
cudaFree(d_x);
cudaFree(d_r);
cudaFree(d_p);
cudaFree(d_Ax);
cudaDeviceReset();
printf("TIME OF CGS_TODO = %f\n", mclock() - t_start);
printf("Test Summary: Error amount = %e\n", err);
//exit((k <= max_iter) ? 0 : 1);
}
int main(int argc, char **argv)
{
//int N = 1e6;//1 << 20;
//int N = 256 * (1<<10) -10 ; //1e6;//1 << 20;
int N = 1e5;
int M = N;
cgs_basic(argc, argv, N, M);
cgs_TODO(argc, argv, N, M);
}
|
9644d7ffe807eb2f7fce6b2da4e06b16e17b073f.hip | // !!! This is a file automatically generated by hipify!!!
#include "gtest/gtest.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "../gpu/arima/arima.h"
#include "cuda_utils2.h"
TEST(ARIMA, differencing) {
const int length = 10;
thrust::device_vector<float> data(length);
for (auto i = 0; i < length; ++i) data[i] = float(i / 2);
thrust::device_vector<float> differenced_data(length);
h2o4gpu::ARIMAModel<float>::Difference(
thrust::raw_pointer_cast(differenced_data.data()),
thrust::raw_pointer_cast(data.data()), length);
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_differenced_data = differenced_data;
ASSERT_FLOAT_EQ(0, h_differenced_data[0]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[1]);
ASSERT_FLOAT_EQ(0, h_differenced_data[2]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[3]);
ASSERT_FLOAT_EQ(0, h_differenced_data[4]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[5]);
ASSERT_FLOAT_EQ(0, h_differenced_data[6]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[7]);
ASSERT_FLOAT_EQ(0, h_differenced_data[8]);
ASSERT_TRUE(std::isnan(h_differenced_data[9]));
}
TEST(ARIMA, ts_data_to_matrix) {
const int length = 7;
const int depth = 3;
const int lda = 6;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i);
thrust::device_vector<float> A(depth * lda, -1.0);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(A.data()),
depth, lda, length);
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.0f, h_A[0]);
ASSERT_FLOAT_EQ(1.0f, h_A[1]);
ASSERT_FLOAT_EQ(2.0f, h_A[2]);
ASSERT_FLOAT_EQ(3.0f, h_A[3]);
ASSERT_FLOAT_EQ(4.0f, h_A[4]);
ASSERT_FLOAT_EQ(-1.0f, h_A[5]);
ASSERT_FLOAT_EQ(1.0f, h_A[6]);
ASSERT_FLOAT_EQ(2.0f, h_A[7]);
ASSERT_FLOAT_EQ(3.0f, h_A[8]);
ASSERT_FLOAT_EQ(4.0f, h_A[9]);
ASSERT_FLOAT_EQ(5.0f, h_A[10]);
ASSERT_FLOAT_EQ(-1.0f, h_A[11]);
ASSERT_FLOAT_EQ(2.0f, h_A[12]);
ASSERT_FLOAT_EQ(3.0f, h_A[13]);
ASSERT_FLOAT_EQ(4.0f, h_A[14]);
ASSERT_FLOAT_EQ(5.0f, h_A[15]);
ASSERT_FLOAT_EQ(6.0f, h_A[16]);
ASSERT_FLOAT_EQ(-1.0f, h_A[17]);
}
TEST(ARIMA, double_ts_data_to_matrix1) {
const int length = 7;
const int a_depth = 2;
const int b_depth = 3;
const int lda = 10;
thrust::device_vector<float> ts_a(length);
for (auto i = 0; i < length; ++i) ts_a[i] = float(i);
thrust::device_vector<float> ts_b(length);
for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000);
thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()),
thrust::raw_pointer_cast(ts_b.data()),
thrust::raw_pointer_cast(A.data()),
a_depth, b_depth, lda, length);
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.000000, h_A[0]);
ASSERT_FLOAT_EQ(1.000000, h_A[1]);
ASSERT_FLOAT_EQ(2.000000, h_A[2]);
ASSERT_FLOAT_EQ(3.000000, h_A[3]);
ASSERT_FLOAT_EQ(4.000000, h_A[4]);
ASSERT_FLOAT_EQ(5.000000, h_A[5]);
ASSERT_TRUE(std::isnan(h_A[6]));
ASSERT_TRUE(std::isnan(h_A[7]));
ASSERT_TRUE(std::isnan(h_A[8]));
ASSERT_TRUE(std::isnan(h_A[9]));
ASSERT_FLOAT_EQ(1.000000, h_A[10]);
ASSERT_FLOAT_EQ(2.000000, h_A[11]);
ASSERT_FLOAT_EQ(3.000000, h_A[12]);
ASSERT_FLOAT_EQ(4.000000, h_A[13]);
ASSERT_FLOAT_EQ(5.000000, h_A[14]);
ASSERT_FLOAT_EQ(6.000000, h_A[15]);
ASSERT_TRUE(std::isnan(h_A[16]));
ASSERT_TRUE(std::isnan(h_A[17]));
ASSERT_TRUE(std::isnan(h_A[18]));
ASSERT_TRUE(std::isnan(h_A[19]));
ASSERT_FLOAT_EQ(1000.000000, h_A[20]);
ASSERT_FLOAT_EQ(1001.000000, h_A[21]);
ASSERT_FLOAT_EQ(1002.000000, h_A[22]);
ASSERT_FLOAT_EQ(1003.000000, h_A[23]);
ASSERT_FLOAT_EQ(1004.000000, h_A[24]);
ASSERT_TRUE(std::isnan(h_A[25]));
ASSERT_TRUE(std::isnan(h_A[26]));
ASSERT_TRUE(std::isnan(h_A[27]));
ASSERT_TRUE(std::isnan(h_A[28]));
ASSERT_TRUE(std::isnan(h_A[29]));
ASSERT_FLOAT_EQ(1001.000000, h_A[30]);
ASSERT_FLOAT_EQ(1002.000000, h_A[31]);
ASSERT_FLOAT_EQ(1003.000000, h_A[32]);
ASSERT_FLOAT_EQ(1004.000000, h_A[33]);
ASSERT_FLOAT_EQ(1005.000000, h_A[34]);
ASSERT_TRUE(std::isnan(h_A[35]));
ASSERT_TRUE(std::isnan(h_A[36]));
ASSERT_TRUE(std::isnan(h_A[37]));
ASSERT_TRUE(std::isnan(h_A[38]));
ASSERT_TRUE(std::isnan(h_A[39]));
ASSERT_FLOAT_EQ(1002.000000, h_A[40]);
ASSERT_FLOAT_EQ(1003.000000, h_A[41]);
ASSERT_FLOAT_EQ(1004.000000, h_A[42]);
ASSERT_FLOAT_EQ(1005.000000, h_A[43]);
ASSERT_FLOAT_EQ(1006.000000, h_A[44]);
ASSERT_TRUE(std::isnan(h_A[45]));
ASSERT_TRUE(std::isnan(h_A[46]));
ASSERT_TRUE(std::isnan(h_A[47]));
ASSERT_TRUE(std::isnan(h_A[48]));
ASSERT_TRUE(std::isnan(h_A[49]));
}
TEST(ARIMA, double_ts_data_to_matrix2) {
const int length = 7;
const int a_depth = 2;
const int b_depth = 3;
const int lda = 5;
thrust::device_vector<float> ts_a(length);
for (auto i = 0; i < length; ++i) ts_a[i] = float(i);
thrust::device_vector<float> ts_b(length);
for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000);
thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()),
thrust::raw_pointer_cast(ts_b.data()),
thrust::raw_pointer_cast(A.data()),
a_depth, b_depth, lda, length);
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.000000, h_A[0]);
ASSERT_FLOAT_EQ(1.000000, h_A[1]);
ASSERT_FLOAT_EQ(2.000000, h_A[2]);
ASSERT_FLOAT_EQ(3.000000, h_A[3]);
ASSERT_FLOAT_EQ(4.000000, h_A[4]);
ASSERT_FLOAT_EQ(1.000000, h_A[5]);
ASSERT_FLOAT_EQ(2.000000, h_A[6]);
ASSERT_FLOAT_EQ(3.000000, h_A[7]);
ASSERT_FLOAT_EQ(4.000000, h_A[8]);
ASSERT_FLOAT_EQ(5.000000, h_A[9]);
ASSERT_FLOAT_EQ(1000.000000, h_A[10]);
ASSERT_FLOAT_EQ(1001.000000, h_A[11]);
ASSERT_FLOAT_EQ(1002.000000, h_A[12]);
ASSERT_FLOAT_EQ(1003.000000, h_A[13]);
ASSERT_FLOAT_EQ(1004.000000, h_A[14]);
ASSERT_FLOAT_EQ(1001.000000, h_A[15]);
ASSERT_FLOAT_EQ(1002.000000, h_A[16]);
ASSERT_FLOAT_EQ(1003.000000, h_A[17]);
ASSERT_FLOAT_EQ(1004.000000, h_A[18]);
ASSERT_FLOAT_EQ(1005.000000, h_A[19]);
ASSERT_FLOAT_EQ(1002.000000, h_A[20]);
ASSERT_FLOAT_EQ(1003.000000, h_A[21]);
ASSERT_FLOAT_EQ(1004.000000, h_A[22]);
ASSERT_FLOAT_EQ(1005.000000, h_A[23]);
ASSERT_FLOAT_EQ(1006.000000, h_A[24]);
}
TEST(ARIMA, applyAR) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i);
const int p = 2;
thrust::device_vector<float> phi(p);
phi[0] = 1.0;
phi[1] = 0.5;
thrust::device_vector<float> res(length * p, 0);
h2o4gpu::ARIMAModel<float>::Apply(thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(phi.data()), p,
nullptr, nullptr, 0, length);
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(-2, h_res[0]);
ASSERT_FLOAT_EQ(-2.5, h_res[1]);
ASSERT_FLOAT_EQ(-3, h_res[2]);
ASSERT_FLOAT_EQ(-3.5, h_res[3]);
ASSERT_FLOAT_EQ(-4, h_res[4]);
ASSERT_FLOAT_EQ(-4.5, h_res[5]);
ASSERT_FLOAT_EQ(-5, h_res[6]);
ASSERT_FLOAT_EQ(-5.5, h_res[7]);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, applyMA) {
const int length = 10;
thrust::device_vector<float> last_residual(length);
thrust::device_vector<float> ts_data(length, 0);
for (auto i = 0; i < length; ++i) last_residual[i] = float(i % 3);
const int q = 3;
thrust::device_vector<float> theta(q);
theta[0] = 1.0;
theta[1] = -0.5;
theta[2] = 0.1;
thrust::device_vector<float> res(length, 0);
h2o4gpu::ARIMAModel<float>::Apply(
thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()), nullptr, 0,
thrust::raw_pointer_cast(last_residual.data()),
thrust::raw_pointer_cast(theta.data()), q, length);
OK(hipGetLastError());
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(0.0, h_res[0]);
ASSERT_FLOAT_EQ(-2.1, h_res[1]);
ASSERT_FLOAT_EQ(0.3, h_res[2]);
ASSERT_FLOAT_EQ(0.0, h_res[3]);
ASSERT_FLOAT_EQ(-2.1, h_res[4]);
ASSERT_FLOAT_EQ(0.3, h_res[5]);
ASSERT_FLOAT_EQ(0.0, h_res[6]);
ASSERT_FLOAT_EQ(0.0, h_res[7]);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, applyARMA) {
const int length = 10;
thrust::device_vector<float> last_residual(length);
thrust::device_vector<float> ts_data(length, 0);
for (auto i = 0; i < length; ++i) {
ts_data[i] = float(i % 4);
last_residual[i] = float(i % 3);
}
const int p = 2;
thrust::device_vector<float> phi(p);
phi[0] = 0.8;
phi[1] = -0.1;
const int q = 3;
thrust::device_vector<float> theta(q);
theta[0] = 1.0;
theta[1] = -0.5;
theta[2] = 0.1;
thrust::device_vector<float> res(length, 0);
h2o4gpu::ARIMAModel<float>::Apply(
thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(phi.data()), p,
thrust::raw_pointer_cast(last_residual.data()),
thrust::raw_pointer_cast(theta.data()), q, length);
OK(hipGetLastError());
OK(hipDeviceSynchronize());
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(-0.6, h_res[0]);
ASSERT_FLOAT_EQ(-2.4, h_res[1]);
ASSERT_NEAR(-0.1, h_res[2], 1e-6);
ASSERT_FLOAT_EQ(3.1, h_res[3]);
ASSERT_FLOAT_EQ(-2.7, h_res[4]);
ASSERT_NEAR(0.0, h_res[5], 1e-7);
ASSERT_FLOAT_EQ(-0.4, h_res[6]);
ASSERT_NEAR(0.0, h_res[7], 1e-7);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, d_0_p_2_q_0) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3);
h2o4gpu::ARIMAModel<float> model(2, 0, 0, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(0.34482756, model.Phi()[0]);
ASSERT_FLOAT_EQ(0.13793102, model.Phi()[1]);
}
TEST(ARIMA, d_0_p_0_q_2_iter_1) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3);
h2o4gpu::ARIMAModel<float> model(0, 0, 2, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(0.34482756f, model.Theta()[0]);
ASSERT_FLOAT_EQ(0.13793102f, model.Theta()[1]);
}
TEST(ARIMA, d_0_p_2_q_2_iter_1) {
const int length = 7;
thrust::host_vector<float> h_ts_data(length);
for (auto i = 0; i < length; ++i)
h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1);
thrust::host_vector<float> ts_data = h_ts_data;
h2o4gpu::ARIMAModel<float> model(2, 0, 2, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]);
ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]);
ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]);
ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]);
}
// TEST(ARIMA, d_0_p_2_q_2_iter_2) {
// const int length = 7;
// thrust::host_vector<float> h_ts_data(length);
// for (auto i = 0; i < length; ++i)
// h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1);
// thrust::host_vector<float> ts_data = h_ts_data;
// h2o4gpu::ARIMAModel<float> model(2, 0, 2, length);
// model.Fit(thrust::raw_pointer_cast(ts_data.data()), 2);
// ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]);
// ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]);
// ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]);
// ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]);
// }
TEST(ARIMA, d_1_p_1_q_1_iter_1) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i + i % 3);
h2o4gpu::ARIMAModel<float> model(1, 1, 1, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(-1.0369391f, model.Phi()[0]);
ASSERT_FLOAT_EQ(1.1154615f, model.Theta()[0]);
} | 9644d7ffe807eb2f7fce6b2da4e06b16e17b073f.cu | #include "gtest/gtest.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "../gpu/arima/arima.h"
#include "cuda_utils2.h"
TEST(ARIMA, differencing) {
const int length = 10;
thrust::device_vector<float> data(length);
for (auto i = 0; i < length; ++i) data[i] = float(i / 2);
thrust::device_vector<float> differenced_data(length);
h2o4gpu::ARIMAModel<float>::Difference(
thrust::raw_pointer_cast(differenced_data.data()),
thrust::raw_pointer_cast(data.data()), length);
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_differenced_data = differenced_data;
ASSERT_FLOAT_EQ(0, h_differenced_data[0]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[1]);
ASSERT_FLOAT_EQ(0, h_differenced_data[2]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[3]);
ASSERT_FLOAT_EQ(0, h_differenced_data[4]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[5]);
ASSERT_FLOAT_EQ(0, h_differenced_data[6]);
ASSERT_FLOAT_EQ(-1, h_differenced_data[7]);
ASSERT_FLOAT_EQ(0, h_differenced_data[8]);
ASSERT_TRUE(std::isnan(h_differenced_data[9]));
}
TEST(ARIMA, ts_data_to_matrix) {
const int length = 7;
const int depth = 3;
const int lda = 6;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i);
thrust::device_vector<float> A(depth * lda, -1.0);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(A.data()),
depth, lda, length);
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.0f, h_A[0]);
ASSERT_FLOAT_EQ(1.0f, h_A[1]);
ASSERT_FLOAT_EQ(2.0f, h_A[2]);
ASSERT_FLOAT_EQ(3.0f, h_A[3]);
ASSERT_FLOAT_EQ(4.0f, h_A[4]);
ASSERT_FLOAT_EQ(-1.0f, h_A[5]);
ASSERT_FLOAT_EQ(1.0f, h_A[6]);
ASSERT_FLOAT_EQ(2.0f, h_A[7]);
ASSERT_FLOAT_EQ(3.0f, h_A[8]);
ASSERT_FLOAT_EQ(4.0f, h_A[9]);
ASSERT_FLOAT_EQ(5.0f, h_A[10]);
ASSERT_FLOAT_EQ(-1.0f, h_A[11]);
ASSERT_FLOAT_EQ(2.0f, h_A[12]);
ASSERT_FLOAT_EQ(3.0f, h_A[13]);
ASSERT_FLOAT_EQ(4.0f, h_A[14]);
ASSERT_FLOAT_EQ(5.0f, h_A[15]);
ASSERT_FLOAT_EQ(6.0f, h_A[16]);
ASSERT_FLOAT_EQ(-1.0f, h_A[17]);
}
TEST(ARIMA, double_ts_data_to_matrix1) {
const int length = 7;
const int a_depth = 2;
const int b_depth = 3;
const int lda = 10;
thrust::device_vector<float> ts_a(length);
for (auto i = 0; i < length; ++i) ts_a[i] = float(i);
thrust::device_vector<float> ts_b(length);
for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000);
thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()),
thrust::raw_pointer_cast(ts_b.data()),
thrust::raw_pointer_cast(A.data()),
a_depth, b_depth, lda, length);
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.000000, h_A[0]);
ASSERT_FLOAT_EQ(1.000000, h_A[1]);
ASSERT_FLOAT_EQ(2.000000, h_A[2]);
ASSERT_FLOAT_EQ(3.000000, h_A[3]);
ASSERT_FLOAT_EQ(4.000000, h_A[4]);
ASSERT_FLOAT_EQ(5.000000, h_A[5]);
ASSERT_TRUE(std::isnan(h_A[6]));
ASSERT_TRUE(std::isnan(h_A[7]));
ASSERT_TRUE(std::isnan(h_A[8]));
ASSERT_TRUE(std::isnan(h_A[9]));
ASSERT_FLOAT_EQ(1.000000, h_A[10]);
ASSERT_FLOAT_EQ(2.000000, h_A[11]);
ASSERT_FLOAT_EQ(3.000000, h_A[12]);
ASSERT_FLOAT_EQ(4.000000, h_A[13]);
ASSERT_FLOAT_EQ(5.000000, h_A[14]);
ASSERT_FLOAT_EQ(6.000000, h_A[15]);
ASSERT_TRUE(std::isnan(h_A[16]));
ASSERT_TRUE(std::isnan(h_A[17]));
ASSERT_TRUE(std::isnan(h_A[18]));
ASSERT_TRUE(std::isnan(h_A[19]));
ASSERT_FLOAT_EQ(1000.000000, h_A[20]);
ASSERT_FLOAT_EQ(1001.000000, h_A[21]);
ASSERT_FLOAT_EQ(1002.000000, h_A[22]);
ASSERT_FLOAT_EQ(1003.000000, h_A[23]);
ASSERT_FLOAT_EQ(1004.000000, h_A[24]);
ASSERT_TRUE(std::isnan(h_A[25]));
ASSERT_TRUE(std::isnan(h_A[26]));
ASSERT_TRUE(std::isnan(h_A[27]));
ASSERT_TRUE(std::isnan(h_A[28]));
ASSERT_TRUE(std::isnan(h_A[29]));
ASSERT_FLOAT_EQ(1001.000000, h_A[30]);
ASSERT_FLOAT_EQ(1002.000000, h_A[31]);
ASSERT_FLOAT_EQ(1003.000000, h_A[32]);
ASSERT_FLOAT_EQ(1004.000000, h_A[33]);
ASSERT_FLOAT_EQ(1005.000000, h_A[34]);
ASSERT_TRUE(std::isnan(h_A[35]));
ASSERT_TRUE(std::isnan(h_A[36]));
ASSERT_TRUE(std::isnan(h_A[37]));
ASSERT_TRUE(std::isnan(h_A[38]));
ASSERT_TRUE(std::isnan(h_A[39]));
ASSERT_FLOAT_EQ(1002.000000, h_A[40]);
ASSERT_FLOAT_EQ(1003.000000, h_A[41]);
ASSERT_FLOAT_EQ(1004.000000, h_A[42]);
ASSERT_FLOAT_EQ(1005.000000, h_A[43]);
ASSERT_FLOAT_EQ(1006.000000, h_A[44]);
ASSERT_TRUE(std::isnan(h_A[45]));
ASSERT_TRUE(std::isnan(h_A[46]));
ASSERT_TRUE(std::isnan(h_A[47]));
ASSERT_TRUE(std::isnan(h_A[48]));
ASSERT_TRUE(std::isnan(h_A[49]));
}
TEST(ARIMA, double_ts_data_to_matrix2) {
const int length = 7;
const int a_depth = 2;
const int b_depth = 3;
const int lda = 5;
thrust::device_vector<float> ts_a(length);
for (auto i = 0; i < length; ++i) ts_a[i] = float(i);
thrust::device_vector<float> ts_b(length);
for (auto i = 0; i < length; ++i) ts_b[i] = float(i + 1000);
thrust::device_vector<float> A((a_depth + b_depth) * lda, NAN);
h2o4gpu::ARIMAModel<float>::AsMatrix(thrust::raw_pointer_cast(ts_a.data()),
thrust::raw_pointer_cast(ts_b.data()),
thrust::raw_pointer_cast(A.data()),
a_depth, b_depth, lda, length);
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_A = A;
ASSERT_FLOAT_EQ(0.000000, h_A[0]);
ASSERT_FLOAT_EQ(1.000000, h_A[1]);
ASSERT_FLOAT_EQ(2.000000, h_A[2]);
ASSERT_FLOAT_EQ(3.000000, h_A[3]);
ASSERT_FLOAT_EQ(4.000000, h_A[4]);
ASSERT_FLOAT_EQ(1.000000, h_A[5]);
ASSERT_FLOAT_EQ(2.000000, h_A[6]);
ASSERT_FLOAT_EQ(3.000000, h_A[7]);
ASSERT_FLOAT_EQ(4.000000, h_A[8]);
ASSERT_FLOAT_EQ(5.000000, h_A[9]);
ASSERT_FLOAT_EQ(1000.000000, h_A[10]);
ASSERT_FLOAT_EQ(1001.000000, h_A[11]);
ASSERT_FLOAT_EQ(1002.000000, h_A[12]);
ASSERT_FLOAT_EQ(1003.000000, h_A[13]);
ASSERT_FLOAT_EQ(1004.000000, h_A[14]);
ASSERT_FLOAT_EQ(1001.000000, h_A[15]);
ASSERT_FLOAT_EQ(1002.000000, h_A[16]);
ASSERT_FLOAT_EQ(1003.000000, h_A[17]);
ASSERT_FLOAT_EQ(1004.000000, h_A[18]);
ASSERT_FLOAT_EQ(1005.000000, h_A[19]);
ASSERT_FLOAT_EQ(1002.000000, h_A[20]);
ASSERT_FLOAT_EQ(1003.000000, h_A[21]);
ASSERT_FLOAT_EQ(1004.000000, h_A[22]);
ASSERT_FLOAT_EQ(1005.000000, h_A[23]);
ASSERT_FLOAT_EQ(1006.000000, h_A[24]);
}
TEST(ARIMA, applyAR) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i);
const int p = 2;
thrust::device_vector<float> phi(p);
phi[0] = 1.0;
phi[1] = 0.5;
thrust::device_vector<float> res(length * p, 0);
h2o4gpu::ARIMAModel<float>::Apply(thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(phi.data()), p,
nullptr, nullptr, 0, length);
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(-2, h_res[0]);
ASSERT_FLOAT_EQ(-2.5, h_res[1]);
ASSERT_FLOAT_EQ(-3, h_res[2]);
ASSERT_FLOAT_EQ(-3.5, h_res[3]);
ASSERT_FLOAT_EQ(-4, h_res[4]);
ASSERT_FLOAT_EQ(-4.5, h_res[5]);
ASSERT_FLOAT_EQ(-5, h_res[6]);
ASSERT_FLOAT_EQ(-5.5, h_res[7]);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, applyMA) {
const int length = 10;
thrust::device_vector<float> last_residual(length);
thrust::device_vector<float> ts_data(length, 0);
for (auto i = 0; i < length; ++i) last_residual[i] = float(i % 3);
const int q = 3;
thrust::device_vector<float> theta(q);
theta[0] = 1.0;
theta[1] = -0.5;
theta[2] = 0.1;
thrust::device_vector<float> res(length, 0);
h2o4gpu::ARIMAModel<float>::Apply(
thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()), nullptr, 0,
thrust::raw_pointer_cast(last_residual.data()),
thrust::raw_pointer_cast(theta.data()), q, length);
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(0.0, h_res[0]);
ASSERT_FLOAT_EQ(-2.1, h_res[1]);
ASSERT_FLOAT_EQ(0.3, h_res[2]);
ASSERT_FLOAT_EQ(0.0, h_res[3]);
ASSERT_FLOAT_EQ(-2.1, h_res[4]);
ASSERT_FLOAT_EQ(0.3, h_res[5]);
ASSERT_FLOAT_EQ(0.0, h_res[6]);
ASSERT_FLOAT_EQ(0.0, h_res[7]);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, applyARMA) {
const int length = 10;
thrust::device_vector<float> last_residual(length);
thrust::device_vector<float> ts_data(length, 0);
for (auto i = 0; i < length; ++i) {
ts_data[i] = float(i % 4);
last_residual[i] = float(i % 3);
}
const int p = 2;
thrust::device_vector<float> phi(p);
phi[0] = 0.8;
phi[1] = -0.1;
const int q = 3;
thrust::device_vector<float> theta(q);
theta[0] = 1.0;
theta[1] = -0.5;
theta[2] = 0.1;
thrust::device_vector<float> res(length, 0);
h2o4gpu::ARIMAModel<float>::Apply(
thrust::raw_pointer_cast(res.data()),
thrust::raw_pointer_cast(ts_data.data()),
thrust::raw_pointer_cast(phi.data()), p,
thrust::raw_pointer_cast(last_residual.data()),
thrust::raw_pointer_cast(theta.data()), q, length);
OK(cudaGetLastError());
OK(cudaDeviceSynchronize());
thrust::host_vector<float> h_res = res;
ASSERT_FLOAT_EQ(-0.6, h_res[0]);
ASSERT_FLOAT_EQ(-2.4, h_res[1]);
ASSERT_NEAR(-0.1, h_res[2], 1e-6);
ASSERT_FLOAT_EQ(3.1, h_res[3]);
ASSERT_FLOAT_EQ(-2.7, h_res[4]);
ASSERT_NEAR(0.0, h_res[5], 1e-7);
ASSERT_FLOAT_EQ(-0.4, h_res[6]);
ASSERT_NEAR(0.0, h_res[7], 1e-7);
ASSERT_FLOAT_EQ(0, h_res[8]);
ASSERT_FLOAT_EQ(0, h_res[9]);
}
TEST(ARIMA, d_0_p_2_q_0) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3);
h2o4gpu::ARIMAModel<float> model(2, 0, 0, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(0.34482756, model.Phi()[0]);
ASSERT_FLOAT_EQ(0.13793102, model.Phi()[1]);
}
TEST(ARIMA, d_0_p_0_q_2_iter_1) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i % 3);
h2o4gpu::ARIMAModel<float> model(0, 0, 2, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(0.34482756f, model.Theta()[0]);
ASSERT_FLOAT_EQ(0.13793102f, model.Theta()[1]);
}
TEST(ARIMA, d_0_p_2_q_2_iter_1) {
const int length = 7;
thrust::host_vector<float> h_ts_data(length);
for (auto i = 0; i < length; ++i)
h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1);
thrust::host_vector<float> ts_data = h_ts_data;
h2o4gpu::ARIMAModel<float> model(2, 0, 2, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]);
ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]);
ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]);
ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]);
}
// TEST(ARIMA, d_0_p_2_q_2_iter_2) {
// const int length = 7;
// thrust::host_vector<float> h_ts_data(length);
// for (auto i = 0; i < length; ++i)
// h_ts_data[i] = float(i % 5) + 0.1 * float(i % 7 + 1);
// thrust::host_vector<float> ts_data = h_ts_data;
// h2o4gpu::ARIMAModel<float> model(2, 0, 2, length);
// model.Fit(thrust::raw_pointer_cast(ts_data.data()), 2);
// ASSERT_FLOAT_EQ(-2.9589546f, model.Phi()[0]);
// ASSERT_FLOAT_EQ(2.8828485f, model.Phi()[1]);
// ASSERT_FLOAT_EQ(3.9598641f, model.Theta()[0]);
// ASSERT_FLOAT_EQ(-0.61601555f, model.Theta()[1]);
// }
TEST(ARIMA, d_1_p_1_q_1_iter_1) {
const int length = 10;
thrust::device_vector<float> ts_data(length);
for (auto i = 0; i < length; ++i) ts_data[i] = float(i + i % 3);
h2o4gpu::ARIMAModel<float> model(1, 1, 1, length);
model.Fit(thrust::raw_pointer_cast(ts_data.data()));
ASSERT_FLOAT_EQ(-1.0369391f, model.Phi()[0]);
ASSERT_FLOAT_EQ(1.1154615f, model.Theta()[0]);
} |
38d2e250fb0f05dee563a0095c5adb29b12931fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
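// Grid-stride loop: each thread handles elements index, index+stride, ...,
// so the kernel works for arrays larger than the total number of launched threads.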
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 38d2e250fb0f05dee563a0095c5adb29b12931fa.cu | #include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
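// Grid-stride loop: each thread handles elements index, index+stride, ...,
// so the kernel works for arrays larger than the total number of launched threads.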
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
6a898f8a77da1a3ca7128be200c64f74e8f287ea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#define CEIL(a,b) ((a+b-1)/b)
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
struct ImgProp{
ui Hpixels;
ui Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
__global__
void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
// ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R+G+B)/3.0;
}
__device__
double Gauss[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Kernel that calculates a Gauss image from the B&W image
// resulting image has a double type for each pixel position
__global__
void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double G=0.00;
// ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
} else {
G = 0.0;
for (i = -2; i <= 2; i++) {
for (j = -2; j <= 2; j++) {
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
__device__
double Gx[3][3] = { { -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
{ 0, 0, 0 },
{ 1, 2, 1 } };
// Kernel that calculates Gradient, Theta from the Gauss image
// resulting image has a double type for each pixel position
__global__
void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double GX,GY;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)) {
ImgGrad[MYpixIndex] = 0.0;
ImgTheta[MYpixIndex] = 0.0;
return;
} else {
GX = 0.0; GY = 0.0;
for (i = -1; i <= 1; i++) {
for (j = -1; j <= 1; j++) {
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
GX += (ImgGauss[indx] * Gx[i + 1][j + 1]);
GY += (ImgGauss[indx] * Gy[i + 1][j + 1]);
}
}
ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI;
}
}
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB for each pixel, same RGB for each pixel
__global__
void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
unsigned char PIXVAL;
double L, H, G, T;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgResult[MYresultIndex] = NOEDGE;
ImgResult[MYresultIndex + 1] = NOEDGE;
ImgResult[MYresultIndex + 2] = NOEDGE;
return;
} else {
L = (double)ThreshLo; H = (double)ThreshHi;
G = ImgGrad[MYpixIndex];
PIXVAL = NOEDGE;
if (G <= L){ // no edge
PIXVAL = NOEDGE;
} else if (G >= H){ // edge
PIXVAL = EDGE;
} else {
T = ImgTheta[MYpixIndex];
if ((T<-67.5) || (T>67.5)) {
// Look at left and right: [row][col-1] and [row][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
} else if ((T >= -22.5) && (T <= 22.5)) {
// Look at top and bottom: [row-1][col] and [row+1][col]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
} else if ((T>22.5) && (T <= 67.5)) {
// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
} else if ((T >= -67.5) && (T<-22.5)) {
// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
}
}
ImgResult[MYresultIndex] = PIXVAL;
ImgResult[MYresultIndex + 1] = PIXVAL;
ImgResult[MYresultIndex + 2] = PIXVAL;
}
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(hipError_t error_id)
{
if (error_id != hipSuccess){
printf("CUDA ERROR :::%\n", hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
uch *ReadBMPlin(char* fn)
{
static uch *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL){
printf("\n\n%s NOT FOUND\n\n", fn);
exit(EXIT_FAILURE);
}
uch HeaderInfo[54];
fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
//save header for re-use
memcpy(ip.HeaderInfo, HeaderInfo,54);
printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
ip.Hpixels, ip.Vpixels, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (uch *)malloc(IMAGESIZE);
if (Img == NULL) return Img; // Cannot allocate memory
// read the image from disk
fread(Img, sizeof(uch), IMAGESIZE, f);
fclose(f);
return Img;
}
// Write the 1D linear-memory stored image into file.
void WriteBMPlin(uch *Img, char* fn)
{
FILE* f = fopen(fn, "wb");
if (f == NULL){
printf("\n\nFILE CREATION ERROR: %s\n\n", fn);
exit(1); }
//write header
fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
//write data
fwrite(Img, sizeof(uch), IMAGESIZE, f);
printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
fclose(f);
}
int main(int argc, char **argv)
{
// clock_t CPUStartTime, CPUEndTime, CPUElapsedTime;
// GPU code run times
float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU;
float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold;
hipError_t cudaStatus;
hipEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, ThrPerBlk=256, NumBlocks;
ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh,GPUDataTfrKernel, GPUDataTfrTotal;
hipDeviceProp_t GPUprop;
void *GPUptr; // Pointer to the bulk-allocated GPU memory
ul GPUtotalBufferSize;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
strcpy(ProgName, "imedgeG");
switch (argc){
case 6: ThreshHi = atoi(argv[5]);
case 5: ThreshLo = atoi(argv[4]);
case 4: ThrPerBlk = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName);
exit(EXIT_FAILURE);
}
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){
printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n");
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
printf("Cannot allocate memory for the input image...\n");
free(TheImg);
exit(EXIT_FAILURE);
}
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
hipGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
goto EXITERROR;
}
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto EXITERROR;
}
hipGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
hipEventCreate(&time1); hipEventCreate(&time2);
hipEventCreate(&time2BW); hipEventCreate(&time2Gauss); hipEventCreate(&time2Sobel);
hipEventCreate(&time3); hipEventCreate(&time4);
hipEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
// Allocate GPU buffers for the input and output images and the intermediate results
GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
cudaStatus = hipMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! Can't allocate GPU memory");
goto EXITERROR;
}
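// Carve the single bulk allocation into sub-buffers:
// [input RGB | result RGB | B&W | Gauss | Gradient | Theta];
// the first two are uch images of IMAGESIZE bytes, the last four are
// double arrays of IMAGEPIX elements each.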
GPUImg = (uch *)GPUptr;
GPUResultImg = GPUImg + IMAGESIZE;
GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
GPUGaussImg = GPUBWImg + IMAGEPIX;
GPUGradient = GPUGaussImg + IMAGEPIX;
GPUTheta = GPUGradient + IMAGEPIX;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPUImg, TheImg, IMAGESIZE, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy CPU to GPU failed!");
goto EXITCUDAERROR;
}
hipEventRecord(time2, 0); // Time stamp after the CPU --> GPU transfer is done
//dim3 dimBlock(ThrPerBlk);
//dim3 dimGrid(ip.Hpixels*BlkPerRow);
BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
NumBlocks = IPV*BlkPerRow;
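// Launch geometry example: for a 640x480 image with ThrPerBlk=256, BlkPerRow = CEIL(640,256) = 3
// and NumBlocks = 480*3 = 1440; each image row is covered by 3 blocks, and threads that fall past
// the last pixel of a row simply return inside the kernels.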
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipLaunchKernelGGL(( BWKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUBWImg, GPUImg, ip.Hpixels);
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2BW, 0); // Time stamp after BW image calculation
GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
hipLaunchKernelGGL(( GaussKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation
GPUDataTfrGauss = 2*sizeof(double)*IMAGEPIX;
hipLaunchKernelGGL(( SobelKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGradient, GPUTheta, GPUGaussImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation
GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
hipLaunchKernelGGL(( ThresholdKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUResultImg, GPUGradient, GPUTheta, ip.Hpixels, ip.Vpixels, ThreshLo, ThreshHi);
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE;
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CopyImg, GPUResultImg, IMAGESIZE, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy GPU to CPU failed!");
goto EXITCUDAERROR;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1); hipEventSynchronize(time2);
hipEventSynchronize(time2BW); hipEventSynchronize(time2Gauss); hipEventSynchronize(time2Sobel);
hipEventSynchronize(time3); hipEventSynchronize(time4);
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecTimeBW, time2, time2BW);
hipEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss);
hipEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel);
hipEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold;
cudaStatus = hipDeviceSynchronize();
//checkError(hipGetLastError()); // screen for errors in kernel launches
if (cudaStatus != hipSuccess) {
fprintf(stderr, "\n Program failed after hipDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
WriteBMPlin(CopyImg, OutputFileName); // Write the resulting edge image back to disk, lin means linear (1D) storage
printf("\n\n----------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("----------------------------------------------------------------------------\n");
printf("%s %s %s %u %d %d [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, NumBlocks, BlkPerRow);
printf("----------------------------------------------------------------------------\n");
printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE,tfrCPUtoGPU));
printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("----------------------------------------------------------------------------\n");
printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW));
printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss));
printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel));
printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold));
printf("----------------------------------------------------------------------------\n");
printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime));
printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime));
printf("----------------------------------------------------------------------------\n");
// Deallocate CPU, GPU memory and destroy events.
hipFree(GPUptr);
hipEventDestroy(time1); hipEventDestroy(time2);
hipEventDestroy(time2BW); hipEventDestroy(time2Gauss); hipEventDestroy(time2Sobel);
hipEventDestroy(time3); hipEventDestroy(time4);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
KERNELERROR:
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
EXITCUDAERROR:
hipFree(GPUptr);
EXITERROR:
free(TheImg);
free(CopyImg);
return(EXIT_FAILURE);
} | 6a898f8a77da1a3ca7128be200c64f74e8f287ea.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#define CEIL(a,b) ((a+b-1)/b)
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
struct ImgProp{
ui Hpixels;
ui Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
__global__
void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
// ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R+G+B)/3.0;
}
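// Index arithmetic used above: BMP rows are padded to a multiple of 4 bytes, so
// RowBytes = (Hpixels*3 + 3) & ~3 (e.g. Hpixels=7 gives 24, Hpixels=5 gives 16).
// Each source pixel starts at MYrow*RowBytes + 3*MYcol in the packed BGR data, while the
// B&W result is a dense Hpixels-wide array of doubles indexed by MYrow*Hpixels + MYcol.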
__device__
double Gauss[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
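// The 25 weights above sum to 159 (17 + 38 + 49 + 38 + 17), which is the normalization
// factor used in GaussKernel below (G / 159.00).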
// Kernel that calculates a Gauss image from the B&W image
// resulting image has a double type for each pixel position
__global__
void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double G=0.00;
// ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
} else {
G = 0.0;
for (i = -2; i <= 2; i++) {
for (j = -2; j <= 2; j++) {
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
__device__
double Gx[3][3] = { { -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
{ 0, 0, 0 },
{ 1, 2, 1 } };
// Kernel that calculates Gradient, Theta from the Gauss image
// resulting image has a double type for each pixel position
__global__
void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double GX,GY;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)) {
ImgGrad[MYpixIndex] = 0.0;
ImgTheta[MYpixIndex] = 0.0;
return;
} else {
GX = 0.0; GY = 0.0;
for (i = -1; i <= 1; i++) {
for (j = -1; j <= 1; j++) {
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
GX += (ImgGauss[indx] * Gx[i + 1][j + 1]);
GY += (ImgGauss[indx] * Gy[i + 1][j + 1]);
}
}
ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI;
}
}
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB for each pixel, same RGB for each pixel
__global__
void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
unsigned char PIXVAL;
double L, H, G, T;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgResult[MYresultIndex] = NOEDGE;
ImgResult[MYresultIndex + 1] = NOEDGE;
ImgResult[MYresultIndex + 2] = NOEDGE;
return;
} else {
L = (double)ThreshLo; H = (double)ThreshHi;
G = ImgGrad[MYpixIndex];
PIXVAL = NOEDGE;
if (G <= L){ // no edge
PIXVAL = NOEDGE;
} else if (G >= H){ // edge
PIXVAL = EDGE;
} else {
T = ImgTheta[MYpixIndex];
if ((T<-67.5) || (T>67.5)) {
// Look at left and right: [row][col-1] and [row][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
} else if ((T >= -22.5) && (T <= 22.5)) {
// Look at top and bottom: [row-1][col] and [row+1][col]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
} else if ((T>22.5) && (T <= 67.5)) {
// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
} else if ((T >= -67.5) && (T<-22.5)) {
// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
}
}
ImgResult[MYresultIndex] = PIXVAL;
ImgResult[MYresultIndex + 1] = PIXVAL;
ImgResult[MYresultIndex + 2] = PIXVAL;
}
}
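// Worked example of the double threshold above: with the default ThreshLo=50 and ThreshHi=100,
// a pixel with G=30 stays NOEDGE, G=120 becomes EDGE, and G=75 is decided by its neighbors along
// the quantized gradient direction (e.g. T=10 degrees falls in [-22.5, 22.5], so the pixels
// directly above and below are tested against ThreshHi).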
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
if (error_id != cudaSuccess){
printf("CUDA ERROR :::%s\n", cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
uch *ReadBMPlin(char* fn)
{
static uch *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL){
printf("\n\n%s NOT FOUND\n\n", fn);
exit(EXIT_FAILURE);
}
uch HeaderInfo[54];
fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
//save header for re-use
memcpy(ip.HeaderInfo, HeaderInfo,54);
printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
ip.Hpixels, ip.Vpixels, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (uch *)malloc(IMAGESIZE);
if (Img == NULL) return Img; // Cannot allocate memory
// read the image from disk
fread(Img, sizeof(uch), IMAGESIZE, f);
fclose(f);
return Img;
}
// Write the 1D linear-memory stored image into file.
void WriteBMPlin(uch *Img, char* fn)
{
FILE* f = fopen(fn, "wb");
if (f == NULL){
printf("\n\nFILE CREATION ERROR: %s\n\n", fn);
exit(1); }
//write header
fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
//write data
fwrite(Img, sizeof(uch), IMAGESIZE, f);
printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
fclose(f);
}
int main(int argc, char **argv)
{
// clock_t CPUStartTime, CPUEndTime, CPUElapsedTime;
// GPU code run times
float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU;
float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold;
cudaError_t cudaStatus;
cudaEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, ThrPerBlk=256, NumBlocks;
ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh,GPUDataTfrKernel, GPUDataTfrTotal;
cudaDeviceProp GPUprop;
void *GPUptr; // Pointer to the bulk-allocated GPU memory
ul GPUtotalBufferSize;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
strcpy(ProgName, "imedgeG");
switch (argc){
case 6: ThreshHi = atoi(argv[5]);
case 5: ThreshLo = atoi(argv[4]);
case 4: ThrPerBlk = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName);
exit(EXIT_FAILURE);
}
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){
printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n");
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
printf("Cannot allocate memory for the output image...\n");
free(TheImg);
exit(EXIT_FAILURE);
}
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
cudaGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
goto EXITERROR;
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto EXITERROR;
}
cudaGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
cudaEventCreate(&time1); cudaEventCreate(&time2);
cudaEventCreate(&time2BW); cudaEventCreate(&time2Gauss); cudaEventCreate(&time2Sobel);
cudaEventCreate(&time3); cudaEventCreate(&time4);
cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
// Allocate GPU buffer for the input and output images and the intermediate results
GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
goto EXITERROR;
}
GPUImg = (uch *)GPUptr;
GPUResultImg = GPUImg + IMAGESIZE;
GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
GPUGaussImg = GPUBWImg + IMAGEPIX;
GPUGradient = GPUGaussImg + IMAGEPIX;
GPUTheta = GPUGradient + IMAGEPIX;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
goto EXITCUDAERROR;
}
cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU transfer is done
//dim3 dimBlock(ThrPerBlk);
//dim3 dimGrid(ip.Hpixels*BlkPerRow);
BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
NumBlocks = IPV*BlkPerRow;
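// Launch geometry example: for a 640x480 image with ThrPerBlk=256, BlkPerRow = CEIL(640,256) = 3
// and NumBlocks = 480*3 = 1440; each image row is covered by 3 blocks, and threads that fall past
// the last pixel of a row simply return inside the kernels.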
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUBWImg, GPUImg, ip.Hpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2BW, 0); // Time stamp after BW image calculation
GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GaussKernel <<< NumBlocks, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation
GPUDataTfrGauss = 2*sizeof(double)*IMAGEPIX;
SobelKernel <<< NumBlocks, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation
GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
ThresholdKernel <<< NumBlocks, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, ip.Hpixels, ip.Vpixels, ThreshLo, ThreshHi);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE;
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
goto EXITCUDAERROR;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1); cudaEventSynchronize(time2);
cudaEventSynchronize(time2BW); cudaEventSynchronize(time2Gauss); cudaEventSynchronize(time2Sobel);
cudaEventSynchronize(time3); cudaEventSynchronize(time4);
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecTimeBW, time2, time2BW);
cudaEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss);
cudaEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel);
cudaEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold;
cudaStatus = cudaDeviceSynchronize();
//checkError(cudaGetLastError()); // screen for errors in kernel launches
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
WriteBMPlin(CopyImg, OutputFileName); // Write the resulting edge image back to disk, lin means linear (1D) storage
printf("\n\n----------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("----------------------------------------------------------------------------\n");
printf("%s %s %s %u %d %d [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, NumBlocks, BlkPerRow);
printf("----------------------------------------------------------------------------\n");
printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE,tfrCPUtoGPU));
printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("----------------------------------------------------------------------------\n");
printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW));
printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss));
printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel));
printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold));
printf("----------------------------------------------------------------------------\n");
printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime));
printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime));
printf("----------------------------------------------------------------------------\n");
// Deallocate CPU, GPU memory and destroy events.
cudaFree(GPUptr);
cudaEventDestroy(time1); cudaEventDestroy(time2);
cudaEventDestroy(time2BW); cudaEventDestroy(time2Gauss); cudaEventDestroy(time2Sobel);
cudaEventDestroy(time3); cudaEventDestroy(time4);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
KERNELERROR:
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
EXITCUDAERROR:
cudaFree(GPUptr);
EXITERROR:
free(TheImg);
free(CopyImg);
return(EXIT_FAILURE);
} |
01df821c29fe577d5d5c80820b2a012b62a07909.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <class SrcPtr, typename D> __global__ void pyrUp(const SrcPtr src, DevMem2D_<D> dst)
{
typedef typename SrcPtr::elem_type src_t;
typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ sum_t s_srcPatch[10][10];
__shared__ sum_t s_dstPatch[20][16];
if (threadIdx.x < 10 && threadIdx.y < 10)
{
const int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
const int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;
s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx));
}
__syncthreads();
sum_t sum = VecTraits<sum_t>::all(0);
const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0);
const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0);
const bool eveny = ((threadIdx.y & 1) == 0);
const int tidx = threadIdx.x;
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum;
if (threadIdx.y < 2)
{
sum = VecTraits<sum_t>::all(0);
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[threadIdx.y][threadIdx.x] = sum;
}
if (threadIdx.y > 13)
{
sum = VecTraits<sum_t>::all(0);
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum;
}
__syncthreads();
sum = VecTraits<sum_t>::all(0);
const int tidy = threadIdx.y;
sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x];
sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x];
sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x];
sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x];
sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x];
if (x < dst.cols && y < dst.rows)
dst(y, x) = saturate_cast<D>(4.0f * sum);
}
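// The 0.0625 / 0.25 / 0.375 taps above are the 5-tap binomial kernel [1 4 6 4 1]/16 applied
// separably (a horizontal pass into s_dstPatch, then a vertical pass). The evenFlag/oddFlag and
// eveny terms pick out the taps that align with real samples of the 2x-upsampled grid (the other
// positions are implicit zeros), and the final 4.0f factor in effect restores the brightness
// those zeros would otherwise remove.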
template <typename T, template <typename> class B> void pyrUp_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, hipStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> b(src.rows, src.cols);
BorderReader< PtrStep<T>, B<T> > srcReader(src, b);
hipLaunchKernelGGL(( pyrUp), dim3(grid), dim3(block), 0, stream, srcReader, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, int cn> void pyrUp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type type;
typedef void (*caller_t)(const DevMem2D_<type>& src, const DevMem2D_<type>& dst, hipStream_t stream);
static const caller_t callers[] =
{
pyrUp_caller<type, BrdReflect101>, pyrUp_caller<type, BrdReplicate>, pyrUp_caller<type, BrdConstant>, pyrUp_caller<type, BrdReflect>, pyrUp_caller<type, BrdWrap>
};
callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
}
template void pyrUp_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
template void pyrUp_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, hipStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
| 01df821c29fe577d5d5c80820b2a012b62a07909.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
namespace cv { namespace gpu { namespace device
{
namespace imgproc
{
template <class SrcPtr, typename D> __global__ void pyrUp(const SrcPtr src, DevMem2D_<D> dst)
{
typedef typename SrcPtr::elem_type src_t;
typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ sum_t s_srcPatch[10][10];
__shared__ sum_t s_dstPatch[20][16];
if (threadIdx.x < 10 && threadIdx.y < 10)
{
const int srcx = static_cast<int>((blockIdx.x * blockDim.x) / 2 + threadIdx.x) - 1;
const int srcy = static_cast<int>((blockIdx.y * blockDim.y) / 2 + threadIdx.y) - 1;
s_srcPatch[threadIdx.y][threadIdx.x] = saturate_cast<sum_t>(src(srcy, srcx));
}
__syncthreads();
sum_t sum = VecTraits<sum_t>::all(0);
const int evenFlag = static_cast<int>((threadIdx.x & 1) == 0);
const int oddFlag = static_cast<int>((threadIdx.x & 1) != 0);
const bool eveny = ((threadIdx.y & 1) == 0);
const int tidx = threadIdx.x;
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[1 + (threadIdx.y >> 1)][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[2 + threadIdx.y][threadIdx.x] = sum;
if (threadIdx.y < 2)
{
sum = VecTraits<sum_t>::all(0);
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[0][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[0][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[0][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[threadIdx.y][threadIdx.x] = sum;
}
if (threadIdx.y > 13)
{
sum = VecTraits<sum_t>::all(0);
if (eveny)
{
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx - 2) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx - 1) >> 1)];
sum = sum + (evenFlag * 0.375f ) * s_srcPatch[9][1 + ((tidx ) >> 1)];
sum = sum + ( oddFlag * 0.25f ) * s_srcPatch[9][1 + ((tidx + 1) >> 1)];
sum = sum + (evenFlag * 0.0625f) * s_srcPatch[9][1 + ((tidx + 2) >> 1)];
}
s_dstPatch[4 + threadIdx.y][threadIdx.x] = sum;
}
__syncthreads();
sum = VecTraits<sum_t>::all(0);
const int tidy = threadIdx.y;
sum = sum + 0.0625f * s_dstPatch[2 + tidy - 2][threadIdx.x];
sum = sum + 0.25f * s_dstPatch[2 + tidy - 1][threadIdx.x];
sum = sum + 0.375f * s_dstPatch[2 + tidy ][threadIdx.x];
sum = sum + 0.25f * s_dstPatch[2 + tidy + 1][threadIdx.x];
sum = sum + 0.0625f * s_dstPatch[2 + tidy + 2][threadIdx.x];
if (x < dst.cols && y < dst.rows)
dst(y, x) = saturate_cast<D>(4.0f * sum);
}
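// The 0.0625 / 0.25 / 0.375 taps above are the 5-tap binomial kernel [1 4 6 4 1]/16 applied
// separably (a horizontal pass into s_dstPatch, then a vertical pass). The evenFlag/oddFlag and
// eveny terms pick out the taps that align with real samples of the 2x-upsampled grid (the other
// positions are implicit zeros), and the final 4.0f factor in effect restores the brightness
// those zeros would otherwise remove.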
template <typename T, template <typename> class B> void pyrUp_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, cudaStream_t stream)
{
const dim3 block(16, 16);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
B<T> b(src.rows, src.cols);
BorderReader< PtrStep<T>, B<T> > srcReader(src, b);
pyrUp<<<grid, block, 0, stream>>>(srcReader, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, int cn> void pyrUp_gpu(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream)
{
typedef typename TypeVec<T, cn>::vec_type type;
typedef void (*caller_t)(const DevMem2D_<type>& src, const DevMem2D_<type>& dst, cudaStream_t stream);
static const caller_t callers[] =
{
pyrUp_caller<type, BrdReflect101>, pyrUp_caller<type, BrdReplicate>, pyrUp_caller<type, BrdConstant>, pyrUp_caller<type, BrdReflect>, pyrUp_caller<type, BrdWrap>
};
callers[borderType](static_cast< DevMem2D_<type> >(src), static_cast< DevMem2D_<type> >(dst), stream);
}
template void pyrUp_gpu<uchar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<uchar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<schar, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<ushort, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<short, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<int, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 1>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 2>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 3>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
template void pyrUp_gpu<float, 4>(const DevMem2Db& src, const DevMem2Db& dst, int borderType, cudaStream_t stream);
} // namespace imgproc
}}} // namespace cv { namespace gpu { namespace device
|
5633fe73fd7862dbd11c88042a59112a2962f3a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <thrust/tuple.h>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
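// Field headers in the compact protocol are a single byte: the high nibble is the field-id delta
// and the low nibble is the element type (see parse_header), e.g. 0x15 means "previous field id + 1,
// type 5 (i32)". A zero delta means the field id follows as a zigzag varint. The table above remaps
// list/set element type codes onto the same ST_FLD_* constants for skip_struct_field.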
struct byte_stream_s {
uint8_t const* cur;
uint8_t const* end;
uint8_t const* base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
/**
* @brief Get current byte from the byte stream
*
* @param[in] bs Byte stream
*
* @return Current byte pointed to by the byte stream
*/
inline __device__ unsigned int getb(byte_stream_s* bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s* bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
/**
* @brief Decode unsigned integer from a byte stream using VarInt encoding
*
* Concatenate least significant 7 bits of each byte to form a 32 bit
* integer. Most significant bit of each byte indicates if more bytes
* are to be used to form the number.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
__device__ uint32_t get_u32(byte_stream_s* bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
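// Example: the bytes 0x96 0x01 decode to 150 -- the low 7 bits are 0x16 (22) and 0x01 (1),
// giving 22 | (1 << 7) = 150; the set high bit of 0x96 signals that another byte follows.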
/**
* @brief Decode signed integer from a byte stream using zigzag encoding
*
* The number n encountered in a byte stream translates to
* -1^(n%2) * ceil(n/2), with the exception of 0 which remains the same.
* i.e. 0, 1, 2, 3, 4, 5 etc convert to 0, -1, 1, -2, 2 respectively.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
inline __device__ int32_t get_i32(byte_stream_s* bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
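// Zigzag examples: u = 0,1,2,3,4 decode to 0,-1,1,-2,2. For u = 3 the expression is
// (3 >> 1) ^ -(3 & 1) = 1 ^ 0xFFFFFFFF = 0xFFFFFFFE, i.e. -2 as an int32_t.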
__device__ void skip_struct_field(byte_stream_s* bs, int field_type)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0) {
rep_cnt--;
} else if (struct_depth != 0) {
unsigned int c;
do {
c = getb(bs);
if (!c) --struct_depth;
} while (!c && struct_depth);
if (!struct_depth) break;
field_type = c & 0xf;
if (!(c & 0xf0)) get_i32(bs);
}
switch (field_type) {
case ST_FLD_TRUE:
case ST_FLD_FALSE: break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64: get_u32(bs); break;
case ST_FLD_BYTE: skip_bytes(bs, 1); break;
case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
case ST_FLD_LIST:
case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
auto const c = getb(bs);
int n = c >> 4;
if (n == 0xf) n = get_u32(bs);
field_type = g_list2struct[c & 0xf];
if (field_type == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
} break;
case ST_FLD_STRUCT: struct_depth++; break;
}
} while (rep_cnt || struct_depth);
}
/**
* @brief Determine which decode kernel to run for the given page.
*
* @param page The page to decode
* @param chunk Column chunk the page belongs to
* @return `kernel_mask_bits` value for the given page
*/
__device__ uint32_t kernel_mask_for_page(gpu::PageInfo const& page,
gpu::ColumnChunkDesc const& chunk)
{
if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; }
if (page.encoding == Encoding::DELTA_BINARY_PACKED) {
return KERNEL_MASK_DELTA_BINARY;
} else if (is_string_col(chunk)) {
return KERNEL_MASK_STRING;
}
// non-string, non-delta
return KERNEL_MASK_GENERAL;
}
/**
* @brief Functor to set value to 32 bit integer read from byte stream
*
* @return True if field type is not int32
*/
struct ParquetFieldInt32 {
int field;
int32_t& val;
__device__ ParquetFieldInt32(int f, int32_t& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = get_i32(bs);
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to set value to enum read from byte stream
*
* @return True if field type is not int32
*/
template <typename Enum>
struct ParquetFieldEnum {
int field;
Enum& val;
__device__ ParquetFieldEnum(int f, Enum& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = static_cast<Enum>(get_i32(bs));
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to run operator on byte stream
*
* @return True if field type is not struct type or if the calling operator
* fails
*/
template <typename Operator>
struct ParquetFieldStruct {
int field;
Operator op;
__device__ ParquetFieldStruct(int f) : field(f) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
return ((field_type != ST_FLD_STRUCT) || !op(bs));
}
};
/**
* @brief Functor to run an operator
*
* The purpose of this functor is to replace a switch case. If the field in
* the argument is equal to the field specified in any element of the tuple
* of operators then it is run with the byte stream and field type arguments.
*
* If the field does not match any of the functors then skip_struct_field is
* called over the byte stream.
*
* @return Return value of the selected operator or false if no operator
* matched the field value
*/
template <int index>
struct FunctionSwitchImpl {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
int const& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<index>(ops).field) {
return thrust::get<index>(ops)(bs, field_type);
} else {
return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops);
}
}
};
template <>
struct FunctionSwitchImpl<0> {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
int const& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<0>(ops).field) {
return thrust::get<0>(ops)(bs, field_type);
} else {
skip_struct_field(bs, field_type);
return false;
}
}
};
/**
* @brief Function to parse page header based on the tuple of functors provided
*
* Bytes are read from the byte stream and the field delta and field type are
* matched up against user supplied reading functors. If they match then the
* corresponding values are written to references pointed to by the functors.
*
* @return Returns false if an unexpected field is encountered while reading
* byte stream. Otherwise true is returned.
*/
template <typename... Operator>
inline __device__ bool parse_header(thrust::tuple<Operator...>& op, byte_stream_s* bs)
{
constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1;
int field = 0;
while (true) {
auto const current_byte = getb(bs);
if (!current_byte) break;
int const field_delta = current_byte >> 4;
int const field_type = current_byte & 0xf;
field = field_delta ? field + field_delta : get_i32(bs);
bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op);
if (exit_function) { return false; }
}
return true;
}
struct gpuParseDataPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding),
ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding),
ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding));
return parse_header(op, bs);
}
};
struct gpuParseDictionaryPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding));
return parse_header(op, bs);
}
};
struct gpuParseDataPageHeaderV2 {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldInt32(2, bs->page.num_nulls),
ParquetFieldInt32(3, bs->page.num_rows),
ParquetFieldEnum<Encoding>(4, bs->page.encoding),
ParquetFieldInt32(5, bs->page.lvl_bytes[level_type::DEFINITION]),
ParquetFieldInt32(6, bs->page.lvl_bytes[level_type::REPETITION]));
return parse_header(op, bs);
}
};
struct gpuParsePageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type),
ParquetFieldInt32(2, bs->page.uncompressed_page_size),
ParquetFieldInt32(3, bs->page.compressed_page_size),
ParquetFieldStruct<gpuParseDataPageHeader>(5),
ParquetFieldStruct<gpuParseDictionaryPageHeader>(7),
ParquetFieldStruct<gpuParseDataPageHeaderV2>(8));
return parse_header(op, bs);
}
};
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc* chunks, int32_t num_chunks)
{
gpuParsePageHeader parse_page_header;
__shared__ byte_stream_s bs_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
byte_stream_s* const bs = &bs_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk];
__syncthreads();
if (chunk < num_chunks) {
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo* page_info;
if (!lane_id) {
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.src_col_schema = bs->ck.src_col_schema;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
bs->page.skipped_values = -1;
bs->page.skipped_leaf_values = 0;
bs->page.str_bytes = 0;
bs->page.kernel_mask = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
__syncwarp();
while (values_found < num_values && bs->cur < bs->end) {
int index_out = -1;
if (lane_id == 0) {
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
bs->page.flags = 0;
// zero out V2 info
bs->page.num_nulls = 0;
bs->page.lvl_bytes[level_type::DEFINITION] = 0;
bs->page.lvl_bytes[level_type::REPETITION] = 0;
if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) {
switch (bs->page_type) {
case PageType::DATA_PAGE:
index_out = num_dict_pages + data_page_count;
data_page_count++;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.num_rows = bs->page.num_input_values;
values_found += bs->page.num_input_values;
break;
case PageType::DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags |= PAGEINFO_FLAGS_V2;
values_found += bs->page.num_input_values;
// V2 only uses RLE, so it was removed from the header
bs->page.definition_level_encoding = Encoding::RLE;
bs->page.repetition_level_encoding = Encoding::RLE;
break;
case PageType::DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags |= PAGEINFO_FLAGS_DICTIONARY;
break;
default: index_out = -1; break;
}
bs->page.page_data = const_cast<uint8_t*>(bs->cur);
bs->cur += bs->page.compressed_page_size;
bs->page.kernel_mask = kernel_mask_for_page(bs->page, bs->ck);
} else {
bs->cur = bs->end;
}
}
index_out = shuffle(index_out);
if (index_out >= 0 && index_out < max_num_pages && lane_id == 0)
page_info[index_out] = bs->page;
num_values = shuffle(num_values);
__syncwarp();
}
if (lane_id == 0) {
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc* chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
ColumnChunkDesc* const ck = &chunk_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk];
__syncthreads();
if (chunk >= num_chunks) { return; }
if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) {
// Data type to describe a string
string_index_pair* dict_index = ck->str_dict_index;
uint8_t const* dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_input_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++) {
int len = 0;
if (cur + 4 <= dict_size) {
len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size) {
pos = cur;
cur = cur + 4 + len;
} else {
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].first = reinterpret_cast<char const*>(dict + pos + 4);
dict_index[i].second = len;
}
}
}
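// Dictionary layout example: the bytes 03 00 00 00 'f' 'o' 'o' yield one index entry whose
// pointer skips the 4-byte little-endian length (dict + pos + 4) and whose length is 3;
// cur then advances past the string data to the next entry.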
void __host__ DecodePageHeaders(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks);
}
void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, num_chunks);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 5633fe73fd7862dbd11c88042a59112a2962f3a4.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.hpp"
#include <io/utilities/block_utils.cuh>
#include <thrust/tuple.h>
#include <rmm/cuda_stream_view.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
// Minimal thrift implementation for parsing page headers
// https://github.com/apache/thrift/blob/master/doc/specs/thrift-compact-protocol.md
static const __device__ __constant__ uint8_t g_list2struct[16] = {0,
1,
2,
ST_FLD_BYTE,
ST_FLD_DOUBLE,
5,
ST_FLD_I16,
7,
ST_FLD_I32,
9,
ST_FLD_I64,
ST_FLD_BINARY,
ST_FLD_STRUCT,
ST_FLD_MAP,
ST_FLD_SET,
ST_FLD_LIST};
struct byte_stream_s {
uint8_t const* cur;
uint8_t const* end;
uint8_t const* base;
// Parsed symbols
PageType page_type;
PageInfo page;
ColumnChunkDesc ck;
};
/**
* @brief Get current byte from the byte stream
*
* @param[in] bs Byte stream
*
* @return Current byte pointed to by the byte stream
*/
inline __device__ unsigned int getb(byte_stream_s* bs)
{
return (bs->cur < bs->end) ? *bs->cur++ : 0;
}
inline __device__ void skip_bytes(byte_stream_s* bs, size_t bytecnt)
{
bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur));
bs->cur += bytecnt;
}
/**
* @brief Decode unsigned integer from a byte stream using VarInt encoding
*
* Concatenate least significant 7 bits of each byte to form a 32 bit
* integer. Most significant bit of each byte indicates if more bytes
* are to be used to form the number.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
__device__ uint32_t get_u32(byte_stream_s* bs)
{
uint32_t v = 0, l = 0, c;
do {
c = getb(bs);
v |= (c & 0x7f) << l;
l += 7;
} while (c & 0x80);
return v;
}
/**
* @brief Decode signed integer from a byte stream using zigzag encoding
*
* The number n encountered in a byte stream translates to
* (-1)^(n%2) * ceil(n/2), with the exception of 0 which remains the same.
* i.e. 0, 1, 2, 3, 4, 5 etc. convert to 0, -1, 1, -2, 2, -3 respectively.
*
* @param[in] bs Byte stream
*
* @return Decoded 32 bit integer
*/
inline __device__ int32_t get_i32(byte_stream_s* bs)
{
uint32_t u = get_u32(bs);
return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}
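/**
* @brief Skip over a struct field of the given type without decoding it
*
* Nested structs and lists are handled by tracking the remaining struct depth
* and list element count until the entire field has been consumed.
*
* @param[in] bs Byte stream
* @param[in] field_type Thrift compact-protocol type of the field to skip
*/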
__device__ void skip_struct_field(byte_stream_s* bs, int field_type)
{
int struct_depth = 0;
int rep_cnt = 0;
do {
if (rep_cnt != 0) {
rep_cnt--;
} else if (struct_depth != 0) {
unsigned int c;
do {
c = getb(bs);
if (!c) --struct_depth;
} while (!c && struct_depth);
if (!struct_depth) break;
field_type = c & 0xf;
if (!(c & 0xf0)) get_i32(bs);
}
switch (field_type) {
case ST_FLD_TRUE:
case ST_FLD_FALSE: break;
case ST_FLD_I16:
case ST_FLD_I32:
case ST_FLD_I64: get_u32(bs); break;
case ST_FLD_BYTE: skip_bytes(bs, 1); break;
case ST_FLD_DOUBLE: skip_bytes(bs, 8); break;
case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break;
case ST_FLD_LIST:
case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled
auto const c = getb(bs);
int n = c >> 4;
if (n == 0xf) n = get_u32(bs);
field_type = g_list2struct[c & 0xf];
if (field_type == ST_FLD_STRUCT)
struct_depth += n;
else
rep_cnt = n;
} break;
case ST_FLD_STRUCT: struct_depth++; break;
}
} while (rep_cnt || struct_depth);
}
/**
* @brief Determine which decode kernel to run for the given page.
*
* @param page The page to decode
* @param chunk Column chunk the page belongs to
* @return `kernel_mask_bits` value for the given page
*/
__device__ uint32_t kernel_mask_for_page(gpu::PageInfo const& page,
gpu::ColumnChunkDesc const& chunk)
{
if (page.flags & PAGEINFO_FLAGS_DICTIONARY) { return 0; }
if (page.encoding == Encoding::DELTA_BINARY_PACKED) {
return KERNEL_MASK_DELTA_BINARY;
} else if (is_string_col(chunk)) {
return KERNEL_MASK_STRING;
}
// non-string, non-delta
return KERNEL_MASK_GENERAL;
}
/**
* @brief Functor to set value to 32 bit integer read from byte stream
*
* @return True if field type is not int32
*/
struct ParquetFieldInt32 {
int field;
int32_t& val;
__device__ ParquetFieldInt32(int f, int32_t& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = get_i32(bs);
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to set value to enum read from byte stream
*
* @return True if field type is not int32
*/
template <typename Enum>
struct ParquetFieldEnum {
int field;
Enum& val;
__device__ ParquetFieldEnum(int f, Enum& v) : field(f), val(v) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
val = static_cast<Enum>(get_i32(bs));
return (field_type != ST_FLD_I32);
}
};
/**
* @brief Functor to run operator on byte stream
*
* @return True if field type is not struct type or if the calling operator
* fails
*/
template <typename Operator>
struct ParquetFieldStruct {
int field;
Operator op;
__device__ ParquetFieldStruct(int f) : field(f) {}
inline __device__ bool operator()(byte_stream_s* bs, int field_type)
{
return ((field_type != ST_FLD_STRUCT) || !op(bs));
}
};
/**
* @brief Functor to run an operator
*
* The purpose of this functor is to replace a switch case. If the field in
* the argument is equal to the field specified in any element of the tuple
* of operators then it is run with the byte stream and field type arguments.
*
* If the field does not match any of the functors then skip_struct_field is
* called over the byte stream.
*
* @return Return value of the selected operator or false if no operator
* matched the field value
*/
template <int index>
struct FunctionSwitchImpl {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
int const& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<index>(ops).field) {
return thrust::get<index>(ops)(bs, field_type);
} else {
return FunctionSwitchImpl<index - 1>::run(bs, field_type, field, ops);
}
}
};
template <>
struct FunctionSwitchImpl<0> {
template <typename... Operator>
static inline __device__ bool run(byte_stream_s* bs,
int field_type,
int const& field,
thrust::tuple<Operator...>& ops)
{
if (field == thrust::get<0>(ops).field) {
return thrust::get<0>(ops)(bs, field_type);
} else {
skip_struct_field(bs, field_type);
return false;
}
}
};
/**
* @brief Function to parse page header based on the tuple of functors provided
*
* Bytes are read from the byte stream and the field delta and field type are
* matched up against user supplied reading functors. If they match then the
* corresponding values are written to references pointed to by the functors.
*
* @return Returns false if an unexpected field is encountered while reading
* byte stream. Otherwise true is returned.
*/
template <typename... Operator>
inline __device__ bool parse_header(thrust::tuple<Operator...>& op, byte_stream_s* bs)
{
constexpr int index = thrust::tuple_size<thrust::tuple<Operator...>>::value - 1;
int field = 0;
while (true) {
auto const current_byte = getb(bs);
if (!current_byte) break;
int const field_delta = current_byte >> 4;
int const field_type = current_byte & 0xf;
field = field_delta ? field + field_delta : get_i32(bs);
bool exit_function = FunctionSwitchImpl<index>::run(bs, field_type, field, op);
if (exit_function) { return false; }
}
return true;
}
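/**
* @brief Functor to parse a (V1) DataPageHeader: number of values and the data/level encodings
*/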
struct gpuParseDataPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding),
ParquetFieldEnum<Encoding>(3, bs->page.definition_level_encoding),
ParquetFieldEnum<Encoding>(4, bs->page.repetition_level_encoding));
return parse_header(op, bs);
}
};
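/**
* @brief Functor to parse a DictionaryPageHeader: number of values and their encoding
*/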
struct gpuParseDictionaryPageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldEnum<Encoding>(2, bs->page.encoding));
return parse_header(op, bs);
}
};
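/**
* @brief Functor to parse a DataPageHeaderV2: value/null/row counts, encoding and level byte lengths
*/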
struct gpuParseDataPageHeaderV2 {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldInt32(1, bs->page.num_input_values),
ParquetFieldInt32(2, bs->page.num_nulls),
ParquetFieldInt32(3, bs->page.num_rows),
ParquetFieldEnum<Encoding>(4, bs->page.encoding),
ParquetFieldInt32(5, bs->page.lvl_bytes[level_type::DEFINITION]),
ParquetFieldInt32(6, bs->page.lvl_bytes[level_type::REPETITION]));
return parse_header(op, bs);
}
};
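/**
* @brief Functor to parse the top-level PageHeader: page type, sizes, and the nested page-type-specific header
*/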
struct gpuParsePageHeader {
__device__ bool operator()(byte_stream_s* bs)
{
auto op = thrust::make_tuple(ParquetFieldEnum<PageType>(1, bs->page_type),
ParquetFieldInt32(2, bs->page.uncompressed_page_size),
ParquetFieldInt32(3, bs->page.compressed_page_size),
ParquetFieldStruct<gpuParseDataPageHeader>(5),
ParquetFieldStruct<gpuParseDictionaryPageHeader>(7),
ParquetFieldStruct<gpuParseDataPageHeaderV2>(8));
return parse_header(op, bs);
}
};
/**
* @brief Kernel for outputting page headers from the specified column chunks
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuDecodePageHeaders(ColumnChunkDesc* chunks, int32_t num_chunks)
{
gpuParsePageHeader parse_page_header;
__shared__ byte_stream_s bs_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
byte_stream_s* const bs = &bs_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) bs->ck = chunks[chunk];
__syncthreads();
if (chunk < num_chunks) {
size_t num_values, values_found;
uint32_t data_page_count = 0;
uint32_t dictionary_page_count = 0;
int32_t max_num_pages;
int32_t num_dict_pages = bs->ck.num_dict_pages;
PageInfo* page_info;
if (!lane_id) {
bs->base = bs->cur = bs->ck.compressed_data;
bs->end = bs->base + bs->ck.compressed_size;
bs->page.chunk_idx = chunk;
bs->page.src_col_schema = bs->ck.src_col_schema;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row = 0;
bs->page.num_rows = 0;
bs->page.skipped_values = -1;
bs->page.skipped_leaf_values = 0;
bs->page.str_bytes = 0;
bs->page.kernel_mask = 0;
}
num_values = bs->ck.num_values;
page_info = bs->ck.page_info;
num_dict_pages = bs->ck.num_dict_pages;
max_num_pages = (page_info) ? bs->ck.max_num_pages : 0;
values_found = 0;
__syncwarp();
while (values_found < num_values && bs->cur < bs->end) {
int index_out = -1;
if (lane_id == 0) {
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.chunk_row += bs->page.num_rows;
bs->page.num_rows = 0;
bs->page.flags = 0;
// zero out V2 info
bs->page.num_nulls = 0;
bs->page.lvl_bytes[level_type::DEFINITION] = 0;
bs->page.lvl_bytes[level_type::REPETITION] = 0;
if (parse_page_header(bs) && bs->page.compressed_page_size >= 0) {
switch (bs->page_type) {
case PageType::DATA_PAGE:
index_out = num_dict_pages + data_page_count;
data_page_count++;
// this computation is only valid for flat schemas. for nested schemas,
// they will be recomputed in the preprocess step by examining repetition and
// definition levels
bs->page.num_rows = bs->page.num_input_values;
values_found += bs->page.num_input_values;
break;
case PageType::DATA_PAGE_V2:
index_out = num_dict_pages + data_page_count;
data_page_count++;
bs->page.flags |= PAGEINFO_FLAGS_V2;
values_found += bs->page.num_input_values;
// V2 only uses RLE, so it was removed from the header
bs->page.definition_level_encoding = Encoding::RLE;
bs->page.repetition_level_encoding = Encoding::RLE;
break;
case PageType::DICTIONARY_PAGE:
index_out = dictionary_page_count;
dictionary_page_count++;
bs->page.flags |= PAGEINFO_FLAGS_DICTIONARY;
break;
default: index_out = -1; break;
}
bs->page.page_data = const_cast<uint8_t*>(bs->cur);
bs->cur += bs->page.compressed_page_size;
bs->page.kernel_mask = kernel_mask_for_page(bs->page, bs->ck);
} else {
bs->cur = bs->end;
}
}
index_out = shuffle(index_out);
if (index_out >= 0 && index_out < max_num_pages && lane_id == 0)
page_info[index_out] = bs->page;
num_values = shuffle(num_values);
__syncwarp();
}
if (lane_id == 0) {
chunks[chunk].num_data_pages = data_page_count;
chunks[chunk].num_dict_pages = dictionary_page_count;
}
}
}
/**
* @brief Kernel for building dictionary index for the specified column chunks
*
* This function builds an index to point to each dictionary entry
* (string format is 4-byte little-endian string length followed by character
* data). The index is a 32-bit integer which contains the offset of each string
* relative to the beginning of the dictionary page data.
*
* @param[in] chunks List of column chunks
* @param[in] num_chunks Number of column chunks
*/
// blockDim {128,1,1}
__global__ void __launch_bounds__(128)
gpuBuildStringDictionaryIndex(ColumnChunkDesc* chunks, int32_t num_chunks)
{
__shared__ ColumnChunkDesc chunk_g[4];
int lane_id = threadIdx.x % 32;
int chunk = (blockIdx.x * 4) + (threadIdx.x / 32);
ColumnChunkDesc* const ck = &chunk_g[threadIdx.x / 32];
if (chunk < num_chunks and lane_id == 0) *ck = chunks[chunk];
__syncthreads();
if (chunk >= num_chunks) { return; }
if (!lane_id && ck->num_dict_pages > 0 && ck->str_dict_index) {
// Data type to describe a string
string_index_pair* dict_index = ck->str_dict_index;
uint8_t const* dict = ck->page_info[0].page_data;
int dict_size = ck->page_info[0].uncompressed_page_size;
int num_entries = ck->page_info[0].num_input_values;
int pos = 0, cur = 0;
for (int i = 0; i < num_entries; i++) {
int len = 0;
if (cur + 4 <= dict_size) {
len = dict[cur + 0] | (dict[cur + 1] << 8) | (dict[cur + 2] << 16) | (dict[cur + 3] << 24);
if (len >= 0 && cur + 4 + len <= dict_size) {
pos = cur;
cur = cur + 4 + len;
} else {
cur = dict_size;
}
}
// TODO: Could store 8 entries in shared mem, then do a single warp-wide store
dict_index[i].first = reinterpret_cast<char const*>(dict + pos + 4);
dict_index[i].second = len;
}
}
}
void __host__ DecodePageHeaders(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks);
}
void __host__ BuildStringDictionaryIndex(ColumnChunkDesc* chunks,
int32_t num_chunks,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1);
dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block
gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream.value()>>>(chunks, num_chunks);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
ed538c6a15b46029260fcd8a223ea5c0dfbe8d50.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define SIZE_BLOCK 1024
__global__ void reduction(double* new_u, double* f, double* u, long N, double h){
__shared__ double temp[SIZE_BLOCK];
int index = (blockIdx.x)*blockDim.x+threadIdx.x;
if(index >= N && index < N*(N-1) && index % N > 0 && index % N < N - 1 ){
double left = u[index - 1];
double right = u[index + 1];
double bottom = u[index + N];
double top = u[index - N];
temp[threadIdx.x] = (left + right + top + bottom + h*h*f[index]) * 0.25;
}else temp[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x<32) {
temp[threadIdx.x] += temp[threadIdx.x + 32];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 16];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 8];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 4];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) new_u[blockIdx.x] = temp[0] + temp[1];
}
__syncthreads();
if (threadIdx.x<64) temp[threadIdx.x] += temp[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x<128) temp[threadIdx.x] += temp[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x<256) temp[threadIdx.x] += temp[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x<512) temp[threadIdx.x] += temp[threadIdx.x + 512];
}
//CUDA Version Jacobi
__global__ void jacobi_2d(double* new_u, double* f, double* u, long N, double h)
{
int index = (blockIdx.x) * blockDim.x + threadIdx.x;
if(index >= N && index < N*(N-1) && index % N > 0 && index % N < N - 1){
double left = u[index - 1];
double right = u[index + 1];
double bottom = u[index + N];
double top = u[index - N];
new_u[index] = (left + right + top + bottom + h*h*f[index]) * 0.25;
}
}
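// Sums all N*N entries of a on the host with an OpenMP reduction and stores the result in *ptr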
void VecSum(double* ptr, const double* a, long N){
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N*N; i++) sum += a[i];
*ptr = sum;
}
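// Host reference implementation: performs one Jacobi sweep over the interior points of the N x N grid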
void normal_jacobi_2d(double* new_u, double* f, double* u, long N, double h){
for (long i = 1; i < N; i++){
for (long j = i*N; j < (i+1)*N-1; j++){
if(j % N > 0 && j % N < N - 1 ){
double left = u[j - 1];
double right = u[j + 1];
double top = u[j - N];
double bottom = u[j + N];
new_u[j] = (h*h*f[j] + left + right + top + bottom ) * 0.25;
}
}
}
}
void CheckError(const char *message){
hipError_t ERROR = hipGetLastError();
if(ERROR!=hipSuccess){
fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(ERROR) );
exit(-1);
}
}
int main() {
long N = 100;
/*Initialization*/
double h = 1.0/(N+1.0), sum = 0.0, correct_sum = 0.0;
double *u,*new_u,*temp_u,*f;
hipHostMalloc((void**)&u, N * N * sizeof(double));
hipHostMalloc((void**)&new_u, N * N * sizeof(double));
hipHostMalloc((void**)&temp_u, N * N * sizeof(double));
hipHostMalloc((void**)&f, N * N * sizeof(double));
memset(u,0,N*N*sizeof(double));
memset(new_u,0,N*N*sizeof(double));
memset(temp_u,0,N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) f[i] = 1.0;
/*Normal Jacobi Computation*/
for (long k = 0; k < 80; k++){
normal_jacobi_2d( new_u, f, u, N, h);
for (long i = 1; i < N*N; i++)
u[i] = new_u[i];
}
VecSum(&correct_sum, new_u, N);
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) new_u[i] = 0.0;
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) u[i] = 0.0;
double tt = omp_get_wtime();
double *temp_u_c, *u_c, *f_c;
hipMalloc(&temp_u_c, N*N*sizeof(double));
hipMalloc(&u_c, N*N*sizeof(double));
hipMalloc(&f_c, N*N*sizeof(double));
hipMemcpyAsync(temp_u_c, temp_u, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(u_c, u, N*N*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipMemcpyAsync(f_c, f, N*N*sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
tt = omp_get_wtime();
/*CUDA Version Jacobi Computation*/
for (long k = 0; k < 80; k++){
hipLaunchKernelGGL(( jacobi_2d) , dim3(N), dim3(SIZE_BLOCK) , 0, 0, temp_u_c, f_c, u_c, N, h);
u_c = temp_u_c;
}
hipMemcpyAsync(temp_u, temp_u_c, N*N*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
VecSum(&sum, temp_u, N);
printf("Absolute Error= %f\n", fabs(sum-correct_sum));
printf("Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("correct_sum: %f\n", correct_sum);
printf("sum: %f\n", sum);
hipFree(temp_u_c);
hipFree(u_c);
hipFree(f_c);
hipHostFree(f);
hipHostFree(temp_u);
hipHostFree(new_u);
hipHostFree(u);
return 0;
}
| ed538c6a15b46029260fcd8a223ea5c0dfbe8d50.cu | #include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define SIZE_BLOCK 1024
__global__ void reduction(double* new_u, double* f, double* u, long N, double h){
__shared__ double temp[SIZE_BLOCK];
int index = (blockIdx.x)*blockDim.x+threadIdx.x;
if(index >= N && index < N*(N-1) && index % N > 0 && index % N < N - 1 ){
double left = u[index - 1];
double right = u[index + 1];
double bottom = u[index + N];
double top = u[index - N];
temp[threadIdx.x] = (left + right + top + bottom + h*h*f[index]) * 0.25;
}else temp[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x<32) {
temp[threadIdx.x] += temp[threadIdx.x + 32];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 16];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 8];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 4];
__syncwarp();
temp[threadIdx.x] += temp[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) new_u[blockIdx.x] = temp[0] + temp[1];
}
__syncthreads();
if (threadIdx.x<64) temp[threadIdx.x] += temp[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x<128) temp[threadIdx.x] += temp[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x<256) temp[threadIdx.x] += temp[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x<512) temp[threadIdx.x] += temp[threadIdx.x + 512];
}
//CUDA Version Jacobi
__global__ void jacobi_2d(double* new_u, double* f, double* u, long N, double h)
{
int index = (blockIdx.x) * blockDim.x + threadIdx.x;
if(index >= N && index < N*(N-1) && index % N > 0 && index % N < N - 1){
double left = u[index - 1];
double right = u[index + 1];
double bottom = u[index + N];
double top = u[index - N];
new_u[index] = (left + right + top + bottom + h*h*f[index]) * 0.25;
}
}
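// Sums all N*N entries of a on the host with an OpenMP reduction and stores the result in *ptr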
void VecSum(double* ptr, const double* a, long N){
double sum = 0;
#pragma omp parallel for schedule(static) reduction(+:sum)
for (long i = 0; i < N*N; i++) sum += a[i];
*ptr = sum;
}
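// Host reference implementation: performs one Jacobi sweep over the interior points of the N x N grid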
void normal_jacobi_2d(double* new_u, double* f, double* u, long N, double h){
for (long i = 1; i < N; i++){
for (long j = i*N; j < (i+1)*N-1; j++){
if(j % N > 0 && j % N < N - 1 ){
double left = u[j - 1];
double right = u[j + 1];
double top = u[j - N];
double bottom = u[j + N];
new_u[j] = (h*h*f[j] + left + right + top + bottom ) * 0.25;
}
}
}
}
void CheckError(const char *message){
cudaError_t ERROR = cudaGetLastError();
if(ERROR!=cudaSuccess){
fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(ERROR) );
exit(-1);
}
}
int main() {
long N = 100;
/*Initialization*/
double h = 1.0/(N+1.0), sum = 0.0, correct_sum = 0.0;
double *u,*new_u,*temp_u,*f;
cudaMallocHost((void**)&u, N * N * sizeof(double));
cudaMallocHost((void**)&new_u, N * N * sizeof(double));
cudaMallocHost((void**)&temp_u, N * N * sizeof(double));
cudaMallocHost((void**)&f, N * N * sizeof(double));
memset(u,0,N*N*sizeof(double));
memset(new_u,0,N*N*sizeof(double));
memset(temp_u,0,N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) f[i] = 1.0;
/*Normal Jacobi Computation*/
for (long k = 0; k < 80; k++){
normal_jacobi_2d( new_u, f, u, N, h);
for (long i = 1; i < N*N; i++)
u[i] = new_u[i];
}
VecSum(&correct_sum, new_u, N);
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) new_u[i] = 0.0;
#pragma omp parallel for schedule(static)
for (long i = 0; i < N*N; i++) u[i] = 0.0;
double tt = omp_get_wtime();
double *temp_u_c, *u_c, *f_c;
cudaMalloc(&temp_u_c, N*N*sizeof(double));
cudaMalloc(&u_c, N*N*sizeof(double));
cudaMalloc(&f_c, N*N*sizeof(double));
cudaMemcpyAsync(temp_u_c, temp_u, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(u_c, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaMemcpyAsync(f_c, f, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
tt = omp_get_wtime();
/*CUDA Version Jacobi Computation*/
for (long k = 0; k < 80; k++){
jacobi_2d <<< N, SIZE_BLOCK >>> (temp_u_c, f_c, u_c, N, h);
u_c = temp_u_c;
}
cudaMemcpyAsync(temp_u, temp_u_c, N*N*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
VecSum(&sum, temp_u, N);
printf("Absolute Error= %f\n", fabs(sum-correct_sum));
printf("Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9);
printf("correct_sum: %f\n", correct_sum);
printf("sum: %f\n", sum);
cudaFree(temp_u_c);
cudaFree(u_c);
cudaFree(f_c);
cudaFreeHost(f);
cudaFreeHost(temp_u);
cudaFreeHost(new_u);
cudaFreeHost(u);
return 0;
}
|
1baf870898ea1534dea5a84b06e2aba32551cbfa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <iostream>
#include "lodepng.h"
#include "lodepng.cu"
using namespace std;
__global__
void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int BLUR_SIZE = 25, new_pos;
if((y < n) && (x < m)) {
int pixValR=0, pixValB=0,pixValG=0, pixels = 0;
int blurRow, blurCol;
for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){
for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){
int curRow = y + blurRow;
int curCol = x + blurCol;
new_pos = (curRow*m+curCol)*4;
if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){
pixValR += d_Pin[new_pos];
pixValG += d_Pin[new_pos+1];
pixValB += d_Pin[new_pos+2];
pixels++;
}
}
new_pos = (y*m+x)*4;
d_Pout[new_pos] = (unsigned char)(pixValR/pixels);
d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels);
d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels);
d_Pout[new_pos+3] = d_Pin[new_pos+3];
}
}
}
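// Copies the input image to the device, runs the blur kernel on a grid of 8x16 thread blocks,
// and copies the blurred result back to the host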
void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){
unsigned char* d_Pout, *d_Pin;
long int size = n*m*4;
hipMalloc((void **) &d_Pin,size);
hipMemcpy(d_Pin, Pin, size, hipMemcpyHostToDevice);
hipMalloc((void **) &d_Pout,size);
dim3 gridDim((m-1)/8+1,(n-1)/16+1,1);
dim3 blockDim(8,16,1);
hipLaunchKernelGGL(( PictureKernell), dim3(gridDim),dim3(blockDim), 0, 0, d_Pin,d_Pout,n,m);
hipMemcpy(Pout, d_Pout, size, hipMemcpyDeviceToHost);
hipFree(d_Pin); hipFree(d_Pout);
}
int main(int argc, char * argv[] ){
unsigned char *image, *out_image;
int i;
char name_in[100], name_out[100];
unsigned width, height;
// input and output PNG paths come from the command line
if(argc < 3) return 1;
snprintf(name_in, sizeof(name_in), "%s", argv[1]);
snprintf(name_out, sizeof(name_out), "%s", argv[2]);
i = lodepng_decode32_file(&image, &width, &height, name_in);
if(i != 0) printf("NO\n");
out_image = (unsigned char*) malloc(width*height*4);
/*for(i = 0; i < (width * height)*4; i++){
if(i%4==0) image[i] = 0;
if(i%4==1) image[i] = 255;
if(i%4==3) image[i] = 120;
}*/
Picture(image,out_image,height,width);
lodepng_encode32_file(name_out,out_image,width,height);
free(image);
free(out_image);
return 0;
}
| 1baf870898ea1534dea5a84b06e2aba32551cbfa.cu | #include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <stdlib.h>
#include <iostream>
#include "lodepng.h"
#include "lodepng.cu"
using namespace std;
__global__
void PictureKernell(unsigned char* d_Pin, unsigned char* d_Pout, int n, int m){
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int BLUR_SIZE = 25, new_pos;
if((y < n) && (x < m)) {
int pixValR=0, pixValB=0,pixValG=0, pixels = 0;
int blurRow, blurCol;
for(blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE+1;++blurRow){
for(blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE+1;++blurCol){
int curRow = y + blurRow;
int curCol = x + blurCol;
new_pos = (curRow*m+curCol)*4;
if(curRow > -1 && curRow < n && curCol > -1 && curCol < m){
pixValR += d_Pin[new_pos];
pixValG += d_Pin[new_pos+1];
pixValB += d_Pin[new_pos+2];
pixels++;
}
}
new_pos = (y*m+x)*4;
d_Pout[new_pos] = (unsigned char)(pixValR/pixels);
d_Pout[new_pos+1] = (unsigned char)(pixValG/pixels);
d_Pout[new_pos+2] = (unsigned char)(pixValB/pixels);
d_Pout[new_pos+3] = d_Pin[new_pos+3];
}
}
}
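// Copies the input image to the device, runs the blur kernel on a grid of 8x16 thread blocks,
// and copies the blurred result back to the host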
void Picture(unsigned char* Pin, unsigned char* Pout, int n, int m){
unsigned char* d_Pout, *d_Pin;
long int size = n*m*4;
cudaMalloc((void **) &d_Pin,size);
cudaMemcpy(d_Pin, Pin, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &d_Pout,size);
dim3 gridDim((m-1)/8+1,(n-1)/16+1,1);
dim3 blockDim(8,16,1);
PictureKernell<<<gridDim,blockDim>>>(d_Pin,d_Pout,n,m);
cudaMemcpy(Pout, d_Pout, size, cudaMemcpyDeviceToHost);
cudaFree(d_Pin); cudaFree(d_Pout);
}
int main(int argc, char * argv[] ){
unsigned char *image, *out_image;
int i;
char name_in[100], name_out[100];
unsigned width, height;
// input and output PNG paths come from the command line
if(argc < 3) return 1;
snprintf(name_in, sizeof(name_in), "%s", argv[1]);
snprintf(name_out, sizeof(name_out), "%s", argv[2]);
i = lodepng_decode32_file(&image, &width, &height, name_in);
if(i != 0) printf("NO\n");
out_image = (unsigned char*) malloc(width*height*4);
/*for(i = 0; i < (width * height)*4; i++){
if(i%4==0) image[i] = 0;
if(i%4==1) image[i] = 255;
if(i%4==3) image[i] = 120;
}*/
Picture(image,out_image,height,width);
lodepng_encode32_file(name_out,out_image,width,height);
free(image);
free(out_image);
return 0;
}
|
6362a81a07493ba2711140e1aff8f3aeb7eb4eed.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#define ENABLE_FP8
#include "bertFp8Plugin.h"
#include "src/fastertransformer/utils/cuda_fp8_utils.h"
#include "src/fastertransformer/utils/cuda_utils.h"
#include "src/fastertransformer/utils/logger.h"
#include <fstream>
using namespace nvinfer1;
namespace ft = fastertransformer;
namespace fastertransformer {
REGISTER_TENSORRT_PLUGIN(BertFp8PluginCreator);
BertFp8Plugin::BertFp8Plugin(BertFp8Config cfg, std::string weightDirPath):
mBertFp8Config(cfg), mWeightDirPath(weightDirPath)
{
}
// assume this is called ONCE PER GPU, via a deserializeCudaEngine() call from the client
// since BertFp8Weights::deserialize() copies weights to the current GPU's device memory
BertFp8Plugin::BertFp8Plugin(const void* data, size_t length)
{
const uint8_t* tmp = (const uint8_t*)data;
mBertFp8Config = BertFp8Config(tmp);
const auto cfg = mBertFp8Config;
mBertWeights.reset(new ft::BertFP8Weight<fp8_t, bf16_t>(cfg.hidden_units,
cfg.num_heads,
cfg.size_per_head,
cfg.intermediate_size,
cfg.num_layers,
cfg.vocab_size,
cfg.max_position_embeddings,
cfg.token_type_vocab_size,
1,
1,
cfg.fp8_mode,
true,
true));
mBertWeights->deserialize(tmp); // copies weights to current GPU's device mem
}
int BertFp8Plugin::initialize() noexcept
{
if (mInitialized) {
return 0;
}
const auto cfg = mBertFp8Config;
// load weights directly from weightDirPath
// this should only be performed during engine build phase and then weights are serialized
// at runtime, deserialize() is called which will load a copy of these weights
if (mBertWeights == nullptr) {
mBertWeights.reset(new ft::BertFP8Weight<fp8_t, bf16_t>(cfg.hidden_units,
cfg.num_heads,
cfg.size_per_head,
cfg.intermediate_size,
cfg.num_layers,
cfg.vocab_size,
cfg.max_position_embeddings,
cfg.token_type_vocab_size,
1,
1,
cfg.fp8_mode,
true,
true));
mBertWeights->loadModel(mWeightDirPath);
mBertWeights->transposeWeight();
}
mAllocator.reset(new ft::Allocator<ft::AllocatorType::CUDA>(ft::getDevice()));
mCublasCtx.reset(new CublasCtx(mAllocator.get()));
ft::AttentionType attention_type =
ft::getAttentionType<fp8_t>(cfg.size_per_head, ft::getSMVersion(), cfg.remove_padding, cfg.max_seq_len);
mBertModel.reset(new ft::BertFP8<fp8_t, bf16_t>(cfg.num_heads,
cfg.size_per_head,
cfg.hidden_units,
cfg.intermediate_size,
cfg.num_layers,
mTensorPara,
mPipelinePara,
ft::getSMVersion(),
1.0f,
mCublasCtx->mStream,
mCublasCtx->mCublasWrapper.get(),
mAllocator.get(),
false,
attention_type,
false,
ft::ActivationType::Gelu,
ft::LayerNormType::post_layernorm,
cfg.fp8_mode));
mInitialized = true;
return 0;
}
const char* BertFp8Plugin::getPluginType() const noexcept
{
return "BertFp8Plugin";
}
const char* BertFp8Plugin::getPluginVersion() const noexcept
{
return "1";
}
void BertFp8Plugin::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* BertFp8Plugin::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
void BertFp8Plugin::destroy() noexcept
{
delete this;
}
IPluginV2DynamicExt* BertFp8Plugin::clone() const noexcept
{
BertFp8Plugin* ret = new BertFp8Plugin(mBertFp8Config, mWeightDirPath);
ret->mBertWeights = mBertWeights;
ret->initialize();
return ret;
}
void BertFp8Plugin::attachToContext(cudnnContext*, cublasContext*, IGpuAllocator*) noexcept
{
// this->initialize();
}
int BertFp8Plugin::getNbOutputs() const noexcept
{
return 1;
}
DimsExprs BertFp8Plugin::getOutputDimensions(int outputIndex,
const DimsExprs* inputs,
int nbInputs,
IExprBuilder& exprBuilder) noexcept
{
FT_CHECK(outputIndex >= 0 && outputIndex < this->getNbOutputs());
FT_CHECK(nbInputs == 3);
DimsExprs output(inputs[0]);
output.nbDims = 3;
output.d[0] = inputs[0].d[0];
output.d[1] = exprBuilder.constant(mBertFp8Config.max_seq_len);
output.d[2] = exprBuilder.constant(mBertFp8Config.hidden_units);
return output;
}
bool BertFp8Plugin::supportsFormatCombination(int pos,
const PluginTensorDesc* inOut,
int nbInputs,
int nbOutputs) noexcept
{
if (inOut[pos].format != TensorFormat::kLINEAR) {
return false;
}
if (nbInputs != 3 || nbOutputs != 1) {
printf("Wrong input or output count %d %d\n", nbInputs, nbOutputs);
return false;
}
// inputs
if (pos == 0 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
if (pos == 1 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
if (pos == 2 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
// outputs
if (pos == 3 && inOut[pos].type != nvinfer1::DataType::kHALF) {
return false;
}
return true;
}
void BertFp8Plugin::configurePlugin(const DynamicPluginTensorDesc* in,
int nbInputs,
const DynamicPluginTensorDesc* out,
int nbOutputs) noexcept
{
}
void BertFp8Plugin::terminate() noexcept {}
size_t BertFp8Plugin::getWorkspaceSize(const PluginTensorDesc* inputs,
int nbInputs,
const PluginTensorDesc* outputs,
int nbOutputs) const noexcept
{
return 0;
}
int BertFp8Plugin::enqueue(const PluginTensorDesc* inputDesc,
const PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs,
void* workspace,
hipStream_t stream) noexcept
{
int32_t batchSize = inputDesc[0].dims.d[0];
int32_t maxSeqLenInBatch = inputDesc[0].dims.d[1];
const int32_t* inputIds = static_cast<const int32_t*>(inputs[0]);
const int32_t* tokenTypeIds = static_cast<const int32_t*>(inputs[1]);
const int32_t* sequenceLengths = static_cast<const int32_t*>(inputs[2]);
size_t batchSize_s = batchSize;
size_t maxSeqLen_s = maxSeqLenInBatch;
size_t hiddenUnits_s = mBertFp8Config.hidden_units;
auto inputTensors = ft::TensorMap(std::unordered_map<std::string, ft::Tensor>{
{"input_ids",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s, maxSeqLen_s}, inputIds}},
{"sequence_lengths",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s}, sequenceLengths}},
{"token_type_ids",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s, maxSeqLen_s}, tokenTypeIds}}});
auto outputTensors = ft::TensorMap(std::unordered_map<std::string, ft::Tensor>{
{"output_hidden_state",
ft::Tensor{
ft::MEMORY_GPU, ft::getTensorType<half>(), {batchSize_s, maxSeqLen_s, hiddenUnits_s}, outputs[0]}}});
FT_CHECK(hipEventRecord(mSyncEvent.get(), stream) == hipSuccess);
FT_CHECK(hipStreamWaitEvent(mCublasCtx->mStream, mSyncEvent.get(), 0) == hipSuccess);
mBertModel->forward(&outputTensors, &inputTensors, mBertWeights.get());
FT_CHECK(hipEventRecord(mSyncEvent.get(), mCublasCtx->mStream) == hipSuccess);
FT_CHECK(hipStreamWaitEvent(stream, mSyncEvent.get(), 0) == hipSuccess);
return 0;
}
size_t BertFp8Plugin::getSerializationSize() const noexcept
{
auto sz = mBertWeights->getSerializationSize();
sz += mBertFp8Config.getSerializationSize();
return sz;
}
void BertFp8Plugin::serialize(void* buffer) const noexcept
{
uint8_t* tmp = (uint8_t*)buffer;
mBertFp8Config.serialize(tmp);
mBertWeights->serialize(tmp);
}
nvinfer1::DataType
BertFp8Plugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
return nvinfer1::DataType::kHALF;
}
const char* BertFp8PluginCreator::getPluginName() const noexcept
{
return "BertFp8Plugin";
}
const char* BertFp8PluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* BertFp8PluginCreator::getFieldNames() noexcept
{
return nullptr;
}
void BertFp8PluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* BertFp8PluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
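// Builds a plugin from the TensorRT plugin field collection; all integer hyper-parameters
// and the weight directory path must be present in the fields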
IPluginV2DynamicExt* BertFp8PluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
int32_t num_heads;
int32_t size_per_head;
int32_t num_layers;
int32_t max_seq_len;
int32_t vocab_size;
int32_t max_position_embeddings;
int32_t token_type_vocab_size;
int32_t remove_padding;
int32_t fp8_mode;
std::map<std::string, int32_t*> name2pint = {{"num_heads", &num_heads},
{"size_per_head", &size_per_head},
{"num_layers", &num_layers},
{"max_seq_len", &max_seq_len},
{"vocab_size", &vocab_size},
{"max_position_embeddings", &max_position_embeddings},
{"token_type_vocab_size", &token_type_vocab_size},
{"remove_padding", &remove_padding},
{"fp8_mode", &fp8_mode}};
size_t found = 0;
std::for_each(fc->fields, fc->fields + fc->nbFields, [&name2pint, &found](const auto& f) {
auto iter = name2pint.find(f.name);
if (iter != name2pint.end()) {
*(iter->second) = *(int32_t*)f.data;
found++;
}
});
std::string weightDirPath;
std::map<std::string, std::string*> name2pstr = {{"weightDirPath", &weightDirPath}};
std::for_each(fc->fields, fc->fields + fc->nbFields, [&name2pstr, &found](const auto& f) {
auto iter = name2pstr.find(f.name);
if (iter != name2pstr.end()) {
*(iter->second) = std::string((const char*)f.data, f.length);
found++;
}
});
FT_CHECK(found == name2pint.size() + name2pstr.size());
BertFp8Config cfg{num_heads,
size_per_head,
num_layers,
max_seq_len,
vocab_size,
max_position_embeddings,
token_type_vocab_size,
remove_padding,
fp8_mode};
return new BertFp8Plugin(cfg, weightDirPath);
}
IPluginV2DynamicExt*
BertFp8PluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept
{
return new BertFp8Plugin(serialData, serialLength);
}
} // namespace fastertransformer
| 6362a81a07493ba2711140e1aff8f3aeb7eb4eed.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#define ENABLE_FP8
#include "bertFp8Plugin.h"
#include "src/fastertransformer/utils/cuda_fp8_utils.h"
#include "src/fastertransformer/utils/cuda_utils.h"
#include "src/fastertransformer/utils/logger.h"
#include <fstream>
using namespace nvinfer1;
namespace ft = fastertransformer;
namespace fastertransformer {
REGISTER_TENSORRT_PLUGIN(BertFp8PluginCreator);
BertFp8Plugin::BertFp8Plugin(BertFp8Config cfg, std::string weightDirPath):
mBertFp8Config(cfg), mWeightDirPath(weightDirPath)
{
}
// assume this is called ONCE PER GPU, via a deserializeCudaEngine() call from the client
// since BertFp8Weights::deserialize() copies weights to the current GPU's device memory
BertFp8Plugin::BertFp8Plugin(const void* data, size_t length)
{
const uint8_t* tmp = (const uint8_t*)data;
mBertFp8Config = BertFp8Config(tmp);
const auto cfg = mBertFp8Config;
mBertWeights.reset(new ft::BertFP8Weight<fp8_t, bf16_t>(cfg.hidden_units,
cfg.num_heads,
cfg.size_per_head,
cfg.intermediate_size,
cfg.num_layers,
cfg.vocab_size,
cfg.max_position_embeddings,
cfg.token_type_vocab_size,
1,
1,
cfg.fp8_mode,
true,
true));
mBertWeights->deserialize(tmp); // copies weights to current GPU's device mem
}
int BertFp8Plugin::initialize() noexcept
{
if (mInitialized) {
return 0;
}
const auto cfg = mBertFp8Config;
// load weights directly from weightDirPath
// this should only be performed during engine build phase and then weights are serialized
// at runtime, deserialize() is called which will load a copy of these weights
if (mBertWeights == nullptr) {
mBertWeights.reset(new ft::BertFP8Weight<fp8_t, bf16_t>(cfg.hidden_units,
cfg.num_heads,
cfg.size_per_head,
cfg.intermediate_size,
cfg.num_layers,
cfg.vocab_size,
cfg.max_position_embeddings,
cfg.token_type_vocab_size,
1,
1,
cfg.fp8_mode,
true,
true));
mBertWeights->loadModel(mWeightDirPath);
mBertWeights->transposeWeight();
}
mAllocator.reset(new ft::Allocator<ft::AllocatorType::CUDA>(ft::getDevice()));
mCublasCtx.reset(new CublasCtx(mAllocator.get()));
ft::AttentionType attention_type =
ft::getAttentionType<fp8_t>(cfg.size_per_head, ft::getSMVersion(), cfg.remove_padding, cfg.max_seq_len);
mBertModel.reset(new ft::BertFP8<fp8_t, bf16_t>(cfg.num_heads,
cfg.size_per_head,
cfg.hidden_units,
cfg.intermediate_size,
cfg.num_layers,
mTensorPara,
mPipelinePara,
ft::getSMVersion(),
1.0f,
mCublasCtx->mStream,
mCublasCtx->mCublasWrapper.get(),
mAllocator.get(),
false,
attention_type,
false,
ft::ActivationType::Gelu,
ft::LayerNormType::post_layernorm,
cfg.fp8_mode));
mInitialized = true;
return 0;
}
const char* BertFp8Plugin::getPluginType() const noexcept
{
return "BertFp8Plugin";
}
const char* BertFp8Plugin::getPluginVersion() const noexcept
{
return "1";
}
void BertFp8Plugin::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* BertFp8Plugin::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
void BertFp8Plugin::destroy() noexcept
{
delete this;
}
IPluginV2DynamicExt* BertFp8Plugin::clone() const noexcept
{
BertFp8Plugin* ret = new BertFp8Plugin(mBertFp8Config, mWeightDirPath);
ret->mBertWeights = mBertWeights;
ret->initialize();
return ret;
}
void BertFp8Plugin::attachToContext(cudnnContext*, cublasContext*, IGpuAllocator*) noexcept
{
// this->initialize();
}
int BertFp8Plugin::getNbOutputs() const noexcept
{
return 1;
}
DimsExprs BertFp8Plugin::getOutputDimensions(int outputIndex,
const DimsExprs* inputs,
int nbInputs,
IExprBuilder& exprBuilder) noexcept
{
FT_CHECK(outputIndex >= 0 && outputIndex < this->getNbOutputs());
FT_CHECK(nbInputs == 3);
DimsExprs output(inputs[0]);
output.nbDims = 3;
output.d[0] = inputs[0].d[0];
output.d[1] = exprBuilder.constant(mBertFp8Config.max_seq_len);
output.d[2] = exprBuilder.constant(mBertFp8Config.hidden_units);
return output;
}
bool BertFp8Plugin::supportsFormatCombination(int pos,
const PluginTensorDesc* inOut,
int nbInputs,
int nbOutputs) noexcept
{
if (inOut[pos].format != TensorFormat::kLINEAR) {
return false;
}
if (nbInputs != 3 || nbOutputs != 1) {
printf("Wrong input or output count %d %d\n", nbInputs, nbOutputs);
return false;
}
// inputs
if (pos == 0 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
if (pos == 1 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
if (pos == 2 && inOut[pos].type != nvinfer1::DataType::kINT32) {
return false;
}
// outputs
if (pos == 3 && inOut[pos].type != nvinfer1::DataType::kHALF) {
return false;
}
return true;
}
void BertFp8Plugin::configurePlugin(const DynamicPluginTensorDesc* in,
int nbInputs,
const DynamicPluginTensorDesc* out,
int nbOutputs) noexcept
{
}
void BertFp8Plugin::terminate() noexcept {}
size_t BertFp8Plugin::getWorkspaceSize(const PluginTensorDesc* inputs,
int nbInputs,
const PluginTensorDesc* outputs,
int nbOutputs) const noexcept
{
return 0;
}
int BertFp8Plugin::enqueue(const PluginTensorDesc* inputDesc,
const PluginTensorDesc* outputDesc,
const void* const* inputs,
void* const* outputs,
void* workspace,
cudaStream_t stream) noexcept
{
int32_t batchSize = inputDesc[0].dims.d[0];
int32_t maxSeqLenInBatch = inputDesc[0].dims.d[1];
const int32_t* inputIds = static_cast<const int32_t*>(inputs[0]);
const int32_t* tokenTypeIds = static_cast<const int32_t*>(inputs[1]);
const int32_t* sequenceLengths = static_cast<const int32_t*>(inputs[2]);
size_t batchSize_s = batchSize;
size_t maxSeqLen_s = maxSeqLenInBatch;
size_t hiddenUnits_s = mBertFp8Config.hidden_units;
auto inputTensors = ft::TensorMap(std::unordered_map<std::string, ft::Tensor>{
{"input_ids",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s, maxSeqLen_s}, inputIds}},
{"sequence_lengths",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s}, sequenceLengths}},
{"token_type_ids",
ft::Tensor{ft::MEMORY_GPU, ft::TYPE_INT32, std::vector<size_t>{batchSize_s, maxSeqLen_s}, tokenTypeIds}}});
auto outputTensors = ft::TensorMap(std::unordered_map<std::string, ft::Tensor>{
{"output_hidden_state",
ft::Tensor{
ft::MEMORY_GPU, ft::getTensorType<half>(), {batchSize_s, maxSeqLen_s, hiddenUnits_s}, outputs[0]}}});
FT_CHECK(cudaEventRecord(mSyncEvent.get(), stream) == cudaSuccess);
FT_CHECK(cudaStreamWaitEvent(mCublasCtx->mStream, mSyncEvent.get(), 0) == cudaSuccess);
mBertModel->forward(&outputTensors, &inputTensors, mBertWeights.get());
FT_CHECK(cudaEventRecord(mSyncEvent.get(), mCublasCtx->mStream) == cudaSuccess);
FT_CHECK(cudaStreamWaitEvent(stream, mSyncEvent.get(), 0) == cudaSuccess);
return 0;
}
size_t BertFp8Plugin::getSerializationSize() const noexcept
{
auto sz = mBertWeights->getSerializationSize();
sz += mBertFp8Config.getSerializationSize();
return sz;
}
void BertFp8Plugin::serialize(void* buffer) const noexcept
{
uint8_t* tmp = (uint8_t*)buffer;
mBertFp8Config.serialize(tmp);
mBertWeights->serialize(tmp);
}
nvinfer1::DataType
BertFp8Plugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const noexcept
{
return nvinfer1::DataType::kHALF;
}
const char* BertFp8PluginCreator::getPluginName() const noexcept
{
return "BertFp8Plugin";
}
const char* BertFp8PluginCreator::getPluginVersion() const noexcept
{
return "1";
}
const PluginFieldCollection* BertFp8PluginCreator::getFieldNames() noexcept
{
return nullptr;
}
void BertFp8PluginCreator::setPluginNamespace(const char* libNamespace) noexcept
{
mNamespace = libNamespace;
}
const char* BertFp8PluginCreator::getPluginNamespace() const noexcept
{
return mNamespace.c_str();
}
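// Builds a plugin from the TensorRT plugin field collection; all integer hyper-parameters
// and the weight directory path must be present in the fields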
IPluginV2DynamicExt* BertFp8PluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) noexcept
{
int32_t num_heads;
int32_t size_per_head;
int32_t num_layers;
int32_t max_seq_len;
int32_t vocab_size;
int32_t max_position_embeddings;
int32_t token_type_vocab_size;
int32_t remove_padding;
int32_t fp8_mode;
std::map<std::string, int32_t*> name2pint = {{"num_heads", &num_heads},
{"size_per_head", &size_per_head},
{"num_layers", &num_layers},
{"max_seq_len", &max_seq_len},
{"vocab_size", &vocab_size},
{"max_position_embeddings", &max_position_embeddings},
{"token_type_vocab_size", &token_type_vocab_size},
{"remove_padding", &remove_padding},
{"fp8_mode", &fp8_mode}};
size_t found = 0;
std::for_each(fc->fields, fc->fields + fc->nbFields, [&name2pint, &found](const auto& f) {
auto iter = name2pint.find(f.name);
if (iter != name2pint.end()) {
*(iter->second) = *(int32_t*)f.data;
found++;
}
});
std::string weightDirPath;
std::map<std::string, std::string*> name2pstr = {{"weightDirPath", &weightDirPath}};
std::for_each(fc->fields, fc->fields + fc->nbFields, [&name2pstr, &found](const auto& f) {
auto iter = name2pstr.find(f.name);
if (iter != name2pstr.end()) {
*(iter->second) = std::string((const char*)f.data, f.length);
found++;
}
});
FT_CHECK(found == name2pint.size() + name2pstr.size());
BertFp8Config cfg{num_heads,
size_per_head,
num_layers,
max_seq_len,
vocab_size,
max_position_embeddings,
token_type_vocab_size,
remove_padding,
fp8_mode};
return new BertFp8Plugin(cfg, weightDirPath);
}
IPluginV2DynamicExt*
BertFp8PluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept
{
return new BertFp8Plugin(serialData, serialLength);
}
} // namespace fastertransformer
|
cc2f080b657e393820c3f4b42f9bf6869724456d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
const int M = 32;
__global__ void bucket_sort(int *key, int *bucket, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
atomicAdd(bucket + key[i], 1); // Fill bucket by key
__syncthreads(); // Wait for the threads of this block to finish filling bucket (no synchronization across blocks)
for (int j = 0, k = 0; j <= i; k++) { // Output sorted data to key
key[i] = k;
j += bucket[k];
}
}
int main() {
int n = 50;
int range = 5;
int* key;
hipMallocManaged(&key, n * sizeof(int));
for (int i = 0; i < n; ++i) {
key[i] = rand() % range;
printf("%d ", key[i]);
}
printf("\n");
int* bucket;
hipMallocManaged(&bucket, range * sizeof(int));
hipLaunchKernelGGL(( bucket_sort), dim3((n + M - 1) / M), dim3(M), range, 0, key, bucket, n);
hipDeviceSynchronize();
for (int i = 0; i < n; i++) {
printf("%d ", key[i]);
}
printf("\n");
}
| cc2f080b657e393820c3f4b42f9bf6869724456d.cu | #include <cstdio>
const int M = 32;
__global__ void bucket_sort(int *key, int *bucket, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
atomicAdd(bucket + key[i], 1); // Fill bucket by key
__syncthreads(); // Wait for the threads of this block to finish filling bucket (no synchronization across blocks)
for (int j = 0, k = 0; j <= i; k++) { // Output sorted data to key
key[i] = k;
j += bucket[k];
}
}
int main() {
int n = 50;
int range = 5;
int* key;
cudaMallocManaged(&key, n * sizeof(int));
for (int i = 0; i < n; ++i) {
key[i] = rand() % range;
printf("%d ", key[i]);
}
printf("\n");
int* bucket;
cudaMallocManaged(&bucket, range * sizeof(int));
bucket_sort<<<(n + M - 1) / M, M, range>>>(key, bucket, n);
cudaDeviceSynchronize();
for (int i = 0; i < n; i++) {
printf("%d ", key[i]);
}
printf("\n");
}
|
311c6da1e065e8697c609d046447132932abca64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MergeRank(float * d_input, float * d_output)
{
int indexA = blockIdx.x * blockDim.x + threadIdx.x;
int indexB = indexA + 2048;
float temp1 = d_input[indexA];
float temp2 = d_input[indexB];
int indexAB = 2048;
while (d_input[indexAB] < temp1) {
indexAB++;
}
int indexBA = 0;
while (d_input[indexBA] < temp2) {
indexBA++;
}
__syncthreads();
d_output[indexA + indexAB + 1] = temp1;
d_output[indexB + indexBA + 1] = temp2;
} | 311c6da1e065e8697c609d046447132932abca64.cu | #include "includes.h"
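// Merge-by-ranking kernel: each thread takes one element from each of the two 2048-element
// sorted halves of d_input, counts the elements of the opposite half that are smaller,
// and uses that rank to scatter the element into d_output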
__global__ void MergeRank(float * d_input, float * d_output)
{
int indexA = blockIdx.x * blockDim.x + threadIdx.x;
int indexB = indexA + 2048;
float temp1 = d_input[indexA];
float temp2 = d_input[indexB];
int indexAB = 2048;
while (d_input[indexAB] < temp1) {
indexAB++;
}
int indexBA = 0;
while (d_input[indexBA] < temp2) {
indexBA++;
}
__syncthreads();
d_output[indexA + indexAB + 1] = temp1;
d_output[indexB + indexBA + 1] = temp2;
} |
e5068996fff22c1aebf800ce4ff1a1ad16594b71.hip | // !!! This is a file automatically generated by hipify!!!
#include "transformcuda.cuh"
int transform(cuwst *w,short sense){
// wrapper to "master" version taking a stream argument
return(transform(w,sense,NULL));
}
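// "Master" version: dispatches to the appropriate GPU wavelet routine based on the transform
// type (DWT, packet-ordered MODWT, time-ordered MODWT) and the chosen filter, then toggles
// the structure's 'transformed' flag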
int transform(cuwst *w,short sense, hipStream_t stream){
int ret;
if(sense != (w->transformed ? BWD : FWD)){
printf("\nSense doesn't make sense. We can't FWD/BWD something that is/not transformed!\n");
return(1);
}
if(w->ttype == DWT){
switch(w->filt){
case HAAR:
ret = HaarCUDAMLv2(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
case DAUB4:
ret = Daub4CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
//ret = Daub4(w->x,w->len,sense,w->levels);
break;
case HAARNOHOST:
ret = HaarCUDAMLv3(w->x_d,w->len,sense,w->levels,stream);
break;
case C6F:
ret = C6CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
case LA8F:
ret = LA8CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
if(w->ttype == MODWT_PO){
switch(w->filt){
case HAAR:
ret = HaarCUDAMODWTv4(w->x_h,w->xmod_h,w->x_d,w->xmod_d,w->len,sense,w->levels);
// this PO version uses streams itself to run the different packets
// however, it is far slower than the TO version!
break;
case HAARNOHOST:
ret = HaarCUDAMODWT(w->x_d, w->xmod_d,w->len,sense,w->levels);
// this is a version without host memory
// it doesn't use streams. It's just slow!
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
if(w->ttype == MODWT_TO){
switch(w->filt){
case HAAR:
ret = HaarCUDAMODWTv6(w->x_h,w->xmod_h,w->x_d,w->xmod_d,w->len,sense,w->levels,stream);
break;
case HAARNOHOST:
ret = HaarCUDAMODWTv6d(w->x_d,w->xmod_d,w->len,sense,w->levels,stream);
// this is a version just using device memory
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
// we switch the 'transformed' boolean
w->transformed = !(w->transformed);
return(ret);
} | e5068996fff22c1aebf800ce4ff1a1ad16594b71.cu | #include "transformcuda.cuh"
int transform(cuwst *w,short sense){
// wrapper to "master" version taking a stream argument
return(transform(w,sense,NULL));
}
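// "Master" version: dispatches to the appropriate GPU wavelet routine based on the transform
// type (DWT, packet-ordered MODWT, time-ordered MODWT) and the chosen filter, then toggles
// the structure's 'transformed' flag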
int transform(cuwst *w,short sense, cudaStream_t stream){
int ret;
if(sense != (w->transformed ? BWD : FWD)){
printf("\nSense doesn't make sense. We can't FWD/BWD something that is/not transformed!\n");
return(1);
}
if(w->ttype == DWT){
switch(w->filt){
case HAAR:
ret = HaarCUDAMLv2(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
case DAUB4:
ret = Daub4CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
//ret = Daub4(w->x,w->len,sense,w->levels);
break;
case HAARNOHOST:
ret = HaarCUDAMLv3(w->x_d,w->len,sense,w->levels,stream);
break;
case C6F:
ret = C6CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
case LA8F:
ret = LA8CUDA_sh_ml2_streams(w->x_h,w->x_d,w->len,sense,w->levels,stream);
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
if(w->ttype == MODWT_PO){
switch(w->filt){
case HAAR:
ret = HaarCUDAMODWTv4(w->x_h,w->xmod_h,w->x_d,w->xmod_d,w->len,sense,w->levels);
// this PO version uses streams itself to run the different packets
// however, it is far slower than the TO version!
break;
case HAARNOHOST:
ret = HaarCUDAMODWT(w->x_d, w->xmod_d,w->len,sense,w->levels);
// this is a version without host memory
// it doesn't use streams. It's just slow!
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
if(w->ttype == MODWT_TO){
switch(w->filt){
case HAAR:
ret = HaarCUDAMODWTv6(w->x_h,w->xmod_h,w->x_d,w->xmod_d,w->len,sense,w->levels,stream);
break;
case HAARNOHOST:
ret = HaarCUDAMODWTv6d(w->x_d,w->xmod_d,w->len,sense,w->levels,stream);
// this is a version just using device memory
break;
default:
printf("\nUnrecognised filter\n");
return(1);
break;
}
}
// we switch the 'transformed' boolean
w->transformed = !(w->transformed);
return(ret);
} |
6da436ce7d2fc5088fe5d5b45afb80e5b21d8bd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include "input.h"
#include "knnCuda.h"
//#include "check.h"
#include "hipError_t.h"
#include "datasetFunctions.h"
// attributi
#define A 30
// labels
#define LABELS 10
// number of threads
#define NT 32
//#define BLOCK_SIZE 32
int main(int argc, char *argv[])
{
// arguments:
// train file name
// test file name
// N: number of training samples
// M: number of test samples
// k: number of neighbours
// BLOCK_SIZE: threads per block side (square BLOCK_SIZE x BLOCK_SIZE blocks)
if(argc != 7){
printf(
"Errore non sono stati specificati correttamente i parametri:\n"
"1 - Train fileName\n"
"2 - Test tileName\n"
"3 - Numero sample di train\n"
"4 - Numero sample di test\n"
"5 - K: numero di vicini\n"
"6 - BLOCK_SIZE: numero di blocchi per grid");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
int N = atoi(argv[3]);
int M = atoi(argv[4]);
int K = atoi(argv[5]);
int BLOCK_SIZE = atoi(argv[6]);
if (K > N){
printf("Errore il numero di vicini non pu essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
// device
int deviceIndex = 0;
// get the number of available devices
int count;
HANDLE_ERROR( hipGetDeviceCount( &count ) );
// check that the requested device exists
if(deviceIndex < count)
{
HANDLE_ERROR(hipSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the selected device
hipDeviceProp_t prop;
HANDLE_ERROR(hipGetDeviceProperties(&prop, deviceIndex));
// measure the execution time
hipEvent_t start, stop, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventCreate( &stopRead ) );
HANDLE_ERROR( hipEventCreate( &stopSendData ) );
HANDLE_ERROR( hipEventCreate( &primoStep ) );
HANDLE_ERROR( hipEventCreate( &secondoStep ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
// allocate the dataset matrices and the other helper vectors
float * trainingData= (float *) malloc(N * A * sizeof(float));
float * testingData= (float *) malloc(M * A * sizeof(float));
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(M *sizeof(int));
float * dist = (float *) malloc(M * N * sizeof(float));
// check that the buffers were allocated correctly
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// read the dataset from file
readFile(trainFile, N, A, trainingData, classesTraining);
readFile(testFile, M, A, testingData, classesTesting);
HANDLE_ERROR( hipEventRecord( stopRead, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stopRead ) );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
// allocate global memory on the GPU for the dataset
HANDLE_ERROR( hipMalloc((void**)&dev_train, N * A * sizeof(float)));
HANDLE_ERROR( hipMalloc((void**)&dev_test, M * A * sizeof(float)));
// allocate the distance matrix and the label array
HANDLE_ERROR( hipMalloc((void**)&dev_dist, N * M * sizeof(float)));
// copy the dataset contents into the device buffers
HANDLE_ERROR( hipMemcpy(dev_train, trainingData, N * A * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR( hipMemcpy(dev_test, testingData, M * A * sizeof(float), hipMemcpyHostToDevice));
HANDLE_ERROR( hipEventRecord(stopSendData, 0));
HANDLE_ERROR( hipEventSynchronize(stopSendData));
// record the read time
float elapsedTimeRead;
HANDLE_ERROR( hipEventElapsedTime(&elapsedTimeRead, start, stopSendData ));
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
// compute the number of grid rows and columns based on BLOCK_SIZE
int dim_row = (M % BLOCK_SIZE == 0) ? M / BLOCK_SIZE : M / BLOCK_SIZE + 1;
int dim_col = (N % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
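// e.g. M = 100 test samples with BLOCK_SIZE = 16 gives dim_row = 7, i.e. ceil(M / BLOCK_SIZE)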
// create the grid of threads
dim3 grid(dim_col, dim_row, 1);
// compute the Euclidean distance between train and test points with the kernel function
hipLaunchKernelGGL(( euclideanDistance_kernel), dim3(grid), dim3(block), 0, 0, N, M, A, dev_train, dev_test, dev_dist);
// allocate the variables needed to compute the confusion matrix
int * label = (int*) malloc(M * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialise the confusion matrix to zero
for(int i = 0; i < LABELS * LABELS; i++){
confusionMatrix[i] = 0;
}
// barrier to make sure all distances have been computed
hipDeviceSynchronize();
HANDLE_ERROR( hipEventRecord( primoStep, 0 ) );
HANDLE_ERROR( hipEventSynchronize( primoStep ) );
// compute the execution time
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
// free the dataset on the device, since it will no longer be used
HANDLE_ERROR( hipFree(dev_train) );
HANDLE_ERROR( hipFree(dev_test) );
// initialise the label array on the device
HANDLE_ERROR( hipMalloc( (void**)&dev_label, M * K * sizeof(int) ) );
// create the launch configuration for the sort kernel
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
hipLaunchKernelGGL(( sort_kernel), dim3(gridSort), dim3(blockSort), 0, 0, N, M, K, dev_dist, dev_label);
// barrier to make sure all threads have finished
hipDeviceSynchronize();
// copy the label array from the device to host memory
HANDLE_ERROR(hipMemcpy(label , dev_label, M * K * sizeof(int), hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipEventRecord( secondoStep, 0 ) );
HANDLE_ERROR( hipEventSynchronize( secondoStep ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
// number of classification errors made by the KNN algorithm
int error = 0;
// compute the confusion matrix; this part is left to the CPU
// loop over the test samples
for (int i = 0; i < M; i++){
// initialise the array used to count label occurrences
for(int l = 0; l < LABELS; l++){
countsLabel[l] = 0;
}
int bestLabel = 0;
// loop over the k training samples closest to the i-th test sample
for(int j = 0; j < K; j++){
// index and class of the j-th training sample
int indice = label[i*K + j];
int classe = classesTraining[indice];
// increment the counter for this class
countsLabel[classe] = countsLabel[classe] + 1;
// update the best class if its occurrence count is larger
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
// check whether the predicted label matches the true one
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
// update the confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] + 1;
}
// free host memory
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
// free device memory
HANDLE_ERROR( hipFree(dev_label ) );
HANDLE_ERROR( hipFree(dev_dist ) );
// compute the total execution time
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
// save the results to file
saveResultsOnFile(K, N, M, A, elapsedTime/1000,BLOCK_SIZE);
return 0;
} | 6da436ce7d2fc5088fe5d5b45afb80e5b21d8bd5.cu | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include "input.h"
#include "knnCuda.h"
//#include "check.h"
#include "cudaError.h"
#include "datasetFunctions.h"
// attributes
#define A 30
// labels
#define LABELS 10
// number of threads
#define NT 32
//#define BLOCK_SIZE 32
int main(int argc, char *argv[])
{
// arguments:
// train file name
// test file name
// N: number of training samples
// M: number of test samples
// k: number of neighbours
// BLOCK_SIZE: number of blocks per grid
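// Example invocation (hypothetical binary and dataset names, shown only for illustration):
//   ./knnCuda train.csv test.csv 60000 10000 7 16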
if(argc != 7){
printf(
"Errore non sono stati specificati correttamente i parametri:\n"
"1 - Train fileName\n"
"2 - Test tileName\n"
"3 - Numero sample di train\n"
"4 - Numero sample di test\n"
"5 - K: numero di vicini\n"
"6 - BLOCK_SIZE: numero di blocchi per grid");
exit(EXIT_FAILURE);
}
const char * trainFile = argv[1];
const char * testFile = argv[2];
int N = atoi(argv[3]);
int M = atoi(argv[4]);
int K = atoi(argv[5]);
int BLOCK_SIZE = atoi(argv[6]);
if (K > N){
printf("Errore il numero di vicini non può essere superiore al numero di sample!\n");
exit(EXIT_FAILURE);
}
if (K % 2 == 0){
printf("Inserire un numero di vicini dispari!\n");
exit(EXIT_FAILURE);
}
// device
int deviceIndex = 0;
// get the number of available devices
int count;
HANDLE_ERROR( cudaGetDeviceCount( &count ) );
// check that the requested device exists
if(deviceIndex < count)
{
HANDLE_ERROR(cudaSetDevice(deviceIndex));
}
else
{
printf("Device non disponbile!\n");
exit(EXIT_FAILURE);
}
// properties of the selected device
cudaDeviceProp prop;
HANDLE_ERROR(cudaGetDeviceProperties(&prop, deviceIndex));
// measure the execution time
cudaEvent_t start, stop, stopRead, stopSendData, primoStep, secondoStep;
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventCreate( &stopRead ) );
HANDLE_ERROR( cudaEventCreate( &stopSendData ) );
HANDLE_ERROR( cudaEventCreate( &primoStep ) );
HANDLE_ERROR( cudaEventCreate( &secondoStep ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
// allocate the dataset matrices and the other helper vectors
float * trainingData= (float *) malloc(N * A * sizeof(float));
float * testingData= (float *) malloc(M * A * sizeof(float));
int * classesTraining = (int*) malloc(N *sizeof(int));
int * classesTesting = (int*) malloc(M *sizeof(int));
float * dist = (float *) malloc(M * N * sizeof(float));
// check that the buffers were allocated correctly
if(trainingData == NULL || testingData == NULL || classesTesting == NULL || classesTraining == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// read the dataset from file
readFile(trainFile, N, A, trainingData, classesTraining);
readFile(testFile, M, A, testingData, classesTesting);
HANDLE_ERROR( cudaEventRecord( stopRead, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stopRead ) );
// pointers to the data on the device
float* dev_train;
float* dev_test;
float* dev_dist;
int* dev_label;
// allocate global memory on the GPU for the dataset
HANDLE_ERROR( cudaMalloc((void**)&dev_train, N * A * sizeof(float)));
HANDLE_ERROR( cudaMalloc((void**)&dev_test, M * A * sizeof(float)));
// allocate the distance matrix and the label array
HANDLE_ERROR( cudaMalloc((void**)&dev_dist, N * M * sizeof(float)));
// copy the dataset contents into the device buffers
HANDLE_ERROR( cudaMemcpy(dev_train, trainingData, N * A * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaMemcpy(dev_test, testingData, M * A * sizeof(float), cudaMemcpyHostToDevice));
HANDLE_ERROR( cudaEventRecord(stopSendData, 0));
HANDLE_ERROR( cudaEventSynchronize(stopSendData));
// record the read time
float elapsedTimeRead;
HANDLE_ERROR( cudaEventElapsedTime(&elapsedTimeRead, start, stopSendData ));
// create blocks of BLOCK_SIZE * BLOCK_SIZE threads
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
// compute the number of grid rows and columns based on BLOCK_SIZE
int dim_row = (M % BLOCK_SIZE == 0) ? M / BLOCK_SIZE : M / BLOCK_SIZE + 1;
int dim_col = (N % BLOCK_SIZE == 0) ? N / BLOCK_SIZE : N / BLOCK_SIZE + 1;
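// e.g. M = 100 test samples with BLOCK_SIZE = 16 gives dim_row = 7, i.e. ceil(M / BLOCK_SIZE)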
// create the grid of threads
dim3 grid(dim_col, dim_row, 1);
// compute the Euclidean distance between train and test points with the kernel function
euclideanDistance_kernel<<<grid, block>>>(N, M, A, dev_train, dev_test, dev_dist);
// allocate the variables needed to compute the confusion matrix
int * label = (int*) malloc(M * K *sizeof(int));
int* countsLabel = (int*) malloc(sizeof(int)* LABELS);
int* confusionMatrix = (int*) malloc(sizeof(int)* LABELS * LABELS);
if(confusionMatrix ==NULL || countsLabel == NULL || label == NULL){
printf("Not enough memory!\n");
exit(EXIT_FAILURE);
}
// initialise the confusion matrix to zero
for(int i = 0; i < LABELS * LABELS; i++){
confusionMatrix[i] = 0;
}
// barrier to make sure all distances have been computed
cudaDeviceSynchronize();
HANDLE_ERROR( cudaEventRecord( primoStep, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( primoStep ) );
// compute the execution time
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, primoStep ) );
// free the dataset on the device, since it will no longer be used
HANDLE_ERROR( cudaFree(dev_train) );
HANDLE_ERROR( cudaFree(dev_test) );
// initialise the label array on the device
HANDLE_ERROR( cudaMalloc( (void**)&dev_label, M * K * sizeof(int) ) );
// create the launch configuration for the sort kernel
dim3 blockSort(BLOCK_SIZE, 1, 1);
dim3 gridSort(dim_row, 1, 1);
sort_kernel<<<gridSort, blockSort>>>(N, M, K, dev_dist, dev_label);
// barrier to make sure all threads have finished
cudaDeviceSynchronize();
// copy the label array from the device to host memory
HANDLE_ERROR(cudaMemcpy(label , dev_label, M * K * sizeof(int), cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaEventRecord( secondoStep, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( secondoStep ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTimeRead, start, secondoStep ) );
// number of classification errors made by the KNN algorithm
int error = 0;
// compute the confusion matrix; this part is left to the CPU
// loop over the test samples
for (int i = 0; i < M; i++){
// initialise the array used to count label occurrences
for(int l = 0; l < LABELS; l++){
countsLabel[l] = 0;
}
int bestLabel = 0;
// loop over the k training samples closest to the i-th test sample
for(int j = 0; j < K; j++){
// index and class of the j-th training sample
int indice = label[i*K + j];
int classe = classesTraining[indice];
// increment the counter for this class
countsLabel[classe] = countsLabel[classe] + 1;
// update the best class if its occurrence count is larger
if(countsLabel[classe] > countsLabel[bestLabel])
bestLabel = classe;
}
// check whether the predicted label matches the true one
int realLabel = classesTesting[i];
if (realLabel != bestLabel){
error = error + 1;
}
// update the confusion matrix
confusionMatrix[realLabel * LABELS + bestLabel] = confusionMatrix[realLabel * LABELS + bestLabel] + 1;
}
// free host memory
free(trainingData); trainingData = NULL;
free(testingData); testingData = NULL;
free(dist); dist=NULL;
free(classesTraining); classesTraining = NULL;
free(classesTesting); classesTesting = NULL;
free(confusionMatrix); confusionMatrix=NULL;
free(label); label=NULL;
free(countsLabel); countsLabel= NULL;
// free device memory
HANDLE_ERROR( cudaFree(dev_label ) );
HANDLE_ERROR( cudaFree(dev_dist ) );
// compute the total execution time
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
// save the results to file
saveResultsOnFile(K, N, M, A, elapsedTime/1000,BLOCK_SIZE);
return 0;
} |
046ebbcb95aba16c08efa2fc968d4388800f93ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/NumericUtils.h>
#include <c10/util/accumulate.h>
#include <THH/THHGeneral.h>
#include <THH/THHNumerics.cuh>
#include <hipcub/hipcub.hpp>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
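// Illustration: ceil_div(10, 4) == 3 and ceil_div(8, 4) == 2.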
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
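// Illustration: this kernel is used by cummax (binary_op = greater_equal) and cummin
// (binary_op = less_equal) below. For cummax, scanning the row [3, 1, 4, 1] yields
// values [3, 3, 4, 4] and indices [0, 0, 2, 2].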
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim_with_indices<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) {
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
bool copy_values = !values.is_contiguous();
bool copy_indices = !indices.is_contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op);
}
if (copy_values){
values.copy_(values_);
}
if (copy_indices){
indices.copy_(indices_);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummax", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummin", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
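// Illustration: with binary_op = plus and init = 0 (cumsum), the row [1, 2, 3, 4]
// scans to [1, 3, 6, 10]. Each block performs a shared-memory up-sweep / down-sweep
// over 2 * num_threads_x elements at a time and carries block_total across chunks.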
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// As we cannot directly initialize shared array for complex types
// Reference:
// `error: initializer not allowed for __shared__ variable`
// We instead get the base scalar type and allocate twice number of
// elements required of base type and reinterpret them as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, class func_t>
__global__ void transform_vals(scalar_t * a, scalar_t * b, scalar_t * out, func_t binary_op){
*out = binary_op(*a, *b);
}
template<typename scalar_t, typename BinaryFunction>
void scan_cub(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t size = self.numel();
// non synchronizing cub call
// even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
// so split at int_max/2
constexpr int max_cub_size = std::numeric_limits<int>::max() / 2 + 1; // 2**30
for (int64_t i = 0; i < size; i += max_cub_size) {
int size_cub = std::min<int64_t>(size - i, max_cub_size);
Tensor first_elem; // need to save it for all iterations other than first
if (i > 0) {
// need to temporarily transform first element of the range we are
// operating on; self might be multi-d, but we need to index a single
// element
auto self_view = at::_unsafe_view(self, -1);
first_elem = self_view[i].clone();
hipLaunchKernelGGL(( transform_vals), dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i - 1,
self.data_ptr<scalar_t>() + i,
binary_op);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
size_t temp_storage_bytes = 0;
AT_CUDA_CHECK(hipcub::DeviceScan::InclusiveScan(
nullptr,
temp_storage_bytes,
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i,
binary_op,
size_cub,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
auto temp_storage = at::native::empty_cuda(
{static_cast<int64_t>(temp_storage_bytes)},
kByte, self.options().layout_opt(), self.options().device_opt(),
self.options().pinned_memory_opt());
AT_CUDA_CHECK(hipcub::DeviceScan::InclusiveScan(
temp_storage.data_ptr(),
temp_storage_bytes,
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i,
binary_op,
size_cub,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
if (i > 0) {
if (self.data_ptr<scalar_t>() != result.data_ptr<scalar_t>()) {
// restore modified first element only if it's not an inplace operation
auto self_view = at::_unsafe_view(self, -1);
self_view[i].copy_(first_elem, /*non_blocking=*/true);
}
}
}
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
bool copy_result = !result.is_contiguous();
Tensor result_ = result.contiguous();
if (self.numel() == self.size(dim)) {
scan_cub<scalar_t>(self_, result_, init, binary_op);
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result_, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op);
}
if (copy_result) {
result.copy_(result_);
}
}
Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU("logcumsumexp", {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "logcumsumexp_cuda", [&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
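// log_add_exp evaluates log(exp(x) + exp(y)) as max(x, y) + log1p(exp(min - max)),
// which avoids overflow when the inputs are large.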
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(self, dim, result);
}
Tensor& _cumsum_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumsum", {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(
at::ScalarType::Half, self.scalar_type(), "cumsum_cuda", [&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumsum_out_cuda(self, dim, result);
}
Tensor& _cumprod_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumprod", {output_arg, input_arg});
checkSameType("cumprod", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(
at::ScalarType::Half, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumprod_out_cuda(self, dim, result);
}
}} // namespace at::native
| 046ebbcb95aba16c08efa2fc968d4388800f93ab.cu | #include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/NumericUtils.h>
#include <c10/util/accumulate.h>
#include <THC/THCGeneral.h>
#include <THC/THCNumerics.cuh>
#include <cub/device/device_scan.cuh>
namespace at { namespace native {
template <typename integer>
constexpr inline integer ceil_div(integer n, integer m) {
return (n + m - 1) / m;
}
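// Illustration: ceil_div(10, 4) == 3 and ceil_div(8, 4) == 2.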
template<typename scalar_t, typename idx_t, typename BinaryOperation>
__device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) {
if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) {
rhs = lhs;
rhs_idx = lhs_idx;
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
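// Illustration: this kernel is used by cummax (binary_op = greater_equal) and cummin
// (binary_op = less_equal) below. For cummax, scanning the row [3, 1, 4, 1] yields
// values [3, 3, 4, 4] and indices [0, 0, 2, 2].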
template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction>
__global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_,
int num_rows, int row_size,
scalar_t init, BinaryFunction binary_op) {
__shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x];
__shared__ int64_t ibuf[num_threads_y][2 * num_threads_x];
scalar_t* row_buf = vbuf[threadIdx.y];
int64_t* row_idx_buf = ibuf[threadIdx.y];
for (int block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
int row = block_row + threadIdx.y;
const scalar_t *row_self = self_ + row * row_size;
scalar_t *row_values = values_ + row * row_size;
int64_t *row_indices = indices_ + row * row_size;
scalar_t block_total = init;
int64_t block_idx_final = 0;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
int col1 = block_col + threadIdx.x;
int col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_self[col1];
row_idx_buf[threadIdx.x] = col1;
} else {
row_buf[threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_self[col2];
row_idx_buf[num_threads_x + threadIdx.x] = col2;
} else {
row_buf[num_threads_x + threadIdx.x] = init;
// No need to set the index here as the value in init will never be selected
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
int offset = (2 * threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Down-sweep.
for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
int offset = 2 * (threadIdx.x + 1) * d - 1;
binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size){
row_values[col1] = row_buf[threadIdx.x];
row_indices[col1] = row_idx_buf[threadIdx.x];
}
if (col2 < row_size) {
row_values[col2] = row_buf[num_threads_x + threadIdx.x];
row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x];
}
}
block_total = row_buf[2 * num_threads_x - 1];
block_idx_final = row_idx_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryFunction>
__global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) {
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *self = self_ + orow * row_size * num_irows + irow;
scalar_t *values = values_ + orow * row_size * num_irows + irow;
int64_t *indices = indices_ + orow * row_size * num_irows + irow;
scalar_t out = init;
int64_t out_idx = 0;
for (auto col = decltype(row_size){0}; col < row_size; ++col) {
if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) {
out = *self;
out_idx = col;
}
*values = out;
*indices = out_idx;
self += num_irows;
values += num_irows;
indices += num_irows;
}
}
}
}
void check_fits_in_unsigned(int64_t val, const char* name) {
constexpr auto umax = std::numeric_limits<uint32_t>::max();
TORCH_CHECK(
val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value");
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices,
int dim, scalar_t init, BinaryFunction binary_op) {
int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
//for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row,
//make sure that input is not bigger than supported by uint32_t
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
__host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int row_size = self.size(ndim - 1);
int num_rows = self.numel() / row_size;
dim3 threads(16, 32);
dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y))));
tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) {
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
Tensor values_ = values.contiguous();
Tensor indices_ = indices.contiguous();
bool copy_values = !values.is_contiguous();
bool copy_indices = !indices.is_contiguous();
if (dim == ndim - 1) {
scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op);
} else {
scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op);
}
if (copy_values){
values.copy_(values_);
}
if (copy_indices){
indices.copy_(indices_);
}
}
void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummax", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummax_cuda", [&]() {
scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>());
});
}
void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) {
TensorArg output_arg{ values, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ self, "input", 3 };
checkAllSameGPU("cummin", {output_arg, indices_arg, input_arg});
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Bool, at::ScalarType::Half,
self.scalar_type(), "cummin_cuda", [&]() {
scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max();
scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>());
});
}
// TODO: The implementation of `tensor_kernel_scan_outer_dim` and
// `tensor_kernel_scan_innermost_dim` is similar to
// `tensor_kernel_scan_outer_dim_with_indices`
// `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to
// remove the duplication.
/* Perform an inclusive scan along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to scan;
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<typename scalar_t, class BinaryOp>
__global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_,
const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size,
const scalar_t init, BinaryOp binary_op)
{
for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
scalar_t *src = src_ + orow * row_size * num_irows + irow;
scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow;
scalar_t acc = init;
for (uint32_t col = 0; col < row_size; ++col) {
acc = binary_op(acc, *src);
*tgt = acc;
src += num_irows;
tgt += num_irows;
}
}
}
}
/* Perform an inclusive scan along the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
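// Illustration: with binary_op = plus and init = 0 (cumsum), the row [1, 2, 3, 4]
// scans to [1, 3, 6, 10]. Each block performs a shared-memory up-sweep / down-sweep
// over 2 * num_threads_x elements at a time and carries block_total across chunks.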
template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction>
__device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_,
const uint32_t num_rows, const uint32_t row_size,
T init, BinaryFunction binary_op){
for (uint32_t block_row = blockIdx.x * blockDim.y;
block_row < num_rows;
block_row += blockDim.y * gridDim.x) {
uint32_t row = block_row + threadIdx.y;
T block_total = init;
T *row_src = src_ + row * row_size;
T *row_tgt = tgt_ + row * row_size;
// Perform scan on one block at a time, keeping track of the total value of
// all blocks processed so far.
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) {
// Load data into shared memory (two values per thread).
uint32_t col1 = block_col + threadIdx.x;
uint32_t col2 = block_col + num_threads_x + threadIdx.x;
if (row < num_rows) {
if (col1 < row_size) {
row_buf[threadIdx.x] = row_src[col1];
} else {
row_buf[threadIdx.x] = init;
}
if (col2 < row_size) {
row_buf[num_threads_x + threadIdx.x] = row_src[col2];
} else {
row_buf[num_threads_x + threadIdx.x] = init;
}
// Add the total value of all previous blocks to the first value of this block.
if (threadIdx.x == 0) {
row_buf[0] = binary_op(row_buf[0], block_total);
}
}
__syncthreads();
// Parallel reduction (up-sweep).
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) {
if (row < num_rows && threadIdx.x < s) {
uint32_t offset = (2 * threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Down-sweep.
for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) {
if (row < num_rows && threadIdx.x < s - 1) {
uint32_t offset = 2 * (threadIdx.x + 1) * d - 1;
row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]);
}
__syncthreads();
}
// Write back to output.
if (row < num_rows) {
if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x];
if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x];
}
block_total = row_buf[2 * num_threads_x - 1];
__syncthreads();
}
}
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
__shared__ T sbuf[num_threads_y][2 * num_threads_x];
T* row_buf = sbuf[threadIdx.y];
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template <
typename T,
int num_threads_x,
int num_threads_y,
class BinaryFunction>
__global__ typename std::enable_if<c10::is_complex<T>::value, void>::type
tensor_kernel_scan_innermost_dim(
T* tgt_,
T* src_,
const uint32_t num_rows,
const uint32_t row_size,
T init,
BinaryFunction binary_op) {
// As we cannot directly initialize shared array for complex types
// Reference:
// `error: initializer not allowed for __shared__ variable`
// We instead get the base scalar type and allocate twice number of
// elements required of base type and reinterpret them as complex.
using base_t = typename scalar_value_type<T>::type;
__shared__ base_t sbuf[num_threads_y][4 * num_threads_x];
T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]);
tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>(
row_buf, tgt_, src_, num_rows, row_size, init, binary_op);
}
template<typename scalar_t, class BinaryFunction>
__host__ void scan_outer_dim(const Tensor& self, Tensor& result,
int dim, scalar_t init, BinaryFunction binary_op) {
const int64_t row_size = self.size(dim);
auto sizes = self.sizes();
// Treat all outer dimensions (i.e. dim_ < dim) as one.
const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim);
// Treat all inner dimensions (i.e. dim > dimension) as one.
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end());
dim3 threads(std::min(512, int(num_irows)));
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1];
dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x})));
check_fits_in_unsigned(num_irows, "num_irows");
check_fits_in_unsigned(num_orows, "num_orows");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_orows, num_irows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template <typename scalar_t, class BinaryFunction>
void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t ndim = self.dim();
// Treat all outer dimensions as a single dimension.
int64_t row_size = self.size(ndim - 1);
int64_t num_rows = self.numel() / row_size;
dim3 threads(16, 32);
int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0];
dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y})));
check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))");
check_fits_in_unsigned(row_size, "row_size");
tensor_kernel_scan_innermost_dim<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(),
num_rows, row_size, init, binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
template<typename scalar_t, class func_t>
__global__ void transform_vals(scalar_t * a, scalar_t * b, scalar_t * out, func_t binary_op){
*out = binary_op(*a, *b);
}
template<typename scalar_t, typename BinaryFunction>
void scan_cub(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) {
int64_t size = self.numel();
// non synchronizing cub call
// even though cub is supposed to support tensors with int_max elements, in reality it doesn't,
// so split at int_max/2
constexpr int max_cub_size = std::numeric_limits<int>::max() / 2 + 1; // 2**30
for (int64_t i = 0; i < size; i += max_cub_size) {
int size_cub = std::min<int64_t>(size - i, max_cub_size);
Tensor first_elem; // need to save it for all iterations other than first
if (i > 0) {
// need to temporarily transform first element of the range we are
// operating on; self might be multi-d, but we need to index a single
// element
auto self_view = at::_unsafe_view(self, -1);
first_elem = self_view[i].clone();
transform_vals<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i - 1,
self.data_ptr<scalar_t>() + i,
binary_op);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
size_t temp_storage_bytes = 0;
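    // CUB two-phase pattern: the first call (null temp storage pointer) only
    // computes temp_storage_bytes; the second call performs the inclusive scan.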
AT_CUDA_CHECK(cub::DeviceScan::InclusiveScan(
nullptr,
temp_storage_bytes,
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i,
binary_op,
size_cub,
at::cuda::getCurrentCUDAStream()));
auto temp_storage = at::native::empty_cuda(
{static_cast<int64_t>(temp_storage_bytes)},
kByte, self.options().layout_opt(), self.options().device_opt(),
self.options().pinned_memory_opt());
AT_CUDA_CHECK(cub::DeviceScan::InclusiveScan(
temp_storage.data_ptr(),
temp_storage_bytes,
self.data_ptr<scalar_t>() + i,
result.data_ptr<scalar_t>() + i,
binary_op,
size_cub,
at::cuda::getCurrentCUDAStream()));
if (i > 0) {
if (self.data_ptr<scalar_t>() != result.data_ptr<scalar_t>()) {
// restore modified first element only if it's not an inplace operation
auto self_view = at::_unsafe_view(self, -1);
self_view[i].copy_(first_elem, /*non_blocking=*/true);
}
}
}
}
template<typename scalar_t, typename BinaryFunction>
void scan_dim(const Tensor& self, Tensor& result,
int64_t dim, scalar_t init, BinaryFunction binary_op) {
int ndim = self.dim();
Tensor self_ = self.contiguous();
bool copy_result = !result.is_contiguous();
Tensor result_ = result.contiguous();
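  // Dispatch: a scan covering every element (effectively 1-D) goes through CUB;
  // the innermost dimension uses the block-level shared-memory kernel; any
  // other dimension uses the outer-dim kernel.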
if (self.numel() == self.size(dim)) {
scan_cub<scalar_t>(self_, result_, init, binary_op);
} else if (dim == ndim - 1) {
scan_innermost_dim<scalar_t>(self_, result_, init, binary_op);
} else {
scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op);
}
if (copy_result) {
result.copy_(result_);
}
}
Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
TensorArg output_arg{ result, "output", 1 };
TensorArg input_arg{ self, "input", 2 };
checkAllSameGPU("logcumsumexp", {output_arg, input_arg});
AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "logcumsumexp_cuda", [&]() {
using accscalar_t = acc_type<scalar_t, true>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
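    // Numerically stable log-add-exp:
    //   log(exp(x) + exp(y)) = max(x, y) + log1p(exp(min(x, y) - max(x, y)))
    // which avoids overflow when the inputs are large in magnitude.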
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t {
scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan
if (min != max || ::isfinite(static_cast<accscalar_t>(min))) {
// nan will be propagated here
return ::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite inputs
return x;
}
};
scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp);
});
return result;
}
Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return _logcumsumexp_out_cuda(self, dim, result);
}
Tensor& _cumsum_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumsum", {output_arg, input_arg});
checkSameType("cumsum", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(
at::ScalarType::Half, self.scalar_type(), "cumsum_cuda", [&]() {
scalar_t init = 0;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::plus<scalar_t>());
});
return result;
}
Tensor _cumsum_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumsum_out_cuda(self, dim, result);
}
Tensor& _cumprod_out_cuda(const Tensor& self, int64_t dim, Tensor& result) {
TensorArg output_arg{result, "output", 1};
TensorArg input_arg{self, "input", 2};
checkAllSameGPU("cumprod", {output_arg, input_arg});
checkSameType("cumprod", output_arg, input_arg);
result.resize_(self.sizes());
if (self.dim() == 0) {
result.fill_(self);
return result;
}
if (self.numel() == 0) {
result.zero_();
return result;
}
auto wrap_dim = maybe_wrap_dim(dim, self.dim());
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(
at::ScalarType::Half, self.scalar_type(), "cumprod_cuda", [&]() {
scalar_t init = 1;
scan_dim<scalar_t>(
self,
result,
wrap_dim,
init,
std::multiplies<scalar_t>());
});
return result;
}
Tensor _cumprod_cuda(const Tensor& self, int64_t dim) {
Tensor result = at::empty_like(self, MemoryFormat::Contiguous);
return at::native::_cumprod_out_cuda(self, dim, result);
}
}} // namespace at::native
|
b78723a012b33e36526d16f10a6be76954a4e18a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "bfs_push_cuda.cuh"
static const int __tb_FirstItr_BFS = TB_SIZE;
static const int __tb_BFS = TB_SIZE;
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, unsigned long long local_src_node, uint32_t * p_dist_current, uint32_t * p_dist_old)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_dist_current[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
p_dist_old[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
}
}
// FP: "8 -> 9;
}
__global__ void FirstItr_BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstItr_BFS;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
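  // Load balancing: vertices whose degree reaches _NP_CROSSOVER_TB are expanded
  // by the whole thread block, those at or above _NP_CROSSOVER_WP (32) by a
  // single warp, and the remaining edges via the fine-grained per-thread work
  // items below.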
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
p_dist_old[src] = p_dist_current[src];
}
// FP: "10 -> 11;
// FP: "13 -> 14;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "14 -> 15;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "15 -> 16;
_np_closure[threadIdx.x].src = src;
// FP: "16 -> 17;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "19 -> 20;
// FP: "20 -> 21;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "21 -> 22;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "22 -> 23;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "25 -> 26;
__syncthreads();
// FP: "26 -> 27;
while (true)
{
// FP: "27 -> 28;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
break;
}
// FP: "35 -> 36;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "38 -> 39;
__syncthreads();
// FP: "39 -> 40;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "40 -> 41;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "43 -> 44;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "44 -> 45;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "57 -> 58;
__syncthreads();
}
// FP: "59 -> 60;
// FP: "60 -> 61;
{
const int warpid = threadIdx.x / 32;
// FP: "61 -> 62;
const int _np_laneid = cub::LaneId();
// FP: "62 -> 63;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
}
// FP: "87 -> 88;
__syncthreads();
// FP: "88 -> 89;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "89 -> 90;
while (_np.work())
{
// FP: "90 -> 91;
int _np_i =0;
// FP: "91 -> 92;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
// FP: "94 -> 95;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "108 -> 109;
_np.execute_round_done(ITSIZE);
// FP: "109 -> 110;
__syncthreads();
}
// FP: "111 -> 112;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "113 -> 114;
}
__global__ void BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_priority, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current, HGAccumulator<unsigned int> DGAccumulator_accum, HGAccumulator<unsigned int> work_items)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_BFS;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
__shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage work_items_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
work_items.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_dist_old[src] > p_dist_current[src])
{
DGAccumulator_accum.reduce( 1);
if (local_priority > p_dist_current[src])
{
p_dist_old[src] = p_dist_current[src];
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "15 -> 16;
// FP: "18 -> 19;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "19 -> 20;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "20 -> 21;
_np_closure[threadIdx.x].src = src;
// FP: "21 -> 22;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "24 -> 25;
// FP: "25 -> 26;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "26 -> 27;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "27 -> 28;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
while (true)
{
// FP: "32 -> 33;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "35 -> 36;
__syncthreads();
// FP: "36 -> 37;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
break;
}
// FP: "40 -> 41;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "43 -> 44;
__syncthreads();
// FP: "44 -> 45;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "45 -> 46;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "48 -> 49;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "49 -> 50;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "62 -> 63;
__syncthreads();
}
// FP: "64 -> 65;
// FP: "65 -> 66;
{
const int warpid = threadIdx.x / 32;
// FP: "66 -> 67;
const int _np_laneid = cub::LaneId();
// FP: "67 -> 68;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "90 -> 91;
__syncthreads();
// FP: "91 -> 92;
}
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "94 -> 95;
while (_np.work())
{
// FP: "95 -> 96;
int _np_i =0;
// FP: "96 -> 97;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
// FP: "99 -> 100;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "113 -> 114;
_np.execute_round_done(ITSIZE);
// FP: "114 -> 115;
__syncthreads();
}
// FP: "116 -> 117;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "119 -> 120;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
work_items.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(work_items_ts);
// FP: "120 -> 121;
}
__global__ void BFSSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_dist_current, HGAccumulator<uint64_t> DGAccumulator_sum, HGReduceMax<uint32_t> DGMax)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ hipcub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGMax_ts;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_sum.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGMax.thread_entry();
// FP: "5 -> 6;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_dist_current[src] < local_infinity)
{
DGAccumulator_sum.reduce( 1);
DGMax.reduce(p_dist_current[src]);
}
}
}
// FP: "14 -> 15;
DGAccumulator_sum.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "15 -> 16;
DGMax.thread_exit<hipcub::BlockReduce<uint32_t, TB_SIZE> >(DGMax_ts);
// FP: "16 -> 17;
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_infinity, local_src_node, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph_allNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->gg.nnodes, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_masterNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_nodesWithEdges_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( FirstItr_BFS) , dim3(blocks), dim3(__tb_FirstItr_BFS), 0, 0, ctx->gg, __begin, __end, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void FirstItr_BFS_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void BFS_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
HGAccumulator<unsigned int> _work_items;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<unsigned int> work_itemsval = Shared<unsigned int>(1);
*(work_itemsval.cpu_wr_ptr()) = 0;
_work_items.rv = work_itemsval.gpu_wr_ptr();
hipLaunchKernelGGL(( BFS) , dim3(blocks), dim3(__tb_BFS), 0, 0, ctx->gg, __begin, __end, local_priority, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()), _DGAccumulator_accum, _work_items);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
work_items = *(work_itemsval.cpu_rd_ptr());
}
void BFS_allNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFS_masterNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFS_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_sum;
HGReduceMax<uint32_t> _DGMax;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_sumval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<uint32_t> DGMaxval = Shared<uint32_t>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGMaxval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGMax.rv = DGMaxval.gpu_wr_ptr();
// FP: "12 -> 13;
hipLaunchKernelGGL(( BFSSanityCheck) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->dist_current.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGMax);
// FP: "13 -> 14;
check_cuda_kernel;
// FP: "14 -> 15;
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "15 -> 16;
DGMax = *(DGMaxval.cpu_rd_ptr());
// FP: "16 -> 17;
}
void BFSSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
| b78723a012b33e36526d16f10a6be76954a4e18a.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "kernels/reduce.cuh"
#include "bfs_push_cuda.cuh"
static const int __tb_FirstItr_BFS = TB_SIZE;
static const int __tb_BFS = TB_SIZE;
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, unsigned long long local_src_node, uint32_t * p_dist_current, uint32_t * p_dist_old)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_dist_current[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
p_dist_old[src] = (graph.node_data[src] == local_src_node) ? 0 : local_infinity;
}
}
// FP: "8 -> 9;
}
__global__ void FirstItr_BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstItr_BFS;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
p_dist_old[src] = p_dist_current[src];
}
// FP: "10 -> 11;
// FP: "13 -> 14;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "14 -> 15;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "15 -> 16;
_np_closure[threadIdx.x].src = src;
// FP: "16 -> 17;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "19 -> 20;
// FP: "20 -> 21;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "21 -> 22;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "22 -> 23;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "25 -> 26;
__syncthreads();
// FP: "26 -> 27;
while (true)
{
// FP: "27 -> 28;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
break;
}
// FP: "35 -> 36;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "38 -> 39;
__syncthreads();
// FP: "39 -> 40;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "40 -> 41;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "43 -> 44;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "44 -> 45;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
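          // Edge relaxation: atomicTestMin atomically lowers p_dist_current[dst]
          // to new_dist when it is smaller and returns the previous value; a
          // successful decrease flags dst in the update bitset so the changed
          // distance can be propagated.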
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "57 -> 58;
__syncthreads();
}
// FP: "59 -> 60;
// FP: "60 -> 61;
{
const int warpid = threadIdx.x / 32;
// FP: "61 -> 62;
const int _np_laneid = cub::LaneId();
// FP: "62 -> 63;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "85 -> 86;
__syncthreads();
// FP: "86 -> 87;
}
// FP: "87 -> 88;
__syncthreads();
// FP: "88 -> 89;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "89 -> 90;
while (_np.work())
{
// FP: "90 -> 91;
int _np_i =0;
// FP: "91 -> 92;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
// FP: "94 -> 95;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "108 -> 109;
_np.execute_round_done(ITSIZE);
// FP: "109 -> 110;
__syncthreads();
}
// FP: "111 -> 112;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "113 -> 114;
}
__global__ void BFS(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_priority, uint32_t * p_dist_current, uint32_t * p_dist_old, DynamicBitset& bitset_dist_current, HGAccumulator<unsigned int> DGAccumulator_accum, HGAccumulator<unsigned int> work_items)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_BFS;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
__shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage work_items_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
work_items.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_dist_old[src] > p_dist_current[src])
{
DGAccumulator_accum.reduce( 1);
if (local_priority > p_dist_current[src])
{
p_dist_old[src] = p_dist_current[src];
}
else
{
pop = false;
}
}
else
{
pop = false;
}
}
// FP: "15 -> 16;
// FP: "18 -> 19;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "19 -> 20;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "20 -> 21;
_np_closure[threadIdx.x].src = src;
// FP: "21 -> 22;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "24 -> 25;
// FP: "25 -> 26;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "26 -> 27;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "27 -> 28;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "30 -> 31;
__syncthreads();
// FP: "31 -> 32;
while (true)
{
// FP: "32 -> 33;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "35 -> 36;
__syncthreads();
// FP: "36 -> 37;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
break;
}
// FP: "40 -> 41;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "43 -> 44;
__syncthreads();
// FP: "44 -> 45;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "45 -> 46;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "48 -> 49;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "49 -> 50;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type jj;
jj = ns +_np_j;
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "62 -> 63;
__syncthreads();
}
// FP: "64 -> 65;
// FP: "65 -> 66;
{
const int warpid = threadIdx.x / 32;
// FP: "66 -> 67;
const int _np_laneid = cub::LaneId();
// FP: "67 -> 68;
while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type jj;
jj = _np_w_start +_np_ii;
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
}
// FP: "90 -> 91;
__syncthreads();
// FP: "91 -> 92;
}
// FP: "92 -> 93;
__syncthreads();
// FP: "93 -> 94;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "94 -> 95;
while (_np.work())
{
// FP: "95 -> 96;
int _np_i =0;
// FP: "96 -> 97;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
// FP: "99 -> 100;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type jj;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
jj= nps.fg.itvalue[_np_i];
{
work_items.reduce( 1);
index_type dst;
uint32_t new_dist;
uint32_t old_dist;
dst = graph.getAbsDestination(jj);
new_dist = 1 + p_dist_current[src];
old_dist = atomicTestMin(&p_dist_current[dst], new_dist);
if (old_dist > new_dist)
{
bitset_dist_current.set(dst);
}
}
}
// FP: "113 -> 114;
_np.execute_round_done(ITSIZE);
// FP: "114 -> 115;
__syncthreads();
}
// FP: "116 -> 117;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "119 -> 120;
DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts);
work_items.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(work_items_ts);
// FP: "120 -> 121;
}
__global__ void BFSSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_dist_current, HGAccumulator<uint64_t> DGAccumulator_sum, HGReduceMax<uint32_t> DGMax)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
__shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
__shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGMax_ts;
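  // Threads accumulate locally between thread_entry() and thread_exit();
  // thread_exit() combines the per-thread values with a CUB block reduction
  // before folding them into the device-wide result.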
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
DGAccumulator_sum.thread_entry();
// FP: "3 -> 4;
// FP: "4 -> 5;
DGMax.thread_entry();
// FP: "5 -> 6;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
if (p_dist_current[src] < local_infinity)
{
DGAccumulator_sum.reduce( 1);
DGMax.reduce(p_dist_current[src]);
}
}
}
// FP: "14 -> 15;
DGAccumulator_sum.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_sum_ts);
// FP: "15 -> 16;
DGMax.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE> >(DGMax_ts);
// FP: "16 -> 17;
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, local_infinity, local_src_node, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void InitializeGraph_allNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->gg.nnodes, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_masterNodes_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void InitializeGraph_nodesWithEdges_cuda(const uint32_t & local_infinity, unsigned long long local_src_node, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_infinity, local_src_node, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
FirstItr_BFS <<<blocks, __tb_FirstItr_BFS>>>(ctx->gg, __begin, __end, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void FirstItr_BFS_allNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(0, ctx->gg.nnodes, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_masterNodes_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
// FP: "2 -> 3;
}
void FirstItr_BFS_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
FirstItr_BFS_cuda(0, ctx->numNodesWithEdges, ctx);
// FP: "2 -> 3;
}
void BFS_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<unsigned int> _DGAccumulator_accum;
HGAccumulator<unsigned int> _work_items;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<unsigned int> work_itemsval = Shared<unsigned int>(1);
*(work_itemsval.cpu_wr_ptr()) = 0;
_work_items.rv = work_itemsval.gpu_wr_ptr();
BFS <<<blocks, __tb_BFS>>>(ctx->gg, __begin, __end, local_priority, ctx->dist_current.data.gpu_wr_ptr(), ctx->dist_old.data.gpu_wr_ptr(), *(ctx->dist_current.is_updated.gpu_rd_ptr()), _DGAccumulator_accum, _work_items);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
work_items = *(work_itemsval.cpu_rd_ptr());
}
void BFS_allNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFS_masterNodes_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFS_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, unsigned int & work_items, const uint32_t local_priority, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFS_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, work_items, local_priority, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint64_t> _DGAccumulator_sum;
HGReduceMax<uint32_t> _DGMax;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint64_t> DGAccumulator_sumval = Shared<uint64_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<uint32_t> DGMaxval = Shared<uint32_t>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGMaxval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGMax.rv = DGMaxval.gpu_wr_ptr();
// FP: "12 -> 13;
BFSSanityCheck <<<blocks, threads>>>(ctx->gg, __begin, __end, local_infinity, ctx->dist_current.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGMax);
// FP: "13 -> 14;
check_cuda_kernel;
// FP: "14 -> 15;
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "15 -> 16;
DGMax = *(DGMaxval.cpu_rd_ptr());
// FP: "16 -> 17;
}
void BFSSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
void BFSSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_sum, uint32_t & DGMax, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
// FP: "1 -> 2;
BFSSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGMax, local_infinity, ctx);
// FP: "2 -> 3;
}
|
4ab8602ecf51a4881765b8fa86cc5ba7c3c2f8cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/roi_pooling_layer.hpp"
#include <cfloat>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void
ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h =
static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w =
static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
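  // One thread per pooled output element
  // (count = num_rois * channels * pooled_height * pooled_width).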
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois,
const Dtype spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = index / width % height;
int c = index / width / height % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled this bottom
// unit
      // Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h =
static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w =
static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe | 4ab8602ecf51a4881765b8fa86cc5ba7c3c2f8cf.cu | #include "caffe/layers/roi_pooling_layer.hpp"
#include <cfloat>
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void
ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h =
static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w =
static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois,
const Dtype spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
Dtype* bottom_diff, const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = index / width % height;
int c = index / width / height % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
      // Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled this bottom
// unit
      // Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h =
static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height);
Dtype bin_size_w =
static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe |
8ed8a029fab9784de7a126577e0f1f6bbeee6968.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// ****************************************************************************
/// ***************** Common utilities and CUDA Kernels **********************
/// ****************************************************************************
//~ #include "utils.h"
#include "common.h"
#define W_SIGN(a) ((a > 0) ? (1.0f) : (-1.0f))
#define SQRT_2 1.4142135623730951
#include <rocblas.h>
/// soft thresholding of the detail coefficients (2D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_soft_thresh(DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_h[gidy*Nc + gidx];
c_h[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
val = c_v[gidy*Nc + gidx];
c_v[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// soft thresholding of the detail coefficients (1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
// CHECKME: consider merging this kernel with the previous kernel
__global__ void w_kern_soft_thresh_1d(DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// soft thresholding of the approximation coefficients (2D and 1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_soft_thresh_appcoeffs(DTYPE* c_a, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_a[gidy*Nc + gidx];
c_a[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// "Cousins-threshold": set to zero the detail such as abs(detail) > abs(app)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_thresh_cousins(DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int tid = gidy*Nc + gidx;
if (gidx < Nc && gidy < Nr) {
DTYPE val = fabsf(c_a[tid]);
if (val > 0) return; // CHECKME: discard non-thresholded coefficients ?
if (fabsf(c_h[tid]) > val) c_h[tid] = 0;
if (fabsf(c_v[tid]) > val) c_v[tid] = 0;
if (fabsf(c_d[tid]) > val) c_d[tid] = 0;
}
}
/// Hard thresholding of the detail coefficients (2D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_hard_thresh(DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_h[gidy*Nc + gidx];
c_h[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
val = c_v[gidy*Nc + gidx];
c_v[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Hard thresholding of the detail coefficients (1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
// CHECKME: consider merging this kernel with the previous kernel
__global__ void w_kern_hard_thresh_1d(DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Hard thresholding of the approximation coefficients (2D and 1D)
__global__ void w_kern_hard_thresh_appcoeffs(DTYPE* c_a, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_a[gidy*Nc + gidx];
c_a[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Circular shift of the image (2D and 1D)
__global__ void w_kern_circshift(DTYPE* d_image, DTYPE* d_out, int Nr, int Nc, int sr, int sc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidx < Nc && gidy < Nr) {
int r = gidy - sr, c = gidx - sc;
if (r < 0) r += Nr;
if (c < 0) c += Nc;
d_out[gidy*Nc + gidx] = d_image[r*Nc + c];
}
}
/// ****************************************************************************
/// ******************** Common CUDA Kernels calls *****************************
/// ****************************************************************************
void w_call_soft_thresh(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs, int normalize, int threshold_cousins) {
int tpb = 16; // Threads per block
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (threshold_cousins) {
puts("Warning: for now, threshold_cousins is only implemented for SWT");
threshold_cousins = 0;
}
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
if (do_thresh_appcoeffs || (threshold_cousins && ndims > 1)) {
DTYPE beta2 = beta;
if (normalize > 0) { // beta2 = beta/sqrt(2)^nlevels
int nlevels2 = nlevels/2;
beta2 /= (1 << nlevels2);
if (nlevels2 *2 != nlevels) beta2 /= SQRT_2;
}
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
hipLaunchKernelGGL(( w_kern_soft_thresh_appcoeffs), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[0], beta2, Nr2, Nc2);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (normalize > 0) beta /= SQRT_2;
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
        if (ndims > 1) hipLaunchKernelGGL(( w_kern_soft_thresh), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], beta, Nr, Nc);
        else hipLaunchKernelGGL(( w_kern_soft_thresh_1d), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[i+1], beta, Nr, Nc);
if ((threshold_cousins) && (ndims > 1)) { // no effect on 1D data
hipLaunchKernelGGL(( w_kern_thresh_cousins), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[0], d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc);
}
}
}
void w_call_hard_thresh(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs, int normalize) {
int tpb = 16; // Threads per block
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
DTYPE beta2 = beta;
if (do_thresh_appcoeffs) {
if (normalize > 0) { // beta2 = beta/sqrt(2)^nlevels
int nlevels2 = nlevels/2;
beta2 /= (1 << nlevels2);
if (nlevels2 *2 != nlevels) beta2 /= SQRT_2;
}
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
hipLaunchKernelGGL(( w_kern_hard_thresh_appcoeffs), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[0], beta, Nr2, Nc2);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (normalize > 0) beta /= SQRT_2;
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
        if (ndims > 1) hipLaunchKernelGGL(( w_kern_hard_thresh), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], beta, Nr, Nc);
        else hipLaunchKernelGGL(( w_kern_hard_thresh_1d), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_coeffs[i+1], beta, Nr, Nc);
}
}
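/// Shrinkage: scale the detail coefficients (and the approximation coefficients
/// if do_thresh_appcoeffs) by 1/(1+beta), using cublas_scal on each band.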
void w_shrink(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
if (do_thresh_appcoeffs) {
cublas_scal(Nr2*Nc2, 1.0f/(1.0f + beta), d_coeffs[0], 1);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (ndims == 2) {
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+1], 1);
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+2], 1);
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+3], 1);
}
else { // 1D
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[i+1], 1);
}
}
}
// if inplace = 1, the result is in "d_image" ; otherwise result is in "d_image2".
void w_call_circshift(DTYPE* d_image, DTYPE* d_image2, w_info winfos, int sr, int sc, int inplace) {
int Nr = winfos.Nr, Nc = winfos.Nc, ndims = winfos.ndims;
// Modulus in C can be negative
if (sr < 0) sr += Nr; // or do while loops to ensure positive numbers
if (sc < 0) sc += Nc;
int tpb = 16; // Threads per block
sr = sr % Nr;
sc = sc % Nc;
if (ndims == 1) sr = 0;
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
if (inplace) {
hipMemcpy(d_image2, d_image, Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( w_kern_circshift), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image2, d_image, Nr, Nc, sr, sc);
}
else {
hipLaunchKernelGGL(( w_kern_circshift), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_image2, Nr, Nc, sr, sc);
}
}
/// Creates an allocated/padded device array : [ An, H1, V1, D1, ..., Hn, Vn, Dn]
DTYPE** w_create_coeffs_buffer(w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
int Nr0 = Nr, Nc0 = Nc;
if (!do_swt) {
w_div2(&Nr0);
w_div2(&Nc0);
}
DTYPE** res = (DTYPE**) calloc(3*nlevels+1, sizeof(DTYPE*));
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
hipMalloc(&(res[i]), Nr*Nc*sizeof(DTYPE));
hipMemset(res[i], 0, Nr*Nc*sizeof(DTYPE));
hipMalloc(&(res[i+1]), Nr*Nc*sizeof(DTYPE));
hipMemset(res[i+1], 0, Nr*Nc*sizeof(DTYPE));
hipMalloc(&(res[i+2]), Nr*Nc*sizeof(DTYPE));
hipMemset(res[i+2], 0, Nr*Nc*sizeof(DTYPE));
}
// App coeff (last scale). They are also useful as a temp. buffer for the reconstruction, hence a bigger size
hipMalloc(&(res[0]), Nr0*Nc0*sizeof(DTYPE));
hipMemset(res[0], 0, Nr0*Nc0*sizeof(DTYPE));
return res;
}
/// Creates an allocated/padded device array : [ An, D1, ..., Dn]
DTYPE** w_create_coeffs_buffer_1d(w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
int Nc0 = Nc;
if (!do_swt) w_div2(&Nc0);
DTYPE** res = (DTYPE**) calloc(nlevels+1, sizeof(DTYPE*));
// Det coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) w_div2(&Nc);
hipMalloc(&(res[i]), Nr*Nc*sizeof(DTYPE));
hipMemset(res[i], 0, Nr*Nc*sizeof(DTYPE));
}
// App coeff (last scale). They are also useful as a temp. buffer for the reconstruction, hence a bigger size
hipMalloc(&(res[0]), Nr*Nc0*sizeof(DTYPE));
hipMemset(res[0], 0, Nr*Nc0*sizeof(DTYPE));
return res;
}
/// Deep free of wavelet coefficients
void w_free_coeffs_buffer(DTYPE** coeffs, int nlevels) {
for (int i = 0; i < 3*nlevels+1; i++) hipFree(coeffs[i]);
free(coeffs);
}
void w_free_coeffs_buffer_1d(DTYPE** coeffs, int nlevels) {
for (int i = 0; i < nlevels+1; i++) hipFree(coeffs[i]);
free(coeffs);
}
/// Deep copy of wavelet coefficients. All structures must be allocated.
void w_copy_coeffs_buffer(DTYPE** dst, DTYPE** src, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, nlevels = winfos.nlevels, do_swt = winfos.do_swt;
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
hipMemcpy(dst[i], src[i], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
hipMemcpy(dst[i+1], src[i+1], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
hipMemcpy(dst[i+2], src[i+2], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
}
// App coeff (last scale)
hipMemcpy(dst[0], src[0], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
}
void w_copy_coeffs_buffer_1d(DTYPE** dst, DTYPE** src, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, nlevels = winfos.nlevels, do_swt = winfos.do_swt;
// Det Coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) w_div2(&Nc);
hipMemcpy(dst[i], src[i], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
}
// App coeff (last scale)
hipMemcpy(dst[0], src[0], Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
}
///
/// ----------------------------------------------------------------------------
///
void w_add_coeffs(DTYPE** dst, DTYPE** src, w_info winfos, DTYPE alpha) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
cublas_axpy(Nr*Nc, alpha, src[i], 1, dst[i], 1);
cublas_axpy(Nr*Nc, alpha, src[i+1], 1, dst[i+1], 1);
cublas_axpy(Nr*Nc, alpha, src[i+2], 1, dst[i+2], 1);
}
// App coeff (last scale)
cublas_axpy(Nr*Nc, alpha, src[0], 1, dst[0], 1);
}
void w_add_coeffs_1d(DTYPE** dst, DTYPE** src, w_info winfos, DTYPE alpha) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
// Det Coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) Nc /= 2;
cublas_axpy(Nr*Nc, alpha, src[i], 1, dst[i], 1);
}
// App coeff (last scale)
cublas_axpy(Nr*Nc, alpha, src[0], 1, dst[0], 1);
}
| 8ed8a029fab9784de7a126577e0f1f6bbeee6968.cu | /// ****************************************************************************
/// ***************** Common utilities and CUDA Kernels **********************
/// ****************************************************************************
//~ #include "utils.h"
#include "common.h"
#define W_SIGN(a) ((a > 0) ? (1.0f) : (-1.0f))
#define SQRT_2 1.4142135623730951
#include <cublas.h>
/// soft thresholding of the detail coefficients (2D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_soft_thresh(DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_h[gidy*Nc + gidx];
c_h[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
val = c_v[gidy*Nc + gidx];
c_v[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// soft thresholding of the detail coefficients (1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
// CHECKME: consider merging this kernel with the previous kernel
__global__ void w_kern_soft_thresh_1d(DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// soft thresholding of the approximation coefficients (2D and 1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_soft_thresh_appcoeffs(DTYPE* c_a, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_a[gidy*Nc + gidx];
c_a[gidy*Nc + gidx] = copysignf(max(fabsf(val)-beta, 0.0f), val);
}
}
/// "Cousins-threshold": set to zero the detail such as abs(detail) > abs(app)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_thresh_cousins(DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int tid = gidy*Nc + gidx;
if (gidx < Nc && gidy < Nr) {
DTYPE val = fabsf(c_a[tid]);
if (val > 0) return; // CHECKME: discard non-thresholded coefficients ?
if (fabsf(c_h[tid]) > val) c_h[tid] = 0;
if (fabsf(c_v[tid]) > val) c_v[tid] = 0;
if (fabsf(c_d[tid]) > val) c_d[tid] = 0;
}
}
/// Hard thresholding of the detail coefficients (2D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
__global__ void w_kern_hard_thresh(DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_h[gidy*Nc + gidx];
c_h[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
val = c_v[gidy*Nc + gidx];
c_v[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Hard thresholding of the detail coefficients (1D)
/// Must be launched with block size (Nc, Nr) : the size of the current coefficient vector
// CHECKME: consider merging this kernel with the previous kernel
__global__ void w_kern_hard_thresh_1d(DTYPE* c_d, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_d[gidy*Nc + gidx];
c_d[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Hard thresholding of the approximation coefficients (2D and 1D)
__global__ void w_kern_hard_thresh_appcoeffs(DTYPE* c_a, DTYPE beta, int Nr, int Nc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
DTYPE val = 0.0f;
if (gidx < Nc && gidy < Nr) {
val = c_a[gidy*Nc + gidx];
c_a[gidy*Nc + gidx] = max(W_SIGN(fabsf(val)-beta), 0.0f)*val;
}
}
/// Circular shift of the image (2D and 1D)
__global__ void w_kern_circshift(DTYPE* d_image, DTYPE* d_out, int Nr, int Nc, int sr, int sc) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidx < Nc && gidy < Nr) {
int r = gidy - sr, c = gidx - sc;
if (r < 0) r += Nr;
if (c < 0) c += Nc;
d_out[gidy*Nc + gidx] = d_image[r*Nc + c];
}
}
/// ****************************************************************************
/// ******************** Common CUDA Kernels calls *****************************
/// ****************************************************************************
void w_call_soft_thresh(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs, int normalize, int threshold_cousins) {
int tpb = 16; // Threads per block
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (threshold_cousins) {
puts("Warning: for now, threshold_cousins is only implemented for SWT");
threshold_cousins = 0;
}
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
if (do_thresh_appcoeffs || (threshold_cousins && ndims > 1)) {
DTYPE beta2 = beta;
if (normalize > 0) { // beta2 = beta/sqrt(2)^nlevels
int nlevels2 = nlevels/2;
beta2 /= (1 << nlevels2);
if (nlevels2 *2 != nlevels) beta2 /= SQRT_2;
}
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
w_kern_soft_thresh_appcoeffs<<<n_blocks, n_threads_per_block>>>(d_coeffs[0], beta2, Nr2, Nc2);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (normalize > 0) beta /= SQRT_2;
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
if (ndims > 1) w_kern_soft_thresh<<<n_blocks, n_threads_per_block>>>(d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], beta, Nr, Nc);
else w_kern_soft_thresh_1d<<<n_blocks, n_threads_per_block>>>(d_coeffs[i+1], beta, Nr, Nc);
if ((threshold_cousins) && (ndims > 1)) { // no effect on 1D data
w_kern_thresh_cousins<<<n_blocks, n_threads_per_block>>>(d_coeffs[0], d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc);
}
}
}
void w_call_hard_thresh(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs, int normalize) {
int tpb = 16; // Threads per block
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
DTYPE beta2 = beta;
if (do_thresh_appcoeffs) {
if (normalize > 0) { // beta2 = beta/sqrt(2)^nlevels
int nlevels2 = nlevels/2;
beta2 /= (1 << nlevels2);
if (nlevels2 *2 != nlevels) beta2 /= SQRT_2;
}
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
w_kern_hard_thresh_appcoeffs<<<n_blocks, n_threads_per_block>>>(d_coeffs[0], beta, Nr2, Nc2);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (normalize > 0) beta /= SQRT_2;
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
if (ndims > 1) w_kern_hard_thresh<<<n_blocks, n_threads_per_block>>>(d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], beta, Nr, Nc);
else w_kern_hard_thresh_1d<<<n_blocks, n_threads_per_block>>>(d_coeffs[i+1], beta, Nr, Nc);
}
}
void w_shrink(DTYPE** d_coeffs, DTYPE beta, w_info winfos, int do_thresh_appcoeffs) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels, ndims = winfos.ndims;
int Nr2 = Nr, Nc2 = Nc;
if (!do_swt) {
if (ndims > 1) w_div2(&Nr2);
w_div2(&Nc2);
}
if (do_thresh_appcoeffs) {
cublas_scal(Nr2*Nc2, 1.0f/(1.0f + beta), d_coeffs[0], 1);
}
for (int i = 0; i < nlevels; i++) {
if (!do_swt) {
if (ndims > 1) w_div2(&Nr);
w_div2(&Nc);
}
if (ndims == 2) {
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+1], 1);
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+2], 1);
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[3*i+3], 1);
}
else { // 1D
cublas_scal(Nr*Nc, 1.0f/(1.0f + beta), d_coeffs[i+1], 1);
}
}
}
// if inplace = 1, the result is in "d_image" ; otherwise result is in "d_image2".
void w_call_circshift(DTYPE* d_image, DTYPE* d_image2, w_info winfos, int sr, int sc, int inplace) {
int Nr = winfos.Nr, Nc = winfos.Nc, ndims = winfos.ndims;
// Modulus in C can be negative
if (sr < 0) sr += Nr; // or do while loops to ensure positive numbers
if (sc < 0) sc += Nc;
int tpb = 16; // Threads per block
sr = sr % Nr;
sc = sc % Nc;
if (ndims == 1) sr = 0;
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
if (inplace) {
cudaMemcpy(d_image2, d_image, Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
w_kern_circshift<<<n_blocks, n_threads_per_block>>>(d_image2, d_image, Nr, Nc, sr, sc);
}
else {
w_kern_circshift<<<n_blocks, n_threads_per_block>>>(d_image, d_image2, Nr, Nc, sr, sc);
}
}
/// Creates an allocated/padded device array : [ An, H1, V1, D1, ..., Hn, Vn, Dn]
DTYPE** w_create_coeffs_buffer(w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
int Nr0 = Nr, Nc0 = Nc;
if (!do_swt) {
w_div2(&Nr0);
w_div2(&Nc0);
}
DTYPE** res = (DTYPE**) calloc(3*nlevels+1, sizeof(DTYPE*));
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
cudaMalloc(&(res[i]), Nr*Nc*sizeof(DTYPE));
cudaMemset(res[i], 0, Nr*Nc*sizeof(DTYPE));
cudaMalloc(&(res[i+1]), Nr*Nc*sizeof(DTYPE));
cudaMemset(res[i+1], 0, Nr*Nc*sizeof(DTYPE));
cudaMalloc(&(res[i+2]), Nr*Nc*sizeof(DTYPE));
cudaMemset(res[i+2], 0, Nr*Nc*sizeof(DTYPE));
}
// App coeff (last scale). They are also useful as a temp. buffer for the reconstruction, hence a bigger size
cudaMalloc(&(res[0]), Nr0*Nc0*sizeof(DTYPE));
cudaMemset(res[0], 0, Nr0*Nc0*sizeof(DTYPE));
return res;
}
/// Creates an allocated/padded device array : [ An, D1, ..., Dn]
DTYPE** w_create_coeffs_buffer_1d(w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
int Nc0 = Nc;
if (!do_swt) w_div2(&Nc0);
DTYPE** res = (DTYPE**) calloc(nlevels+1, sizeof(DTYPE*));
// Det coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) w_div2(&Nc);
cudaMalloc(&(res[i]), Nr*Nc*sizeof(DTYPE));
cudaMemset(res[i], 0, Nr*Nc*sizeof(DTYPE));
}
// App coeff (last scale). They are also useful as a temp. buffer for the reconstruction, hence a bigger size
cudaMalloc(&(res[0]), Nr*Nc0*sizeof(DTYPE));
cudaMemset(res[0], 0, Nr*Nc0*sizeof(DTYPE));
return res;
}
/// Deep free of wavelet coefficients
void w_free_coeffs_buffer(DTYPE** coeffs, int nlevels) {
for (int i = 0; i < 3*nlevels+1; i++) cudaFree(coeffs[i]);
free(coeffs);
}
void w_free_coeffs_buffer_1d(DTYPE** coeffs, int nlevels) {
for (int i = 0; i < nlevels+1; i++) cudaFree(coeffs[i]);
free(coeffs);
}
/// Deep copy of wavelet coefficients. All structures must be allocated.
void w_copy_coeffs_buffer(DTYPE** dst, DTYPE** src, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, nlevels = winfos.nlevels, do_swt = winfos.do_swt;
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
cudaMemcpy(dst[i], src[i], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
cudaMemcpy(dst[i+1], src[i+1], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
cudaMemcpy(dst[i+2], src[i+2], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
}
// App coeff (last scale)
cudaMemcpy(dst[0], src[0], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
}
void w_copy_coeffs_buffer_1d(DTYPE** dst, DTYPE** src, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, nlevels = winfos.nlevels, do_swt = winfos.do_swt;
// Det Coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) w_div2(&Nc);
cudaMemcpy(dst[i], src[i], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
}
// App coeff (last scale)
cudaMemcpy(dst[0], src[0], Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
}
///
/// ----------------------------------------------------------------------------
///
void w_add_coeffs(DTYPE** dst, DTYPE** src, w_info winfos, DTYPE alpha) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
// Coeffs (H, V, D)
for (int i = 1; i < 3*nlevels+1; i += 3) {
if (!do_swt) {
w_div2(&Nr);
w_div2(&Nc);
}
cublas_axpy(Nr*Nc, alpha, src[i], 1, dst[i], 1);
cublas_axpy(Nr*Nc, alpha, src[i+1], 1, dst[i+1], 1);
cublas_axpy(Nr*Nc, alpha, src[i+2], 1, dst[i+2], 1);
}
// App coeff (last scale)
cublas_axpy(Nr*Nc, alpha, src[0], 1, dst[0], 1);
}
void w_add_coeffs_1d(DTYPE** dst, DTYPE** src, w_info winfos, DTYPE alpha) {
int Nr = winfos.Nr, Nc = winfos.Nc, do_swt = winfos.do_swt, nlevels = winfos.nlevels;
// Det Coeffs
for (int i = 1; i < nlevels+1; i++) {
if (!do_swt) Nc /= 2;
cublas_axpy(Nr*Nc, alpha, src[i], 1, dst[i], 1);
}
// App coeff (last scale)
cublas_axpy(Nr*Nc, alpha, src[0], 1, dst[0], 1);
}
|
f2387c3bbf5b24879151d9218f297a5fd53f0840.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
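    // One query per 32-thread warp: each warp fills dst[offset .. offset + size)
    // of the flat per-document array with its query id.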
template <ui32 BLOCK_SIZE>
__global__ void ComputeGroupIdsImpl(const ui32* qSizes, const ui32* qOffsets, ui32 offsetsBias, int qCount, ui32* dst) {
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 writeOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
dst += writeOffset;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
for (int i = x; i < querySize; i += 32) {
dst[i] = qid;
}
}
__device__ __forceinline__ ui32 SampledQuerySize(float sampleRate, ui32 qSize) {
const ui32 sampledSize = ceil(sampleRate * qSize);
if (sampledSize < 2) {
return min(2, qSize);
}
return sampledSize;
}
void ComputeGroupIds(const ui32* qSizes, const ui32* qOffsets, ui32 offsetsBias, int qCount, ui32* dst, TCudaStream stream) {
const ui64 blockSize = 128;
const ui64 numBlocks = CeilDivide(static_cast<ui64>(qCount) * 32, blockSize);
if (numBlocks > 0) {
hipLaunchKernelGGL(( ComputeGroupIdsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream , qSizes, qOffsets, offsetsBias, qCount, dst);
}
}
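    // For documents visited in the order given by `docs`, writes 1 at positions
    // where the query id changes (and at the last document), 0 elsewhere.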
__global__ void FillQueryEndMaskImpl(const ui32* qids, const ui32* docs, ui32 docCount, ui32* masks) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < docCount) {
ui32 idx = docs[i];
const ui32 qid = qids[idx];
const ui32 nextDoc = i + 1 < docCount ? docs[i + 1] : static_cast<ui32>(-1);
const ui32 isEnd = i + 1 < docCount ? qid != qids[nextDoc] : 1;
masks[i] = isEnd;
}
}
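    // Builds 64-bit sort keys: query id in the high 32 bits, a random draw in the
    // low 32 bits, so sorting by key keeps documents grouped by query while
    // shuffling them within each query.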
__global__ void CreateSortKeysImpl(ui64* seeds, const ui32* qids, ui32 docCount, ui64* keys) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
seeds += i;
ui64 s = seeds[0];
while (i < docCount) {
const ui64 highBits = ((ui64)qids[i]) << 32;
const ui64 lowBits = AdvanceSeed(&s) >> 32;
keys[i] = lowBits | highBits;
i += gridDim.x * blockDim.x;
}
seeds[0] = s;
}
void CreateSortKeys(ui64* seeds, ui32 seedSize, const ui32* qids, ui32 docCount, ui64* keys, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = min(CeilDivide(seedSize, blockSize),
CeilDivide(docCount, blockSize));
if (numBlocks) {
hipLaunchKernelGGL(( CreateSortKeysImpl), dim3(numBlocks), dim3(blockSize), 0, stream, seeds, qids, docCount, keys);
}
}
void FillQueryEndMask(const ui32* qids, const ui32* docs, ui32 docCount, ui32* masks, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = (docCount + blockSize - 1) / blockSize;
if (numBlocks) {
hipLaunchKernelGGL(( FillQueryEndMaskImpl), dim3(numBlocks), dim3(blockSize), 0, stream, qids, docs, docCount, masks);
}
}
__global__ void FillTakenDocsMaskImpl(const float* takenQueryMasks,
const ui32* qids,
const ui32* docs, ui32 docCount,
const ui32* queryOffsets,
const ui32 queryOffsetsBias,
const ui32* querySizes,
const float docwiseSampleRate,
const ui32 maxQuerySize,
float* takenMask) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < docCount) {
const ui32 doc = docs[i];
const ui32 queryId = __ldg(qids + doc);
const ui32 queryOffset = __ldg(queryOffsets + queryId) - queryOffsetsBias;
const ui32 querySize = __ldg(querySizes + queryId);
const ui32 sampledQuerySize = min(maxQuerySize, SampledQuerySize(docwiseSampleRate, querySize));
float mask = __ldg(takenQueryMasks + queryId) * ((i - queryOffset) < sampledQuerySize);
takenMask[i] = mask;
i += gridDim.x * blockDim.x;
}
}
void FillTakenDocsMask(const float* takenQueryMasks,
const ui32* qids,
const ui32* docs, ui32 docCount,
const ui32* queryOffsets,
const ui32 queryOffsetsBias,
const ui32* querySizes,
const float docwiseSampleRate,
const ui32 maxQuerySize,
float* takenMask,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = (docCount + blockSize - 1) / blockSize;
if (numBlocks) {
hipLaunchKernelGGL(( FillTakenDocsMaskImpl), dim3(numBlocks), dim3(blockSize), 0, stream, takenQueryMasks, qids, docs, docCount, queryOffsets, queryOffsetsBias, querySizes, docwiseSampleRate, maxQuerySize, takenMask);
}
}
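    // One query per 32-thread warp: lanes stride over the query's documents
    // accumulating target and weight sums, both sums are warp-reduced, and the
    // weighted mean (0 if the total weight is 0) is written for the query.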
template <int BLOCK_SIZE>
__global__ void ComputeGroupMeansImpl(const float* target, const float* weights,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* queryMeans)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
weights += (weights != nullptr) ? readOffset : 0;
target += readOffset;
queryMeans += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float sumTarget = 0;
float sumWeight = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
sumTarget += t;
sumWeight += w;
}
line[threadIdx.x] = sumTarget;
const float totalSum = WarpReduce(x, line + localQid * 32, 32);
line[threadIdx.x] = sumWeight;
const float totalWeight = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalWeight != 0 ? totalSum / totalWeight : 0;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
queryMeans[localQid] = result[localQid];
}
}
void ComputeGroupMeans(const float* target, const float* weights,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0)
{
hipLaunchKernelGGL(( ComputeGroupMeansImpl<blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream , target, weights, qOffsets, qOffsetsBias, qSizes, qCount, result);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupMeansImpl(const float* target, const float* weights,
const ui32* qOffsets, int qCount,
float* queryMeans)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 queryOffset = qid < qCount ? qOffsets[qid] : 0;
weights += (weights != nullptr) ? queryOffset : 0;
target += queryOffset;
queryMeans += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qOffsets[qid + 1] - queryOffset : 0;
float sumTarget = 0;
float sumWeight = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
sumTarget += t;
sumWeight += w;
}
line[threadIdx.x] = sumTarget;
const float totalSum = WarpReduce(x, line + localQid * 32, 32);
line[threadIdx.x] = sumWeight;
const float totalWeight = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalWeight != 0 ? totalSum / totalWeight : 0;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
queryMeans[localQid] = result[localQid];
}
}
void ComputeGroupMeans(const float* target, const float* weights,
const ui32* qOffsets, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( ComputeGroupMeansImpl<blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream , target, weights, qOffsets, qCount, result);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupMaxImpl(const float* target,
const ui32* qOffsets, int qCount,
float* result) {
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
ui32 queryOffset = qid < qCount ? qOffsets[qid] : 0;
target += queryOffset;
result += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qOffsets[qid + 1] - queryOffset : 0;
float maxValue = NegativeInfty();
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
maxValue = max(t, maxValue);
}
line[threadIdx.x] = maxValue;
const float queryMax = WarpReduce(x, line + localQid * 32, 32, TCudaMax<float>());
__syncthreads();
if (x == 0 && (qid < qCount)) {
result[localQid] = queryMax > NegativeInfty() ? queryMax : 0;
}
}
void ComputeGroupMax(const float* target,
const ui32* qOffsets, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( ComputeGroupMaxImpl<blockSize>) , dim3(numBlocks), dim3(blockSize), 0, stream , target, qOffsets, qCount, result);
}
}
__global__ void RemoveGroupMeansImpl(const float* queryMeans, const ui32* qids, ui32 size, float* dst) {
const ui32 docId = blockIdx.x * blockDim.x + threadIdx.x;
if (docId < size) {
dst[docId] -= __ldg(queryMeans + qids[docId]);
}
}
void RemoveGroupBias(const float *queryMeans, const ui32 *qids, ui32 size, float *dst, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( RemoveGroupMeansImpl), dim3(numBlocks), dim3(blockSize), 0, stream , queryMeans, qids, size, dst);
}
}
}
| f2387c3bbf5b24879151d9218f297a5fd53f0840.cu | #include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <catboost/cuda/gpu_data/gpu_structures.h>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
template <ui32 BLOCK_SIZE>
__global__ void ComputeGroupIdsImpl(const ui32* qSizes, const ui32* qOffsets, ui32 offsetsBias, int qCount, ui32* dst) {
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
ui32 writeOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
dst += writeOffset;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
for (int i = x; i < querySize; i += 32) {
dst[i] = qid;
}
}
__device__ __forceinline__ ui32 SampledQuerySize(float sampleRate, ui32 qSize) {
const ui32 sampledSize = ceil(sampleRate * qSize);
if (sampledSize < 2) {
return min(2, qSize);
}
return sampledSize;
}
void ComputeGroupIds(const ui32* qSizes, const ui32* qOffsets, ui32 offsetsBias, int qCount, ui32* dst, TCudaStream stream) {
const ui64 blockSize = 128;
const ui64 numBlocks = CeilDivide(static_cast<ui64>(qCount) * 32, blockSize);
if (numBlocks > 0) {
ComputeGroupIdsImpl<blockSize><<< numBlocks, blockSize, 0, stream >>>(qSizes, qOffsets, offsetsBias, qCount, dst);
}
}
__global__ void FillQueryEndMaskImpl(const ui32* qids, const ui32* docs, ui32 docCount, ui32* masks) {
const ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < docCount) {
ui32 idx = docs[i];
const ui32 qid = qids[idx];
const ui32 nextDoc = i + 1 < docCount ? docs[i + 1] : static_cast<ui32>(-1);
const ui32 isEnd = i + 1 < docCount ? qid != qids[nextDoc] : 1;
masks[i] = isEnd;
}
}
__global__ void CreateSortKeysImpl(ui64* seeds, const ui32* qids, ui32 docCount, ui64* keys) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
seeds += i;
ui64 s = seeds[0];
while (i < docCount) {
const ui64 highBits = ((ui64)qids[i]) << 32;
const ui64 lowBits = AdvanceSeed(&s) >> 32;
keys[i] = lowBits | highBits;
i += gridDim.x * blockDim.x;
}
seeds[0] = s;
}
void CreateSortKeys(ui64* seeds, ui32 seedSize, const ui32* qids, ui32 docCount, ui64* keys, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = min(CeilDivide(seedSize, blockSize),
CeilDivide(docCount, blockSize));
if (numBlocks) {
CreateSortKeysImpl<<<numBlocks, blockSize, 0, stream>>>(seeds, qids, docCount, keys);
}
}
void FillQueryEndMask(const ui32* qids, const ui32* docs, ui32 docCount, ui32* masks, TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = (docCount + blockSize - 1) / blockSize;
if (numBlocks) {
FillQueryEndMaskImpl<<<numBlocks, blockSize, 0, stream>>>(qids, docs, docCount, masks);
}
}
__global__ void FillTakenDocsMaskImpl(const float* takenQueryMasks,
const ui32* qids,
const ui32* docs, ui32 docCount,
const ui32* queryOffsets,
const ui32 queryOffsetsBias,
const ui32* querySizes,
const float docwiseSampleRate,
const ui32 maxQuerySize,
float* takenMask) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < docCount) {
const ui32 doc = docs[i];
const ui32 queryId = __ldg(qids + doc);
const ui32 queryOffset = __ldg(queryOffsets + queryId) - queryOffsetsBias;
const ui32 querySize = __ldg(querySizes + queryId);
const ui32 sampledQuerySize = min(maxQuerySize, SampledQuerySize(docwiseSampleRate, querySize));
float mask = __ldg(takenQueryMasks + queryId) * ((i - queryOffset) < sampledQuerySize);
takenMask[i] = mask;
i += gridDim.x * blockDim.x;
}
}
void FillTakenDocsMask(const float* takenQueryMasks,
const ui32* qids,
const ui32* docs, ui32 docCount,
const ui32* queryOffsets,
const ui32 queryOffsetsBias,
const ui32* querySizes,
const float docwiseSampleRate,
const ui32 maxQuerySize,
float* takenMask,
TCudaStream stream) {
const ui32 blockSize = 256;
const ui32 numBlocks = (docCount + blockSize - 1) / blockSize;
if (numBlocks) {
FillTakenDocsMaskImpl<<<numBlocks, blockSize, 0, stream>>>(takenQueryMasks, qids, docs, docCount, queryOffsets, queryOffsetsBias, querySizes, docwiseSampleRate, maxQuerySize, takenMask);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupMeansImpl(const float* target, const float* weights,
const ui32* qOffsets, int offsetsBias,
const ui32* qSizes, int qCount,
float* queryMeans)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 readOffset = qid < qCount ? (qOffsets[qid] - offsetsBias) : 0;
weights += (weights != nullptr) ? readOffset : 0;
target += readOffset;
queryMeans += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qSizes[qid] : 0;
float sumTarget = 0;
float sumWeight = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
sumTarget += t;
sumWeight += w;
}
line[threadIdx.x] = sumTarget;
const float totalSum = WarpReduce(x, line + localQid * 32, 32);
line[threadIdx.x] = sumWeight;
const float totalWeight = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalWeight != 0 ? totalSum / totalWeight : 0;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
queryMeans[localQid] = result[localQid];
}
}
void ComputeGroupMeans(const float* target, const float* weights,
const ui32* qOffsets, ui32 qOffsetsBias,
const ui32* qSizes, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + 127) / blockSize;
if (numBlocks > 0)
{
ComputeGroupMeansImpl<blockSize> <<< numBlocks, blockSize, 0, stream >>> (target, weights, qOffsets, qOffsetsBias, qSizes, qCount, result);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupMeansImpl(const float* target, const float* weights,
const ui32* qOffsets, int qCount,
float* queryMeans)
{
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
__shared__ float result[queriesPerBlock];
ui32 queryOffset = qid < qCount ? qOffsets[qid] : 0;
weights += (weights != nullptr) ? queryOffset : 0;
target += queryOffset;
queryMeans += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qOffsets[qid + 1] - queryOffset : 0;
float sumTarget = 0;
float sumWeight = 0;
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
const float w = weights != nullptr ? __ldg(weights + i) : 1.0f;
sumTarget += t;
sumWeight += w;
}
line[threadIdx.x] = sumTarget;
const float totalSum = WarpReduce(x, line + localQid * 32, 32);
line[threadIdx.x] = sumWeight;
const float totalWeight = WarpReduce(x, line + localQid * 32, 32);
if (x == 0) {
result[localQid] = totalWeight != 0 ? totalSum / totalWeight : 0;
}
__syncthreads();
if (x == 0 && (qid < qCount)) {
queryMeans[localQid] = result[localQid];
}
}
void ComputeGroupMeans(const float* target, const float* weights,
const ui32* qOffsets, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ComputeGroupMeansImpl<blockSize> <<< numBlocks, blockSize, 0, stream >>> (target, weights, qOffsets, qCount, result);
}
}
template <int BLOCK_SIZE>
__global__ void ComputeGroupMaxImpl(const float* target,
const ui32* qOffsets, int qCount,
float* result) {
const int queriesPerBlock = BLOCK_SIZE / 32;
const int localQid = threadIdx.x / 32;
const int qid = blockIdx.x * queriesPerBlock + localQid;
__shared__ volatile float line[BLOCK_SIZE];
ui32 queryOffset = qid < qCount ? qOffsets[qid] : 0;
target += queryOffset;
result += blockIdx.x * queriesPerBlock;
line[threadIdx.x] = 0;
const int x = threadIdx.x & 31;
const int querySize = qid < qCount ? qOffsets[qid + 1] - queryOffset : 0;
float maxValue = NegativeInfty();
for (int i = x; i < querySize; i += 32) {
const float t = __ldg(target + i);
maxValue = max(t, maxValue);
}
line[threadIdx.x] = maxValue;
const float queryMax = WarpReduce(x, line + localQid * 32, 32, TCudaMax<float>());
__syncthreads();
if (x == 0 && (qid < qCount)) {
result[localQid] = queryMax > NegativeInfty() ? queryMax : 0;
}
}
void ComputeGroupMax(const float* target,
const ui32* qOffsets, ui32 qCount,
float* result, TCudaStream stream) {
const int blockSize = 128;
const int numBlocks = (qCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ComputeGroupMaxImpl<blockSize> <<< numBlocks, blockSize, 0, stream >>> (target, qOffsets, qCount, result);
}
}
__global__ void RemoveGroupMeansImpl(const float* queryMeans, const ui32* qids, ui32 size, float* dst) {
const ui32 docId = blockIdx.x * blockDim.x + threadIdx.x;
if (docId < size) {
dst[docId] -= __ldg(queryMeans + qids[docId]);
}
}
void RemoveGroupBias(const float *queryMeans, const ui32 *qids, ui32 size, float *dst, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks > 0) {
RemoveGroupMeansImpl<<< numBlocks, blockSize, 0, stream >>> (queryMeans, qids, size, dst);
}
}
}
|
9088a2e9b2943467e7b559f3def483121126eea7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void manual_dot_nn_op_float_m1_k512_n1024_kernel0(float* input0, float* input1, float* output0)
{
int warp_id = threadIdx.x >> 5;
int lane_id = threadIdx.x & 31;
int col_id = blockIdx.x * blockDim.x / 8 + lane_id;
if (col_id < 1024)
{
float val = 0;
int k_start = warp_id * 64;
int k_end = (warp_id + 1) * 64;
for (int i = k_start; i < k_end; i++)
{
val = fma(input0[i], input1[i * 1024 + col_id], val);
}
if (warp_id == 0)
{
output0[col_id]=0;
}
__syncthreads();
atomicAdd(output0 + col_id, val);
}
}
| 9088a2e9b2943467e7b559f3def483121126eea7.cu | extern "C" __global__ void manual_dot_nn_op_float_m1_k512_n1024_kernel0(float* input0, float* input1, float* output0)
{
int warp_id = threadIdx.x >> 5;
int lane_id = threadIdx.x & 31;
int col_id = blockIdx.x * blockDim.x / 8 + lane_id;
if (col_id < 1024)
{
float val = 0;
int k_start = warp_id * 64;
int k_end = (warp_id + 1) * 64;
for (int i = k_start; i < k_end; i++)
{
val = fma(input0[i], input1[i * 1024 + col_id], val);
}
if (warp_id == 0)
{
output0[col_id]=0;
}
__syncthreads();
atomicAdd(output0 + col_id, val);
}
}
|
406dff04f8eb1f94a341d9621e63e109989c366c.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include<iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 1024
#define threadsPerBlock 512
#define cpu_sum(x) (x*(x+1))
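// Each thread accumulates a grid-stride partial sum of d_a[i]*d_b[i]; the block
// then reduces its threadsPerBlock partials in shared memory by halving the
// active range each step, and thread 0 writes one partial result per block
// (the per-block results are summed on the host).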
__global__ void gpu_dot(float *d_a, float *d_b, float *d_c){
__shared__ float partial_sum[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int index = threadIdx.x;
float sum = 0;
while(tid < N){
sum += d_a[tid] * d_b[tid];
tid += blockDim.x * gridDim.x;
}
partial_sum [index] = sum;
__syncthreads();
int i = blockDim.x/2;
while(i != 0){
if(index < i){
partial_sum[index] += partial_sum[index+i];
}
__syncthreads();
i /= 2;
}
if(index == 0){
d_c[blockIdx.x] = partial_sum[0];
}
}
int main(void){
//Declare Host Array
float *h_a, *h_b, h_c, *partial_sum;
//Declare device Array
float *d_a, *d_b, *d_partial_sum;
//Calculate total number of blocks per grid
int block_calc = (N + threadsPerBlock - 1)/threadsPerBlock;
int blocksPerGrid = (32 < block_calc ? 32 : block_calc);
h_a = (float *)malloc(N * sizeof(float));
h_b = (float *)malloc(N * sizeof(float));
partial_sum = (float *)malloc(blocksPerGrid * sizeof(float));
hipMalloc((void**)&d_a, N * sizeof(float));
hipMalloc((void**)&d_b, N * sizeof(float));
hipMalloc((void**)&d_partial_sum, blocksPerGrid * sizeof(float));
// fill data
for(int i = 0; i < N; i++){
h_a[i] = i;
h_b[i] = 2;
}
hipMemcpy(d_a, h_a, N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, N * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( gpu_dot), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_partial_sum);
hipMemcpy(partial_sum, d_partial_sum, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost);
h_c = 0;
for(int i = 0; i < blocksPerGrid; i++){
h_c += partial_sum[i];
}
printf("The computed dot product is: %f\n", h_c);
// Test
if (h_c == cpu_sum((float)(N - 1)))
{
printf("The dot product computed by GPU is correct\n");
}
else
{
printf("Error in dot product computation");
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_partial_sum);
free(h_a);
free(h_b);
free(partial_sum);
return 0;
} | 406dff04f8eb1f94a341d9621e63e109989c366c.cu | #include "stdio.h"
#include<iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 1024
#define threadsPerBlock 512
#define cpu_sum(x) (x*(x+1))
__global__ void gpu_dot(float *d_a, float *d_b, float *d_c){
__shared__ float partial_sum[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int index = threadIdx.x;
float sum = 0;
while(tid < N){
sum += d_a[tid] * d_b[tid];
tid += blockDim.x * gridDim.x;
}
partial_sum [index] = sum;
__syncthreads();
int i = blockDim.x/2;
while(i != 0){
if(index < i){
partial_sum[index] += partial_sum[index+i];
}
__syncthreads();
i /= 2;
}
if(index == 0){
d_c[blockIdx.x] = partial_sum[0];
}
}
int main(void){
//Declare Host Array
float *h_a, *h_b, h_c, *partial_sum;
//Declare device Array
float *d_a, *d_b, *d_partial_sum;
//Calculate total number of blocks per grid
int block_calc = (N + threadsPerBlock - 1)/threadsPerBlock;
int blocksPerGrid = (32 < block_calc ? 32 : block_calc);
h_a = (float *)malloc(N * sizeof(float));
h_b = (float *)malloc(N * sizeof(float));
partial_sum = (float *)malloc(blocksPerGrid * sizeof(float));
cudaMalloc((void**)&d_a, N * sizeof(float));
cudaMalloc((void**)&d_b, N * sizeof(float));
cudaMalloc((void**)&d_partial_sum, blocksPerGrid * sizeof(float));
// fill data
for(int i = 0; i < N; i++){
h_a[i] = i;
h_b[i] = 2;
}
cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, N * sizeof(float), cudaMemcpyHostToDevice);
gpu_dot<<<blocksPerGrid, threadsPerBlock>>> (d_a, d_b, d_partial_sum);
cudaMemcpy(partial_sum, d_partial_sum, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
h_c = 0;
for(int i = 0; i < blocksPerGrid; i++){
h_c += partial_sum[i];
}
printf("The computed dot product is: %f\n", h_c);
// Test
if (h_c == cpu_sum((float)(N - 1)))
{
printf("The dot product computed by GPU is correct\n");
}
else
{
printf("Error in dot product computation");
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_partial_sum);
free(h_a);
free(h_b);
free(partial_sum);
return 0;
} |
2c9b6237be044b3dea180d40cf9664c024129481.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(hipMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(hipMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(hipMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, hipMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(hipDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < NUM_ITERATIONS; i++)
{
hipLaunchKernelGGL(( BlackScholesGPU), dim3(DIV_UP(OPT_N, 128)), dim3(128/*480, 128*/), 0, 0,
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
//Both call and put are calculated
printf("Options count : %i \n", 2 * OPT_N);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(hipMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, hipMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
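//Relative L1 error: total absolute deviation normalized by the magnitude of the CPU reference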
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(hipFree(d_OptionYears));
checkCudaErrors(hipFree(d_OptionStrike));
checkCudaErrors(hipFree(d_StockPrice));
checkCudaErrors(hipFree(d_PutResult));
checkCudaErrors(hipFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
hipDeviceReset();
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 2c9b6237be044b3dea180d40cf9664c024129481.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This sample evaluates fair call and put prices for a
* given set of European options by Black-Scholes formula.
* See supplied whitepaper for more explanations.
*/
#include <helper_functions.h> // helper functions for string parsing
#include <helper_cuda.h> // helper functions CUDA error checking and initialization
////////////////////////////////////////////////////////////////////////////////
// Process an array of optN options on CPU
////////////////////////////////////////////////////////////////////////////////
extern "C" void BlackScholesCPU(
float *h_CallResult,
float *h_PutResult,
float *h_StockPrice,
float *h_OptionStrike,
float *h_OptionYears,
float Riskfree,
float Volatility,
int optN
);
////////////////////////////////////////////////////////////////////////////////
// Process an array of OptN options on GPU
////////////////////////////////////////////////////////////////////////////////
#include "BlackScholes_kernel.cuh"
////////////////////////////////////////////////////////////////////////////////
// Helper function, returning uniformly distributed
// random float in [low, high] range
////////////////////////////////////////////////////////////////////////////////
float RandFloat(float low, float high)
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
const int OPT_N = 4000000;
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(float);
const float RISKFREE = 0.02f;
const float VOLATILITY = 0.30f;
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Start logs
printf("[%s] - Starting...\n", argv[0]);
//'h_' prefix - CPU (host) memory space
float
//Results calculated by CPU for reference
*h_CallResultCPU,
*h_PutResultCPU,
//CPU copy of GPU results
*h_CallResultGPU,
*h_PutResultGPU,
//CPU instance of input data
*h_StockPrice,
*h_OptionStrike,
*h_OptionYears;
//'d_' prefix - GPU (device) memory space
float
//Results calculated by GPU
*d_CallResult,
*d_PutResult,
//GPU instance of input data
*d_StockPrice,
*d_OptionStrike,
*d_OptionYears;
double
delta, ref, sum_delta, sum_ref, max_delta, L1norm, gpuTime;
StopWatchInterface *hTimer = NULL;
int i;
findCudaDevice(argc, (const char **)argv);
sdkCreateTimer(&hTimer);
printf("Initializing data...\n");
printf("...allocating CPU memory for options.\n");
h_CallResultCPU = (float *)malloc(OPT_SZ);
h_PutResultCPU = (float *)malloc(OPT_SZ);
h_CallResultGPU = (float *)malloc(OPT_SZ);
h_PutResultGPU = (float *)malloc(OPT_SZ);
h_StockPrice = (float *)malloc(OPT_SZ);
h_OptionStrike = (float *)malloc(OPT_SZ);
h_OptionYears = (float *)malloc(OPT_SZ);
printf("...allocating GPU memory for options.\n");
checkCudaErrors(cudaMalloc((void **)&d_CallResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_PutResult, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_StockPrice, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionStrike, OPT_SZ));
checkCudaErrors(cudaMalloc((void **)&d_OptionYears, OPT_SZ));
printf("...generating input data in CPU mem.\n");
srand(5347);
//Generate options set
for (i = 0; i < OPT_N; i++)
{
h_CallResultCPU[i] = 0.0f;
h_PutResultCPU[i] = -1.0f;
h_StockPrice[i] = RandFloat(5.0f, 30.0f);
h_OptionStrike[i] = RandFloat(1.0f, 100.0f);
h_OptionYears[i] = RandFloat(0.25f, 10.0f);
}
printf("...copying input data to GPU mem.\n");
//Copy options data to GPU memory for further processing
checkCudaErrors(cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice));
printf("Data init done.\n\n");
printf("Executing Black-Scholes GPU kernel (%i iterations)...\n", NUM_ITERATIONS);
checkCudaErrors(cudaDeviceSynchronize());
sdkResetTimer(&hTimer);
sdkStartTimer(&hTimer);
for (i = 0; i < NUM_ITERATIONS; i++)
{
BlackScholesGPU<<<DIV_UP(OPT_N, 128), 128/*480, 128*/>>>(
d_CallResult,
d_PutResult,
d_StockPrice,
d_OptionStrike,
d_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
getLastCudaError("BlackScholesGPU() execution failed\n");
}
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&hTimer);
gpuTime = sdkGetTimerValue(&hTimer) / NUM_ITERATIONS;
//Both call and put are calculated
printf("Options count : %i \n", 2 * OPT_N);
printf("BlackScholesGPU() time : %f msec\n", gpuTime);
printf("Effective memory bandwidth: %f GB/s\n", ((double)(5 * OPT_N * sizeof(float)) * 1E-9) / (gpuTime * 1E-3));
printf("Gigaoptions per second : %f \n\n", ((double)(2 * OPT_N) * 1E-9) / (gpuTime * 1E-3));
printf("BlackScholes, Throughput = %.4f GOptions/s, Time = %.5f s, Size = %u options, NumDevsUsed = %u, Workgroup = %u\n",
(((double)(2.0 * OPT_N) * 1.0E-9) / (gpuTime * 1.0E-3)), gpuTime*1e-3, (2 * OPT_N), 1, 128);
printf("\nReading back GPU results...\n");
//Read back GPU results to compare them to CPU results
checkCudaErrors(cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost));
printf("Checking the results...\n");
printf("...running CPU calculations.\n\n");
//Calculate options values on CPU
BlackScholesCPU(
h_CallResultCPU,
h_PutResultCPU,
h_StockPrice,
h_OptionStrike,
h_OptionYears,
RISKFREE,
VOLATILITY,
OPT_N
);
printf("Comparing the results...\n");
//Calculate max absolute difference and L1 distance
//between CPU and GPU results
sum_delta = 0;
sum_ref = 0;
max_delta = 0;
for (i = 0; i < OPT_N; i++)
{
ref = h_CallResultCPU[i];
delta = fabs(h_CallResultCPU[i] - h_CallResultGPU[i]);
if (delta > max_delta)
{
max_delta = delta;
}
sum_delta += delta;
sum_ref += fabs(ref);
}
L1norm = sum_delta / sum_ref;
printf("L1 norm: %E\n", L1norm);
printf("Max absolute error: %E\n\n", max_delta);
printf("Shutting down...\n");
printf("...releasing GPU memory.\n");
checkCudaErrors(cudaFree(d_OptionYears));
checkCudaErrors(cudaFree(d_OptionStrike));
checkCudaErrors(cudaFree(d_StockPrice));
checkCudaErrors(cudaFree(d_PutResult));
checkCudaErrors(cudaFree(d_CallResult));
printf("...releasing CPU memory.\n");
free(h_OptionYears);
free(h_OptionStrike);
free(h_StockPrice);
free(h_PutResultGPU);
free(h_CallResultGPU);
free(h_PutResultCPU);
free(h_CallResultCPU);
sdkDeleteTimer(&hTimer);
printf("Shutdown done.\n");
printf("\n[BlackScholes] - Test Summary\n");
cudaDeviceReset();
if (L1norm > 1e-6)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
65c12d81fd900f4faeda028b3d7831beddea0c48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__constant__ std::uint32_t permute_dims_x_strides[primitiv::Shape::MAX_DEPTH];
__constant__ std::uint32_t permute_dims_y_strides[primitiv::Shape::MAX_DEPTH];
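// The host fills these stride tables with hipMemcpyToSymbol before each kernel launch.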
// TODO(vbkaisetsu):
// Improve the implementation of permute_dims.
// This function uses for-loops in the kernel code, which makes it slower than
// a loop-free implementation.
__global__ void permute_dims_fw_dev(
const half *px, const std::uint32_t ndims, const std::uint32_t size,
half *py) {
const std::uint32_t i = IDX;
const std::uint32_t bid_z = IDY;
const std::uint32_t ofs = bid_z * size;
if (i < size) {
std::uint32_t tmp = i;
std::uint32_t j = 0;
// TODO(vbkaisetsu):
// Improve this implementation
for (std::uint32_t d = 0; d < ndims; ++d) {
const std::uint32_t p = tmp / permute_dims_x_strides[d];
tmp -= p * permute_dims_x_strides[d];
j += p * permute_dims_y_strides[d];
}
py[ofs + j] = px[ofs + i];
}
}
__global__ void permute_dims_bw_dev(
const half *py, const std::uint32_t ndims, const std::uint32_t size,
half *px) {
const std::uint32_t i = IDX;
const std::uint32_t bid_z = IDY;
const std::uint32_t ofs = bid_z * size;
if (i < size) {
std::uint32_t tmp = i;
std::uint32_t j = 0;
// TODO(vbkaisetsu):
// Improve this implementation
for (std::uint32_t d = 0; d < ndims; ++d) {
const std::uint32_t p = tmp / permute_dims_x_strides[d];
tmp -= p * permute_dims_x_strides[d];
j += p * permute_dims_y_strides[d];
}
const std::size_t ox = ofs + i;
const std::size_t oy = ofs + j;
INPLACE_ADD(px + ox, ::__half2float(py[oy]));
}
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::permute_dims_fw_impl(
const Tensor &x, const std::vector<std::uint32_t> &perm,
Tensor &y) {
const std::uint32_t ndims = perm.size();
const std::uint32_t bs = x.shape().batch();
const std::uint32_t size = x.shape().volume();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
std::vector<std::uint32_t> x_strides(ndims);
std::vector<std::uint32_t> y_strides(ndims);
std::uint32_t x_stride_tmp = 1;
std::uint32_t y_stride_tmp = 1;
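// Precompute flat-index strides for the source and the permuted destination;
// the kernel uses them to remap each element index.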
for (std::uint32_t i = 0; i < ndims; ++i) {
x_strides[ndims - i - 1] = x_stride_tmp;
y_strides[ndims - perm[i] - 1] = y_stride_tmp;
x_stride_tmp *= x.shape()[i];
y_stride_tmp *= y.shape()[i];
}
CUDA_CALL(::hipSetDevice(dev_id_));
CUDA_CALL(::hipMemcpyToSymbol(
permute_dims_x_strides, x_strides.data(),
sizeof(std::uint32_t) * x_strides.size()));
CUDA_CALL(::hipMemcpyToSymbol(
permute_dims_y_strides, y_strides.data(),
sizeof(std::uint32_t) * y_strides.size()));
hipLaunchKernelGGL(( ::permute_dims_fw_dev), dim3(dim3(g1, bs)), dim3(dim1_x_), 0, 0,
CDATA(half, x), ndims, size, MDATA(half, y));
}
void CUDA16::permute_dims_bw_impl(
const Tensor &, const Tensor &, const Tensor &gy,
const std::vector<std::uint32_t> &perm, Tensor &gx) {
const std::uint32_t ndims = perm.size();
const std::uint32_t bs = gx.shape().batch();
const std::uint32_t size = gx.shape().volume();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
std::vector<std::uint32_t> x_strides(ndims);
std::vector<std::uint32_t> y_strides(ndims);
std::uint32_t x_stride_tmp = 1;
std::uint32_t y_stride_tmp = 1;
for (std::uint32_t i = 0; i < ndims; ++i) {
x_strides[ndims - i - 1] = x_stride_tmp;
y_strides[ndims - perm[i] - 1] = y_stride_tmp;
x_stride_tmp *= gx.shape()[i];
y_stride_tmp *= gy.shape()[i];
}
CUDA_CALL(::hipSetDevice(dev_id_));
CUDA_CALL(::hipMemcpyToSymbol(
permute_dims_x_strides, x_strides.data(),
sizeof(std::uint32_t) * x_strides.size()));
CUDA_CALL(::hipMemcpyToSymbol(
permute_dims_y_strides, y_strides.data(),
sizeof(std::uint32_t) * y_strides.size()));
hipLaunchKernelGGL(( ::permute_dims_bw_dev), dim3(dim3(g1, bs)), dim3(dim1_x_), 0, 0,
CDATA(half, gy), ndims, size, MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
| 65c12d81fd900f4faeda028b3d7831beddea0c48.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
__constant__ std::uint32_t permute_dims_x_strides[primitiv::Shape::MAX_DEPTH];
__constant__ std::uint32_t permute_dims_y_strides[primitiv::Shape::MAX_DEPTH];
// TODO(vbkaisetsu):
// Improve the implementation of permute_dims.
// This function uses for-loops in the kernel code, which makes it slower than
// a loop-free implementation.
__global__ void permute_dims_fw_dev(
const half *px, const std::uint32_t ndims, const std::uint32_t size,
half *py) {
const std::uint32_t i = IDX;
const std::uint32_t bid_z = IDY;
const std::uint32_t ofs = bid_z * size;
if (i < size) {
std::uint32_t tmp = i;
std::uint32_t j = 0;
// TODO(vbkaisetsu):
// Improve this implementation
for (std::uint32_t d = 0; d < ndims; ++d) {
const std::uint32_t p = tmp / permute_dims_x_strides[d];
tmp -= p * permute_dims_x_strides[d];
j += p * permute_dims_y_strides[d];
}
py[ofs + j] = px[ofs + i];
}
}
__global__ void permute_dims_bw_dev(
const half *py, const std::uint32_t ndims, const std::uint32_t size,
half *px) {
const std::uint32_t i = IDX;
const std::uint32_t bid_z = IDY;
const std::uint32_t ofs = bid_z * size;
if (i < size) {
std::uint32_t tmp = i;
std::uint32_t j = 0;
// TODO(vbkaisetsu):
// Improve this implementation
for (std::uint32_t d = 0; d < ndims; ++d) {
const std::uint32_t p = tmp / permute_dims_x_strides[d];
tmp -= p * permute_dims_x_strides[d];
j += p * permute_dims_y_strides[d];
}
const std::size_t ox = ofs + i;
const std::size_t oy = ofs + j;
INPLACE_ADD(px + ox, ::__half2float(py[oy]));
}
}
} // namespace
namespace primitiv {
namespace devices {
void CUDA16::permute_dims_fw_impl(
const Tensor &x, const std::vector<std::uint32_t> &perm,
Tensor &y) {
const std::uint32_t ndims = perm.size();
const std::uint32_t bs = x.shape().batch();
const std::uint32_t size = x.shape().volume();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
std::vector<std::uint32_t> x_strides(ndims);
std::vector<std::uint32_t> y_strides(ndims);
std::uint32_t x_stride_tmp = 1;
std::uint32_t y_stride_tmp = 1;
for (std::uint32_t i = 0; i < ndims; ++i) {
x_strides[ndims - i - 1] = x_stride_tmp;
y_strides[ndims - perm[i] - 1] = y_stride_tmp;
x_stride_tmp *= x.shape()[i];
y_stride_tmp *= y.shape()[i];
}
CUDA_CALL(::cudaSetDevice(dev_id_));
CUDA_CALL(::cudaMemcpyToSymbol(
permute_dims_x_strides, x_strides.data(),
sizeof(std::uint32_t) * x_strides.size()));
CUDA_CALL(::cudaMemcpyToSymbol(
permute_dims_y_strides, y_strides.data(),
sizeof(std::uint32_t) * y_strides.size()));
::permute_dims_fw_dev<<<dim3(g1, bs), dim1_x_>>>(
CDATA(half, x), ndims, size, MDATA(half, y));
}
void CUDA16::permute_dims_bw_impl(
const Tensor &, const Tensor &, const Tensor &gy,
const std::vector<std::uint32_t> &perm, Tensor &gx) {
const std::uint32_t ndims = perm.size();
const std::uint32_t bs = gx.shape().batch();
const std::uint32_t size = gx.shape().volume();
const std::uint32_t g1 = GRID_SIZE(size, dim1_x_);
std::vector<std::uint32_t> x_strides(ndims);
std::vector<std::uint32_t> y_strides(ndims);
std::uint32_t x_stride_tmp = 1;
std::uint32_t y_stride_tmp = 1;
for (std::uint32_t i = 0; i < ndims; ++i) {
x_strides[ndims - i - 1] = x_stride_tmp;
y_strides[ndims - perm[i] - 1] = y_stride_tmp;
x_stride_tmp *= gx.shape()[i];
y_stride_tmp *= gy.shape()[i];
}
CUDA_CALL(::cudaSetDevice(dev_id_));
CUDA_CALL(::cudaMemcpyToSymbol(
permute_dims_x_strides, x_strides.data(),
sizeof(std::uint32_t) * x_strides.size()));
CUDA_CALL(::cudaMemcpyToSymbol(
permute_dims_y_strides, y_strides.data(),
sizeof(std::uint32_t) * y_strides.size()));
::permute_dims_bw_dev<<<dim3(g1, bs), dim1_x_>>>(
CDATA(half, gy), ndims, size, MDATA(half, gx));
}
} // namespace devices
} // namespace primitiv
|
b08cf4244b45dfd30e26568ca03884b80772ed8b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define MATRIX_SIZE 64
__global__ void Square(int *A)
{
// Block index
/************Add your code***********/
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
/************Add your code***********/
int tx = threadIdx.x;
int ty = threadIdx.y;
//Calculation
/************Add your code***********/
int row = 32*by+ty;
int column = 32*bx+tx;
int index = MATRIX_SIZE*row+column;
A[index]=A[index]*A[index];
}
int main()
{
int size = MATRIX_SIZE*MATRIX_SIZE*sizeof(int);
int *h_A = (int *)malloc(size);
int *d_A;
int i;
//Initialize A
for(i=0;i<MATRIX_SIZE*MATRIX_SIZE;i++)
{
h_A[i] = 2;
}
//Allocate the memory in GPU to store the content of A
/************Add your code***********/
hipMalloc((void **)&d_A,size);
//Copy h_A to d_A
/************Add your code***********/
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
//Allocate blocks and 32*32 threads per block.
/************Add your code***********/
dim3 dimBlock(32, 32);
dim3 dimGrid(2,2);
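//A 2x2 grid of 32x32-thread blocks covers the 64x64 matrix exactly (MATRIX_SIZE = 64)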
//Run the kernel
/************Add your code***********/
hipLaunchKernelGGL(( Square), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A);
//Copy the result to CPU
/************Add your code***********/
hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
//free GPU memory for d_A
/************Add your code***********/
hipFree(d_A);
//free Host Memory
free(h_A);
return 0;
}
| b08cf4244b45dfd30e26568ca03884b80772ed8b.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define MATRIX_SIZE 64
__global__ void Square(int *A)
{
// Block index
/************Add your code***********/
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
/************Add your code***********/
int tx = threadIdx.x;
int ty = threadIdx.y;
//Calculation
/************Add your code***********/
int row = 32*by+ty;
int column = 32*bx+tx;
int index = MATRIX_SIZE*row+column;
A[index]=A[index]*A[index];
}
int main()
{
int size = MATRIX_SIZE*MATRIX_SIZE*sizeof(int);
int *h_A = (int *)malloc(size);
int *d_A;
int i;
//Initialize A
for(i=0;i<MATRIX_SIZE*MATRIX_SIZE;i++)
{
h_A[i] = 2;
}
//Allocate the memory in GPU to store the content of A
/************Add your code***********/
cudaMalloc((void **)&d_A,size);
//Copy h_A to d_A
/************Add your code***********/
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
//Allocate blocks and 32*32 threads per block.
/************Add your code***********/
dim3 dimBlock(32, 32);
dim3 dimGrid(2,2);
//Run the kernel
/************Add your code***********/
Square<<<dimGrid,dimBlock>>>(d_A);
//Copy the result to CPU
/************Add your code***********/
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
//free GPU memory for d_A
/************Add your code***********/
cudaFree(d_A);
//free Host Memory
free(h_A);
return 0;
}
|
6fe2f184c8d91e0f4a925179d01317c6d97a56fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define BLOCKSIZE 32
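// Each block stages BLOCKSIZE x BLOCKSIZE core pixels plus a halo of up to
// maxKernelSize pixels on every side in shared memory (x is tripled for the RGB channels).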
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
// map from blockIdx to pixel position
int globalx = blockIdx.x * blockDim.x + threadIdx.x;
int globaly = blockIdx.y * blockDim.y + threadIdx.y;
int localx = threadIdx.x;
int localy = threadIdx.y;
int dy, dx;
unsigned int sumx, sumy, sumz;
__shared__ unsigned char local_mem[(BLOCKSIZE+2*maxKernelSizeX)*3][BLOCKSIZE+2*maxKernelSizeY];
int yy = min(max(globaly, 0), imagesizey-1);
int xx = min(max(globalx, 0), imagesizex-1);
// copy core section
local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+0];
local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+1];
local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+2];
// handle up and down edge
yy = min(max(globaly, 0), imagesizey-1);
if (localx < kernelsizex)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
local_mem[(localx)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 -kernelsizex)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
local_mem[(localx+2*kernelsizex)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
//handle left and right edge
xx = min(max(globalx, 0), imagesizex-1);
if (localy < kernelsizey)
{
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[(kernelsizex+localx)*3+0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localy > BLOCKSIZE-1 -kernelsizey)
{
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(kernelsizex+localx)*3+0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
//handle corner
if (localx < kernelsizex && localy < kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[localx*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[localx*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[localx*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(localx+2*kernelsizex)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy < kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[(localx+2*kernelsizex)*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx < kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(localx)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
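// Wait until the whole tile (core pixels and halo) is in shared memory before filtering.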
__syncthreads();
int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only!
if (globalx < imagesizex && globaly < imagesizey) // If inside image
{
// Filter kernel (simple box filter)
sumx=0;sumy=0;sumz=0;
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
{
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
yy = min(max(localy+kernelsizey +dy, 0), BLOCKSIZE+2*kernelsizey-1);
xx = min(max(localx+kernelsizex +dx, 0), BLOCKSIZE+2*kernelsizex-1);
sumx += local_mem[(xx)*3+0][yy];
sumy += local_mem[(xx)*3+1][yy];
sumz += local_mem[(xx)*3+2][yy];
}
}
out[((globaly)*imagesizex+(globalx))*3+0] = sumx/divby;
out[((globaly)*imagesizex+(globalx))*3+1] = sumy/divby;
out[((globaly)*imagesizex+(globalx))*3+2] = sumz/divby;
/*out[((globaly)*imagesizex+(globalx))*3+0] = local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy];
out[((globaly)*imagesizex+(globalx))*3+1] = local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy];
out[((globaly)*imagesizex+(globalx))*3+2] = local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy];*/
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
double t;
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice );
hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE)),ceil(float(imagesizey)/(BLOCKSIZE)));
dim3 block(BLOCKSIZE,BLOCKSIZE);
ResetMilli();
hipLaunchKernelGGL(( filter), dim3(grid),dim3(block), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32
hipDeviceSynchronize();
t = GetSeconds();
printf("COST %lf seconds\n", t);
// Check for errors!
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost );
hipFree( dev_bitmap );
hipFree( dev_input );
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
ResetMilli();
computeImages(7, 7);
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
| 6fe2f184c8d91e0f4a925179d01317c6d97a56fa.cu | // Lab 5, image filters with CUDA.
// Compile with a command-line similar to Lab 4:
// nvcc filter.cu -c -arch=sm_30 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter
// or (multicore lab)
// nvcc filter.cu -c -arch=sm_20 -o filter.o
// g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter
// 2017-11-27: Early pre-release, dubbed "beta".
// 2017-12-03: First official version! Brand new lab 5 based on the old lab 6.
// Better variable names, better prepared for some lab tasks. More changes may come
// but I call this version 1.0b2.
// 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages
// that allocated too much memory. b3
// 2017-12-04: More fixes: Tightened up the kernel with edge clamping.
// Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <GLUT/glut.h>
#include <OpenGL/gl.h>
#else
#include <GL/glut.h>
#endif
#include "readppm.h"
#include "milli.h"
// Use these for setting shared memory size.
#define maxKernelSizeX 10
#define maxKernelSizeY 10
#define BLOCKSIZE 32
__global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey)
{
// map from blockIdx to pixel position
int globalx = blockIdx.x * blockDim.x + threadIdx.x;
int globaly = blockIdx.y * blockDim.y + threadIdx.y;
int localx = threadIdx.x;
int localy = threadIdx.y;
int dy, dx;
unsigned int sumx, sumy, sumz;
__shared__ unsigned char local_mem[(BLOCKSIZE+2*maxKernelSizeX)*3][BLOCKSIZE+2*maxKernelSizeY];
int yy = min(max(globaly, 0), imagesizey-1);
int xx = min(max(globalx, 0), imagesizex-1);
// copy core section
local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+0];
local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+1];
local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy] = image[((yy)*imagesizex+(xx))*3+2];
// handle up and down edge
yy = min(max(globaly, 0), imagesizey-1);
if (localx < kernelsizex)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
local_mem[(localx)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 -kernelsizex)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
local_mem[(localx+2*kernelsizex)*3 + 0][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][kernelsizey+localy] = image[(yy*imagesizex + xx)*3+2];
}
//handle left and right edge
xx = min(max(globalx, 0), imagesizex-1);
if (localy < kernelsizey)
{
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[(kernelsizex+localx)*3+0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localy > BLOCKSIZE-1 -kernelsizey)
{
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(kernelsizex+localx)*3+0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(kernelsizex+localx)*3+1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(kernelsizex+localx)*3+2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
//handle corner
if (localx < kernelsizex && localy < kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[localx*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[localx*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[localx*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(localx+2*kernelsizex)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx > BLOCKSIZE-1 - kernelsizex && localy < kernelsizey)
{
xx = min(globalx+kernelsizex, imagesizex-1); //if(globalx+kernelsizex > imagesizex)
yy = max(globaly-kernelsizey, 0); //if(globaly-kernelsizey < 0)
local_mem[(localx+2*kernelsizex)*3 + 0][localy] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx+2*kernelsizex)*3 + 1][localy] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx+2*kernelsizex)*3 + 2][localy] = image[(yy*imagesizex + xx)*3+2];
}
else if (localx < kernelsizex && localy > BLOCKSIZE-1 - kernelsizey)
{
xx = max(globalx-kernelsizex, 0); //if(globalx-kernelsizex < 0)
yy = min(globaly+kernelsizey, imagesizey-1); //if(globaly+kernelsizey > imagesizey)
local_mem[(localx)*3 + 0][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+0];
local_mem[(localx)*3 + 1][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+1];
local_mem[(localx)*3 + 2][localy+2*kernelsizey] = image[(yy*imagesizex + xx)*3+2];
}
__syncthreads();
int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only!
if (globalx < imagesizex && globaly < imagesizey) // If inside image
{
// Filter kernel (simple box filter)
sumx=0;sumy=0;sumz=0;
for(dy=-kernelsizey;dy<=kernelsizey;dy++)
{
for(dx=-kernelsizex;dx<=kernelsizex;dx++)
{
// Use max and min to avoid branching!
yy = min(max(localy+kernelsizey +dy, 0), BLOCKSIZE+2*kernelsizey-1);
xx = min(max(localx+kernelsizex +dx, 0), BLOCKSIZE+2*kernelsizex-1);
sumx += local_mem[(xx)*3+0][yy];
sumy += local_mem[(xx)*3+1][yy];
sumz += local_mem[(xx)*3+2][yy];
}
}
out[((globaly)*imagesizex+(globalx))*3+0] = sumx/divby;
out[((globaly)*imagesizex+(globalx))*3+1] = sumy/divby;
out[((globaly)*imagesizex+(globalx))*3+2] = sumz/divby;
/*out[((globaly)*imagesizex+(globalx))*3+0] = local_mem[(kernelsizex+localx)*3+0][kernelsizey+localy];
out[((globaly)*imagesizex+(globalx))*3+1] = local_mem[(kernelsizex+localx)*3+1][kernelsizey+localy];
out[((globaly)*imagesizex+(globalx))*3+2] = local_mem[(kernelsizex+localx)*3+2][kernelsizey+localy];*/
}
}
// Global variables for image data
unsigned char *image, *pixels, *dev_bitmap, *dev_input;
unsigned int imagesizey, imagesizex; // Image size
////////////////////////////////////////////////////////////////////////////////
// main computation function
////////////////////////////////////////////////////////////////////////////////
void computeImages(int kernelsizex, int kernelsizey)
{
double t;
if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY)
{
printf("Kernel size out of bounds!\n");
return;
}
pixels = (unsigned char *) malloc(imagesizex*imagesizey*3);
cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3);
cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice );
cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3);
dim3 grid(ceil(float(imagesizex)/(BLOCKSIZE)),ceil(float(imagesizey)/(BLOCKSIZE)));
dim3 block(BLOCKSIZE,BLOCKSIZE);
ResetMilli();
filter<<<grid,block>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // change to blocksize = 32*32
cudaThreadSynchronize();
t = GetSeconds();
printf("COST %lf seconds\n", t);
// Check for errors!
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost );
cudaFree( dev_bitmap );
cudaFree( dev_input );
}
// Display images
void Draw()
{
// Dump the whole picture onto the screen.
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
if (imagesizey >= imagesizex)
{ // Not wide - probably square. Original left, result right.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
glRasterPos2i(0, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels);
}
else
{ // Wide image! Original on top, result below.
glRasterPos2f(-1, -1);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels );
glRasterPos2i(-1, 0);
glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image );
}
glFlush();
}
// Main program, inits
int main( int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA );
if (argc > 1)
image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey);
else
image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey);
if (imagesizey >= imagesizex)
glutInitWindowSize( imagesizex*2, imagesizey );
else
glutInitWindowSize( imagesizex, imagesizey*2 );
glutCreateWindow("Lab 5");
glutDisplayFunc(Draw);
ResetMilli();
computeImages(7, 7);
// You can save the result to a file like this:
writeppm("out.ppm", imagesizey, imagesizex, pixels);
glutMainLoop();
return 0;
}
|
0815267381f46246bc36c5f5fdd4a4fa1b36cdc8.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
namespace cv { namespace cuda { namespace device
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
device::transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
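// Explicit instantiations for every supported (src1, src2, dst) type combination.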
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| 0815267381f46246bc36c5f5fdd4a4fa1b36cdc8.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace arithm
{
template <typename T> struct UseDouble_
{
enum {value = 0};
};
template <> struct UseDouble_<double>
{
enum {value = 1};
};
template <typename T1, typename T2, typename D> struct UseDouble
{
enum {value = (UseDouble_<T1>::value || UseDouble_<T2>::value || UseDouble_<D>::value)};
};
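    // Descriptive note: AddWeighted_ below keeps the weights (alpha, beta, gamma) in float
    // unless any of T1, T2 or D is double, in which case the double-precision
    // specialisation selected through UseDouble is used instead.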
template <typename T1, typename T2, typename D, bool useDouble> struct AddWeighted_;
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, false> : binary_function<T1, T2, D>
{
float alpha;
float beta;
float gamma;
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(static_cast<float>(alpha_)), beta(static_cast<float>(beta_)), gamma(static_cast<float>(gamma_)) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted_<T1, T2, D, true> : binary_function<T1, T2, D>
{
double alpha;
double beta;
double gamma;
__host__ AddWeighted_(double alpha_, double beta_, double gamma_) : alpha(alpha_), beta(beta_), gamma(gamma_) {}
__device__ __forceinline__ D operator ()(T1 a, T2 b) const
{
return saturate_cast<D>(a * alpha + b * beta + gamma);
}
};
template <typename T1, typename T2, typename D> struct AddWeighted : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>
{
AddWeighted(double alpha_, double beta_, double gamma_) : AddWeighted_<T1, T2, D, UseDouble<T1, T2, D>::value>(alpha_, beta_, gamma_) {}
};
}
namespace cv { namespace cuda { namespace device
{
template <typename T1, typename T2, typename D, size_t src1_size, size_t src2_size, size_t dst_size> struct AddWeightedTraits : DefaultTransformFunctorTraits< arithm::AddWeighted<T1, T2, D> >
{
};
template <typename T1, typename T2, typename D, size_t src_size, size_t dst_size> struct AddWeightedTraits<T1, T2, D, src_size, src_size, dst_size> : arithm::ArithmFuncTraits<src_size, dst_size>
{
};
template <typename T1, typename T2, typename D> struct TransformFunctorTraits< arithm::AddWeighted<T1, T2, D> > : AddWeightedTraits<T1, T2, D, sizeof(T1), sizeof(T2), sizeof(D)>
{
};
}}}
namespace arithm
{
template <typename T1, typename T2, typename D>
void addWeighted(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream)
{
AddWeighted<T1, T2, D> op(alpha, beta, gamma);
device::transform((PtrStepSz<T1>) src1, (PtrStepSz<T2>) src2, (PtrStepSz<D>) dst, op, WithOutMask(), stream);
}
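    // Illustrative host-side usage (hypothetical GpuMat-backed arguments src1, src2, dst
    // and stream — not part of this file), e.g. blending two 8-bit images into a float
    // destination:
    //
    //   arithm::addWeighted<uchar, uchar, float>(src1, 0.5, src2, 0.5, 0.0, dst, stream);
    //
    // The explicit instantiations below provide this function for every supported
    // (T1, T2, D) combination so that the rest of the CUDA arithm module can link against it.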
template void addWeighted<uchar, uchar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, uchar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<uchar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, schar, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<schar, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, ushort, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<ushort, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, short, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<short, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, int, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<int, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, float, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<float, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, uchar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, schar>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, ushort>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, short>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, int>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, float>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
template void addWeighted<double, double, double>(PtrStepSzb src1, double alpha, PtrStepSzb src2, double beta, double gamma, PtrStepSzb dst, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
e280f7f1e34c3336169c17ee2a2a91d5266339d1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// #include <helper_cuda.h>
#define THREADS_PER_BLOCK 3
double f(double x){
return x*x;
}
__global__ void calculate(double *buffer, double start, double step, int N, double (*f) (double)){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N){
double x = start + i * step;
buffer[i] = f(x);
}
}
double integrate(double *buffer, double start, double end, int div, double (*f) (double)){
int N = div;
double step = (end - start) / div;
hipMallocManaged(&buffer, sizeof(double) * N);
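// Launch one thread per sample point; the grid size rounds N up to a multiple of THREADS_PER_BLOCK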
hipLaunchKernelGGL(( calculate), dim3((N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, buffer, start, step, N, f);
hipDeviceSynchronize();
for(int i = 0; i < N; i++)
printf("%d ", buffer[i]);
printf("\n");
double result = (f(start) + f(end)) / 2;
for(int i = 1; i < N; i++)
result += buffer[i]; // interior sample points f(start + i * step)
return result * step; // trapezoidal rule: scale the sum by the step width
}
int main(void){
hipError_t err = hipSuccess;
double *buffer = NULL;
double result = integrate(buffer, 0, 10, 100, f);
if ((err = hipGetLastError()) != hipSuccess){
fprintf(stderr, "Failed to launch kernel: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Result: %d\n", result);
hipFree(buffer);
return 0;
} | e280f7f1e34c3336169c17ee2a2a91d5266339d1.cu | #include <stdio.h>
#include <cuda_runtime.h>
// #include <helper_cuda.h>
#define THREADS_PER_BLOCK 3
double f(double x){
return x*x;
}
__global__ void calculate(double *buffer, double start, double step, int N, double (*f) (double)){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < N){
double x = start + i * step;
buffer[i] = f(x);
}
}
double integrate(double *buffer, double start, double end, int div, double (*f) (double)){
int N = div;
double step = (end - start) / div;
cudaMallocManaged(&buffer, sizeof(double) * N);
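// Launch one thread per sample point; the grid size rounds N up to a multiple of THREADS_PER_BLOCK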
calculate<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(buffer, start, step, N, f);
cudaDeviceSynchronize();
for(int i = 0; i < N; i++)
printf("%d ", buffer[i]);
printf("\n");
double result = (f(start) + f(end)) / 2;
for(int i = 1; i < N; i++)
result += buffer[i]; // interior sample points f(start + i * step)
return result * step; // trapezoidal rule: scale the sum by the step width
}
int main(void){
cudaError_t err = cudaSuccess;
double *buffer = NULL;
double result = integrate(buffer, 0, 10, 100, f);
if ((err = cudaGetLastError()) != cudaSuccess){
fprintf(stderr, "Failed to launch kernel: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Result: %d\n", result);
cudaFree(buffer);
return 0;
} |
49d78731b1d40670104fab5a1338f1fcf5ccd6cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_add.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int *A = NULL;
hipMalloc(&A, XSIZE*YSIZE*sizeof(int));
int *B = NULL;
hipMalloc(&B, XSIZE*YSIZE*sizeof(int));
int *C = NULL;
hipMalloc(&C, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
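// hipFree(0) below is a common idiom to force runtime/context initialization before any timed work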
hipFree(0);hipLaunchKernelGGL((
vec_add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A,B,C);
hipDeviceSynchronize();
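// Warm-up launches: run the kernel a few times untimed so one-time setup costs do not skew the measurement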
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
vec_add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
vec_add), dim3(gridBlock),dim3(threadBlock), 0, 0, N,A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 49d78731b1d40670104fab5a1338f1fcf5ccd6cc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_add.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int N = XSIZE*YSIZE;
int *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE*sizeof(int));
int *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE*sizeof(int));
int *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE*sizeof(int));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
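// cudaFree(0) below is a common idiom to force runtime/context initialization before any timed work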
cudaFree(0);
vec_add<<<gridBlock,threadBlock>>>(N,A,B,C);
cudaDeviceSynchronize();
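// Warm-up launches: run the kernel a few times untimed so one-time setup costs do not skew the measurement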
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_add<<<gridBlock,threadBlock>>>(N,A,B,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_add<<<gridBlock,threadBlock>>>(N,A,B,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7e23199f4529b676d5b8fd3bb594854dea7b79b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.hpp"
#include "kernels_hip.cuh"
// Device kernels.
__global__ void kernelAbs(
float* arr,
int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size)
{
return;
}
arr[i] = abs(arr[i]);
}
__global__ void kernelEliminateConnections(
float* inGroupConnectionWeghts,
float* turnedOnWires,
int groupSize,
int groupNumber,
float inGroupWireProbability)
{
int groupIdx = blockIdx.x;
int connectionIdx = blockIdx.y * blockDim.y + threadIdx.y;
if ((groupIdx >= groupNumber) || (connectionIdx >= groupSize * groupSize))
{
return;
}
int x = connectionIdx % groupSize;
int y = connectionIdx / groupSize;
int k = groupIdx * groupSize * groupSize + connectionIdx;
if (x == y)
{
inGroupConnectionWeghts[k] = 0.0f;
return;
}
if (turnedOnWires[k] >= inGroupWireProbability)
{
inGroupConnectionWeghts[k] = 0.0f;
}
}
__global__ void kernelProcessInGroupConnections(
float* neuronState,
float* bufferState,
float* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
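// Stage this group's neuron states in shared memory; the fixed buffer size implies groupSize <= 512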
__shared__ float groupNeuronState[512];
int currentNeuron = threadIdx.x;
int currentGroup = blockIdx.x;
if ((currentNeuron >= groupSize) || (currentGroup >= groupNumber))
{
return;
}
groupNeuronState[currentNeuron] = neuronState[currentGroup * groupSize + currentNeuron];
__syncthreads();
int connectionShift = groupSize * (currentGroup * groupSize + currentNeuron);
float ac = 0.0f;
for (int i = 0; i < groupSize; i++)
{
ac += inGroupConnectionWeghts[connectionShift + i] * groupNeuronState[i];
}
bufferState[currentGroup * groupSize + currentNeuron] += ac;
}
__global__ void kernelProcessInterGroupConnections(
float* neuronState,
float* bufferState,
int* interGroupConnectionFrom,
int* interGroupConnectionTo,
float* interGroupConnectionWeights,
int numberOfInterGroupConnections)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numberOfInterGroupConnections)
{
return;
}
int fromIdx = interGroupConnectionFrom[i];
int toIdx = interGroupConnectionTo[i];
if (neuronState[fromIdx] > 0.5f)
{
float weight = interGroupConnectionWeights[i];
atomicAdd(&bufferState[toIdx], weight);
}
}
__global__ void kernelApplyThreshold(
float* bufferState,
float* thresholds,
int groupSize,
int groupNumber)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= groupSize * groupNumber)
{
return;
}
if (bufferState[i] >= thresholds[i])
{
bufferState[i] = 1.0f;
}
else
{
bufferState[i] = 0.0f;
}
}
// Helper functions.
int RoundUpDiv(int x, int y)
{
return (x + y - 1) / y;
}
void Absify(
void* arr,
int size)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(size, BLOCK));
hipLaunchKernelGGL(( kernelAbs), dim3(blocks), dim3(threads), 0, 0, (float*)arr, size);
}
void EliminateConnections(
void* inGroupConnectionWeghts,
void* turnedOnWires,
int groupSize,
int groupNumber,
float inGroupWireProbability)
{
int BLOCK = 256;
dim3 threads(1, BLOCK);
dim3 blocks(groupNumber, RoundUpDiv(groupSize * groupSize, BLOCK));
hipLaunchKernelGGL(( kernelEliminateConnections), dim3(blocks), dim3(threads), 0, 0,
(float*)inGroupConnectionWeghts,
(float*)turnedOnWires,
groupSize,
groupNumber,
inGroupWireProbability);
}
// Just to double-check that everything is correct.
void EmulateProcessInGroupConnections(
void* neuronState,
void* bufferState,
void* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
std::vector<float> neuronStateVect(groupSize * groupNumber, 0.0f);
std::vector<float> bufferStateVect(groupSize * groupNumber, 0.0f);
std::vector<float> weightsVect(groupSize * groupSize * groupNumber, 0.0f);
hipMemcpy(&neuronStateVect[0], neuronState, groupSize * groupNumber * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&bufferStateVect[0], bufferState, groupSize * groupNumber * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&weightsVect[0], inGroupConnectionWeghts, groupSize * groupSize * groupNumber * sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < groupNumber; i++)
{
for (int j = 0; j < groupSize; j++)
{
int weightShift = groupSize * (groupSize * i + j);
float ac = 0.0f;
for (int k = 0; k < groupSize; k++)
{
ac += weightsVect[weightShift + k] * neuronStateVect[i * groupSize + k];
}
bufferStateVect[i * groupSize + j] += ac;
}
}
hipMemcpy(bufferState, &bufferStateVect[0], groupSize * groupNumber * sizeof(float), hipMemcpyHostToDevice);
}
void ProcessInGroupConnections(
void* neuronState,
void* bufferState,
void* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
dim3 threads(groupSize);
dim3 blocks(groupNumber);
hipLaunchKernelGGL(( kernelProcessInGroupConnections), dim3(blocks), dim3(threads), 0, 0,
(float*)neuronState,
(float*)bufferState,
(float*)inGroupConnectionWeghts,
groupSize,
groupNumber);
}
void ProcessInterGroupConnections(
void* neuronState,
void* bufferState,
void* interGroupConnectionFrom,
void* interGroupConnectionTo,
void* interGroupConnectionWeights,
int numberOfInterGroupConnections)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(numberOfInterGroupConnections, BLOCK));
hipLaunchKernelGGL(( kernelProcessInterGroupConnections), dim3(blocks), dim3(threads), 0, 0,
(float*)neuronState,
(float*)bufferState,
(int*)interGroupConnectionFrom,
(int*)interGroupConnectionTo,
(float*)interGroupConnectionWeights,
numberOfInterGroupConnections);
}
void ApplyThreshold(
void* bufferState,
void* thresholds,
int groupSize,
int groupNumber)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(groupSize * groupNumber, BLOCK));
hipLaunchKernelGGL(( kernelApplyThreshold), dim3(blocks), dim3(threads), 0, 0,
(float*)bufferState,
(float*)thresholds,
groupSize,
groupNumber);
} | 7e23199f4529b676d5b8fd3bb594854dea7b79b8.cu | #include "main.hpp"
#include "kernels.cuh"
// Device kernels.
__global__ void kernelAbs(
float* arr,
int size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= size)
{
return;
}
arr[i] = abs(arr[i]);
}
__global__ void kernelEliminateConnections(
float* inGroupConnectionWeghts,
float* turnedOnWires,
int groupSize,
int groupNumber,
float inGroupWireProbability)
{
int groupIdx = blockIdx.x;
int connectionIdx = blockIdx.y * blockDim.y + threadIdx.y;
if ((groupIdx >= groupNumber) || (connectionIdx >= groupSize * groupSize))
{
return;
}
int x = connectionIdx % groupSize;
int y = connectionIdx / groupSize;
int k = groupIdx * groupSize * groupSize + connectionIdx;
if (x == y)
{
inGroupConnectionWeghts[k] = 0.0f;
return;
}
if (turnedOnWires[k] >= inGroupWireProbability)
{
inGroupConnectionWeghts[k] = 0.0f;
}
}
__global__ void kernelProcessInGroupConnections(
float* neuronState,
float* bufferState,
float* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
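// Stage this group's neuron states in shared memory; the fixed buffer size implies groupSize <= 512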
__shared__ float groupNeuronState[512];
int currentNeuron = threadIdx.x;
int currentGroup = blockIdx.x;
if ((currentNeuron >= groupSize) || (currentGroup >= groupNumber))
{
return;
}
groupNeuronState[currentNeuron] = neuronState[currentGroup * groupSize + currentNeuron];
__syncthreads();
int connectionShift = groupSize * (currentGroup * groupSize + currentNeuron);
float ac = 0.0f;
for (int i = 0; i < groupSize; i++)
{
ac += inGroupConnectionWeghts[connectionShift + i] * groupNeuronState[i];
}
bufferState[currentGroup * groupSize + currentNeuron] += ac;
}
__global__ void kernelProcessInterGroupConnections(
float* neuronState,
float* bufferState,
int* interGroupConnectionFrom,
int* interGroupConnectionTo,
float* interGroupConnectionWeights,
int numberOfInterGroupConnections)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= numberOfInterGroupConnections)
{
return;
}
int fromIdx = interGroupConnectionFrom[i];
int toIdx = interGroupConnectionTo[i];
if (neuronState[fromIdx] > 0.5f)
{
float weight = interGroupConnectionWeights[i];
atomicAdd(&bufferState[toIdx], weight);
}
}
__global__ void kernelApplyThreshold(
float* bufferState,
float* thresholds,
int groupSize,
int groupNumber)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= groupSize * groupNumber)
{
return;
}
if (bufferState[i] >= thresholds[i])
{
bufferState[i] = 1.0f;
}
else
{
bufferState[i] = 0.0f;
}
}
// Helper functions.
int RoundUpDiv(int x, int y)
{
return (x + y - 1) / y;
}
void Absify(
void* arr,
int size)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(size, BLOCK));
kernelAbs<<<blocks, threads>>>((float*)arr, size);
}
void EliminateConnections(
void* inGroupConnectionWeghts,
void* turnedOnWires,
int groupSize,
int groupNumber,
float inGroupWireProbability)
{
int BLOCK = 256;
dim3 threads(1, BLOCK);
dim3 blocks(groupNumber, RoundUpDiv(groupSize * groupSize, BLOCK));
kernelEliminateConnections<<<blocks, threads>>>(
(float*)inGroupConnectionWeghts,
(float*)turnedOnWires,
groupSize,
groupNumber,
inGroupWireProbability);
}
// Just to double-check that everything is correct.
void EmulateProcessInGroupConnections(
void* neuronState,
void* bufferState,
void* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
std::vector<float> neuronStateVect(groupSize * groupNumber, 0.0f);
std::vector<float> bufferStateVect(groupSize * groupNumber, 0.0f);
std::vector<float> weightsVect(groupSize * groupSize * groupNumber, 0.0f);
cudaMemcpy(&neuronStateVect[0], neuronState, groupSize * groupNumber * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&bufferStateVect[0], bufferState, groupSize * groupNumber * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&weightsVect[0], inGroupConnectionWeghts, groupSize * groupSize * groupNumber * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < groupNumber; i++)
{
for (int j = 0; j < groupSize; j++)
{
int weightShift = groupSize * (groupSize * i + j);
float ac = 0.0f;
for (int k = 0; k < groupSize; k++)
{
ac += weightsVect[weightShift + k] * neuronStateVect[i * groupSize + k];
}
bufferStateVect[i * groupSize + j] += ac;
}
}
cudaMemcpy(bufferState, &bufferStateVect[0], groupSize * groupNumber * sizeof(float), cudaMemcpyHostToDevice);
}
void ProcessInGroupConnections(
void* neuronState,
void* bufferState,
void* inGroupConnectionWeghts,
int groupSize,
int groupNumber)
{
dim3 threads(groupSize);
dim3 blocks(groupNumber);
kernelProcessInGroupConnections<<<blocks, threads>>>(
(float*)neuronState,
(float*)bufferState,
(float*)inGroupConnectionWeghts,
groupSize,
groupNumber);
}
void ProcessInterGroupConnections(
void* neuronState,
void* bufferState,
void* interGroupConnectionFrom,
void* interGroupConnectionTo,
void* interGroupConnectionWeights,
int numberOfInterGroupConnections)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(numberOfInterGroupConnections, BLOCK));
kernelProcessInterGroupConnections<<<blocks, threads>>>(
(float*)neuronState,
(float*)bufferState,
(int*)interGroupConnectionFrom,
(int*)interGroupConnectionTo,
(float*)interGroupConnectionWeights,
numberOfInterGroupConnections);
}
void ApplyThreshold(
void* bufferState,
void* thresholds,
int groupSize,
int groupNumber)
{
int BLOCK = 256;
dim3 threads(BLOCK);
dim3 blocks(RoundUpDiv(groupSize * groupNumber, BLOCK));
kernelApplyThreshold<<<blocks, threads>>>(
(float*)bufferState,
(float*)thresholds,
groupSize,
groupNumber);
} |
6c936787277114a353a931f1e239bbb12d9cb38a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "z.h"
#define BLOCK_DIM_X 64
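// N(x): standard normal CDF, expressed through the error function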
#define N(x) (erf((x)/sqrt(2.0f))/2+0.5f)
__global__ void gpuBlackScholes(float* call,float* S,float* X,float* T,float* r,float* sigma,int len){
int ii=threadIdx.x+blockDim.x*blockIdx.x;
if(ii>=len){
return;
}
float d1=
(log(S[ii]/X[ii])+(r[ii]+(sigma[ii]*sigma[ii])/2)*T[ii])/(sigma[ii]*sqrt(T[ii]));
float d2=d1-sigma[ii]*sqrt(T[ii]);
call[ii]=S[ii]*N(d1)-X[ii]*exp(-r[ii]*T[ii])*N(d2);
}
void BlackSholes(zMemory_t out,zMemory_t S,zMemory_t X,zMemory_t T,zMemory_t r,zMemory_t sigma){
size_t len=zMemory_getFlattenedLength(S);
dim3 blockDim(BLOCK_DIM_X);
dim3 gridDim(zCeil(len,blockDim.x));
zState_t st=zMemory_getState(out);
hipStream_t strm=zState_getComputeStream(st,zMemory_getId(out));
hipLaunchKernelGGL(( gpuBlackScholes), dim3(gridDim),dim3(blockDim),0,strm,
(float*)zMemory_getDeviceMemory(out),
(float*)zMemory_getDeviceMemory(S),
(float*)zMemory_getDeviceMemory(X),
(float*)zMemory_getDeviceMemory(T),
(float*)zMemory_getDeviceMemory(r),
(float*)zMemory_getDeviceMemory(sigma),
len);
return;
}
int main(int argc,char* argv[]){
size_t dim=atoi(argv[1]);
zMemoryGroup_t S=zReadFloatArray(st,"S",1,&dim);
zMemoryGroup_t X=zReadFloatArray(st,"X",1,&dim);
zMemoryGroup_t T=zReadFloatArray(st,"T",1,&dim);
zMemoryGroup_t r=zReadFloatArray(st,"r",1,&dim);
zMemoryGroup_t q=zReadFloatArray(st,"q",1,&dim);
zMemoryGroup_t out=zMemoryGroup_new(st,zMemoryType_float,1,&dim);
zMapGroupFunction_t mapFun=zMapGroupFunction_new(st,"blackScholes",BlackSholes);
zMap(st,mapFun,out,S,X,T,r,q);
zWriteFloatArray(st,"out",out);
return 0;
} | 6c936787277114a353a931f1e239bbb12d9cb38a.cu |
#include "z.h"
#define BLOCK_DIM_X 64
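// N(x): standard normal CDF, expressed through the error function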
#define N(x) (erf((x)/sqrt(2.0f))/2+0.5f)
__global__ void gpuBlackScholes(float* call,float* S,float* X,float* T,float* r,float* sigma,int len){
int ii=threadIdx.x+blockDim.x*blockIdx.x;
if(ii>=len){
return;
}
float d1=
(log(S[ii]/X[ii])+(r[ii]+(sigma[ii]*sigma[ii])/2)*T[ii])/(sigma[ii]*sqrt(T[ii]));
float d2=d1-sigma[ii]*sqrt(T[ii]);
call[ii]=S[ii]*N(d1)-X[ii]*exp(-r[ii]*T[ii])*N(d2);
}
void BlackSholes(zMemory_t out,zMemory_t S,zMemory_t X,zMemory_t T,zMemory_t r,zMemory_t sigma){
size_t len=zMemory_getFlattenedLength(S);
dim3 blockDim(BLOCK_DIM_X);
dim3 gridDim(zCeil(len,blockDim.x));
zState_t st=zMemory_getState(out);
cudaStream_t strm=zState_getComputeStream(st,zMemory_getId(out));
gpuBlackScholes<<<gridDim,blockDim,0,strm>>>
((float*)zMemory_getDeviceMemory(out),
(float*)zMemory_getDeviceMemory(S),
(float*)zMemory_getDeviceMemory(X),
(float*)zMemory_getDeviceMemory(T),
(float*)zMemory_getDeviceMemory(r),
(float*)zMemory_getDeviceMemory(sigma),
len);
return;
}
int main(int argc,char* argv[]){
size_t dim=atoi(argv[1]);
zMemoryGroup_t S=zReadFloatArray(st,"S",1,&dim);
zMemoryGroup_t X=zReadFloatArray(st,"X",1,&dim);
zMemoryGroup_t T=zReadFloatArray(st,"T",1,&dim);
zMemoryGroup_t r=zReadFloatArray(st,"r",1,&dim);
zMemoryGroup_t q=zReadFloatArray(st,"q",1,&dim);
zMemoryGroup_t out=zMemoryGroup_new(st,zMemoryType_float,1,&dim);
zMapGroupFunction_t mapFun=zMapGroupFunction_new(st,"blackScholes",BlackSholes);
zMap(st,mapFun,out,S,X,T,r,q);
zWriteFloatArray(st,"out",out);
return 0;
} |
ba10c29d3b2882953904599c51f70999fc055cfc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void im2col(float *A, int inputSize, int depth, int kernelSize, int stride, int pad, float *col, int outputSize) {
// One thread handles the unrolling for a single output position, i.e. one thread produces one row vector of col
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if( !(i < outputSize) || !(j < outputSize) ) return;
int Ai = i * stride;
int Aj = j * stride;
for( int d = 0; d < depth; d++ ) {
for(int k = 0; k < kernelSize; k++ ) {
for( int l = 0; l < kernelSize; l++) {
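// Positions that fall outside the input (i.e. in the zero-padded border) contribute nothing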
if( Ai + k - pad < 0 || !(Ai + k - pad < inputSize) || Aj + l - pad < 0 || !( Aj + l - pad < inputSize)) {
//col[ d*outputSize*outputSize*kernelSize*kernelSize + (i*outputSize + j)*kernelSize*kernelSize + k*kernelSize + l] = 0;
col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] = 0;
}
else col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] \
= A[d*inputSize*inputSize + (Ai + k - pad)*inputSize + Aj + l - pad ];
}
}
}
}
// Compute C = A*v, where A is m*n and v is n*1
__global__
void gemm(float *A, float *B, float *C, int m, int n) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if( !( i < m ) ) return;
float sum = 0;
for( int l = 0; l < n; l++ ) {
sum += A[i*n + l] * B[l];
}
C[i] = sum;
}
int main(int argc, char * argv[] ) {
// input: inputSize*inputSize*depth
// kernel: kernelSize*kernelSize*depth
// output: outputSize*outputSize
int inputSize = 7;
int depth = 3;
int kernelSize = 3;
int kernelNum = 3;
int stride[3] = {1 , 2 , 3 };
int pad[3] = {0,0,0};
int outputSize[3];
// Compute the padding pad and the output size outputSize required for each stride
for(int i = 0; i < kernelNum; i++) {
if((inputSize - kernelSize)%stride[i] != 0) {
pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
}
outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
}
// ============================= Resource allocation and initialization =========================
// ==== CPU resource allocation and initialization
// input:A kernel:kernel output:B
float *A, *kernel[3], *B[3];
A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
}
// Initialize input A
for(int d = 0; d < depth; d++) {
for(int i=0; i<inputSize*inputSize; i++) {
A[d*inputSize*inputSize + i] = i;
}
}
// Initialize kernel
for(int i = 0; i < 3; i++){
for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
kernel[i][j] = 1;
}
}
// ==== GPU resource allocation and initialization
float *d_A, *d_kernel[3], *d_B[3], *d_col[3];
hipMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
hipMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
hipMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth);
hipMalloc((void**)&d_col[i], sizeof(float)*outputSize[i]*outputSize[i]*kernelSize*kernelSize*depth);
}
hipMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,hipMemcpyHostToDevice);
for(int i = 0; i < 3; i++) {
hipMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,hipMemcpyHostToDevice);
}
// ============================= Kernel launches =========================
// ===== Launch im2col
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i])+ 1);
int blocky = blockx;
dim3 Block(blockx,blocky);
dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
hipLaunchKernelGGL(( im2col) , dim3(Grid), dim3(Block) , 0, 0, d_A,inputSize,depth,kernelSize,stride[i],pad[i],d_col[i],outputSize[i]);
}
hipDeviceSynchronize();
// ==== Launch gemm
struct timeval start, end;
gettimeofday( &start, NULL );
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i]*outputSize[i])+ 1);
dim3 Block(blockx);
dim3 Grid((outputSize[i]*outputSize[i]+Block.x-1) / Block.x);
hipLaunchKernelGGL(( gemm) , dim3(Grid), dim3(Block) , 0, 0, d_col[i],d_kernel[i],d_B[i],outputSize[i]*outputSize[i],kernelSize*kernelSize*depth);
}
// Copy results back to host
for( int i = 0; i < 3; i++ ) {
hipMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth,hipMemcpyDeviceToHost);
}
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("total time is %f ms\n", timeuse/(float)1000);
// Write results to file
FILE *b[3];
b[0] = fopen("matrixB21.m", "wb");
b[1] = fopen("matrixB22.m", "wb");
b[2] = fopen("matrixB23.m", "wb");
for(int k = 0; k < 3; k++ ) {
fprintf(b[k], "B = [ \n");
for (int i = 0; i < outputSize[k]; i++)
{
for (int j = 0; j < outputSize[k]; j++)
fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
fprintf(b[k], "\n");
}
fprintf(b[k], "];");
}
// ============================= Resource cleanup =========================
free(A);
hipFree(d_A);
for(int i = 0; i < 3; i++) {
free(kernel[i]);
free(B[i]);
hipFree(d_B[i]);
hipFree(d_kernel[i]);
hipFree(d_col[i]);
fclose(b[i]);
}
return 0;
} | ba10c29d3b2882953904599c51f70999fc055cfc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void im2col(float *A, int inputSize, int depth, int kernelSize, int stride, int pad, float *col, int outputSize) {
// One thread handles the unrolling for a single output position, i.e. one thread produces one row vector of col
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
if( !(i < outputSize) || !(j < outputSize) ) return;
int Ai = i * stride;
int Aj = j * stride;
for( int d = 0; d < depth; d++ ) {
for(int k = 0; k < kernelSize; k++ ) {
for( int l = 0; l < kernelSize; l++) {
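// Positions that fall outside the input (i.e. in the zero-padded border) contribute nothing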
if( Ai + k - pad < 0 || !(Ai + k - pad < inputSize) || Aj + l - pad < 0 || !( Aj + l - pad < inputSize)) {
//col[ d*outputSize*outputSize*kernelSize*kernelSize + (i*outputSize + j)*kernelSize*kernelSize + k*kernelSize + l] = 0;
col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] = 0;
}
else col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] \
= A[d*inputSize*inputSize + (Ai + k - pad)*inputSize + Aj + l - pad ];
}
}
}
}
// Compute C = A*v, where A is m*n and v is n*1
__global__
void gemm(float *A, float *B, float *C, int m, int n) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if( !( i < m ) ) return;
float sum = 0;
for( int l = 0; l < n; l++ ) {
sum += A[i*n + l] * B[l];
}
C[i] = sum;
}
int main(int argc, char * argv[] ) {
// input: inputSize*inputSize*depth
// kernel: kernelSize*kernelSize*depth
// output: outputSize*outputSize
int inputSize = 7;
int depth = 3;
int kernelSize = 3;
int kernelNum = 3;
int stride[3] = {1 , 2 , 3 };
int pad[3] = {0,0,0};
int outputSize[3];
// Compute the padding pad and the output size outputSize required for each stride
for(int i = 0; i < kernelNum; i++) {
if((inputSize - kernelSize)%stride[i] != 0) {
pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
}
outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
}
// ============================= Resource allocation and initialization =========================
// ==== CPU resource allocation and initialization
// input:A kernel:kernel output:B
float *A, *kernel[3], *B[3];
A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
}
// Initialize input A
for(int d = 0; d < depth; d++) {
for(int i=0; i<inputSize*inputSize; i++) {
A[d*inputSize*inputSize + i] = i;
}
}
// Initialize kernel
for(int i = 0; i < 3; i++){
for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
kernel[i][j] = 1;
}
}
// ==== GPU resource allocation and initialization
float *d_A, *d_kernel[3], *d_B[3], *d_col[3];
cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
for(int i = 0; i < 3; i++) {
cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth);
cudaMalloc((void**)&d_col[i], sizeof(float)*outputSize[i]*outputSize[i]*kernelSize*kernelSize*depth);
}
cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice);
for(int i = 0; i < 3; i++) {
cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice);
}
// ============================= Kernel launches =========================
// ===== Launch im2col
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i])+ 1);
int blocky = blockx;
dim3 Block(blockx,blocky);
dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
im2col <<< Grid, Block >>> (d_A,inputSize,depth,kernelSize,stride[i],pad[i],d_col[i],outputSize[i]);
}
cudaDeviceSynchronize();
// ==== Launch gemm
struct timeval start, end;
gettimeofday( &start, NULL );
for( int i = 0; i < 3; i++ ) {
int blockx = (int) (log2(outputSize[i]*outputSize[i])+ 1);
dim3 Block(blockx);
dim3 Grid((outputSize[i]*outputSize[i]+Block.x-1) / Block.x);
gemm <<< Grid, Block >>> (d_col[i],d_kernel[i],d_B[i],outputSize[i]*outputSize[i],kernelSize*kernelSize*depth);
}
// Copy results back to host
for( int i = 0; i < 3; i++ ) {
cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth,cudaMemcpyDeviceToHost);
}
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
printf("total time is %f ms\n", timeuse/(float)1000);
// Write results to file
FILE *b[3];
b[0] = fopen("matrixB21.m", "wb");
b[1] = fopen("matrixB22.m", "wb");
b[2] = fopen("matrixB23.m", "wb");
for(int k = 0; k < 3; k++ ) {
fprintf(b[k], "B = [ \n");
for (int i = 0; i < outputSize[k]; i++)
{
for (int j = 0; j < outputSize[k]; j++)
fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
fprintf(b[k], "\n");
}
fprintf(b[k], "];");
}
// ============================= Resource cleanup =========================
free(A);
cudaFree(d_A);
for(int i = 0; i < 3; i++) {
free(kernel[i]);
free(B[i]);
cudaFree(d_B[i]);
cudaFree(d_kernel[i]);
cudaFree(d_col[i]);
fclose(b[i]);
}
return 0;
} |
326f3a0bf6f52845db580cbd6dd2205406ee55c9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
hipStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive at ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
int Layer::getTaskId(){
return -1;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false but it's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// weight update period must be multiple of activation period
// TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
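// Worked example with hypothetical numbers (not from any particular config): a 128-case
// minibatch split into 4 passes gives 32 cases per pass; with _weightUpdatePassPeriod = 4,
// numCases = 4 * 32 = 128 cases accumulated per weight update, so the gradient below is
// scaled by 1/128.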
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
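// (Interpretation of the existing code, not new behavior) When doPartialSum is true,
// convWeightActs below writes per-region partial gradients into _weightGradTmp (one row block per
// sumWidth x sumWidth group of modules), and the reshape/addSum in the if-block then reduces over
// the outWidth*outWidth regions into the actual gradient matrix.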
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* AggSoftmaxLayer
* =======================
*/
AggSoftmaxLayer::AggSoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
_hAgg = pyDictGetMatrix(paramsDict, "agg");
}
void AggSoftmaxLayer::copyToGPU(){
_Agg.copyFromHost(*_hAgg, true);
}
void AggSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// Backprop here is designed for averaging multiple predictions;
// it is not suitable for selection.
NVMatrix& input = *_inputs[0];
NVMatrix AggInput;
//
//AggInput.max(1, _max);
//input.addVector(_max, -1, getActs());
//getActs().apply(NVMatrixOps::Exp());
//getActs().sum(1, _sum);
//getActs().eltwiseDivideByVector(_sum);
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
_Pl = getActs();
NVMatrix destsum;
getActs().rightMult(_Agg, 1, destsum);
destsum.copy(getActs());
}
void AggSoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
NVMatrix target;
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
//computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
computeLogregAggSoftmaxGrad(labels, _Pl, getActs(), target, 0, gradCoeff);
if(_next[0]->getType()=="cost.tasklogreg") {
//std::cout << "next[0]->getType == cost.tasklogreg" << std::endl;
NVMatrix& tasks = _next[0]->getPrev()[replicaIdx][1]->getActs(getDeviceID());
//std::cout << "tasks matrix in softmax: " << std::endl;
//tasks.print(0, tasks.getNumRows(), 0, tasks.getNumCols());
int taskId = _next[0]->getTaskId();
//std::cout << "taskId : " << taskId << std::endl;
assert(taskId >= 0);
NVMatrix taskIndict;
tasks.equalToScalar(taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
prev[0]->getActsGrad().add(target, scaleTargets==1, 1);
break;
}
}
}
else {
//computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
std::cout << "Why this aggsoftmaxlayers should be here?" << std::endl;
int quit = 1;
assert(quit == 0);
}
}
/*
* =======================
* AggCoarseFineSoftmaxLayer
* =======================
*/
AggCoarseFineSoftmaxLayer::AggCoarseFineSoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
_hCtFAgg = pyDictGetMatrix(paramsDict, "agg");
_htype = pyDictGetString(paramsDict, "htype");
_hAvgAgg = pyDictGetMatrix(paramsDict, "avgagg");
}
void AggCoarseFineSoftmaxLayer::copyToGPU(){
_CtFAgg.copyFromHost(*_hCtFAgg, true);
_AvgAgg.copyFromHost(*_hAvgAgg, true);
}
// Does this layer produce gradient for layers below?
bool AggCoarseFineSoftmaxLayer::isGradProducer() {
return false;
}
void AggCoarseFineSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// take two outputs from two previous layers, 0 -> coarse predictions, 1-> fine predictions
if(inpIdx==0){
NVMatrix& cPreds = *_inputs[0]; // 128*20
NVMatrix& fPreds = *_inputs[1]; // 128*100
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "cPreds size= " << cPreds.getNumRows() << "," << cPreds.getNumCols() << std::endl;
//std::cout << "fPreds size= " << fPreds.getNumRows() << "," << fPreds.getNumCols() << std::endl;
if(_htype=="hard"){ //input are predictions
NVMatrix& cmax = cPreds.max(1);
NVMatrix cPredsBin;
cPreds.biggerEqualToVector(cmax, cPredsBin); // binarize the maximum entry in the prediction matrix
NVMatrix fPredsMask;
cPredsBin.rightMult(_CtFAgg, 1, fPredsMask); // binary mask for fine-label probability computation
fPredsMask.eltwiseMult(fPreds);
NVMatrix& max = fPredsMask.max(1);
fPredsMask.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &cmax;
delete &max;
delete &sum;
} else if (_htype=="soft"){// input are also predictions
NVMatrix& fmax = fPreds.max(1);
fPreds.addVector(fmax, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix catsum, flatsum;
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "CtFAgg Size= " << _CtFAgg.getNumRows() << "," << _CtFAgg.getNumCols() << std::endl;
//std::cout << "CtFAgg transpose Size= " << _CtFAgg.getTranspose().getNumRows() << "," << _CtFAgg.getTranspose().getNumCols() << std::endl;
getActs().rightMult(_CtFAgg.getTranspose(), 1, catsum); //category sum 128*20
catsum.rightMult(_CtFAgg, 1, flatsum); // 128*100
getActs().eltwiseDivide(flatsum);//getActs_{ij} = Pr(j|i, c), \sum_{j\in c}\Pr(j|i, c)=1
NVMatrix& cmax = cPreds.max(1);
cPreds.addVector(cmax, -1, catsum);
catsum.apply(NVMatrixOps::Exp());
NVMatrix& csum = catsum.sum(1);
catsum.eltwiseDivideByVector(csum); //catsum_{ic}=Pr(c|i)
catsum.rightMult(_CtFAgg, 1, flatsum); // 128*100
getActs().eltwiseMult(flatsum); // getActs_ij = \Pr(j|i, c)\Pr(c|i) , j\in c
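// Summary of the "soft" path above (a restatement of the inline comments, assuming _CtFAgg is the
// 0/1 coarse-to-fine assignment matrix): the fine posterior is factored as
//   Pr(j | i) = Pr(j | i, c(j)) * Pr(c(j) | i),
// where Pr(j | i, c) is a softmax over the fine scores restricted to coarse class c (the
// eltwiseDivide by flatsum) and Pr(c | i) is a softmax over the coarse scores (the catsum
// normalization); the eltwiseMult above combines the two factors for each fine label.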
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "avgAgg Size= " << _AvgAgg.getNumRows() << "," << getActs().getNumCols() << std::endl;
NVMatrix destsum;
getActs().rightMult(_AvgAgg.getTranspose(), 1, destsum); //getActs grouped sum together
//std::cout << "destsum Size= " << destsum.getNumRows() << "," << destsum.getNumCols() << std::endl;
//std::cout << "getActs before size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
destsum.copy(getActs());
//std::cout << "getActs after size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//NVMatrix& sum = getActs().sum(1);
//float max = sum.max();
//float min = sum.min();
//assert(abs(max-1)<0.001);
//assert(abs(min-1)<0.001);
//delete &sum;
delete &csum;
delete &cmax;
delete &fmax;
}
//hacked by Saining
//hacked by TB
/*Matrix inputmat;
AggInput.copyToHost(inputmat, true);
std::cout<<"input to softmax "<<_name<<std::endl;
inputmat.print(); */
/*std::cout<<"output of softmax "<<_name<<std::endl;
Matrix outputmat;
getActs().copyToHost(outputmat, true);
outputmat.print();*/
//std::cout<<"layer="<<_name<<", fprob done: from input size ("<<(*_inputs[0]).getNumRows()<<","<<(*_inputs[0]).getNumCols()
// <<") producing output size ("<<getActs().getNumRows()<<","<<getActs().getNumCols()<<")"<<std::endl;
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
NVMatrix target;
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
//computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
computeLogregSoftmaxGrad(labels, getActs(), target, 0, gradCoeff);
if(_next[0]->getType()=="cost.tasklogreg") {
//std::cout << "next[0]->getType == cost.tasklogreg" << std::endl;
NVMatrix& tasks = _next[0]->getPrev()[replicaIdx][1]->getActs(getDeviceID());
//std::cout << "tasks matrix in softmax: " << std::endl;
//tasks.print(0, tasks.getNumRows(), 0, tasks.getNumCols());
int taskId = _next[0]->getTaskId();
//std::cout << "taskId : " << taskId << std::endl;
assert(taskId >= 0);
NVMatrix taskIndict;
tasks.equalToScalar(taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
prev[0]->getActsGrad().add(target, scaleTargets==1, 1);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
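// (Interpretation) The loop above makes this layer a zero-copy concatenation: each input layer
// writes its activations and gradients directly into the [offset, offset + numOutputs) window of
// this layer's memory source, which is why fpropActs/bpropActs can be no-ops.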
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // Nothing is done for inpIdx == 0; on the second input, take the elementwise max of inputs 0 and 1
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(hipStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
hipStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : Thread(true, cpus), _parent(&parent), _sleepUsec(0) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
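// (Interpretation of the formula above) _sleepUsec is an exponential moving average whose
// steady state is roughly (replicaID / numReplicas) * (average request interval in usec) / 20,
// so higher-numbered replicas stagger their copies slightly later, consistent with the
// "up to 1/20th" comment.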
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a minibatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
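// Worked example with the hypothetical numbers from the comment above: 7 cases and 8 replicas
// give microbatchSize = DIVUP(7, 8) = 1, so replica 7 gets microStart = 7, microEnd = min(7, 8) = 7
// and is skipped, while replicas 0..6 each copy exactly one case.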
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
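// The closed form above is the sum-of-squares identity: scale index s corresponds to
// rescaledSize = _tgtSize + s in fpropActs below, which admits (s + 1)^2 distinct _tgtSize crops,
// and sum_{k=1}^{n} k^2 = n * (n + 1) * (2n + 1) / 6 with n = numScales, which is exactly numCrops.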
// For each scale, record the cumulative fraction of squares accounted for up to and including it.
// Sampling r ~ U[0,1) and taking the first scale s with r <= _scaleProbs[s] then picks each scale
// with probability proportional to its number of squares.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.tasklogreg") {
return *new TaskLogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
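// Sketch of why (no change in behavior): with p = softmax(z) and one-hot labels y, the gradient of
// the cross-entropy w.r.t. the probabilities involves -y / p, which is ill-conditioned when p is
// tiny; letting the softmax layer fold its Jacobian in yields dC/dz = p - y directly, which is
// what happens when doWork is false and the gradient is left to the softmax layer.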
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
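// These three counts are enough for the consumer to form, per class,
// precision = numTruePositive / numDeclaredPositive and recall = numTruePositive / numPositive.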
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* TaskLogregCostLayer
* =====================
*/
TaskLogregCostLayer::TaskLogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LogregCostLayer(convNetThread, paramsDict, replicaID) {
_topk = pyDictGetInt(paramsDict, "topk");
_taskId = pyDictGetInt(paramsDict, "taskId");
assert(_taskId >= 0);
// _numAccumed = 0;
}
void TaskLogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& tasks = *_inputs[1];
NVMatrix* probs = _inputs[2];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
//Task mask part
NVMatrix taskIndict;
//std::cout << "taskId in TaskLogreg: " << _taskId << std::endl;
tasks.equalToScalar(_taskId, taskIndict);
//std::cout << "tasks row " << tasks.getNumRows() << "task cols " << tasks.getNumCols() << std::endl;
//std::cout << "taskIndict row " << taskIndict.getNumRows() << "taskIndcit cols " << taskIndict.getNumCols() << std::endl;
//std::cout << "correctProbs row " << _correctProbs.getNumRows() << "_correctProbs cols " << _correctProbs.getNumCols() << std::endl;
//std::cout << "trueProbs row " << _trueLabelLogProbs.getNumRows() << "_correctProbs cols " << _trueLabelLogProbs.getNumCols() << std::endl;
//std::cout << "topkprobsProbs row " << _topkProbs.getNumRows() << "_correctProbs cols " << _topkProbs.getNumCols() << std::endl;
//std::cout << "taskIndict:" << std::endl;
//taskIndict.print(0,taskIndict.getNumRows(),0,taskIndict.getNumCols());
//std::cout << "_correctProbs before:" << std::endl;
//_correctProbs.print(0,_correctProbs.getNumRows(),0,_correctProbs.getNumCols());
_correctProbs.eltwiseMult(taskIndict); // mask out cases belonging to other tasks
//std::cout << "_correctProbs after:" << std::endl;
//_correctProbs.print(0,_correctProbs.getNumRows(),0,_correctProbs.getNumCols());
//std::cout << "_trueLabelLogProbs before:" << std::endl;
//_trueLabelLogProbs.print(0,_trueLabelLogProbs.getNumRows(),0,_trueLabelLogProbs.getNumCols());
_trueLabelLogProbs.eltwiseMult(taskIndict); // mask out cases belonging to other tasks
//std::cout << "_trueLabelLogProbs after:" << std::endl;
//_trueLabelLogProbs.print(0,_trueLabelLogProbs.getNumRows(),0,_trueLabelLogProbs.getNumCols());
if (_topk > 1) {
_topkProbs.eltwiseMult(taskIndict);
}
int taskCases=taskIndict.sum();
_costv.clear();
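// The sums below only cover cases belonging to _taskId, so they are rescaled by
// numCases / taskCases, presumably so that a downstream average over all numCases
// cases comes out as a per-task-case average.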
if(taskCases != 0) {
double top1 = _correctProbs.sum(_tmpbuf);
//std::cout << "top1: " << std::endl;
//std::cout << top1 << std::endl;
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)*numCases/taskCases);
_costv.push_back((taskCases - top1)*numCases/taskCases);
_costv.push_back((taskCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)))*numCases/taskCases);
}
}
}
}
int TaskLogregCostLayer::getTaskId(){
return _taskId;
}
NVMatrix& TaskLogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void TaskLogregCostLayer::bprop(PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(passType, passIdx);
}
}
void TaskLogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 2);
if (inpIdx == 2) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[2];
NVMatrix& target = prev[2]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[2]->getNext().size() > 1 || ( prev[2]->getType() != "softmax" && prev[2]->getType() != "aggsoftmax" && prev[2]->getType() != "aggcoarsefinesoftmax")
|| prev[2]->getDeviceID() != getDeviceID() || prev[2]->getNumReplicas() != getNumReplicas();
if (prev[2]->getType() == "softmax" || prev[2]->getType() == "aggsoftmax" || prev[2]->getType() == "aggcoarsefinesoftmax") {
static_cast<SoftmaxLayer*>(prev[2])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
NVMatrix& tasks = prev[1]->getActs();
NVMatrix taskIndict;
tasks.equalToScalar(_taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || ( prev[1]->getType() != "softmax" && prev[1]->getType() != "aggsoftmax" && prev[1]->getType() != "aggcoarsefinesoftmax")
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax" || prev[1]->getType() == "aggsoftmax" || prev[1]->getType() == "aggcoarsefinesoftmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), _tmp);
_costv.clear();
_costv.push_back(_tmp.sum());
}
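// For reference: the cost above is sum(x^2); its derivative is 2*x, so the backward pass
// below adds -2 * _coeff * input into the previous layer's gradient (negated per the sign
// convention used throughout these cost layers).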
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
/*
* =======================================================
* WeightCostLayer, input weights from two previous layers (theta = weights[0], beta = weights[1], M = regTemp)
* =======================================================
*/
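// Two regularizers are implemented below (see fpropActs / bpropWeights):
//   "dist": cost = sum((theta*M - beta).^2) * numCases / 2, tying beta to a linear map of theta.
//   "simi": cost = sum(M .* abs(theta'*beta)) * numCases, penalizing similarity between the two weight sets.
// The gradients in bpropWeights follow directly from these expressions.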
WeightCostLayer::WeightCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
//std::cout<<"layer= "<<_name<<"weights nums = "<<weightSourceLayerIndices.size()<<std::endl;
//floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
for (int i = 0; i < weightSourceLayers.size(); i++) {
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
}
_hregTemp = pyDictGetMatrix(paramsDict, "regTemp");
_regType = pyDictGetString(paramsDict, "regType");
//delete &epsW;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &weightSourceLayers;
}
void WeightCostLayer::copyToGPU(){
_regTemp.copyFromHost(*_hregTemp, true);
_weights->copyToGPU();
}
// Does this layer produce gradient for layers below?
bool WeightCostLayer::isGradProducer() {
return false;
}
void WeightCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if(inpIdx==0){
int numCases = Layer::getNumCases(*_inputs[0]);
//int numCases = getPrev()[replicaIdx][0]->getActs().getNumCols();
//std::cout<<"number of cases = "<<numCases<<std::endl;
if(_regType=="dist"){// minimize distance between two sets of weights
//std::cout<<"w0 size = "<<_weights[0].getW().getNumRows()<<","<<_weights[0].getW().getNumCols()<<std::endl;
//std::cout<<"w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<std::endl;
//std::cout<<"regTemp size = "<<_regTemp.getNumRows() << "," << _regTemp.getNumCols() << std::endl;
_weights->at(0).getW().rightMult(_regTemp, 1, _cost); //cost = theta * M
_cost.subtract(_weights->at(1).getW()); // cost = theta*M - beta
_cost.apply(NVMatrixOps::Square(), getActs()); // getActs = cost.^2
_costv.clear(); //Add by Saining, otherwise wrong during aggregate test outputs
_costv.push_back(getActs().sum() * numCases/2);
} else if(_regType == "simi"){//minimize the similarity between two sets of weights
// \sum_{i in input layer 0 weights indices} \sum_{j in input layer 1 weights indices} regTemp_{ij}|w_i^T w_j|
NVMatrix& fweight_T = _weights->at(0).getW().getTranspose();
//std::cout<<"w0T size ="<<fweight_T.getNumRows()<<","<<fweight_T.getNumCols()<<std::endl;
//std::cout<<"w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<std::endl;
fweight_T.rightMult(_weights->at(1).getW(), 1, _cost); // cost = theta'*beta
_cost.apply(NVMatrixOps::Abs(), getActs()); // cost = abs(theta'*beta)
getActs().eltwiseMult(_regTemp); // getActs = abs(theta'*beta).*M;
_costv.clear();
_costv.push_back(getActs().sum() * numCases);
//std::cout<<"weight cost = "<<_costv.back()<<std::endl;
delete &fweight_T;
}
}
}
void WeightCostLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
//if (_weights[i].getEps() > 0) {
bpropWeights(i, passType);
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
void WeightCostLayer::bpropWeights(int inpIdx, PASS_TYPE passType) {
if(_regType=="dist"){
float scaleCurGrad = (_weights->at(inpIdx).getNumUpdates() > 0 && passType != PASS_GC) * 1;
if(inpIdx==0)
_weights->at(0).getGrad().addProduct(_cost, _regTemp.getTranspose(), scaleCurGrad, -_coeff*_weights->at(0).getEps(getConvNet().getTrainingProgress())); // - partial theta = - coeff*(theta*M - beta)*M';
else if(inpIdx==1)
_weights->at(1).getGrad().add(_cost, scaleCurGrad, _coeff*_weights->at(1).getEps(getConvNet().getTrainingProgress())); // - partial beta = coeff* (theta*M - beta)
} else if (_regType=="simi"){
_cost.apply(NVMatrixOps::Sign(), getActsGrad());
getActsGrad().eltwiseMult(_regTemp);
float scaleCurGrad = (_weights->at(inpIdx).getNumUpdates() > 0 && passType != PASS_GC) * 1;
//std::cout<<"w0 size = "<<_weights[0].getW().getNumRows()<<","<<_weights[0].getW().getNumCols()<<", w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<", actsGrad size="<<getActsGrad().getNumRows()<<","<<getActsGrad().getNumCols()<<std::endl;
if(inpIdx==0)
_weights->at(0).getGrad().addProduct(_weights->at(1).getW(), getActsGrad().getTranspose(), scaleCurGrad, -_coeff*_weights->at(0).getEps(getConvNet().getTrainingProgress()));// - partial theta = -coeff*beta*(sign(theta'*beta).*M)'
else if(inpIdx==1)
_weights->at(1).getGrad().addProduct(_weights->at(0).getW(), getActsGrad(), scaleCurGrad, -_coeff*_weights->at(1).getEps(getConvNet().getTrainingProgress())); // - partial beta = - coeff*theta*(sign(theta'*beta).*M)
}
}
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
}
Layer::~Layer() {
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
cudaStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
// This does sync, but only if it has grad consumers below! so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
int Layer::getTaskId(){
return -1;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
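// (For reference: for p = sigmoid(z) feeding a binomial cross-entropy cost, dC/dz is
// proportional to (labels - p) when posWeight == 1, which the fused operators below apply directly.)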
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
// This will cause it to forget momentum when shown 0 training cases
// and _useGrad = false but it's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
// weight update period must be multiple of activation period
// TODO: simply accumulate # of cases seen between weight updates. simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
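// numCases estimates the number of training cases consumed between weight updates:
// (passes between updates) * (minibatch size / passes per minibatch).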
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
float scaleGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
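// When _sumWidth < _modulesX, convWeightActs writes per-region partial gradients into
// _weightGradTmp; the reshape + addSum below collapses the outWidth*outWidth regions
// into the final (filterChannels*filterPixels) x numFilters gradient matrix.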
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* AggSoftmaxLayer
* =======================
*/
AggSoftmaxLayer::AggSoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
_hAgg = pyDictGetMatrix(paramsDict, "agg");
}
void AggSoftmaxLayer::copyToGPU(){
_Agg.copyFromHost(*_hAgg, true);
}
void AggSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
//BP designed for averaging multiple predictions
//not suitable for selection
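// Forward pass: compute an ordinary softmax over the input, keep a copy in _Pl, then
// right-multiply by _Agg so that each output column combines (typically sums) the softmax
// probabilities of the input classes that _Agg maps onto it.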
NVMatrix& input = *_inputs[0];
NVMatrix AggInput;
//
//AggInput.max(1, _max);
//input.addVector(_max, -1, getActs());
//getActs().apply(NVMatrixOps::Exp());
//getActs().sum(1, _sum);
//getActs().eltwiseDivideByVector(_sum);
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
_Pl = getActs();
NVMatrix destsum;
getActs().rightMult(_Agg, 1, destsum);
destsum.copy(getActs());
}
void AggSoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
NVMatrix target;
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
//computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
computeLogregAggSoftmaxGrad(labels, _Pl, getActs(), target, 0, gradCoeff);
if(_next[0]->getType()=="cost.tasklogreg") {
//std::cout << "next[0]->getType == cost.tasklogreg" << std::endl;
NVMatrix& tasks = _next[0]->getPrev()[replicaIdx][1]->getActs(getDeviceID());
//std::cout << "tasks matrix in softmax: " << std::endl;
//tasks.print(0, tasks.getNumRows(), 0, tasks.getNumCols());
int taskId = _next[0]->getTaskId();
//std::cout << "taskId : " << taskId << std::endl;
assert(taskId >= 0);
NVMatrix taskIndict;
tasks.equalToScalar(taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
prev[0]->getActsGrad().add(target, scaleTargets==1, 1);
break;
}
}
}
else {
//computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
std::cout << "Why this aggsoftmaxlayers should be here?" << std::endl;
int quit = 1;
assert(quit == 0);
}
}
/*
* =======================
* AggCoarseFineSoftmaxLayer
* =======================
*/
AggCoarseFineSoftmaxLayer::AggCoarseFineSoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
_hCtFAgg = pyDictGetMatrix(paramsDict, "agg");
_htype = pyDictGetString(paramsDict, "htype");
_hAvgAgg = pyDictGetMatrix(paramsDict, "avgagg");
}
void AggCoarseFineSoftmaxLayer::copyToGPU(){
_CtFAgg.copyFromHost(*_hCtFAgg, true);
_AvgAgg.copyFromHost(*_hAvgAgg, true);
}
// Does this layer produce gradient for layers below?
bool AggCoarseFineSoftmaxLayer::isGradProducer() {
return false;
}
void AggCoarseFineSoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// take two outputs from two previous layers, 0 -> coarse predictions, 1-> fine predictions
if(inpIdx==0){
NVMatrix& cPreds = *_inputs[0]; // 128*20
NVMatrix& fPreds = *_inputs[1]; // 128*100
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "cPreds size= " << cPreds.getNumRows() << "," << cPreds.getNumCols() << std::endl;
//std::cout << "fPreds size= " << fPreds.getNumRows() << "," << fPreds.getNumCols() << std::endl;
if(_htype=="hard"){ //input are predictions
NVMatrix& cmax = cPreds.max(1);
NVMatrix cPredsBin;
cPreds.biggerEqualToVector(cmax, cPredsBin); //binarize the maximum entry in the prediction matrix
NVMatrix fPredsMask;
cPredsBin.rightMult(_CtFAgg, 1, fPredsMask); //binary mask for fine-label probability computation
fPredsMask.eltwiseMult(fPreds);
NVMatrix& max = fPredsMask.max(1);
fPredsMask.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &cmax;
delete &max;
delete &sum;
} else if (_htype=="soft"){// input are also predictions
NVMatrix& fmax = fPreds.max(1);
fPreds.addVector(fmax, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix catsum, flatsum;
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "CtFAgg Size= " << _CtFAgg.getNumRows() << "," << _CtFAgg.getNumCols() << std::endl;
//std::cout << "CtFAgg transpose Size= " << _CtFAgg.getTranspose().getNumRows() << "," << _CtFAgg.getTranspose().getNumCols() << std::endl;
getActs().rightMult(_CtFAgg.getTranspose(), 1, catsum); //category sum 128*20
catsum.rightMult(_CtFAgg, 1, flatsum); // 128*100
getActs().eltwiseDivide(flatsum);//getActs_{ij} = Pr(j|i, c), \sum_{j\in c}\Pr(j|i, c)=1
NVMatrix& cmax = cPreds.max(1);
cPreds.addVector(cmax, -1, catsum);
catsum.apply(NVMatrixOps::Exp());
NVMatrix& csum = catsum.sum(1);
catsum.eltwiseDivideByVector(csum); //catsum_{ic}=Pr(c|i)
catsum.rightMult(_CtFAgg, 1, flatsum); // 128*100
getActs().eltwiseMult(flatsum); // getActs_ij = \Pr(j|i, c)\Pr(c|i) , j\in c
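// In other words, the soft scheme factorizes the fine posterior as
// Pr(j|i) = Pr(c(j)|i) * Pr(j|i, c(j)), where c(j) is the coarse category containing fine label j
// (the membership is encoded by _CtFAgg); the division by flatsum above makes the within-category
// terms sum to one before they are reweighted by the coarse posterior catsum.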
//std::cout << "getActs size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//std::cout << "avgAgg Size= " << _AvgAgg.getNumRows() << "," << getActs().getNumCols() << std::endl;
NVMatrix destsum;
getActs().rightMult(_AvgAgg.getTranspose(), 1, destsum); //getActs grouped sum together
//std::cout << "destsum Size= " << destsum.getNumRows() << "," << destsum.getNumCols() << std::endl;
//std::cout << "getActs before size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
destsum.copy(getActs());
//std::cout << "getActs after size= " << getActs().getNumRows() << "," << getActs().getNumCols() << std::endl;
//NVMatrix& sum = getActs().sum(1);
//float max = sum.max();
//float min = sum.min();
//assert(abs(max-1)<0.001);
//assert(abs(min-1)<0.001);
//delete &sum;
delete &csum;
delete &cmax;
delete &fmax;
}
//hacked by Saining
//hacked by TB
/*Matrix inputmat;
AggInput.copyToHost(inputmat, true);
std::cout<<"input to softmax "<<_name<<std::endl;
inputmat.print(); */
/*std::cout<<"output of softmax "<<_name<<std::endl;
Matrix outputmat;
getActs().copyToHost(outputmat, true);
outputmat.print();*/
//std::cout<<"layer="<<_name<<", fprob done: from input size ("<<(*_inputs[0]).getNumRows()<<","<<(*_inputs[0]).getNumCols()
// <<") producing output size ("<<getActs().getNumRows()<<","<<getActs().getNumCols()<<")"<<std::endl;
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
NVMatrix target;
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
//computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
computeLogregSoftmaxGrad(labels, getActs(), target, 0, gradCoeff);
if(_next[0]->getType()=="cost.tasklogreg") {
//std::cout << "next[0]->getType == cost.tasklogreg" << std::endl;
NVMatrix& tasks = _next[0]->getPrev()[replicaIdx][1]->getActs(getDeviceID());
//std::cout << "tasks matrix in softmax: " << std::endl;
//tasks.print(0, tasks.getNumRows(), 0, tasks.getNumCols());
int taskId = _next[0]->getTaskId();
//std::cout << "taskId : " << taskId << std::endl;
assert(taskId >= 0);
NVMatrix taskIndict;
tasks.equalToScalar(taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
prev[0]->getActsGrad().add(target, scaleTargets==1, 1);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // Nothing happens for inpIdx 0; once the second input arrives, max it with the first
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
 * TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
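// Note: Dropout2Layer differs from DropoutLayer above in two visible ways: its keep mask is a
// plain 0/1 indicator (smallerThanScalar) rather than the DropoutSmallerThanOperator used above,
// and at test time the activations are scaled by _keep instead of being copied through unchanged.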
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(cudaStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
cudaStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
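// Double-buffering glue (inferred from the code, not a documented contract): fprop() flips
// _useBuffer once per pass, and callers pass other=true to obtain the buffer being filled for the
// next pass (the copy thread's target) rather than the one currently being consumed.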
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a minibatch size of 0
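// Worked example: 100 cases across 8 replicas gives microbatchSize = DIVUP(100, 8) = 13, so
// replica 7 covers cases [91, 100) -- a 9-case remainder -- instead of some replica getting 0.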
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
// For each scale, record the fraction of the squares that it has.
// This will be the probability of sampling this scale.
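// Bookkeeping note: scale index s is used below as rescaledSize = _tgtSize + s, which offers
// (s+1)^2 crop positions of size _tgtSize; numCrops is therefore 1^2 + 2^2 + ... + numScales^2
// via the sum-of-squares identity n(n+1)(2n+1)/6, and _scaleProbs is built as the cumulative
// distribution over scales (this implicitly assumes _minScaledSize lines up with _tgtSize).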
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.tasklogreg") {
return *new TaskLogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeCrossEntCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
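// (For label vectors that sum to one per case, folding the gradient into the softmax gives the
// well-known combined derivative probs - labels, which involves no division by small values.)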
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
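// (For the unweighted case _posWeight == 1, folding the gradient into the logistic unit likewise
// yields probs - labels per output; with _posWeight != 1 the positive-label term is additionally
// weighted by _posWeight.)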
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* TaskLogregCostLayer
* =====================
*/
TaskLogregCostLayer::TaskLogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : LogregCostLayer(convNetThread, paramsDict, replicaID) {
_topk = pyDictGetInt(paramsDict, "topk");
_taskId = pyDictGetInt(paramsDict, "taskId");
assert(_taskId >= 0);
// _numAccumed = 0;
}
void TaskLogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& tasks = *_inputs[1];
NVMatrix* probs = _inputs[2];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
//Task mask part
NVMatrix taskIndict;
//std::cout << "taskId in TaskLogreg: " << _taskId << std::endl;
tasks.equalToScalar(_taskId, taskIndict);
//std::cout << "tasks row " << tasks.getNumRows() << "task cols " << tasks.getNumCols() << std::endl;
//std::cout << "taskIndict row " << taskIndict.getNumRows() << "taskIndcit cols " << taskIndict.getNumCols() << std::endl;
//std::cout << "correctProbs row " << _correctProbs.getNumRows() << "_correctProbs cols " << _correctProbs.getNumCols() << std::endl;
//std::cout << "trueProbs row " << _trueLabelLogProbs.getNumRows() << "_correctProbs cols " << _trueLabelLogProbs.getNumCols() << std::endl;
//std::cout << "topkprobsProbs row " << _topkProbs.getNumRows() << "_correctProbs cols " << _topkProbs.getNumCols() << std::endl;
//std::cout << "taskIndict:" << std::endl;
//taskIndict.print(0,taskIndict.getNumRows(),0,taskIndict.getNumCols());
//std::cout << "_correctProbs before:" << std::endl;
//_correctProbs.print(0,_correctProbs.getNumRows(),0,_correctProbs.getNumCols());
_correctProbs.eltwiseMult(taskIndict); // mask out cases belonging to other tasks (taskIndict is a 0/1 indicator)
//std::cout << "_correctProbs after:" << std::endl;
//_correctProbs.print(0,_correctProbs.getNumRows(),0,_correctProbs.getNumCols());
//std::cout << "_trueLabelLogProbs before:" << std::endl;
//_trueLabelLogProbs.print(0,_trueLabelLogProbs.getNumRows(),0,_trueLabelLogProbs.getNumCols());
_trueLabelLogProbs.eltwiseMult(taskIndict); // mask out cases belonging to other tasks
//std::cout << "_trueLabelLogProbs after:" << std::endl;
//_trueLabelLogProbs.print(0,_trueLabelLogProbs.getNumRows(),0,_trueLabelLogProbs.getNumCols());
if (_topk > 1) {
_topkProbs.eltwiseMult(taskIndict);
}
int taskCases=taskIndict.sum();
_costv.clear();
if(taskCases != 0) {
double top1 = _correctProbs.sum(_tmpbuf);
//std::cout << "top1: " << std::endl;
//std::cout << top1 << std::endl;
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf)*numCases/taskCases);
_costv.push_back((taskCases - top1)*numCases/taskCases);
_costv.push_back((taskCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)))*numCases/taskCases);
}
}
}
}
int TaskLogregCostLayer::getTaskId(){
return _taskId;
}
NVMatrix& TaskLogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void TaskLogregCostLayer::bprop(PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(passType, passIdx);
}
}
void TaskLogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 2);
if (inpIdx == 2) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[2];
NVMatrix& target = prev[2]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[2]->getNext().size() > 1 || ( prev[2]->getType() != "softmax" && prev[2]->getType() != "aggsoftmax" && prev[2]->getType() != "aggcoarsefinesoftmax")
|| prev[2]->getDeviceID() != getDeviceID() || prev[2]->getNumReplicas() != getNumReplicas();
if (prev[2]->getType() == "softmax" || prev[2]->getType() == "aggsoftmax" || prev[2]->getType() == "aggcoarsefinesoftmax") {
static_cast<SoftmaxLayer*>(prev[2])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
NVMatrix& tasks = prev[1]->getActs();
NVMatrix taskIndict;
tasks.equalToScalar(_taskId, taskIndict);
taskIndict.transpose(_trans);
target.eltwiseMultByVector(taskIndict);
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || ( prev[1]->getType() != "softmax" && prev[1]->getType() != "aggsoftmax" && prev[1]->getType() != "aggcoarsefinesoftmax")
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax" || prev[1]->getType() == "aggsoftmax" || prev[1]->getType() == "aggcoarsefinesoftmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), _tmp);
_costv.clear();
_costv.push_back(_tmp.sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
/*
* =======================================================
* WeightCostLayer, input weights from two previous layers (theta = weights[0], beta = weights[1], M = regTemp)
* =======================================================
*/
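// Reading fpropActs below: with regType "dist" the recorded cost is
// (numCases/2) * ||theta*M - beta||_F^2, and with regType "simi" it is
// numCases * sum_ij M_ij * |theta(:,i)' * beta(:,j)|; the cost coefficient is applied in the
// weight-gradient computation (bpropWeights), not here.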
WeightCostLayer::WeightCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
//std::cout<<"layer= "<<_name<<"weights nums = "<<weightSourceLayerIndices.size()<<std::endl;
//floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
for (int i = 0; i < weightSourceLayers.size(); i++) {
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
}
_hregTemp = pyDictGetMatrix(paramsDict, "regTemp");
_regType = pyDictGetString(paramsDict, "regType");
//delete &epsW;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &weightSourceLayers;
}
void WeightCostLayer::copyToGPU(){
_regTemp.copyFromHost(*_hregTemp, true);
_weights->copyToGPU();
}
// Does this layer produce gradient for layers below?
bool WeightCostLayer::isGradProducer() {
return false;
}
void WeightCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if(inpIdx==0){
int numCases = Layer::getNumCases(*_inputs[0]);
//int numCases = getPrev()[replicaIdx][0]->getActs().getNumCols();
//std::cout<<"number of cases = "<<numCases<<std::endl;
if(_regType=="dist"){// minimize distance between two sets of weights
//std::cout<<"w0 size = "<<_weights[0].getW().getNumRows()<<","<<_weights[0].getW().getNumCols()<<std::endl;
//std::cout<<"w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<std::endl;
//std::cout<<"regTemp size = "<<_regTemp.getNumRows() << "," << _regTemp.getNumCols() << std::endl;
_weights->at(0).getW().rightMult(_regTemp, 1, _cost); //cost = theta * M
_cost.subtract(_weights->at(1).getW()); // cost = theta*M - beta
_cost.apply(NVMatrixOps::Square(), getActs()); // getActs = cost.^2
_costv.clear(); //Added by Saining; otherwise the aggregated test outputs are wrong
_costv.push_back(getActs().sum() * numCases/2);
} else if(_regType == "simi"){//minimize the similarity between two sets of weights
// \sum_{i in input layer 0 weights indices} \sum_{j in input layer 1 weights indices} regTemp_{ij}|w_i^T w_j|
NVMatrix& fweight_T = _weights->at(0).getW().getTranspose();
//std::cout<<"w0T size ="<<fweight_T.getNumRows()<<","<<fweight_T.getNumCols()<<std::endl;
//std::cout<<"w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<std::endl;
fweight_T.rightMult(_weights->at(1).getW(), 1, _cost); // cost = theta'*beta
_cost.apply(NVMatrixOps::Abs(), getActs()); // cost = abs(theta'*beta)
getActs().eltwiseMult(_regTemp); // getActs = abs(theta'*beta).*M;
_costv.clear();
_costv.push_back(getActs().sum() * numCases);
//std::cout<<"weight cost = "<<_costv.back()<<std::endl;
delete &fweight_T;
}
}
}
void WeightCostLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
//if (_weights[i].getEps() > 0) {
bpropWeights(i, passType);
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
void WeightCostLayer::bpropWeights(int inpIdx, PASS_TYPE passType) {
if(_regType=="dist"){
float scaleCurGrad = (_weights->at(inpIdx).getNumUpdates() > 0 && passType != PASS_GC) * 1;
if(inpIdx==0)
_weights->at(0).getGrad().addProduct(_cost, _regTemp.getTranspose(), scaleCurGrad, -_coeff*_weights->at(0).getEps(getConvNet().getTrainingProgress())); // - partial theta = - coeff*(theta*M - beta)*M';
else if(inpIdx==1)
_weights->at(1).getGrad().add(_cost, scaleCurGrad, _coeff*_weights->at(1).getEps(getConvNet().getTrainingProgress())); // - partial beta = coeff* (theta*M - beta)
} else if (_regType=="simi"){
_cost.apply(NVMatrixOps::Sign(), getActsGrad());
getActsGrad().eltwiseMult(_regTemp);
float scaleCurGrad = (_weights->at(inpIdx).getNumUpdates() > 0 && passType != PASS_GC) * 1;
//std::cout<<"w0 size = "<<_weights[0].getW().getNumRows()<<","<<_weights[0].getW().getNumCols()<<", w1 size = "<<_weights[1].getW().getNumRows()<<","<<_weights[1].getW().getNumCols()<<", actsGrad size="<<getActsGrad().getNumRows()<<","<<getActsGrad().getNumCols()<<std::endl;
if(inpIdx==0)
_weights->at(0).getGrad().addProduct(_weights->at(1).getW(), getActsGrad().getTranspose(), scaleCurGrad, -_coeff*_weights->at(0).getEps(getConvNet().getTrainingProgress()));// - partial theta = -coeff*beta*(sign(theta'*beta).*M)'
else if(inpIdx==1)
_weights->at(1).getGrad().addProduct(_weights->at(0).getW(), getActsGrad(), scaleCurGrad, -_coeff*_weights->at(1).getEps(getConvNet().getTrainingProgress())); // - partial beta = - coeff*theta*(sign(theta'*beta).*M)
}
}
|
c2690a469d21b93e9e458029051162ca3b18e853.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Matrix multiplication based on modified NVIDIA samples code
* Copyright (C) 2014 Ren Oertel ([email protected])
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* TODO: Add commandline arguments for matrix sizes
*
* Last modifications: oere, 2014-05-21, 11:05
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <getopt.h>
typedef struct config config_t;
struct config {
int device;
int wA;
int hA;
int wB;
int hB;
};
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// FIXME
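    // Naive variant: every operand is read straight from global memory and the
    // BLOCK_SIZE template parameter is unused. One thread computes one element of C.
    // There is no bounds check, so the grid must tile the matrices exactly
    // (dimensions that are multiples of the block size).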
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < wA; ++e) {
Cvalue += A[row * wA + e] * B[e * wB + col];
}
C[row * wB + col] = Cvalue;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
printf("=====================================\n");
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
hipDeviceSynchronize();
// Create and start timer
printf("Computing result using CUDA Kernel ... ");
fflush(NULL);
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("done\n");
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
double gF = gigaFlops * msecPerMatrixMul * 1000;
printf("> overall GFlops = %.2f\n", gF);
printf(
"Performance = %.2f GFlops/sec\nTime = %.3f msec\nSize = %.0f Ops\nWorkgroupSize = %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "PASS" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipDeviceReset();
printf("=====================================\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/* Print usage information */
void usage(int argc, char *argv[])
{
printf("\nUsage: %s [OPTION]...\n"
"Available arguments:\n\n"
"\t-d/--device=<ID> Choose device (Default: 0)\n", argv[0]);
exit(0);
}
/* Parse commandline arguments */
void parse_opts(int argc, char *argv[], config_t *config)
{
int option = 0;
int option_index = 0;
const char *short_options = "d:h";
const struct option long_options[] = {
{"device", 1, NULL, 'd'},
{"help", 0, NULL, 'h'},
{0, 0, 0, 0}
};
do {
option = getopt_long(argc, argv, short_options,
long_options, &option_index);
switch (option) {
case 'd':
config->device = atoi(optarg);
printf("Device %d selected\n", config->device);
break;
case 'h': /* -h or --help */
usage(argc, argv);
break;
default:
break;
}
}
while (-1 != option);
}
int main(int argc, char **argv)
{
hipError_t error;
hipDeviceProp_t deviceProp;
dim3 dimsA(0, 0, 1);
dim3 dimsB(0, 0, 1);
// Set default configuration, i.e. device id and matrix sizes
config_t config = { .device = 0, .wA = 1024, .hA = 1024, .wB = 1024, .hB = 1024 };
// Parse commandline arguments and override default configuration
parse_opts(argc, argv, &config);
// Set device to use
error = hipSetDevice(config.device);
if (error != hipSuccess)
{
printf("Error: hipSetDevice(%d) returned error code %d, line(%d): %s\n", config.device, error, __LINE__, hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Check if device is usable
error = hipGetDeviceProperties(&deviceProp, config.device);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("Error: hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", config.device, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// Set matrix sizes
dimsA.x = config.wA;
dimsA.y = config.hA;
dimsB.x = config.wB;
dimsB.y = config.hB;
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d x %d), MatrixB(%d x %d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| c2690a469d21b93e9e458029051162ca3b18e853.cu | /*
* Matrix multiplication based on modified NVIDIA samples code
* Copyright (C) 2014 René Oertel ([email protected])
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* TODO: Add commandline arguments for matrix sizes
*
* Last modifications: oere, 2014-05-21, 11:05
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <getopt.h>
typedef struct config config_t;
struct config {
int device;
int wA;
int hA;
int wB;
int hB;
};
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
// FIXME
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < wA; ++e) {
Cvalue += A[row * wA + e] * B[e * wB + col];
}
C[row * wB + col] = Cvalue;
}
void constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
printf("=====================================\n");
// Allocate host memory for matrices A and B
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size_A);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_B, mem_size_B);
if (error != cudaSuccess)
{
printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void **) &d_C, mem_size_C);
if (error != cudaSuccess)
{
printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Performs warmup operation using matrixMul CUDA kernel
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
cudaDeviceSynchronize();
// Create and start timer
printf("Computing result using CUDA Kernel ... ");
fflush(NULL);
// Allocate CUDA events that we'll use for timing
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
printf("done\n");
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
double gF = gigaFlops * msecPerMatrixMul * 1000;
printf("> overall GFlops = %.2f\n", gF);
printf(
"Performance = %.2f GFlops/sec\nTime = %.3f msec\nSize = %.0f Ops\nWorkgroupSize = %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
double abs_err = fabs(h_C[i] - (dimsA.x * valB));
double dot_length = dimsA.x;
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/dot_length ;
if (rel_err > eps)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps);
correct = false;
}
}
printf("%s\n", correct ? "PASS" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaDeviceReset();
printf("=====================================\n");
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/* Print usage information */
void usage(int argc, char *argv[])
{
printf("\nUsage: %s [OPTION]...\n"
"Available arguments:\n\n"
"\t-d/--device=<ID> Choose device (Default: 0)\n", argv[0]);
exit(0);
}
/* Parse commandline arguments */
void parse_opts(int argc, char *argv[], config_t *config)
{
int option = 0;
int option_index = 0;
const char *short_options = "d:h";
const struct option long_options[] = {
{"device", 1, NULL, 'd'},
{"help", 0, NULL, 'h'},
{0, 0, 0, 0}
};
do {
option = getopt_long(argc, argv, short_options,
long_options, &option_index);
switch (option) {
case 'd':
config->device = atoi(optarg);
printf("Device %d selected\n", config->device);
break;
case 'h': /* -h or --help */
usage(argc, argv);
break;
default:
break;
}
}
while (-1 != option);
}
int main(int argc, char **argv)
{
cudaError_t error;
cudaDeviceProp deviceProp;
dim3 dimsA(0, 0, 1);
dim3 dimsB(0, 0, 1);
// Set default configuration, i.e. device id and matrix sizes
config_t config = { .device = 0, .wA = 1024, .hA = 1024, .wB = 1024, .hB = 1024 };
// Parse commandline arguments and override default configuration
parse_opts(argc, argv, &config);
// Set device to use
error = cudaSetDevice(config.device);
if (error != cudaSuccess)
{
printf("Error: cudaSetDevice(%d) returned error code %d, line(%d): %s\n", config.device, error, __LINE__, cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Check if device is usable
error = cudaGetDeviceProperties(&deviceProp, config.device);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("Error: cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", config.device, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// Set matrix sizes
dimsA.x = config.wA;
dimsA.y = config.hA;
dimsB.x = config.wB;
dimsB.y = config.hB;
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d x %d), MatrixB(%d x %d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
|
09c55d674b32e5e5daf852dfd11937d5df051569.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define WSIZE 32
#define LOOPS 100000
#define UPPER_BIT 31
#define LOWER_BIT 0
__device__ unsigned int ddata[WSIZE];
// naive warp-level bitwise radix sort
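// Each pass partitions the 32 keys on one bit: __ballot collects that bit from
// every lane, __popc over a lane mask converts the ballot into each thread's
// output slot (zeros packed before ones), and the result is written to the other
// half of sdata (ping-pong via 'offset'). Sweeping the bits from LSB to MSB
// leaves the warp's 32 values sorted.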
__global__ void mykernel(){
__shared__ volatile unsigned int sdata[WSIZE*2];
// load from global into shared variable
sdata[threadIdx.x] = ddata[threadIdx.x];
unsigned int bitmask = 1<<LOWER_BIT;
unsigned int offset = 0;
unsigned int thrmask = 0xFFFFFFFFU << threadIdx.x;
unsigned int mypos;
// for each LSB to MSB
for (int i = LOWER_BIT; i <= UPPER_BIT; i++){
unsigned int mydata = sdata[((WSIZE-1)-threadIdx.x)+offset];
unsigned int mybit = mydata&bitmask;
// get population of ones and zeroes (cc 2.0 ballot)
unsigned int ones = __ballot(mybit); // cc 2.0
unsigned int zeroes = ~ones;
offset ^= WSIZE; // switch ping-pong buffers
// do zeroes, then ones
if (!mybit) // threads with a zero bit
// get my position in ping-pong buffer
mypos = __popc(zeroes&thrmask);
else // threads with a one bit
// get my position in ping-pong buffer
mypos = __popc(zeroes)+__popc(ones&thrmask);
// move to buffer (or use shfl for cc 3.0)
sdata[mypos-1+offset] = mydata;
// repeat for next bit
bitmask <<= 1;
}
// save results to global
ddata[threadIdx.x] = sdata[threadIdx.x+offset];
}
int main(){
unsigned int hdata[WSIZE];
for (int lcount = 0; lcount < LOOPS; lcount++){
unsigned int range = 1U<<UPPER_BIT;
for (int i = 0; i < WSIZE; i++) {
hdata[i] = rand()%range;
}
printf("Unsorted data------------------------:\n");
for (int i = 0; i < WSIZE; i++) {
printf("%d\n", hdata[i]);
}
// for (int i = 0; i < WSIZE-1; i++) if (hdata[i] > hdata[i+1]) {printf("sort error at loop %d, hdata[%d] = %d, hdata[%d] = %d\n", lcount,i, hdata[i],i+1, hdata[i+1]); return 1;}
// printf("Unsorted data:\n");
// for (int i = 0; i < WSIZE; i++) printf("%u\n", hdata[i]);
// }
hipMemcpyToSymbol(ddata, hdata, WSIZE*sizeof(unsigned int));
hipLaunchKernelGGL(( mykernel), dim3(1), dim3(WSIZE), 0, 0, );
hipMemcpyFromSymbol(hdata, ddata, WSIZE*sizeof(unsigned int));
for (int i = 0; i < WSIZE-1; i++) if (hdata[i] > hdata[i+1]) {printf("sort error at loop %d, hdata[%d] = %d, hdata[%d] = %d\n", lcount,i, hdata[i],i+1, hdata[i+1]); return 1;}
printf("sorted data:\n");
for (int i = 0; i < WSIZE; i++) printf("%u\n", hdata[i]);
}
printf("Success!\n");
return 0;
} | 09c55d674b32e5e5daf852dfd11937d5df051569.cu | #include <stdio.h>
#include <stdlib.h>
#define WSIZE 32
#define LOOPS 100000
#define UPPER_BIT 31
#define LOWER_BIT 0
__device__ unsigned int ddata[WSIZE];
// naive warp-level bitwise radix sort
__global__ void mykernel(){
__shared__ volatile unsigned int sdata[WSIZE*2];
// load from global into shared variable
sdata[threadIdx.x] = ddata[threadIdx.x];
unsigned int bitmask = 1<<LOWER_BIT;
unsigned int offset = 0;
unsigned int thrmask = 0xFFFFFFFFU << threadIdx.x;
unsigned int mypos;
// for each LSB to MSB
for (int i = LOWER_BIT; i <= UPPER_BIT; i++){
unsigned int mydata = sdata[((WSIZE-1)-threadIdx.x)+offset];
unsigned int mybit = mydata&bitmask;
// get population of ones and zeroes (cc 2.0 ballot)
unsigned int ones = __ballot(mybit); // cc 2.0
unsigned int zeroes = ~ones;
offset ^= WSIZE; // switch ping-pong buffers
// do zeroes, then ones
if (!mybit) // threads with a zero bit
// get my position in ping-pong buffer
mypos = __popc(zeroes&thrmask);
else // threads with a one bit
// get my position in ping-pong buffer
mypos = __popc(zeroes)+__popc(ones&thrmask);
// move to buffer (or use shfl for cc 3.0)
sdata[mypos-1+offset] = mydata;
// repeat for next bit
bitmask <<= 1;
}
// save results to global
ddata[threadIdx.x] = sdata[threadIdx.x+offset];
}
int main(){
unsigned int hdata[WSIZE];
for (int lcount = 0; lcount < LOOPS; lcount++){
unsigned int range = 1U<<UPPER_BIT;
for (int i = 0; i < WSIZE; i++) {
hdata[i] = rand()%range;
}
printf("Unsorted data------------------------:\n");
for (int i = 0; i < WSIZE; i++) {
printf("%d\n", hdata[i]);
}
// for (int i = 0; i < WSIZE-1; i++) if (hdata[i] > hdata[i+1]) {printf("sort error at loop %d, hdata[%d] = %d, hdata[%d] = %d\n", lcount,i, hdata[i],i+1, hdata[i+1]); return 1;}
// printf("Unsorted data:\n");
// for (int i = 0; i < WSIZE; i++) printf("%u\n", hdata[i]);
// }
cudaMemcpyToSymbol(ddata, hdata, WSIZE*sizeof(unsigned int));
mykernel<<<1, WSIZE>>>();
cudaMemcpyFromSymbol(hdata, ddata, WSIZE*sizeof(unsigned int));
for (int i = 0; i < WSIZE-1; i++) if (hdata[i] > hdata[i+1]) {printf("sort error at loop %d, hdata[%d] = %d, hdata[%d] = %d\n", lcount,i, hdata[i],i+1, hdata[i+1]); return 1;}
printf("sorted data:\n");
for (int i = 0; i < WSIZE; i++) printf("%u\n", hdata[i]);
}
printf("Success!\n");
return 0;
} |
3138d5c628e860e88cac5f0ec2d7a2bceda61aef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void orcu_kernel53927(const int sites_on_node, double* A, double* y, double* x) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ci, ai, bi, ar, br, cr;
int j, k;
for (int i=tid; i<=sites_on_node-1; i+=gsize) {
{
#pragma unroll 2
for (j=0; j<=5; j=j+2) {
cr=ci=0.0;
for (k=0; k<=5; k=k+2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr=cr+ar*br-ai*bi;
ci=ci+ar*bi+ai*br;
}
y[6*i+j]=cr;
y[6*i+j+1]=ci;
}
}
}
}
/**************** m_matvec.c (in su3.a) *******************************
* *
* matrix vector multiply *
* y[i] <- A[i]*x[i] *
*/
void mult_su3_mat_vec(double A[], double x[], double y[]) {
const int sites_on_node = 10; // or some other global constant value
register int i,j,k;
register double ar,ai,br,bi,cr,ci;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,113,14);
param UIF[] = range(1,6);
param PL[] = [16,48];
param CFLAGS[] = map(join, product(['-O0', '-O1', '-O2', '-O3']));
}
def input_params {
param SITES[] = [2,4,6,8,10,12,14,16];
}
def input_vars {
decl dynamic double A[18*SITES] = random;
decl dynamic double x[6*SITES] = random;
decl dynamic double y[6*SITES] = 0;
}
def build {
arg build_command = 'nvcc -arch=sm_20 @CFLAGS';
}
def performance_counter {
arg method = 'basic timer';
arg repetitions = 6;
}
def search {
arg algorithm = 'Exhaustive';
arg resume = True;
arg exhaustive_start_coord = [25, 4, 1, 1, 1]; }
) @*/
/**-- (Generated by Orio)
Best performance cost:
[0.043136000000000001, 0.020288, 0.020223999999999999, 0.019904000000000002, 0.018272, 0.019392]
Tuned for specific problem sizes:
SITES = 6
Best performance parameters:
BC = 14
CFLAGS = -O2
PL = 48
TC = 256
UIF = 2
--**/
int sites_on_node=SITES;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL, unrollInner=UIF)
for(i=0; i<=sites_on_node-1; i++) {
for(j=0; j<=5; j+=2) {
cr = ci = 0.0;
for(k=0; k<=5; k+=2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr += ar*br - ai*bi;
ci += ar*bi + ai*br;
}
y[6*i+j] =cr;
y[6*i+j+1]=ci;
}
}
) @*/
{
hipDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_y, *dev_x;
int nthreads=256;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=14;
/*allocate device memory*/
hipMalloc(&dev_A,18 *SITES*sizeof(double));
hipMalloc(&dev_x,6 *SITES*sizeof(double));
hipMalloc(&dev_y,6 *SITES*sizeof(double));
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
/*copy data from host to device*/
hipEventRecord(tstart,0);
hipMemcpy(dev_A,A,18 *SITES*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_x,x,6 *SITES*sizeof(double),hipMemcpyHostToDevice);
hipEventRecord(tstop,0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&orcu_transfer,tstart,tstop);
hipEventRecord(start,0);
/*invoke device kernel*/
hipLaunchKernelGGL(( orcu_kernel53927), dim3(dimGrid),dim3(dimBlock), 0, 0, sites_on_node,dev_A,dev_y,dev_x);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
hipMemcpy(y,dev_y,6 *SITES*sizeof(double),hipMemcpyDeviceToHost);
hipDeviceSetCacheConfig(hipFuncCachePreferNone);
/*free allocated memory*/
hipFree(dev_A);
hipFree(dev_y);
hipFree(dev_x);
hipError_t err=hipGetLastError();
if (hipSuccess!=err)
printf("CUDA runtime error: %s@",hipGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
| 3138d5c628e860e88cac5f0ec2d7a2bceda61aef.cu | __global__ void orcu_kernel53927(const int sites_on_node, double* A, double* y, double* x) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ci, ai, bi, ar, br, cr;
int j, k;
for (int i=tid; i<=sites_on_node-1; i+=gsize) {
{
#pragma unroll 2
for (j=0; j<=5; j=j+2) {
cr=ci=0.0;
for (k=0; k<=5; k=k+2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr=cr+ar*br-ai*bi;
ci=ci+ar*bi+ai*br;
}
y[6*i+j]=cr;
y[6*i+j+1]=ci;
}
}
}
}
/**************** m_matvec.c (in su3.a) *******************************
* *
* matrix vector multiply *
* y[i] <- A[i]*x[i] *
*/
void mult_su3_mat_vec(double A[], double x[], double y[]) {
const int sites_on_node = 10; // or some other global constant value
register int i,j,k;
register double ar,ai,br,bi,cr,ci;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,113,14);
param UIF[] = range(1,6);
param PL[] = [16,48];
param CFLAGS[] = map(join, product(['-O0', '-O1', '-O2', '-O3']));
}
def input_params {
param SITES[] = [2,4,6,8,10,12,14,16];
}
def input_vars {
decl dynamic double A[18*SITES] = random;
decl dynamic double x[6*SITES] = random;
decl dynamic double y[6*SITES] = 0;
}
def build {
arg build_command = 'nvcc -arch=sm_20 @CFLAGS';
}
def performance_counter {
arg method = 'basic timer';
arg repetitions = 6;
}
def search {
arg algorithm = 'Exhaustive';
arg resume = True;
arg exhaustive_start_coord = [25, 4, 1, 1, 1]; }
) @*/
/**-- (Generated by Orio)
Best performance cost:
[0.043136000000000001, 0.020288, 0.020223999999999999, 0.019904000000000002, 0.018272, 0.019392]
Tuned for specific problem sizes:
SITES = 6
Best performance parameters:
BC = 14
CFLAGS = -O2
PL = 48
TC = 256
UIF = 2
--**/
int sites_on_node=SITES;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL, unrollInner=UIF)
for(i=0; i<=sites_on_node-1; i++) {
for(j=0; j<=5; j+=2) {
cr = ci = 0.0;
for(k=0; k<=5; k+=2) {
ar=A[18*i+3*j+k];
ai=A[18*i+3*j+k+1];
br=x[6*i+k];
bi=x[6*i+k+1];
cr += ar*br - ai*bi;
ci += ar*bi + ai*br;
}
y[6*i+j] =cr;
y[6*i+j+1]=ci;
}
}
) @*/
{
cudaDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_y, *dev_x;
int nthreads=256;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=14;
/*allocate device memory*/
cudaMalloc(&dev_A,18 *SITES*sizeof(double));
cudaMalloc(&dev_x,6 *SITES*sizeof(double));
cudaMalloc(&dev_y,6 *SITES*sizeof(double));
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
/*copy data from host to device*/
cudaEventRecord(tstart,0);
cudaMemcpy(dev_A,A,18 *SITES*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_x,x,6 *SITES*sizeof(double),cudaMemcpyHostToDevice);
cudaEventRecord(tstop,0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&orcu_transfer,tstart,tstop);
cudaEventRecord(start,0);
/*invoke device kernel*/
orcu_kernel53927<<<dimGrid,dimBlock>>>(sites_on_node,dev_A,dev_y,dev_x);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
cudaMemcpy(y,dev_y,6 *SITES*sizeof(double),cudaMemcpyDeviceToHost);
cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
/*free allocated memory*/
cudaFree(dev_A);
cudaFree(dev_y);
cudaFree(dev_x);
cudaError_t err=cudaGetLastError();
if (cudaSuccess!=err)
printf("CUDA runtime error: %s@",cudaGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
|
579545f22c93e3b8c00f3ac90e1a9f46a2a764d0.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "rdma.h"
#include "timing.h"
#include "fifo.h"
}
#include <fcntl.h>
#include <unistd.h>
#include <rocblas.h>
#include <hip/hip_runtime.h>
#define M 2048UL
#define N M
#define K N
#define P 16UL
#define LDA1 K
#define LDB1 M
#define LDB2 LDB1*N
#define LDC1 N
#define LDC2 LDC1*M
#define SUB_M 512UL
#define THREAD_NUM 8UL
#define HUGEPAGE_SZ (4UL * 1024UL * 1024UL * 1024UL)
#define AGGREGATED_SZ (SUB_M * SUB_M * SUB_M * 8UL)
// #define IO_QUEUE_SZ (HUGEPAGE_SZ / AGGREGATED_SZ / 2UL)
#define IO_QUEUE_SZ 2UL
void print_config(struct config_t config);
struct fetch_conf {
struct resources *res;
uint64_t m, sub_m;
double *d_B;
char *hugepage_addr;
struct fifo *sending_queue;
struct fifo *complete_queue;
struct timing_info *fetch_timing;
struct timing_info *copy_in_B_timing;
};
struct request_conf {
struct resources *res;
uint64_t id;
uint64_t sub_m;
};
struct fifo_entry {
double *d_B;
};
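// One SUB_M x SUB_M x SUB_M block of the contraction: each thread owns one
// output element (m, n, p) and accumulates C(m,n,p) += sum_k B(k,m,n) * A(k,p),
// where A is the dense K x P factor kept resident on the GPU.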
__global__ void block_tc_kernel(const double *A, const double *B, double *C) {
uint64_t m, n, k, p;
m = blockDim.x * blockIdx.x + threadIdx.x;
n = blockDim.y * blockIdx.y + threadIdx.y;
p = blockDim.z * blockIdx.z + threadIdx.z;
if (m < SUB_M && n < SUB_M && p < P) {
for (k = 0; k < SUB_M; k++) {
C[m + n * SUB_M + p * SUB_M*SUB_M] += B[k + m * SUB_M + n * SUB_M*SUB_M] * A[k + p * K];
}
}
}
int cudaMemcpyFromMmap(struct fetch_conf *conf, char *dst, const char *src, const size_t length, struct timing_info *fetch_timing) {
struct response *res = NULL;
timing_info_push_start(fetch_timing);
res = sock_read_offset(conf->res->sock);
if (res == NULL) {
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
// if (res->id == 0) {
// printf("fetching row [%lu:%lu]\n", res->x, res->y);
// } else {
// printf("fetching col [%lu:%lu]\n", res->x, res->y);
// }
// printf("offset: %lu\n", res->offset);
timing_info_push_end(fetch_timing);
timing_info_push_start(conf->copy_in_B_timing);
hipMemcpy(dst, src + res->offset, length, hipMemcpyHostToDevice);
timing_info_push_end(conf->copy_in_B_timing);
free(res);
if (sock_write_data(conf->res->sock)) { /* just send a dummy char back and forth */
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
return 0;
}
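// Producer side of the pipeline: for every (n, m, k) sub-block the fetch thread
// takes a free slot from complete_queue, waits on the data socket for the offset
// answered to request_thread's request, copies that block from the shared
// hugepage into one of the IO_QUEUE_SZ rotating device buffers, and hands the
// filled entry to sending_queue for the compute loop in nds_tc().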
void *fetch_thread(void *args) {
struct fetch_conf *conf = (struct fetch_conf *) args;
uint64_t n, m, k;
uint64_t dsize = SUB_M * SUB_M * SUB_M;
double *ptr_a;
struct fifo_entry *entry = NULL;
uint64_t count = 0;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->complete_queue);
ptr_a = conf->d_B + dsize * (count % IO_QUEUE_SZ);
cudaMemcpyFromMmap(conf, (char *) ptr_a, (char *) conf->hugepage_addr, dsize * sizeof(double), conf->fetch_timing);
count++;
entry->d_B = ptr_a;
fifo_push(conf->sending_queue, entry);
}
}
}
return NULL;
}
void *request_thread(void *args) {
struct request_conf *conf = (struct request_conf *) args;
uint64_t n, m, k;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
sock_write_request(conf->res->req_sock, conf->id, n, m, SUB_M, 4, k);
sock_read_data(conf->res->req_sock);
}
}
}
return NULL;
}
void generate_data(double *array, uint64_t size) {
uint64_t i;
for (i = 0; i < size; i++) {
array[i] = (((double) rand())/RAND_MAX - 0.5)*100;
}
}
int nds_tc(struct resources *res, uint64_t id, uint64_t size, uint64_t sub_size, const double *A, double *C) {
double *d_A;
double *d_B;
double *d_C;
double *sub_B, *sub_C;
size_t i, n, m, k, p, nn, mm, a, b;
size_t total_iteration;
struct fifo *sending_queue;
struct fifo *complete_queue;
struct fifo_entry *entries = (struct fifo_entry *) calloc(IO_QUEUE_SZ, sizeof(struct fifo_entry));
struct fifo_entry *entry = NULL;
struct timing_info *queue_timing;
struct timing_info *fetch_timing;
struct timing_info *copy_in_B_timing;
struct timing_info *copy_in_C_timing;
struct timing_info *kernel_timing;
struct timing_info *copy_out_timing;
pthread_t f_thread_id;
struct fetch_conf f_conf;
pthread_t r_thread_id;
struct request_conf r_conf;
struct timeval h_start, h_end;
long duration;
// initialization
total_iteration = (M / SUB_M) * (M / SUB_M) * (M / SUB_M);
queue_timing = timing_info_new(total_iteration);
if (queue_timing == NULL) {
printf("cannot create queue_timing\n");
return -1;
}
fetch_timing = timing_info_new(total_iteration);
if (fetch_timing == NULL) {
printf("cannot create fetch_timing\n");
return -1;
}
copy_in_B_timing = timing_info_new(total_iteration);
if (copy_in_B_timing == NULL) {
printf("cannot create copy_in_B_timing\n");
return -1;
}
copy_in_C_timing = timing_info_new(total_iteration);
if (copy_in_C_timing == NULL) {
printf("cannot create copy_in_C_timing\n");
return -1;
}
kernel_timing = timing_info_new(total_iteration);
if (kernel_timing == NULL) {
printf("cannot create kernel_timing\n");
return -1;
}
copy_out_timing = timing_info_new(total_iteration);
if (copy_out_timing == NULL) {
printf("cannot create copy_out_timing\n");
return -1;
}
// it causes problem if size == 1
sending_queue = fifo_new(IO_QUEUE_SZ * 2);
if (sending_queue == NULL) {
printf("cannot create sending_queue\n");
return -1;
}
complete_queue = fifo_new(IO_QUEUE_SZ * 2);
if (complete_queue == NULL) {
printf("cannot create complete_queue\n");
return -1;
}
for (i = 0; i < IO_QUEUE_SZ; i++) {
fifo_push(complete_queue, entries + i);
}
sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double));
sub_C = (double *) malloc(SUB_M * SUB_M * P * sizeof(double));
hipMalloc((void **) &d_A, K * P * sizeof(double));
hipMalloc((void **) &d_B, SUB_M * SUB_M * SUB_M * sizeof(double) * IO_QUEUE_SZ);
hipMalloc((void **) &d_C, SUB_M * SUB_M * P * sizeof(double));
hipMemcpy(d_A, A, K * P * sizeof(double), hipMemcpyHostToDevice);
// M * N has to be < 1024
dim3 grid((SUB_M+THREAD_NUM-1)/THREAD_NUM, (SUB_M+THREAD_NUM-1)/THREAD_NUM, (P+THREAD_NUM-1)/THREAD_NUM);
dim3 block(THREAD_NUM, THREAD_NUM, THREAD_NUM);
r_conf.res = res;
r_conf.id = id;
r_conf.sub_m = SUB_M;
pthread_create(&r_thread_id, NULL, request_thread, &r_conf);
// create thread here
f_conf.res = res;
f_conf.m = size;
f_conf.sub_m = sub_size;
f_conf.d_B = d_B;
f_conf.hugepage_addr = res->buf;
f_conf.sending_queue = sending_queue;
f_conf.complete_queue = complete_queue;
f_conf.fetch_timing = fetch_timing;
f_conf.copy_in_B_timing = copy_in_B_timing;
timing_info_set_starting_time(queue_timing);
timing_info_set_starting_time(fetch_timing);
timing_info_set_starting_time(copy_in_B_timing);
timing_info_set_starting_time(kernel_timing);
timing_info_set_starting_time(copy_out_timing);
pthread_create(&f_thread_id, NULL, fetch_thread, &f_conf);
gettimeofday(&h_start, NULL);
// blockGEMM
for (n = 0; n < N; n+=SUB_M) {
for (m = 0; m < M; m+=SUB_M) {
timing_info_push_start(copy_in_C_timing);
            hipMemset(d_C, 0, SUB_M * SUB_M * P * sizeof(double)); // zero all P output slices of the block
// for (nn = n, a = 0; nn < n+SUB_M; nn++, a++) {
// for (mm = m, b = 0; mm < m+SUB_M; mm++, b++) {
// sub_C[b + a * SUB_M] = C[mm + nn * LDC1];
// }
// hipMemcpy(d_C, sub_C, SUB_M * SUB_M * sizeof(double), hipMemcpyHostToDevice);
// }
timing_info_push_end(copy_in_C_timing);
for (k = 0; k < K; k+=SUB_M) {
timing_info_push_start(queue_timing);
entry = (struct fifo_entry *) fifo_pop(sending_queue);
timing_info_push_end(queue_timing);
timing_info_push_start(kernel_timing);
hipLaunchKernelGGL(( block_tc_kernel), dim3(grid), dim3(block), 0, 0, d_A + k, entry->d_B, d_C);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("CUDA error, error code: %d, error name: %s\n", err, hipGetErrorString(err));
}
fifo_push(complete_queue, entry);
timing_info_push_end(kernel_timing);
}
// assign C
timing_info_push_start(copy_out_timing);
// use hipMemcpy2D but not the bottleneck.
hipMemcpy(sub_C, d_C, SUB_M * SUB_M * P * sizeof(double), hipMemcpyDeviceToHost);
// hipDeviceSynchronize();
for (p = 0; p < P; p++) {
for (nn = n, a = 0; nn < n+SUB_M; nn++, a++) {
for (mm = m, b = 0; mm < m+SUB_M; mm++, b++) {
C[mm + nn * LDC1 + p * LDC2] = sub_C[b + a * SUB_M + p * SUB_M*SUB_M];
}
}
}
timing_info_push_end(copy_out_timing);
}
}
pthread_join(r_thread_id, NULL);
pthread_join(f_thread_id, NULL);
sock_write_request(res->req_sock, -1, n, m, k, 1, 0);
sock_read_data(res->req_sock);
gettimeofday(&h_end, NULL);
duration = ((h_end.tv_sec - h_start.tv_sec) * 1000000) + (h_end.tv_usec - h_start.tv_usec);
printf("End-to-end duration: %f ms\n", (float) duration / 1000);
printf("Row fetch time: %f ms\n", (float) timing_info_duration(fetch_timing) / 1000);
printf("Copy in B time: %f ms\n", (float) timing_info_duration(copy_in_B_timing) / 1000);
printf("Copy in C time: %f ms\n", (float) timing_info_duration(copy_in_C_timing) / 1000);
printf("sending_queue waiting time: %f ms\n", (float) timing_info_duration(queue_timing) / 1000);
printf("Kernel time: %f ms\n", (float) timing_info_duration(kernel_timing) / 1000);
printf("copy out time: %f ms\n", (float) timing_info_duration(copy_out_timing) / 1000);
struct timestamps *tss = NULL;
FILE *fptr;
tss = timing_info_get_timestamps(fetch_timing);
fptr = fopen("fetch_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(fetch_timing);
tss = timing_info_get_timestamps(copy_in_B_timing);
fptr = fopen("copy_in_B_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_in_B_timing);
tss = timing_info_get_timestamps(copy_in_C_timing);
fptr = fopen("copy_in_C_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_in_C_timing);
tss = timing_info_get_timestamps(queue_timing);
fptr = fopen("queue_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(queue_timing);
tss = timing_info_get_timestamps(kernel_timing);
fptr = fopen("ttv_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(kernel_timing);
tss = timing_info_get_timestamps(copy_out_timing);
fptr = fopen("copy_out_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_out_timing);
// write the answer here.
fptr = fopen("answer.bin", "wb");
fwrite(C, sizeof(double), M * N, fptr);
fclose(fptr);
for (i = 0; i < 4; i++) {
printf("%f ", C[i]);
}
printf("\n");
free(sub_B);
free(sub_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
fifo_free(sending_queue);
fifo_free(complete_queue);
free(entries);
return 0;
}
/******************************************************************************
* Function: print_config
*
* Input
* none
*
* Output
* none
*
* Returns
* none
*
* Description
* Print out config information
******************************************************************************/
void print_config(struct config_t config) {
fprintf(stdout, " ------------------------------------------------\n");
fprintf(stdout, " Device name : \"%s\"\n", config.dev_name);
fprintf(stdout, " IB port : %u\n", config.ib_port);
if (config.server_name)
fprintf(stdout, " IP : %s\n", config.server_name);
fprintf(stdout, " TCP port : %u\n", config.tcp_port);
if (config.gid_idx >= 0)
fprintf(stdout, " GID index : %u\n", config.gid_idx);
fprintf(stdout, " ------------------------------------------------\n\n");
}
int main(int argc, char *argv[]) {
int rc = 0;
uint64_t matrix_id, n, sub_n;
double *A, *C;
int hugepage_fd;
char *hugepage_addr;
// RDMA
struct resources res;
struct config_t config = {
"mlx4_0", /* dev_name */
"127.0.0.1", /* server_name */
19875, /* tcp_port */
1, /* ib_port */
0 /* gid_idx */
};
// default the iteration is 4 times
if (argc < 5) {
printf("usage: %s <matrix_id> <# of vertices> <# of subvertices> <port>\n", argv[0]);
exit(1);
}
matrix_id = (uint64_t) atoll(argv[1]);
n = (uint64_t) atoll(argv[2]);
sub_n = (uint64_t) atoll(argv[3]);
config.tcp_port = atoi(argv[4]);
/* print the used parameters for info*/
print_config(config);
printf("mapping hugepage\n");
hugepage_fd = open("/dev/hugepages/tensorstore", O_RDWR, 0755);
if (hugepage_fd < 0) {
perror("open");
exit(1);
}
hugepage_addr = (char *) mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, hugepage_fd, 0);
if (hugepage_addr==MAP_FAILED) {
perror("mmap");
exit(1);
}
res.buf = hugepage_addr;
memset(hugepage_addr, 0, BUF_SIZE);
printf("hugepage starting address is: %p\n", hugepage_addr);
printf("socket connection\n");
rc = make_two_tcp_connection(&res, &config);
if (rc < 0) {
perror("sock connect");
exit(1);
}
// generate data
A = (double *) malloc(K * P * sizeof(double));
C = (double *) malloc(M * N * P * sizeof(double));
srand(5);
generate_data(A, K * P);
memset(C, 0, M * N * P * sizeof(double));
printf("calculating the result of pagerank\n");
rc = nds_tc(&res, matrix_id, n, sub_n, A, C);
close(res.sock);
close(res.req_sock);
munmap(hugepage_addr, BUF_SIZE);
close(hugepage_fd);
free(A);
free(C);
return rc;
}
| 579545f22c93e3b8c00f3ac90e1a9f46a2a764d0.cu | extern "C" {
#include "rdma.h"
#include "timing.h"
#include "fifo.h"
}
#include <fcntl.h>
#include <unistd.h>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#define M 2048UL
#define N M
#define K N
#define P 16UL
#define LDA1 K
#define LDB1 M
#define LDB2 LDB1*N
#define LDC1 N
#define LDC2 LDC1*M
#define SUB_M 512UL
#define THREAD_NUM 8UL
#define HUGEPAGE_SZ (4UL * 1024UL * 1024UL * 1024UL)
#define AGGREGATED_SZ (SUB_M * SUB_M * SUB_M * 8UL)
// #define IO_QUEUE_SZ (HUGEPAGE_SZ / AGGREGATED_SZ / 2UL)
#define IO_QUEUE_SZ 2UL
void print_config(struct config_t config);
struct fetch_conf {
struct resources *res;
uint64_t m, sub_m;
double *d_B;
char *hugepage_addr;
struct fifo *sending_queue;
struct fifo *complete_queue;
struct timing_info *fetch_timing;
struct timing_info *copy_in_B_timing;
};
struct request_conf {
struct resources *res;
uint64_t id;
uint64_t sub_m;
};
struct fifo_entry {
double *d_B;
};
__global__ void block_tc_kernel(const double *A, const double *B, double *C) {
uint64_t m, n, k, p;
m = blockDim.x * blockIdx.x + threadIdx.x;
n = blockDim.y * blockIdx.y + threadIdx.y;
p = blockDim.z * blockIdx.z + threadIdx.z;
if (m < SUB_M && n < SUB_M && p < P) {
for (k = 0; k < SUB_M; k++) {
C[m + n * SUB_M + p * SUB_M*SUB_M] += B[k + m * SUB_M + n * SUB_M*SUB_M] * A[k + p * K];
}
}
}
int cudaMemcpyFromMmap(struct fetch_conf *conf, char *dst, const char *src, const size_t length, struct timing_info *fetch_timing) {
struct response *res = NULL;
timing_info_push_start(fetch_timing);
res = sock_read_offset(conf->res->sock);
if (res == NULL) {
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
// if (res->id == 0) {
// printf("fetching row [%lu:%lu]\n", res->x, res->y);
// } else {
// printf("fetching col [%lu:%lu]\n", res->x, res->y);
// }
// printf("offset: %lu\n", res->offset);
timing_info_push_end(fetch_timing);
timing_info_push_start(conf->copy_in_B_timing);
cudaMemcpy(dst, src + res->offset, length, cudaMemcpyHostToDevice);
timing_info_push_end(conf->copy_in_B_timing);
free(res);
if (sock_write_data(conf->res->sock)) { /* just send a dummy char back and forth */
fprintf(stderr, "sync error before RDMA ops\n");
return 1;
}
return 0;
}
void *fetch_thread(void *args) {
struct fetch_conf *conf = (struct fetch_conf *) args;
uint64_t n, m, k;
uint64_t dsize = SUB_M * SUB_M * SUB_M;
double *ptr_a;
struct fifo_entry *entry = NULL;
uint64_t count = 0;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
entry = (struct fifo_entry *) fifo_pop(conf->complete_queue);
ptr_a = conf->d_B + dsize * (count % IO_QUEUE_SZ);
cudaMemcpyFromMmap(conf, (char *) ptr_a, (char *) conf->hugepage_addr, dsize * sizeof(double), conf->fetch_timing);
count++;
entry->d_B = ptr_a;
fifo_push(conf->sending_queue, entry);
}
}
}
return NULL;
}
void *request_thread(void *args) {
struct request_conf *conf = (struct request_conf *) args;
uint64_t n, m, k;
for (n = 0; n < N / SUB_M; n++) {
for (m = 0; m < M / SUB_M; m++) {
for (k = 0; k < K / SUB_M; k++) {
sock_write_request(conf->res->req_sock, conf->id, n, m, SUB_M, 4, k);
sock_read_data(conf->res->req_sock);
}
}
}
return NULL;
}
void generate_data(double *array, uint64_t size) {
uint64_t i;
for (i = 0; i < size; i++) {
array[i] = (((double) rand())/RAND_MAX - 0.5)*100;
}
}
int nds_tc(struct resources *res, uint64_t id, uint64_t size, uint64_t sub_size, const double *A, double *C) {
double *d_A;
double *d_B;
double *d_C;
double *sub_B, *sub_C;
size_t i, n, m, k, p, nn, mm, a, b;
size_t total_iteration;
struct fifo *sending_queue;
struct fifo *complete_queue;
struct fifo_entry *entries = (struct fifo_entry *) calloc(IO_QUEUE_SZ, sizeof(struct fifo_entry));
struct fifo_entry *entry = NULL;
struct timing_info *queue_timing;
struct timing_info *fetch_timing;
struct timing_info *copy_in_B_timing;
struct timing_info *copy_in_C_timing;
struct timing_info *kernel_timing;
struct timing_info *copy_out_timing;
pthread_t f_thread_id;
struct fetch_conf f_conf;
pthread_t r_thread_id;
struct request_conf r_conf;
struct timeval h_start, h_end;
long duration;
// initialization
total_iteration = (M / SUB_M) * (M / SUB_M) * (M / SUB_M);
queue_timing = timing_info_new(total_iteration);
if (queue_timing == NULL) {
printf("cannot create queue_timing\n");
return -1;
}
fetch_timing = timing_info_new(total_iteration);
if (fetch_timing == NULL) {
printf("cannot create fetch_timing\n");
return -1;
}
copy_in_B_timing = timing_info_new(total_iteration);
if (copy_in_B_timing == NULL) {
printf("cannot create copy_in_B_timing\n");
return -1;
}
copy_in_C_timing = timing_info_new(total_iteration);
if (copy_in_C_timing == NULL) {
printf("cannot create copy_in_C_timing\n");
return -1;
}
kernel_timing = timing_info_new(total_iteration);
if (kernel_timing == NULL) {
printf("cannot create kernel_timing\n");
return -1;
}
copy_out_timing = timing_info_new(total_iteration);
if (copy_out_timing == NULL) {
printf("cannot create copy_out_timing\n");
return -1;
}
// it causes problem if size == 1
sending_queue = fifo_new(IO_QUEUE_SZ * 2);
if (sending_queue == NULL) {
printf("cannot create sending_queue\n");
return -1;
}
complete_queue = fifo_new(IO_QUEUE_SZ * 2);
if (complete_queue == NULL) {
printf("cannot create complete_queue\n");
return -1;
}
for (i = 0; i < IO_QUEUE_SZ; i++) {
fifo_push(complete_queue, entries + i);
}
sub_B = (double *) malloc(SUB_M * SUB_M * SUB_M * sizeof(double));
sub_C = (double *) malloc(SUB_M * SUB_M * P * sizeof(double));
cudaMalloc((void **) &d_A, K * P * sizeof(double));
cudaMalloc((void **) &d_B, SUB_M * SUB_M * SUB_M * sizeof(double) * IO_QUEUE_SZ);
cudaMalloc((void **) &d_C, SUB_M * SUB_M * P * sizeof(double));
cudaMemcpy(d_A, A, K * P * sizeof(double), cudaMemcpyHostToDevice);
// M * N has to be < 1024
dim3 grid((SUB_M+THREAD_NUM-1)/THREAD_NUM, (SUB_M+THREAD_NUM-1)/THREAD_NUM, (P+THREAD_NUM-1)/THREAD_NUM);
dim3 block(THREAD_NUM, THREAD_NUM, THREAD_NUM);
r_conf.res = res;
r_conf.id = id;
r_conf.sub_m = SUB_M;
pthread_create(&r_thread_id, NULL, request_thread, &r_conf);
// create thread here
f_conf.res = res;
f_conf.m = size;
f_conf.sub_m = sub_size;
f_conf.d_B = d_B;
f_conf.hugepage_addr = res->buf;
f_conf.sending_queue = sending_queue;
f_conf.complete_queue = complete_queue;
f_conf.fetch_timing = fetch_timing;
f_conf.copy_in_B_timing = copy_in_B_timing;
timing_info_set_starting_time(queue_timing);
timing_info_set_starting_time(fetch_timing);
timing_info_set_starting_time(copy_in_B_timing);
timing_info_set_starting_time(kernel_timing);
timing_info_set_starting_time(copy_out_timing);
pthread_create(&f_thread_id, NULL, fetch_thread, &f_conf);
gettimeofday(&h_start, NULL);
// blockGEMM
for (n = 0; n < N; n+=SUB_M) {
for (m = 0; m < M; m+=SUB_M) {
timing_info_push_start(copy_in_C_timing);
            cudaMemset(d_C, 0, SUB_M * SUB_M * P * sizeof(double)); // zero all P output slices of the block
// for (nn = n, a = 0; nn < n+SUB_M; nn++, a++) {
// for (mm = m, b = 0; mm < m+SUB_M; mm++, b++) {
// sub_C[b + a * SUB_M] = C[mm + nn * LDC1];
// }
// cudaMemcpy(d_C, sub_C, SUB_M * SUB_M * sizeof(double), cudaMemcpyHostToDevice);
// }
timing_info_push_end(copy_in_C_timing);
for (k = 0; k < K; k+=SUB_M) {
timing_info_push_start(queue_timing);
entry = (struct fifo_entry *) fifo_pop(sending_queue);
timing_info_push_end(queue_timing);
timing_info_push_start(kernel_timing);
block_tc_kernel<<<grid, block>>>(d_A + k, entry->d_B, d_C);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("CUDA error, error code: %d, error name: %s\n", err, cudaGetErrorString(err));
}
fifo_push(complete_queue, entry);
timing_info_push_end(kernel_timing);
}
// assign C
timing_info_push_start(copy_out_timing);
// use cudaMemcpy2D but not the bottleneck.
cudaMemcpy(sub_C, d_C, SUB_M * SUB_M * P * sizeof(double), cudaMemcpyDeviceToHost);
// cudaDeviceSynchronize();
for (p = 0; p < P; p++) {
for (nn = n, a = 0; nn < n+SUB_M; nn++, a++) {
for (mm = m, b = 0; mm < m+SUB_M; mm++, b++) {
C[mm + nn * LDC1 + p * LDC2] = sub_C[b + a * SUB_M + p * SUB_M*SUB_M];
}
}
}
timing_info_push_end(copy_out_timing);
}
}
pthread_join(r_thread_id, NULL);
pthread_join(f_thread_id, NULL);
sock_write_request(res->req_sock, -1, n, m, k, 1, 0);
sock_read_data(res->req_sock);
gettimeofday(&h_end, NULL);
duration = ((h_end.tv_sec - h_start.tv_sec) * 1000000) + (h_end.tv_usec - h_start.tv_usec);
printf("End-to-end duration: %f ms\n", (float) duration / 1000);
printf("Row fetch time: %f ms\n", (float) timing_info_duration(fetch_timing) / 1000);
printf("Copy in B time: %f ms\n", (float) timing_info_duration(copy_in_B_timing) / 1000);
printf("Copy in C time: %f ms\n", (float) timing_info_duration(copy_in_C_timing) / 1000);
printf("sending_queue waiting time: %f ms\n", (float) timing_info_duration(queue_timing) / 1000);
printf("Kernel time: %f ms\n", (float) timing_info_duration(kernel_timing) / 1000);
printf("copy out time: %f ms\n", (float) timing_info_duration(copy_out_timing) / 1000);
struct timestamps *tss = NULL;
FILE *fptr;
tss = timing_info_get_timestamps(fetch_timing);
fptr = fopen("fetch_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(fetch_timing);
tss = timing_info_get_timestamps(copy_in_B_timing);
fptr = fopen("copy_in_B_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_in_B_timing);
tss = timing_info_get_timestamps(copy_in_C_timing);
fptr = fopen("copy_in_C_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_in_C_timing);
tss = timing_info_get_timestamps(queue_timing);
fptr = fopen("queue_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(queue_timing);
tss = timing_info_get_timestamps(kernel_timing);
fptr = fopen("ttv_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(kernel_timing);
tss = timing_info_get_timestamps(copy_out_timing);
fptr = fopen("copy_out_ts.bin", "wb");
fwrite(&tss->count, sizeof(uint64_t), 1, fptr);
fwrite(tss->timestamps, sizeof(uint64_t), tss->count * 2, fptr);
fclose(fptr);
timing_info_free_timestamps(tss);
timing_info_free(copy_out_timing);
// write the answer here.
fptr = fopen("answer.bin", "wb");
fwrite(C, sizeof(double), M * N, fptr);
fclose(fptr);
for (i = 0; i < 4; i++) {
printf("%f ", C[i]);
}
printf("\n");
free(sub_B);
free(sub_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
fifo_free(sending_queue);
fifo_free(complete_queue);
free(entries);
return 0;
}
/******************************************************************************
* Function: print_config
*
* Input
* none
*
* Output
* none
*
* Returns
* none
*
* Description
* Print out config information
******************************************************************************/
void print_config(struct config_t config) {
fprintf(stdout, " ------------------------------------------------\n");
fprintf(stdout, " Device name : \"%s\"\n", config.dev_name);
fprintf(stdout, " IB port : %u\n", config.ib_port);
if (config.server_name)
fprintf(stdout, " IP : %s\n", config.server_name);
fprintf(stdout, " TCP port : %u\n", config.tcp_port);
if (config.gid_idx >= 0)
fprintf(stdout, " GID index : %u\n", config.gid_idx);
fprintf(stdout, " ------------------------------------------------\n\n");
}
int main(int argc, char *argv[]) {
int rc = 0;
uint64_t matrix_id, n, sub_n;
double *A, *C;
int hugepage_fd;
char *hugepage_addr;
// RDMA
struct resources res;
struct config_t config = {
"mlx4_0", /* dev_name */
"127.0.0.1", /* server_name */
19875, /* tcp_port */
1, /* ib_port */
0 /* gid_idx */
};
// default the iteration is 4 times
if (argc < 5) {
printf("usage: %s <matrix_id> <# of vertices> <# of subvertices> <port>\n", argv[0]);
exit(1);
}
matrix_id = (uint64_t) atoll(argv[1]);
n = (uint64_t) atoll(argv[2]);
sub_n = (uint64_t) atoll(argv[3]);
config.tcp_port = atoi(argv[4]);
/* print the used parameters for info*/
print_config(config);
printf("mapping hugepage\n");
hugepage_fd = open("/dev/hugepages/tensorstore", O_RDWR, 0755);
if (hugepage_fd < 0) {
perror("open");
exit(1);
}
hugepage_addr = (char *) mmap(0, BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, hugepage_fd, 0);
if (hugepage_addr==MAP_FAILED) {
perror("mmap");
exit(1);
}
res.buf = hugepage_addr;
memset(hugepage_addr, 0, BUF_SIZE);
printf("hugepage starting address is: %p\n", hugepage_addr);
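// The zeroed hugepage mapping is handed to the RDMA resources via res.buf,
// presumably serving as the registered data buffer for the transfers below.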
printf("socket connection\n");
rc = make_two_tcp_connection(&res, &config);
if (rc < 0) {
perror("sock connect");
exit(1);
}
// generate data
A = (double *) malloc(K * P * sizeof(double));
C = (double *) malloc(M * N * P * sizeof(double));
srand(5);
generate_data(A, K * P);
memset(C, 0, M * N * P * sizeof(double));
printf("calculating the result of pagerank\n");
rc = nds_tc(&res, matrix_id, n, sub_n, A, C);
close(res.sock);
close(res.req_sock);
munmap(hugepage_addr, BUF_SIZE);
close(hugepage_fd);
free(A);
free(C);
return rc;
}
|
af6929679b6722dec61dacb92c148de3ab2a56ad.hip | // !!! This is a file automatically generated by hipify!!!
/**
* syrk.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 1024 * 16
#define M 1024 * 16
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for alpha and beta (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* A, DATA_TYPE* C, DATA_TYPE* A_gpu, DATA_TYPE* C_gpu)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < M; j++)
{
A[i*M + j] = ((DATA_TYPE) i*j) / N;
A_gpu[i*M + j] = ((DATA_TYPE) i*j) / N;
}
for (j = 0; j < N; j++)
{
C[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
C_gpu[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
}
}
}
void syrk(DATA_TYPE* A, DATA_TYPE* C)
{
int i, j, k;
/* C := alpha*A*A' + beta*C */
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*M + j] *= beta;
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
for (k = 0; k < M; k++)
{
C[i*N + j] += alpha * A[i*M + k] * A[j*M + k];
}
}
}
}
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
int i,j,fail;
fail = 0;
// Compare the CPU result C with the GPU result C_outputFromGpu
for (i=0; i<N; i++)
{
for (j=0; j<M; j++)
{
if (percentDiff(C[i*M + j], C_outputFromGpu[i*M + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
return;
}
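// Each thread of syrk_kernel computes one element C[i][j]: it scales the
// existing value by beta and accumulates alpha times the dot product of
// rows i and j of A (note the kernel body uses the alpha/beta macros, not
// the ALPHA/BETA parameters it receives).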
__global__ void syrk_kernel(DATA_TYPE ALPHA, DATA_TYPE BETA, DATA_TYPE *a, DATA_TYPE *c)
{
/* C := alpha*A*A' + beta*C */
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N))
{
c[i * N + j] *= beta;
int k;
for(k=0; k< M; k++)
{
c[i * N + j] += alpha * a[i * M + k] * a[j * M + k];
}
}
}
void syrkCuda(DATA_TYPE* A_gpu, DATA_TYPE* C_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X))), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_Y)));
t_start = rtclock();
hipLaunchKernelGGL(( syrk_kernel), dim3(grid),dim3(block), 0, 0, alpha, beta, A_gpu,C_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main()
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* C;
DATA_TYPE* A_gpu;
DATA_TYPE* C_gpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
hipMallocManaged(&A_gpu, sizeof(DATA_TYPE) * N * M);
hipMallocManaged(&C_gpu, sizeof(DATA_TYPE) * N * N);
init_arrays(A, C, A_gpu, C_gpu);
GPU_argv_init();
syrkCuda(A_gpu, C_gpu);
t_start = rtclock();
syrk(A, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_gpu);
free(A);
free(C);
hipFree(A_gpu);
hipFree(C_gpu);
return 0;
}
| af6929679b6722dec61dacb92c148de3ab2a56ad.cu | /**
* syrk.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 1024 * 16
#define M 1024 * 16
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Declared constant values for alpha and beta (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* A, DATA_TYPE* C, DATA_TYPE* A_gpu, DATA_TYPE* C_gpu)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < M; j++)
{
A[i*M + j] = ((DATA_TYPE) i*j) / N;
A_gpu[i*M + j] = ((DATA_TYPE) i*j) / N;
}
for (j = 0; j < N; j++)
{
C[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
C_gpu[i*M + j] = ((DATA_TYPE) i*j + 2) / N;
}
}
}
void syrk(DATA_TYPE* A, DATA_TYPE* C)
{
int i, j, k;
/* C := alpha*A*A' + beta*C */
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
C[i*M + j] *= beta;
}
}
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
for (k = 0; k < M; k++)
{
C[i*N + j] += alpha * A[i*M + k] * A[j*M + k];
}
}
}
}
void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu)
{
int i,j,fail;
fail = 0;
// Compare the CPU result C with the GPU result C_outputFromGpu
for (i=0; i<N; i++)
{
for (j=0; j<M; j++)
{
if (percentDiff(C[i*M + j], C_outputFromGpu[i*M + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
return;
}
__global__ void syrk_kernel(DATA_TYPE ALPHA, DATA_TYPE BETA, DATA_TYPE *a, DATA_TYPE *c)
{
/* C := alpha*A*A' + beta*C */
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < N) && (j < N))
{
c[i * N + j] *= beta;
int k;
for(k=0; k< M; k++)
{
c[i * N + j] += alpha * a[i * M + k] * a[j * M + k];
}
}
}
void syrkCuda(DATA_TYPE* A_gpu, DATA_TYPE* C_gpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)(ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X))), (size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_Y)));
t_start = rtclock();
syrk_kernel<<<grid,block>>>(alpha, beta, A_gpu,C_gpu);
cudaDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main()
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* C;
DATA_TYPE* A_gpu;
DATA_TYPE* C_gpu;
A = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
C = (DATA_TYPE*)malloc(N*M*sizeof(DATA_TYPE));
cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * N * M);
cudaMallocManaged(&C_gpu, sizeof(DATA_TYPE) * N * N);
init_arrays(A, C, A_gpu, C_gpu);
GPU_argv_init();
syrkCuda(A_gpu, C_gpu);
t_start = rtclock();
syrk(A, C);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(C, C_gpu);
free(A);
free(C);
cudaFree(A_gpu);
cudaFree(C_gpu);
return 0;
}
|
93a8d0dc60183031a0907415edd1d87295e05e1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Exercise 1 from http://webapp.dam.brown.edu/wiki/SciComp/CudaExercises
// Transposition of a matrix
// by Hendrik Riedmann <[email protected]>
// Andrew Cron added bounds checks ...
// Andrew Cron added Z grid dimension to X for larger matrices
#define BLOCK_SIZE %(block_size)d
#define A_BLOCK_STRIDE (BLOCK_SIZE * a_width)
#define A_T_BLOCK_STRIDE (BLOCK_SIZE * a_height)
__global__ void transpose(float *A_t, float *A, int a_width, int a_height)
{
int bidx = blockIdx.x + blockIdx.z;
// Base indices in A and A_t
int base_idx_a = bidx * BLOCK_SIZE +
blockIdx.y * A_BLOCK_STRIDE;
int base_idx_a_t = blockIdx.y * BLOCK_SIZE +
bidx * A_T_BLOCK_STRIDE;
// Global indices in A and A_t
int glob_idx_a = base_idx_a + threadIdx.x + a_width * threadIdx.y;
int glob_idx_a_t = base_idx_a_t + threadIdx.x + a_height * threadIdx.y;
int a_x_pos = bidx * BLOCK_SIZE + threadIdx.x;
int a_y_pos = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int at_x_pos = blockIdx.y * BLOCK_SIZE + threadIdx.x;
int at_y_pos = bidx * BLOCK_SIZE + threadIdx.y;
__shared__ float A_shared[BLOCK_SIZE][BLOCK_SIZE+1];
if( a_x_pos < a_width && a_y_pos < a_height ){
// Store transposed submatrix to shared memory
A_shared[threadIdx.y][threadIdx.x] = A[glob_idx_a];
}
__syncthreads();
if( at_x_pos < a_height && at_y_pos < a_width ){
// Write transposed submatrix to global memory
A_t[glob_idx_a_t] = A_shared[threadIdx.x][threadIdx.y];
}
}
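// A minimal launch sketch for this kernel (assumption: the %(block_size)d
// placeholder above has already been substituted by the host code; with a
// plain 2D grid, blockIdx.z is 0 and bidx reduces to blockIdx.x):
//   dim3 block(BLOCK_SIZE, BLOCK_SIZE);
//   dim3 grid((a_width + BLOCK_SIZE - 1) / BLOCK_SIZE,
//             (a_height + BLOCK_SIZE - 1) / BLOCK_SIZE);
//   hipLaunchKernelGGL(transpose, grid, block, 0, 0, A_t, A, a_width, a_height);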
| 93a8d0dc60183031a0907415edd1d87295e05e1b.cu | // Exercise 1 from http://webapp.dam.brown.edu/wiki/SciComp/CudaExercises
// Transposition of a matrix
// by Hendrik Riedmann <[email protected]>
// Andrew Cron added bounds checks ...
// Andrew Cron added Z grid dimension to X for larger matrices
#define BLOCK_SIZE %(block_size)d
#define A_BLOCK_STRIDE (BLOCK_SIZE * a_width)
#define A_T_BLOCK_STRIDE (BLOCK_SIZE * a_height)
__global__ void transpose(float *A_t, float *A, int a_width, int a_height)
{
int bidx = blockIdx.x + blockIdx.z;
// Base indices in A and A_t
int base_idx_a = bidx * BLOCK_SIZE +
blockIdx.y * A_BLOCK_STRIDE;
int base_idx_a_t = blockIdx.y * BLOCK_SIZE +
bidx * A_T_BLOCK_STRIDE;
// Global indices in A and A_t
int glob_idx_a = base_idx_a + threadIdx.x + a_width * threadIdx.y;
int glob_idx_a_t = base_idx_a_t + threadIdx.x + a_height * threadIdx.y;
int a_x_pos = bidx * BLOCK_SIZE + threadIdx.x;
int a_y_pos = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int at_x_pos = blockIdx.y * BLOCK_SIZE + threadIdx.x;
int at_y_pos = bidx * BLOCK_SIZE + threadIdx.y;
__shared__ float A_shared[BLOCK_SIZE][BLOCK_SIZE+1];
if( a_x_pos < a_width && a_y_pos < a_height ){
// Store transposed submatrix to shared memory
A_shared[threadIdx.y][threadIdx.x] = A[glob_idx_a];
}
__syncthreads();
if( at_x_pos < a_height && at_y_pos < a_width ){
// Write transposed submatrix to global memory
A_t[glob_idx_a_t] = A_shared[threadIdx.x][threadIdx.y];
}
}
|
5e7648668eaa1ffc61f01f05d43420e98c3d2031.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std;
__global__
void reduce(int n, float *x, float *y)
{
int oindex = blockIdx.x + blockIdx.y * blockDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[oindex] = x[i] + y[oindex];
}
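// Note: with the 1D launch used in main, oindex is just blockIdx.x, so all
// threads of a block accumulate into the same y slot through plain
// read-modify-writes (no atomics or shared-memory reduction).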
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
__global__
void mul(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] * y[i];
}
int main(void)
{
int N = 640 * 480;
float *x, *y;
int blockSize = 16*16;
int numBlocks = (N + blockSize - 1) / blockSize;
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, numBlocks*sizeof(float));
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
for (int i = 0; i < numBlocks; i++) {
y[i] = 0.0f;
}
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
// Run kernel on 1M elements on the GPU
hipLaunchKernelGGL(( reduce), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "add took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "sync took " << dur.count () << " s " << std::endl;
auto tstop = std::chrono::system_clock::now ();
dur = tstop - tstart;
std::cout << "total took " << dur.count () << " s " << std::endl;
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 5e7648668eaa1ffc61f01f05d43420e98c3d2031.cu | #include <iostream>
#include <chrono>
#include <math.h>
using namespace std;
__global__
void reduce(int n, float *x, float *y)
{
int oindex = blockIdx.x + blockIdx.y * blockDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[oindex] = x[i] + y[oindex];
}
// Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
__global__
void mul(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] * y[i];
}
int main(void)
{
int N = 640 * 480;
float *x, *y;
int blockSize = 16*16;
int numBlocks = (N + blockSize - 1) / blockSize;
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, numBlocks*sizeof(float));
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
for (int i = 0; i < numBlocks; i++) {
y[i] = 0.0f;
}
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
// Run kernel on 1M elements on the GPU
reduce<<<numBlocks, blockSize>>>(N, x, y);
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "add took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "sync took " << dur.count () << " s " << std::endl;
auto tstop = std::chrono::system_clock::now ();
dur = tstop - tstart;
std::cout << "total took " << dur.count () << " s " << std::endl;
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
f093b6642435a7aada25424ca43133bad5392d06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <fstream>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <cstdlib>
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#define THREADS_PER_BLOCK 1024
using namespace std;
using namespace cv;
void showImg(Mat img);
float* extractHOGfeatures(Mat img, const int size);
float* MatF2Float(Mat m);
void trainNN(float* HOGfeatures, const int people, const int images, const int featureSize, const float factor, const int count);
float* prepareTrainingFeatures(const int people, const int images, const int size, const int featureSize);
float predict(float* nn, float* HOGfeatures, int nnidx, int idx, const int featureSize);
void testNN(const int people, const int images, const int size, const int featureSize);
hipError_t GPU(float* HOGfeatures, float* nn, const int people, const int images, const int featureSize, const float factor, const int count);
__global__ void reduction(float* A, float* output, const int N) {
int A_index = blockIdx.x * blockDim.x + threadIdx.x;
int index = threadIdx.x;
__shared__ float data[THREADS_PER_BLOCK];
if (A_index < N) {
int n;
if (blockIdx.x == gridDim.x - 1) {
n = int((N - blockIdx.x * blockDim.x) / 2);
}
else {
n = int(THREADS_PER_BLOCK / 2);
}
if (index < n) {
data[index] = A[A_index] + A[A_index + n];
__syncthreads();
n >>= 1;
while (n > 0) {
if (index < n) {
data[index] += data[index + n];
}
__syncthreads();
n >>= 1;
}
if (index == 0) {
output[blockIdx.x] = data[0];
}
}
}
}
__global__ void multiply(float* features, float* nn, const int featureSize, const int people, const int index, const int nnidx, float* prediction) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < featureSize + 1) {
if (idx % (featureSize + 1) < featureSize) {
prediction[idx] = features[index + idx % featureSize] * nn[nnidx + idx];
}
else {
prediction[idx] = nn[nnidx + idx];
}
}
}
__global__ void trainNNgpu(float *nn, float* features, const int index, const int featureSize, const float factor, float* prediction, float goal, const int nnidx)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < featureSize + 1) {
float pred = exp(prediction[0]) / (exp(prediction[0]) + 1);
if (idx < featureSize) {
nn[nnidx + idx] -= factor * (2 * (pred - goal) * pred * (1 - pred) * features[index + idx]);
}
else {
nn[nnidx + idx] -= factor * (2 * (pred - goal) * pred * (1 - pred));
}
}
}
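// One GPU training step for a sample chains the three kernels above:
// multiply writes the featureSize element-wise products plus the bias weight
// into prediction, the two reduction launches sum them into prediction[0],
// and trainNNgpu applies the logistic update
// w -= factor * 2 * (p - goal) * p * (1 - p) * x (bias weight without x).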
int main()
{
srand(time(NULL));
const int size = 64;
const int featureSize = (size / 8 - 1) * (size / 8 - 1) * 36;
const int people = 5;
const int images = 9;
const int testImages = 3;
const float factor = 0.1;
const int count = 10000;
float* HOGfeatures = prepareTrainingFeatures(people, images, size, featureSize);
trainNN(HOGfeatures, people, images, featureSize, factor, count);
testNN(people, testImages, size, featureSize);
float* nn = new float[(featureSize + 1) * people];
for (int i = 0; i < (featureSize + 1) * people; i++) {
nn[i] = float(rand()) / float(RAND_MAX) / 10;
}
hipError_t cudaStatus = GPU(HOGfeatures, nn, people, images, featureSize, factor, count);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
testNN(people, testImages, size, featureSize);
return 0;
}
hipError_t GPU(float* HOGfeatures, float* nn, const int people, const int images, const int featureSize, const float factor, const int count)
{
float* dev_features = 0;
float* dev_nn = 0;
float* dev_prediction1 = 0;
float* dev_prediction2 = 0;
hipError_t cudaStatus;
fstream fs;
int blocks = (featureSize + 1) / THREADS_PER_BLOCK + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers
cudaStatus = hipMalloc((void**)&dev_features, people * images * featureSize * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_nn, people * (featureSize + 1) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_prediction1, (featureSize + 1) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_prediction2, blocks * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_features, HOGfeatures, people * images * featureSize * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_nn, nn, people * (featureSize + 1) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch kernels on the GPU
for (int i = 0; i < count; i++) {
int person = rand() % people;
int image = rand() % images;
int index = person * images * featureSize + image * featureSize;
for (int k = 0; k < people; k++) {
int nnidx = k * (featureSize + 1);
float goal;
if (k == person) {
goal = 1;
}
else {
goal = 0;
}
multiply << <blocks, THREADS_PER_BLOCK >> > (dev_features, dev_nn, featureSize, people, index, nnidx, dev_prediction1);
reduction << <blocks, THREADS_PER_BLOCK >> > (dev_prediction1, dev_prediction2, featureSize + 1);
reduction << <1, THREADS_PER_BLOCK >> > (dev_prediction2, dev_prediction2, blocks);
trainNNgpu << <blocks, THREADS_PER_BLOCK >> > (dev_nn, dev_features, index, featureSize, factor, dev_prediction2, goal, nnidx);
}
}
// hipDeviceSynchronize waits for the kernels to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kernels!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(nn, dev_nn, people * (featureSize + 1) * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
fs.open("nn.txt", ios::out | ios::trunc);
if (!fs) {
cerr << "unable to open file" << endl;
}
else {
for (int i = 0; i < (featureSize + 1) * people; i++) {
fs << nn[i] << " ";
}
}
fs.close();
delete[] nn;
nn = NULL;
Error:
hipFree(dev_features);
hipFree(dev_nn);
hipFree(dev_prediction1);
hipFree(dev_prediction2);
return cudaStatus;
}
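// extractHOGfeatures below resizes the image to size x size, builds a 9-bin
// gradient-orientation histogram per 8x8 cell (splitting each magnitude
// linearly between the two nearest bins), then L2-normalises every 2x2 block
// of cells, giving 36 values per block position.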
float* extractHOGfeatures(Mat img, const int size) {
resize(img, img, Size(size, size));
Mat gx, gy;
Sobel(img, gx, CV_32F, 1, 0, 1);
Sobel(img, gy, CV_32F, 0, 1, 1);
Mat mag, angle;
cartToPolar(gx, gy, mag, angle, 1);
float* mg = MatF2Float(mag);
float* ang = MatF2Float(angle);
for (int i = 0; i < size * size; i++) {
if (ang[i] >= 180) {
ang[i] -= 180;
}
}
const int hogSize = (size / 8) * (size / 8) * 9;
float* hog = new float[hogSize];
for (int i = 0; i < hogSize; i++) {
hog[i] = 0;
}
for (int i = 0; i < int(size / 8); i++) {
for (int j = 0; j < int(size / 8); j++) {
for (int k = 0; k < 8; k++) {
for (int l = 0; l < 8; l++) {
int index = (i * 8 + k) * size + j * 8 + l;
int idx1 = int(ang[index] / 20);
int idx2 = int((idx1 + 1) % 9);
float scalar2 = ang[index] - idx1 * 20;
float scalar1 = (20 - scalar2) / 20;
scalar2 /= 20;
idx1 += int(i * (size / 8 * 9) + j * 9);
idx2 += int(i * (size / 8 * 9) + j * 9);
hog[idx1] += scalar1 * mg[index];
hog[idx2] += scalar2 * mg[index];
}
}
}
}
const int featureSize = (size / 8 - 1) * (size / 8 - 1) * 36;
float* feature = new float[featureSize];
for (int i = 0; i < size / 8 - 1; i++) {
for (int j = 0; j < size / 8 - 1; j++) {
float norm = 0;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
for (int m = 0; m < 9; m++) {
int index = (i + k) * size / 8 * 9 + (j + l) * 9 + m;
norm += hog[index] * hog[index];
}
}
}
norm = sqrt(norm);
int idx = i * 36 * (size / 8 - 1) + j * 36;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
for (int m = 0; m < 9; m++) {
int index = (i + k) * size / 8 * 9 + (j + l) * 9 + m;
feature[idx] = hog[index] / norm;
idx++;
}
}
}
}
}
delete[] hog;
hog = NULL;
return feature;
}
float* MatF2Float(Mat m) {
const int size = m.rows * m.cols;
float* d = new float[size];
for (int i = 0; i < m.rows; i++) {
for (int j = 0; j < m.cols; j++) {
int idx = i * m.cols + j;
d[idx] = m.at<float>(i, j);
}
}
return d;
}
void showImg(Mat img) {
namedWindow("image", WINDOW_NORMAL);
imshow("image", img);
waitKey(0);
}
float* prepareTrainingFeatures(const int people, const int images, const int size, const int featureSize) {
const int cSize = 13;
char name[cSize];
strcpy_s(name, "1/000001.jpg");
float* HOGfeatures = new float[featureSize * people * images];
for (int i = 1; i <= people; i++) {
for (int j = 1; j <= images; j++) {
name[0] = i + '0';
name[7] = j + '0';
Mat imgM = imread(name, IMREAD_GRAYSCALE);
if (!imgM.data) {
cout << "Could not open or find the image" << std::endl;
}
else {
float* feature = extractHOGfeatures(imgM, size);
for (int k = 0; k < featureSize; k++) {
HOGfeatures[((i - 1) * images + j - 1) * featureSize + k] = feature[k];
}
delete[] feature;
feature = NULL;
}
}
}
return HOGfeatures;
}
void trainNN(float* HOGfeatures, const int people, const int images, const int featureSize, const float factor, const int count) {
float* nn = new float[(featureSize + 1) * people];
for (int i = 0; i < (featureSize + 1) * people; i++) {
nn[i] = float(rand()) / float(RAND_MAX) / 10;
}
for (int i = 0; i < count; i++) {
int person = rand() % people;
int image = rand() % images;
int idx = person * images * featureSize + image * featureSize;
for (int k = 0; k < people; k++) {
float goal;
if (k == person) {
goal = 1;
}
else {
goal = 0;
}
int nnidx = k * (featureSize + 1);
float prediction = predict(nn, HOGfeatures, nnidx, idx, featureSize);
for (int j = 0; j < featureSize; j++) {
nn[nnidx + j] -= factor * (2 * (prediction - goal) * prediction * (1 - prediction) * HOGfeatures[idx + j]);
}
nn[nnidx + featureSize] -= factor * (2 * (prediction - goal) * prediction * (1 - prediction));
}
}
fstream fs;
fs.open("nn.txt", ios::out | ios::trunc);
if (!fs) {
cerr << "unable to open file" << endl;
}
else {
for (int i = 0; i < (featureSize + 1) * people; i++) {
fs << nn[i] << " ";
}
}
fs.close();
delete[] nn;
nn = NULL;
}
float predict(float* nn, float* HOGfeatures, int nnidx, int idx, const int featureSize) {
float prediction = 0;
for (int i = 0; i < featureSize; i++) {
prediction += nn[nnidx + i] * HOGfeatures[idx + i];
}
prediction += nn[nnidx + featureSize];
prediction = exp(prediction) / (exp(prediction) + 1);
return prediction;
}
void testNN(const int people, const int images, const int size, const int featureSize) {
const int cSize = 13;
char name[cSize];
strcpy_s(name, "1/000011.jpg");
fstream fs;
fs.open("nn.txt", ios::in);
float* nn = new float[people * (featureSize + 1)];
for (int i = 0; i < people * (featureSize + 1); i++) {
fs >> nn[i];
}
for (int i = 1; i <= people; i++) {
for (int j = 1; j <= images; j++) {
name[0] = i + '0';
name[7] = j + '0';
Mat imgM = imread(name, IMREAD_GRAYSCALE);
if (!imgM.data) {
cout << "Could not open or find the image" << std::endl;
}
else {
float* feature = extractHOGfeatures(imgM, size);
cout << "person " << i << ":\t";
float max = 0;
int max_person = 0;
for (int k = 0; k < people; k++) {
int nnidx = k * (featureSize + 1);
float prediction = predict(nn, feature, nnidx, 0, featureSize);
if (prediction > max) {
max = prediction;
max_person = k;
}
cout << k + 1 << " - " << prediction << "\t";
}
cout << "prediction - " << max_person + 1 << endl;
delete[] feature;
feature = NULL;
}
}
}
cout << endl;
}
| f093b6642435a7aada25424ca43133bad5392d06.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <fstream>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#include <cstdlib>
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads();
#endif
#define THREADS_PER_BLOCK 1024
using namespace std;
using namespace cv;
void showImg(Mat img);
float* extractHOGfeatures(Mat img, const int size);
float* MatF2Float(Mat m);
void trainNN(float* HOGfeatures, const int people, const int images, const int featureSize, const float factor, const int count);
float* prepareTrainingFeatures(const int people, const int images, const int size, const int featureSize);
float predict(float* nn, float* HOGfeatures, int nnidx, int idx, const int featureSize);
void testNN(const int people, const int images, const int size, const int featureSize);
cudaError_t GPU(float* HOGfeatures, float* nn, const int people, const int images, const int featureSize, const float factor, const int count);
__global__ void reduction(float* A, float* output, const int N) {
int A_index = blockIdx.x * blockDim.x + threadIdx.x;
int index = threadIdx.x;
__shared__ float data[THREADS_PER_BLOCK];
if (A_index < N) {
int n;
if (blockIdx.x == gridDim.x - 1) {
n = int((N - blockIdx.x * blockDim.x) / 2);
}
else {
n = int(THREADS_PER_BLOCK / 2);
}
if (index < n) {
data[index] = A[A_index] + A[A_index + n];
__syncthreads();
n >>= 1;
while (n > 0) {
if (index < n) {
data[index] += data[index + n];
}
__syncthreads();
n >>= 1;
}
if (index == 0) {
output[blockIdx.x] = data[0];
}
}
}
}
__global__ void multiply(float* features, float* nn, const int featureSize, const int people, const int index, const int nnidx, float* prediction) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < featureSize + 1) {
if (idx % (featureSize + 1) < featureSize) {
prediction[idx] = features[index + idx % featureSize] * nn[nnidx + idx];
}
else {
prediction[idx] = nn[nnidx + idx];
}
}
}
__global__ void trainNNgpu(float *nn, float* features, const int index, const int featureSize, const float factor, float* prediction, float goal, const int nnidx)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < featureSize + 1) {
float pred = exp(prediction[0]) / (exp(prediction[0]) + 1);
if (idx < featureSize) {
nn[nnidx + idx] -= factor * (2 * (pred - goal) * pred * (1 - pred) * features[index + idx]);
}
else {
nn[nnidx + idx] -= factor * (2 * (pred - goal) * pred * (1 - pred));
}
}
}
int main()
{
srand(time(NULL));
const int size = 64;
const int featureSize = (size / 8 - 1) * (size / 8 - 1) * 36;
const int people = 5;
const int images = 9;
const int testImages = 3;
const float factor = 0.1;
const int count = 10000;
float* HOGfeatures = prepareTrainingFeatures(people, images, size, featureSize);
trainNN(HOGfeatures, people, images, featureSize, factor, count);
testNN(people, testImages, size, featureSize);
float* nn = new float[(featureSize + 1) * people];
for (int i = 0; i < (featureSize + 1) * people; i++) {
nn[i] = float(rand()) / float(RAND_MAX) / 10;
}
cudaError_t cudaStatus = GPU(HOGfeatures, nn, people, images, featureSize, factor, count);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
testNN(people, testImages, size, featureSize);
return 0;
}
cudaError_t GPU(float* HOGfeatures, float* nn, const int people, const int images, const int featureSize, const float factor, const int count)
{
float* dev_features = 0;
float* dev_nn = 0;
float* dev_prediction1 = 0;
float* dev_prediction2 = 0;
cudaError_t cudaStatus;
fstream fs;
int blocks = (featureSize + 1) / THREADS_PER_BLOCK + 1;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers
cudaStatus = cudaMalloc((void**)&dev_features, people * images * featureSize * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_nn, people * (featureSize + 1) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_prediction1, (featureSize + 1) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_prediction2, blocks * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_features, HOGfeatures, people * images * featureSize * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_nn, nn, people * (featureSize + 1) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch kernels on the GPU
for (int i = 0; i < count; i++) {
int person = rand() % people;
int image = rand() % images;
int index = person * images * featureSize + image * featureSize;
for (int k = 0; k < people; k++) {
int nnidx = k * (featureSize + 1);
float goal;
if (k == person) {
goal = 1;
}
else {
goal = 0;
}
multiply << <blocks, THREADS_PER_BLOCK >> > (dev_features, dev_nn, featureSize, people, index, nnidx, dev_prediction1);
reduction << <blocks, THREADS_PER_BLOCK >> > (dev_prediction1, dev_prediction2, featureSize + 1);
reduction << <1, THREADS_PER_BLOCK >> > (dev_prediction2, dev_prediction2, blocks);
trainNNgpu << <blocks, THREADS_PER_BLOCK >> > (dev_nn, dev_features, index, featureSize, factor, dev_prediction2, goal, nnidx);
}
}
// cudaDeviceSynchronize waits for the kernels to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernels!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(nn, dev_nn, people * (featureSize + 1) * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
fs.open("nn.txt", ios::out | ios::trunc);
if (!fs) {
cerr << "unable to open file" << endl;
}
else {
for (int i = 0; i < (featureSize + 1) * people; i++) {
fs << nn[i] << " ";
}
}
fs.close();
delete[] nn;
nn = NULL;
Error:
cudaFree(dev_features);
cudaFree(dev_nn);
cudaFree(dev_prediction1);
cudaFree(dev_prediction2);
return cudaStatus;
}
float* extractHOGfeatures(Mat img, const int size) {
resize(img, img, Size(size, size));
Mat gx, gy;
Sobel(img, gx, CV_32F, 1, 0, 1);
Sobel(img, gy, CV_32F, 0, 1, 1);
Mat mag, angle;
cartToPolar(gx, gy, mag, angle, 1);
float* mg = MatF2Float(mag);
float* ang = MatF2Float(angle);
for (int i = 0; i < size * size; i++) {
if (ang[i] >= 180) {
ang[i] -= 180;
}
}
const int hogSize = (size / 8) * (size / 8) * 9;
float* hog = new float[hogSize];
for (int i = 0; i < hogSize; i++) {
hog[i] = 0;
}
for (int i = 0; i < int(size / 8); i++) {
for (int j = 0; j < int(size / 8); j++) {
for (int k = 0; k < 8; k++) {
for (int l = 0; l < 8; l++) {
int index = (i * 8 + k) * size + j * 8 + l;
int idx1 = int(ang[index] / 20);
int idx2 = int((idx1 + 1) % 9);
float scalar2 = ang[index] - idx1 * 20;
float scalar1 = (20 - scalar2) / 20;
scalar2 /= 20;
idx1 += int(i * (size / 8 * 9) + j * 9);
idx2 += int(i * (size / 8 * 9) + j * 9);
hog[idx1] += scalar1 * mg[index];
hog[idx2] += scalar2 * mg[index];
}
}
}
}
const int featureSize = (size / 8 - 1) * (size / 8 - 1) * 36;
float* feature = new float[featureSize];
for (int i = 0; i < size / 8 - 1; i++) {
for (int j = 0; j < size / 8 - 1; j++) {
float norm = 0;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
for (int m = 0; m < 9; m++) {
int index = (i + k) * size / 8 * 9 + (j + l) * 9 + m;
norm += hog[index] * hog[index];
}
}
}
norm = sqrt(norm);
int idx = i * 36 * (size / 8 - 1) + j * 36;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
for (int m = 0; m < 9; m++) {
int index = (i + k) * size / 8 * 9 + (j + l) * 9 + m;
feature[idx] = hog[index] / norm;
idx++;
}
}
}
}
}
delete[] hog;
hog = NULL;
return feature;
}
float* MatF2Float(Mat m) {
const int size = m.rows * m.cols;
float* d = new float[size];
for (int i = 0; i < m.rows; i++) {
for (int j = 0; j < m.cols; j++) {
int idx = i * m.cols + j;
d[idx] = m.at<float>(i, j);
}
}
return d;
}
void showImg(Mat img) {
namedWindow("image", WINDOW_NORMAL);
imshow("image", img);
waitKey(0);
}
float* prepareTrainingFeatures(const int people, const int images, const int size, const int featureSize) {
const int cSize = 13;
char name[cSize];
strcpy_s(name, "1/000001.jpg");
float* HOGfeatures = new float[featureSize * people * images];
for (int i = 1; i <= people; i++) {
for (int j = 1; j <= images; j++) {
name[0] = i + '0';
name[7] = j + '0';
Mat imgM = imread(name, IMREAD_GRAYSCALE);
if (!imgM.data) {
cout << "Could not open or find the image" << std::endl;
}
else {
float* feature = extractHOGfeatures(imgM, size);
for (int k = 0; k < featureSize; k++) {
HOGfeatures[((i - 1) * images + j - 1) * featureSize + k] = feature[k];
}
delete[] feature;
feature = NULL;
}
}
}
return HOGfeatures;
}
void trainNN(float* HOGfeatures, const int people, const int images, const int featureSize, const float factor, const int count) {
float* nn = new float[(featureSize + 1) * people];
for (int i = 0; i < (featureSize + 1) * people; i++) {
nn[i] = float(rand()) / float(RAND_MAX) / 10;
}
for (int i = 0; i < count; i++) {
int person = rand() % people;
int image = rand() % images;
int idx = person * images * featureSize + image * featureSize;
for (int k = 0; k < people; k++) {
float goal;
if (k == person) {
goal = 1;
}
else {
goal = 0;
}
int nnidx = k * (featureSize + 1);
float prediction = predict(nn, HOGfeatures, nnidx, idx, featureSize);
for (int j = 0; j < featureSize; j++) {
nn[nnidx + j] -= factor * (2 * (prediction - goal) * prediction * (1 - prediction) * HOGfeatures[idx + j]);
}
nn[nnidx + featureSize] -= factor * (2 * (prediction - goal) * prediction * (1 - prediction));
}
}
fstream fs;
fs.open("nn.txt", ios::out | ios::trunc);
if (!fs) {
cerr << "unable to open file" << endl;
}
else {
for (int i = 0; i < (featureSize + 1) * people; i++) {
fs << nn[i] << " ";
}
}
fs.close();
delete[] nn;
nn = NULL;
}
float predict(float* nn, float* HOGfeatures, int nnidx, int idx, const int featureSize) {
float prediction = 0;
for (int i = 0; i < featureSize; i++) {
prediction += nn[nnidx + i] * HOGfeatures[idx + i];
}
prediction += nn[nnidx + featureSize];
prediction = exp(prediction) / (exp(prediction) + 1);
return prediction;
}
void testNN(const int people, const int images, const int size, const int featureSize) {
const int cSize = 13;
char name[cSize];
strcpy_s(name, "1/000011.jpg");
fstream fs;
fs.open("nn.txt", ios::in);
float* nn = new float[people * (featureSize + 1)];
for (int i = 0; i < people * (featureSize + 1); i++) {
fs >> nn[i];
}
for (int i = 1; i <= people; i++) {
for (int j = 1; j <= images; j++) {
name[0] = i + '0';
name[7] = j + '0';
Mat imgM = imread(name, IMREAD_GRAYSCALE);
if (!imgM.data) {
cout << "Could not open or find the image" << std::endl;
}
else {
float* feature = extractHOGfeatures(imgM, size);
cout << "person " << i << ":\t";
float max = 0;
int max_person = 0;
for (int k = 0; k < people; k++) {
int nnidx = k * (featureSize + 1);
float prediction = predict(nn, feature, nnidx, 0, featureSize);
if (prediction > max) {
max = prediction;
max_person = k;
}
cout << k + 1 << " - " << prediction << "\t";
}
cout << "prediction - " << max_person + 1 << endl;
delete[] feature;
feature = NULL;
}
}
}
cout << endl;
}
|
ea1b5969c2a30d384a167cce30ac71a98494540f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define W 500
#define H 500
#define TX 32
#define TY 32
__device__
unsigned char clip(int n){
return n>255?255:(n<0?0:n);
}
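// The kernel below fills a w x h uchar4 image whose red and green channels
// fade with Euclidean distance from pos (255 at pos, clipped to 0 far away).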
__global__
void distanceKernel(uchar4 *d_out,int w,int h,int2 pos){
const int c=blockIdx.x*blockDim.x+threadIdx.x;
const int r=blockIdx.y*blockDim.y+threadIdx.y;
const int i =r*w+c;
if ((c>=w)||(r>=h)) {
return;
}
const int d = sqrtf((c-pos.x)*(c-pos.x)+(r-pos.y)*(r-pos.y));
const unsigned char intensity = clip(255-d);
d_out[i].x=intensity;
d_out[i].y=intensity;
d_out[i].z=0;
d_out[i].w=255;
}
int main(void)
{
uchar4 *out=(uchar4*)calloc(W*H,sizeof(uchar4));
uchar4 *d_out;
hipMalloc(&d_out,W*H*sizeof(uchar4));
const int2 pos={0,0};
const dim3 blockSize(TX,TY);
const int bx=(W+TX-1)/TX;
const int by=(W+TY-1)/TY;
const dim3 gridSize=dim3(bx,by);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize),dim3(blockSize), 0, 0, d_out,W,H,pos);
hipMemcpy(out,d_out,W*H*sizeof(uchar4),hipMemcpyDeviceToHost);
hipFree(d_out);
free(out);
return 0;
}
| ea1b5969c2a30d384a167cce30ac71a98494540f.cu | #define W 500
#define H 500
#define TX 32
#define TY 32
__device__
unsigned char clip(int n){
return n>255?255:(n<0?0:n);
}
__global__
void distanceKernel(uchar4 *d_out,int w,int h,int2 pos){
const int c=blockIdx.x*blockDim.x+threadIdx.x;
const int r=blockIdx.y*blockDim.y+threadIdx.y;
const int i =r*w+c;
if ((c>=w)||(r>=h)) {
return;
}
const int d = sqrtf((c-pos.x)*(c-pos.x)+(r-pos.y)*(r-pos.y));
const unsigned char intensity = clip(255-d);
d_out[i].x=intensity;
d_out[i].y=intensity;
d_out[i].z=0;
d_out[i].w=255;
}
int main(void)
{
uchar4 *out=(uchar4*)calloc(W*H,sizeof(uchar4));
uchar4 *d_out;
cudaMalloc(&d_out,W*H*sizeof(uchar4));
const int2 pos={0,0};
const dim3 blockSize(TX,TY);
const int bx=(W+TX-1)/TX;
const int by=(W+TY-1)/TY;
const dim3 gridSize=dim3(bx,by);
distanceKernel<<<gridSize,blockSize>>>(d_out,W,H,pos);
cudaMemcpy(out,d_out,W*H*sizeof(uchar4),cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
|
8e036229eb1a609f8d0ac1453dd984dbbfa484fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "BufferCompaction.h"
#include "GpuMemUtils.h"
#include "GpuRtConstants.h"
#include "ResultSetBufferAccessors.h"
#include "ResultSetSortImpl.h"
#include "SortUtils.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#define FORCE_CPU_VERSION
#include "BufferEntryUtils.h"
#undef FORCE_CPU_VERSION
namespace {
template <class K, class V, class I>
std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type,
ThrustAllocator& thrust_allocator,
const int8_t* groupby_buffer,
V dev_oe_col_buffer_begin,
V dev_oe_col_buffer_end,
I dev_idx_buff_begin,
const size_t dev_idx_buff_size,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n) {
if (dev_idx_buff_size == 0) {
return {};
}
if (oe.is_desc) {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
} else {
thrust::sort_by_key(dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
}
} else {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin);
} else {
thrust::sort_by_key(
dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
}
}
// Speculatively transfer only the top_n first, most of the time it'll be enough.
thrust::host_vector<uint32_t> host_vector_result(
dev_idx_buff_begin, dev_idx_buff_begin + ::min(top_n, dev_idx_buff_size));
// Sometimes, radix sort can bring to the front entries which are empty.
// For example, ascending sort on COUNT(*) will bring non-existent groups
// to the front of dev_idx_buff since they're 0 in our system. Re-do the
// transfer in that case to bring the entire dev_idx_buff; existing logic
// in row iteration will take care of skipping the empty rows.
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
host_vector_result = thrust::host_vector<uint32_t>(
dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size);
break;
}
}
std::vector<uint32_t> result;
result.reserve(::min(top_n, host_vector_result.size()));
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
result.push_back(entry_idx);
if (result.size() >= top_n) {
break;
}
}
}
return result;
}
void add_nulls(std::vector<uint32_t>& idx_buff,
const std::vector<uint32_t>& null_idx_buff,
const PodOrderEntry& oe) {
if (null_idx_buff.empty()) {
return;
}
const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end();
idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end());
}
template <typename T>
thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec,
ThrustAllocator& thrust_allocator) {
if (host_vec.empty()) {
return thrust::device_ptr<T>(static_cast<T*>(nullptr));
}
const auto host_vec_bytes = host_vec.size() * sizeof(T);
T* dev_ptr = reinterpret_cast<T*>(
thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes)));
copy_to_gpu(thrust_allocator.getDataMgr(),
reinterpret_cast<hipDeviceptr_t>(dev_ptr),
&host_vec[0],
host_vec_bytes,
thrust_allocator.getDeviceId());
return thrust::device_ptr<T>(dev_ptr);
}
template <class K>
std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
thrust::host_vector<uint32_t> neg_idx_buff;
thrust::host_vector<uint32_t> pos_idx_buff;
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<int64_t> neg_oe_col_buffer;
thrust::host_vector<int64_t> pos_oe_col_buffer;
const auto slice_entry_count =
layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
neg_idx_buff.reserve(slice_entry_count);
pos_idx_buff.reserve(slice_entry_count);
null_idx_buff.reserve(slice_entry_count);
neg_oe_col_buffer.reserve(slice_entry_count);
pos_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
const auto& oe_info = layout.oe_target_info;
const auto col_ti =
oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type;
// Exclude AVG b/c collect_order_entry_column already makes its pair collapse into a
// double
const bool float_argument_input =
takes_float_argument(oe_info) && oe_info.agg_kind != kAVG;
auto is_negative =
float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; }
: [](const int64_t v) -> bool { return v < 0; };
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] ==
null_val_bit_pattern(col_ti, float_argument_input)) {
null_idx_buff.push_back(i);
continue;
}
if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for
// integer and floating point
neg_idx_buff.push_back(i);
neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
} else {
pos_idx_buff.push_back(i);
pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> pos_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator);
const auto dev_pos_oe_col_buffer =
get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_pos_oe_col_buffer,
dev_pos_oe_col_buffer + pos_oe_col_buffer.size(),
dev_pos_idx_buff,
pos_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
pos_oe_col_buffer.begin(),
pos_oe_col_buffer.end(),
pos_idx_buff.begin(),
pos_idx_buff.size(),
oe,
layout,
top_n);
}
std::vector<uint32_t> neg_result;
PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first};
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator);
const auto dev_neg_oe_col_buffer =
get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_neg_oe_col_buffer,
dev_neg_oe_col_buffer + neg_oe_col_buffer.size(),
dev_neg_idx_buff,
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
neg_oe_col_buffer.begin(),
neg_oe_col_buffer.end(),
neg_idx_buff.begin(),
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
}
if (oe.is_desc) {
pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end());
add_nulls(pos_result, null_idx_buff, oe);
return pos_result;
}
neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end());
add_nulls(neg_result, null_idx_buff, oe);
return neg_result;
}
template <class K>
std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
const auto& entry_ti = get_compact_type(layout.oe_target_info);
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<uint32_t> notnull_idx_buff;
const auto slice_entry_count =
layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
null_idx_buff.reserve(slice_entry_count);
notnull_idx_buff.reserve(slice_entry_count);
thrust::host_vector<int64_t> notnull_oe_col_buffer;
notnull_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) {
null_idx_buff.push_back(i);
} else {
notnull_idx_buff.push_back(i);
notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> notnull_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_notnull_idx_buff =
get_device_copy_ptr(notnull_idx_buff, thrust_allocator);
const auto dev_notnull_oe_col_buffer =
get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator);
notnull_result =
do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_notnull_oe_col_buffer,
dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(),
dev_notnull_idx_buff,
notnull_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
notnull_oe_col_buffer.begin(),
notnull_oe_col_buffer.end(),
notnull_idx_buff.begin(),
notnull_idx_buff.size(),
oe,
layout,
top_n);
}
add_nulls(notnull_result, null_idx_buff, oe);
return notnull_result;
}
template <class K>
thrust::host_vector<int64_t> collect_order_entry_column(
const int8_t* groupby_buffer,
const GroupByBufferLayoutInfo& layout,
const size_t start,
const size_t step) {
thrust::host_vector<int64_t> oe_col_buffer;
const auto row_ptr = groupby_buffer + start * layout.row_bytes;
auto crt_group_ptr1 = layout.target_groupby_index >= 0
? row_ptr + layout.target_groupby_index * sizeof(K)
: row_ptr + layout.col_off;
const int8_t* crt_group_ptr2{nullptr};
if (layout.oe_target_info.agg_kind == kAVG) {
crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes;
}
const auto entry_ti = get_compact_type(layout.oe_target_info);
const bool float_argument_input = takes_float_argument(layout.oe_target_info);
const auto step_bytes = layout.row_bytes * step;
for (size_t i = start; i < layout.entry_count; i += step) {
auto val1 = read_int_from_buff(crt_group_ptr1,
layout.col_bytes > 0 ? layout.col_bytes : sizeof(K));
if (crt_group_ptr2) {
const auto val2 = read_int_from_buff(crt_group_ptr2, 8);
const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input);
val1 = *reinterpret_cast<const int64_t*>(&avg_val);
}
oe_col_buffer.push_back(val1);
crt_group_ptr1 += step_bytes;
if (crt_group_ptr2) {
crt_group_ptr2 += step_bytes;
}
}
return oe_col_buffer;
}
} // namespace
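// baseline_sort drives the helpers above: it gathers the order-by column,
// dispatches floating-point/AVG targets to baseline_sort_fp (which splits
// negatives so the sign bit sorts correctly) and the tricky null orderings
// to baseline_sort_int, and otherwise radix-sorts the column directly with
// nulls falling into place by construction.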
template <class K>
std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step);
const auto& entry_ti = get_compact_type(layout.oe_target_info);
CHECK(entry_ti.is_number());
if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) {
return baseline_sort_fp<K>(device_type,
device_id,
data_mgr,
groupby_buffer,
oe_col_buffer,
oe,
layout,
top_n,
start,
step);
}
// Because of how we represent nulls for integral types, they'd be at the
// wrong position in these two cases. Separate them into a different buffer.
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) {
return baseline_sort_int<K>(device_type,
device_id,
data_mgr,
groupby_buffer,
oe_col_buffer,
oe,
layout,
top_n,
start,
step);
}
ThrustAllocator thrust_allocator(data_mgr, device_id);
// Fastest path, no need to separate nulls away since they'll end up at the
// right place as a side effect of how we're representing nulls.
if (device_type == ExecutorDeviceType::GPU) {
if (oe_col_buffer.empty()) {
return {};
}
const auto dev_idx_buff =
get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator);
thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step);
const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_oe_col_buffer,
dev_oe_col_buffer + oe_col_buffer.size(),
dev_idx_buff,
oe_col_buffer.size(),
oe,
layout,
top_n);
}
CHECK(device_type == ExecutorDeviceType::CPU);
thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size());
thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
oe_col_buffer.begin(),
oe_col_buffer.end(),
host_idx_buff.begin(),
host_idx_buff.size(),
oe,
layout,
top_n);
}
template std::vector<uint32_t> baseline_sort<int32_t>(
const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
template std::vector<uint32_t> baseline_sort<int64_t>(
const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
| 8e036229eb1a609f8d0ac1453dd984dbbfa484fe.cu | #include "BufferCompaction.h"
#include "GpuMemUtils.h"
#include "GpuRtConstants.h"
#include "ResultSetBufferAccessors.h"
#include "ResultSetSortImpl.h"
#include "SortUtils.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#define FORCE_CPU_VERSION
#include "BufferEntryUtils.h"
#undef FORCE_CPU_VERSION
namespace {
template <class K, class V, class I>
std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type,
ThrustAllocator& thrust_allocator,
const int8_t* groupby_buffer,
V dev_oe_col_buffer_begin,
V dev_oe_col_buffer_end,
I dev_idx_buff_begin,
const size_t dev_idx_buff_size,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n) {
if (dev_idx_buff_size == 0) {
return {};
}
if (oe.is_desc) {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
} else {
thrust::sort_by_key(dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin,
thrust::greater<int64_t>());
}
} else {
if (device_type == ExecutorDeviceType::GPU) {
thrust::sort_by_key(thrust::device(thrust_allocator),
dev_oe_col_buffer_begin,
dev_oe_col_buffer_end,
dev_idx_buff_begin);
} else {
thrust::sort_by_key(
dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin);
}
}
  // Speculatively transfer only the first top_n entries; most of the time that's enough.
thrust::host_vector<uint32_t> host_vector_result(
dev_idx_buff_begin, dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size));
// Sometimes, radix sort can bring to the front entries which are empty.
// For example, ascending sort on COUNT(*) will bring non-existent groups
// to the front of dev_idx_buff since they're 0 in our system. Re-do the
// transfer in that case to bring the entire dev_idx_buff; existing logic
// in row iteration will take care of skipping the empty rows.
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
host_vector_result = thrust::host_vector<uint32_t>(
dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size);
break;
}
}
std::vector<uint32_t> result;
result.reserve(std::min(top_n, host_vector_result.size()));
for (size_t i = 0; i < host_vector_result.size(); ++i) {
const auto entry_idx = host_vector_result[i];
if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) {
result.push_back(entry_idx);
if (result.size() >= top_n) {
break;
}
}
}
return result;
}
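// Illustrative sketch (not part of the original sources): a minimal host-side version of the
// descending branch above: thrust::sort_by_key with thrust::greater<int64_t>() sorts the
// order-entry keys and drags the row indices along as the value payload. Assumes only the
// thrust headers already included at the top of this file.
inline thrust::host_vector<uint32_t> sort_indices_desc_sketch(
    const thrust::host_vector<int64_t>& keys_in) {
  thrust::host_vector<int64_t> keys(keys_in);
  thrust::host_vector<uint32_t> idx(keys.size());
  for (uint32_t i = 0; i < idx.size(); ++i) {
    idx[i] = i;
  }
  thrust::sort_by_key(keys.begin(), keys.end(), idx.begin(), thrust::greater<int64_t>());
  return idx;  // indices of the largest keys first, like the is_desc path above
}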
void add_nulls(std::vector<uint32_t>& idx_buff,
const std::vector<uint32_t>& null_idx_buff,
const PodOrderEntry& oe) {
if (null_idx_buff.empty()) {
return;
}
const auto insertion_point = oe.nulls_first ? idx_buff.begin() : idx_buff.end();
idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end());
}
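// Illustrative usage sketch (hypothetical values, not from the original sources): with
// nulls_first set the null row indices are prepended, otherwise they are appended.
inline void add_nulls_usage_sketch() {
  std::vector<uint32_t> idx{4, 1, 7};
  const std::vector<uint32_t> null_idx{2, 9};
  add_nulls(idx, null_idx, PodOrderEntry{1, /*is_desc=*/true, /*nulls_first=*/true});
  // idx is now {2, 9, 4, 1, 7}; with nulls_first == false it would be {4, 1, 7, 2, 9}.
}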
template <typename T>
thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec,
ThrustAllocator& thrust_allocator) {
if (host_vec.empty()) {
return thrust::device_ptr<T>(static_cast<T*>(nullptr));
}
const auto host_vec_bytes = host_vec.size() * sizeof(T);
T* dev_ptr = reinterpret_cast<T*>(
thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes)));
copy_to_gpu(thrust_allocator.getDataMgr(),
reinterpret_cast<CUdeviceptr>(dev_ptr),
&host_vec[0],
host_vec_bytes,
thrust_allocator.getDeviceId());
return thrust::device_ptr<T>(dev_ptr);
}
template <class K>
std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
thrust::host_vector<uint32_t> neg_idx_buff;
thrust::host_vector<uint32_t> pos_idx_buff;
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<int64_t> neg_oe_col_buffer;
thrust::host_vector<int64_t> pos_oe_col_buffer;
const auto slice_entry_count =
layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
neg_idx_buff.reserve(slice_entry_count);
pos_idx_buff.reserve(slice_entry_count);
null_idx_buff.reserve(slice_entry_count);
neg_oe_col_buffer.reserve(slice_entry_count);
pos_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
const auto& oe_info = layout.oe_target_info;
const auto col_ti =
oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type;
  // Exclude AVG b/c collect_order_entry_column already makes its pair collapse into a
  // double
const bool float_argument_input =
takes_float_argument(oe_info) && oe_info.agg_kind != kAVG;
auto is_negative =
float_argument_input ? [](const int64_t v) -> bool { return (v & (1 << 31)) != 0; }
: [](const int64_t v) -> bool { return v < 0; };
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] ==
null_val_bit_pattern(col_ti, float_argument_input)) {
null_idx_buff.push_back(i);
continue;
}
if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for
// integer and floating point
neg_idx_buff.push_back(i);
neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
} else {
pos_idx_buff.push_back(i);
pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> pos_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator);
const auto dev_pos_oe_col_buffer =
get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_pos_oe_col_buffer,
dev_pos_oe_col_buffer + pos_oe_col_buffer.size(),
dev_pos_idx_buff,
pos_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
pos_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
pos_oe_col_buffer.begin(),
pos_oe_col_buffer.end(),
pos_idx_buff.begin(),
pos_idx_buff.size(),
oe,
layout,
top_n);
}
std::vector<uint32_t> neg_result;
PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first};
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator);
const auto dev_neg_oe_col_buffer =
get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_neg_oe_col_buffer,
dev_neg_oe_col_buffer + neg_oe_col_buffer.size(),
dev_neg_idx_buff,
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
neg_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
neg_oe_col_buffer.begin(),
neg_oe_col_buffer.end(),
neg_idx_buff.begin(),
neg_idx_buff.size(),
reverse_oe,
layout,
top_n);
}
if (oe.is_desc) {
pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end());
add_nulls(pos_result, null_idx_buff, oe);
return pos_result;
}
neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end());
add_nulls(neg_result, null_idx_buff, oe);
return neg_result;
}
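// Illustrative sketch (not part of the original sources): why the float branch of
// is_negative above can test bit 31. A float stored in the low 32 bits of the int64_t slot
// keeps its IEEE-754 sign bit at position 31, so the mask selects exactly the negative
// (and negative-zero) encodings.
inline bool float_slot_is_negative_sketch(const float f) {
  const int64_t v = *reinterpret_cast<const int32_t*>(&f);  // raw float bits in a slot
  return (v & (1 << 31)) != 0;  // same predicate as the float_argument_input lambda above
}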
template <class K>
std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const thrust::host_vector<int64_t>& oe_col_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
const auto& entry_ti = get_compact_type(layout.oe_target_info);
std::vector<uint32_t> null_idx_buff;
thrust::host_vector<uint32_t> notnull_idx_buff;
const auto slice_entry_count =
layout.entry_count / step + (layout.entry_count % step ? 1 : 0);
null_idx_buff.reserve(slice_entry_count);
notnull_idx_buff.reserve(slice_entry_count);
thrust::host_vector<int64_t> notnull_oe_col_buffer;
notnull_oe_col_buffer.reserve(slice_entry_count);
size_t oe_col_buffer_idx = 0;
for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) {
if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) &&
oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) {
null_idx_buff.push_back(i);
} else {
notnull_idx_buff.push_back(i);
notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]);
}
}
std::vector<uint32_t> notnull_result;
ThrustAllocator thrust_allocator(data_mgr, device_id);
if (device_type == ExecutorDeviceType::GPU) {
const auto dev_notnull_idx_buff =
get_device_copy_ptr(notnull_idx_buff, thrust_allocator);
const auto dev_notnull_oe_col_buffer =
get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator);
notnull_result =
do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_notnull_oe_col_buffer,
dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(),
dev_notnull_idx_buff,
notnull_idx_buff.size(),
oe,
layout,
top_n);
} else {
CHECK(device_type == ExecutorDeviceType::CPU);
notnull_result = do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
notnull_oe_col_buffer.begin(),
notnull_oe_col_buffer.end(),
notnull_idx_buff.begin(),
notnull_idx_buff.size(),
oe,
layout,
top_n);
}
add_nulls(notnull_result, null_idx_buff, oe);
return notnull_result;
}
template <class K>
thrust::host_vector<int64_t> collect_order_entry_column(
const int8_t* groupby_buffer,
const GroupByBufferLayoutInfo& layout,
const size_t start,
const size_t step) {
thrust::host_vector<int64_t> oe_col_buffer;
const auto row_ptr = groupby_buffer + start * layout.row_bytes;
auto crt_group_ptr1 = layout.target_groupby_index >= 0
? row_ptr + layout.target_groupby_index * sizeof(K)
: row_ptr + layout.col_off;
const int8_t* crt_group_ptr2{nullptr};
if (layout.oe_target_info.agg_kind == kAVG) {
crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes;
}
const auto entry_ti = get_compact_type(layout.oe_target_info);
const bool float_argument_input = takes_float_argument(layout.oe_target_info);
const auto step_bytes = layout.row_bytes * step;
for (size_t i = start; i < layout.entry_count; i += step) {
auto val1 = read_int_from_buff(crt_group_ptr1,
layout.col_bytes > 0 ? layout.col_bytes : sizeof(K));
if (crt_group_ptr2) {
const auto val2 = read_int_from_buff(crt_group_ptr2, 8);
const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input);
val1 = *reinterpret_cast<const int64_t*>(&avg_val);
}
oe_col_buffer.push_back(val1);
crt_group_ptr1 += step_bytes;
if (crt_group_ptr2) {
crt_group_ptr2 += step_bytes;
}
}
return oe_col_buffer;
}
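// Illustrative sketch (not part of the original sources): the kAVG branch above stores the
// collapsed average as a double's raw bit pattern inside the int64_t column value;
// recovering the double is just the inverse reinterpretation.
inline double read_avg_slot_sketch(const int64_t slot) {
  return *reinterpret_cast<const double*>(&slot);
}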
} // namespace
template <class K>
std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step) {
auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step);
const auto& entry_ti = get_compact_type(layout.oe_target_info);
CHECK(entry_ti.is_number());
if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) {
return baseline_sort_fp<K>(device_type,
device_id,
data_mgr,
groupby_buffer,
oe_col_buffer,
oe,
layout,
top_n,
start,
step);
}
// Because of how we represent nulls for integral types, they'd be at the
// wrong position in these two cases. Separate them into a different buffer.
if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) {
return baseline_sort_int<K>(device_type,
device_id,
data_mgr,
groupby_buffer,
oe_col_buffer,
oe,
layout,
top_n,
start,
step);
}
ThrustAllocator thrust_allocator(data_mgr, device_id);
// Fastest path, no need to separate nulls away since they'll end up at the
// right place as a side effect of how we're representing nulls.
if (device_type == ExecutorDeviceType::GPU) {
if (oe_col_buffer.empty()) {
return {};
}
const auto dev_idx_buff =
get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator);
thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step);
const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
dev_oe_col_buffer,
dev_oe_col_buffer + oe_col_buffer.size(),
dev_idx_buff,
oe_col_buffer.size(),
oe,
layout,
top_n);
}
CHECK(device_type == ExecutorDeviceType::CPU);
thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size());
thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step);
return do_radix_sort<K>(device_type,
thrust_allocator,
groupby_buffer,
oe_col_buffer.begin(),
oe_col_buffer.end(),
host_idx_buff.begin(),
host_idx_buff.size(),
oe,
layout,
top_n);
}
template std::vector<uint32_t> baseline_sort<int32_t>(
const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
template std::vector<uint32_t> baseline_sort<int64_t>(
const ExecutorDeviceType device_type,
const int device_id,
Data_Namespace::DataMgr* data_mgr,
const int8_t* groupby_buffer,
const PodOrderEntry& oe,
const GroupByBufferLayoutInfo& layout,
const size_t top_n,
const size_t start,
const size_t step);
|
bf49cc46ad43698e032eb94e2d1680e43d7b7b96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
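// Illustrative host-side sketch (not part of the original kernels): the reduction above
// tracks the minimum as the maximum of the negated inputs, so min_ptr ends up holding the
// negated minimum; consumers such as CalScaleZeroPointAffine below negate it back.
inline void reduce_max_min_host_sketch(const float* in, const int64_t n, float* max_out,
                                       float* neg_min_out) {
  float running_max = -FLT_MAX;
  float running_neg_min = -FLT_MAX;
  for (int64_t i = 0; i < n; ++i) {
    running_max = in[i] > running_max ? in[i] : running_max;
    running_neg_min = -in[i] > running_neg_min ? -in[i] : running_neg_min;
  }
  *max_out = running_max;          // true maximum
  *neg_min_out = running_neg_min;  // negated minimum, matching the kernel's min_ptr
}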
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -min / s;
gid += gridDim.x * blockDim.x;
}
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
hipLaunchKernelGGL(( func), dim3(SMBlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), shared_mem_size, \
(device_ctx_ptr)->cuda_stream(), __VA_ARGS__)
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
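// Illustrative sketch (not part of the original sources): in the per-channel path above each
// block walks one channel at a time, whose elements occupy a contiguous panel of the
// flattened input, so the owning channel of a flat index is just flat_index / panel_size.
inline int64_t channel_of_element_sketch(const int64_t flat_index, const int64_t panel_size) {
  return flat_index / panel_size;  // elements [c * panel_size, (c + 1) * panel_size) -> channel c
}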
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
| bf49cc46ad43698e032eb94e2d1680e43d7b7b96.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -min / s;
gid += gridDim.x * blockDim.x;
}
}
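// Illustrative host-side sketch (not part of the original kernels): the affine branch above
// computes scale = (max - min) / (2^bits - 1) and zero_point = -min / scale, where the
// stored value is the negated minimum produced by the reductions above.
inline void cal_affine_params_host_sketch(const float max_v, const float stored_neg_min,
                                          const int quantization_bit, float* scale,
                                          float* zero_point) {
  const float denominator = static_cast<float>((1 << quantization_bit) - 1);
  const float min_v = -stored_neg_min;  // recover the true minimum
  *scale = (max_v - min_v) / denominator;
  *zero_point = -min_v / *scale;
}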
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
func<<<SMBlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, shared_mem_size, \
(device_ctx_ptr)->cuda_stream()>>>(__VA_ARGS__)
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("in", 0); \
tmp_buffer_size = in_shape->At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
|
4241339c85cc2ab0c244ecf5ce1b9cdf37f958cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* DifferentialDecoder.cpp
*
* Created on: May 15, 2013
* Author: adm85
*/
#include <vector>
#include <iostream>
#include <fstream>
#include "DifferentialDecoder.h"
#include "Kernels.h"
using namespace std;
namespace PAQ_SOQPSK {
DifferentialDecoder::DifferentialDecoder() {
//We set the initial delta_minus_one value to a zero.
initialDelta = 0;
}
DifferentialDecoder::~DifferentialDecoder() {}
/**
* Turns a stream of bit indices into the appropriate bits
*/
vector<unsigned short>& DifferentialDecoder::convertDecisionsToBits(vector<int> bitDecisionArray){
vector<unsigned short>* bitstreamArray = new vector<unsigned short>;
//Push the appropriate binary version of the index onto the return array
for(int i=0; i < bitDecisionArray.size(); i++) {
switch(bitDecisionArray.at(i)) {
case 0:
bitstreamArray->push_back(0);
bitstreamArray->push_back(0);
break;
case 1:
bitstreamArray->push_back(0);
bitstreamArray->push_back(1);
break;
case 2:
bitstreamArray->push_back(1);
bitstreamArray->push_back(0);
break;
case 3:
bitstreamArray->push_back(1);
bitstreamArray->push_back(1);
break;
default:
cout << "Error -- invalid bit decision in bitDecisionArray." << endl;
throw exception();
}
}
return *bitstreamArray;
}
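// Illustrative sketch (not part of the original class): the switch above is equivalent to
// emitting first the high bit and then the low bit of the 2-bit decision index.
inline void pushDecisionBitsSketch(int decision, vector<unsigned short>& out) {
	out.push_back((decision >> 1) & 1);  // first bit of the pair
	out.push_back(decision & 1);         // second bit of the pair
}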
vector<unsigned short>& DifferentialDecoder::convertDecisionsToBitsCuda(vector<int> bitDecisionArray)
{
// Initialization
int* bitdec;
unsigned short* bitstream;
int size = bitDecisionArray.size();
int shortsize = size * 2;
int intsize = size * sizeof(int);
int shortbyte = shortsize * sizeof(unsigned short);
unsigned short* out_stream = new unsigned short[shortsize];
int num_threads = 192;
int num_blocks = size / num_threads;
if(size % num_threads)
num_blocks++;
// Allocate memory on GPU
hipMalloc(&bitstream, shortbyte);
hipMalloc(&bitdec, intsize);
// Copy data to GPU
hipMemcpy(bitdec, bitDecisionArray.data(), intsize, hipMemcpyHostToDevice);
// Run on GPU
hipLaunchKernelGGL(( cudaConvertToBits), dim3(num_blocks), dim3(num_threads), 0, 0, bitdec, bitstream, size);
// Retrieve data from GPU
hipMemcpy(out_stream, bitstream, shortbyte, hipMemcpyDeviceToHost);
// Free memory on GPU
hipFree(bitstream);
hipFree(bitdec);
vector<unsigned short>* ret_vector = new vector<unsigned short>(out_stream, out_stream + shortsize);
return *ret_vector;
}
/**
* Uses the OQPSK decoding algorithm to decode the bitstream
*/
vector<unsigned short>& DifferentialDecoder::decodeBitstream(vector<unsigned short> encodedBitstream) {
	//Check that the input array is the right size. It must be a multiple of two.
if((encodedBitstream.size() % 2) != 0) {
cout << "Error -- encodedBitstream has odd size." << endl;
throw exception();
}
//Variables
vector<unsigned short>* decodedBits = new vector<unsigned short>;
unsigned short b2k, b2k_plus_1;
//For the first decision, we use the initialDelta chosen earlier
b2k = encodedBitstream.at(0) ^ initialDelta;
b2k_plus_1 = encodedBitstream.at(0) ^ encodedBitstream.at(1);
decodedBits->push_back(b2k);
decodedBits->push_back(b2k_plus_1);
//Now we iterate through the rest of the bitstream, following the correct formula
for(int i=2; i < encodedBitstream.size(); i+=2) {
b2k = !encodedBitstream.at(i-1) ^ encodedBitstream.at(i);
b2k_plus_1 = encodedBitstream.at(i) ^ encodedBitstream.at(i+1);
decodedBits->push_back(b2k);
decodedBits->push_back(b2k_plus_1);
}
return *decodedBits;
}
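// Illustrative worked example (hypothetical bits, not from the original sources): decoding
// the encoded stream {1, 0, 0, 1} with an initial delta of 0 using the rule above.
inline vector<unsigned short> decodeExampleSketch() {
	vector<unsigned short> e{1, 0, 0, 1};
	vector<unsigned short> d;
	d.push_back(e[0] ^ 0);        // b0 = e0 ^ delta = 1
	d.push_back(e[0] ^ e[1]);     // b1 = e0 ^ e1    = 1
	d.push_back((!e[1]) ^ e[2]);  // b2 = !e1 ^ e2   = 1
	d.push_back(e[2] ^ e[3]);     // b3 = e2 ^ e3    = 1
	return d;                     // {1, 1, 1, 1}
}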
/**
* Runs the decoder on the GPU
*/
vector<unsigned short>& DifferentialDecoder::decodeBitstreamCuda(vector<unsigned short> encodedBitstream)
{
if(encodedBitstream.size() % 2) {
cout << "Error -- encodedBitstream has odd size." << endl;
throw exception();
}
unsigned short b2k, b2k_plus_1;
b2k = encodedBitstream[0] ^ initialDelta;
b2k_plus_1 = encodedBitstream[0] ^ encodedBitstream[1];
// Initialization
int size_bits = encodedBitstream.size();
int size = size_bits * sizeof(unsigned short);
unsigned short* encoded_bits, *dec_bits;
unsigned short* decoded_bits = new unsigned short[size_bits];
decoded_bits[0] = b2k;
decoded_bits[1] = b2k_plus_1;
int num_threads = 192;
int num_blocks = size / num_threads;
if(size % num_threads)
num_blocks++;
// Allocate memory on GPU
hipMalloc(&encoded_bits, size);
hipMalloc(&dec_bits, size);
// Copy data to GPU
hipMemcpy(encoded_bits, encodedBitstream.data(), size, hipMemcpyHostToDevice);
hipMemcpy(dec_bits, decoded_bits, 2 * sizeof(unsigned short), hipMemcpyHostToDevice);
// Run on GPU
hipLaunchKernelGGL(( cudaDecodeBitstream), dim3(num_blocks), dim3(num_threads), 0, 0, encoded_bits, dec_bits, size_bits);
// Copy data from GPU
hipMemcpy(decoded_bits, dec_bits, size, hipMemcpyDeviceToHost);
// Free GPU memory
hipFree(encoded_bits);
hipFree(dec_bits);
vector<unsigned short>* ret_vector = new vector<unsigned short>(decoded_bits, decoded_bits + size_bits);
return *ret_vector;
}
/**
* Wrapper function to decode bits.
*/
vector<unsigned short>& DifferentialDecoder::decode(vector<int>& bitDecisionArray) {
//Convert decisions to bits
vector<unsigned short> encodedBitstream = convertDecisionsToBits(bitDecisionArray);
//Decode bits
return decodeBitstream(encodedBitstream);
}
vector<unsigned short>& DifferentialDecoder::decodeCuda(vector<int>& bitDecisionArray)
{
vector<unsigned short> encoded = convertDecisionsToBitsCuda(bitDecisionArray);
// Decode CUDA
return decodeBitstreamCuda(encoded);
}
} /* namespace SOQPSK_Demod */
| 4241339c85cc2ab0c244ecf5ce1b9cdf37f958cd.cu | /*
* DifferentialDecoder.cpp
*
* Created on: May 15, 2013
* Author: adm85
*/
#include <vector>
#include <iostream>
#include <fstream>
#include "DifferentialDecoder.h"
#include "Kernels.h"
using namespace std;
namespace PAQ_SOQPSK {
DifferentialDecoder::DifferentialDecoder() {
//We set the initial delta_minus_one value to a zero.
initialDelta = 0;
}
DifferentialDecoder::~DifferentialDecoder() {}
/**
* Turns a stream of bit indices into the appropriate bits
*/
vector<unsigned short>& DifferentialDecoder::convertDecisionsToBits(vector<int> bitDecisionArray){
vector<unsigned short>* bitstreamArray = new vector<unsigned short>;
//Push the appropriate binary version of the index onto the return array
for(int i=0; i < bitDecisionArray.size(); i++) {
switch(bitDecisionArray.at(i)) {
case 0:
bitstreamArray->push_back(0);
bitstreamArray->push_back(0);
break;
case 1:
bitstreamArray->push_back(0);
bitstreamArray->push_back(1);
break;
case 2:
bitstreamArray->push_back(1);
bitstreamArray->push_back(0);
break;
case 3:
bitstreamArray->push_back(1);
bitstreamArray->push_back(1);
break;
default:
cout << "Error -- invalid bit decision in bitDecisionArray." << endl;
throw exception();
}
}
return *bitstreamArray;
}
vector<unsigned short>& DifferentialDecoder::convertDecisionsToBitsCuda(vector<int> bitDecisionArray)
{
// Initialization
int* bitdec;
unsigned short* bitstream;
int size = bitDecisionArray.size();
int shortsize = size * 2;
int intsize = size * sizeof(int);
int shortbyte = shortsize * sizeof(unsigned short);
unsigned short* out_stream = new unsigned short[shortsize];
int num_threads = 192;
int num_blocks = size / num_threads;
if(size % num_threads)
num_blocks++;
// Allocate memory on GPU
cudaMalloc(&bitstream, shortbyte);
cudaMalloc(&bitdec, intsize);
// Copy data to GPU
cudaMemcpy(bitdec, bitDecisionArray.data(), intsize, cudaMemcpyHostToDevice);
// Run on GPU
cudaConvertToBits<<<num_blocks, num_threads>>>(bitdec, bitstream, size);
// Retrieve data from GPU
cudaMemcpy(out_stream, bitstream, shortbyte, cudaMemcpyDeviceToHost);
// Free memory on GPU
cudaFree(bitstream);
cudaFree(bitdec);
vector<unsigned short>* ret_vector = new vector<unsigned short>(out_stream, out_stream + shortsize);
return *ret_vector;
}
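// Illustrative sketch (not part of the original sources): the two-step block count above is
// a ceiling division of the work size by the thread count.
inline int numBlocksSketch(int work_items, int threads_per_block) {
	return (work_items + threads_per_block - 1) / threads_per_block;  // ceil(work / threads)
}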
/**
* Uses the OQPSK decoding algorithm to decode the bitstream
*/
vector<unsigned short>& DifferentialDecoder::decodeBitstream(vector<unsigned short> encodedBitstream) {
	//Check that the input array is the right size. It must be a multiple of two.
if((encodedBitstream.size() % 2) != 0) {
cout << "Error -- encodedBitstream has odd size." << endl;
throw exception();
}
//Variables
vector<unsigned short>* decodedBits = new vector<unsigned short>;
unsigned short b2k, b2k_plus_1;
//For the first decision, we use the initialDelta chosen earlier
b2k = encodedBitstream.at(0) ^ initialDelta;
b2k_plus_1 = encodedBitstream.at(0) ^ encodedBitstream.at(1);
decodedBits->push_back(b2k);
decodedBits->push_back(b2k_plus_1);
//Now we iterate through the rest of the bitstream, following the correct formula
for(int i=2; i < encodedBitstream.size(); i+=2) {
b2k = !encodedBitstream.at(i-1) ^ encodedBitstream.at(i);
b2k_plus_1 = encodedBitstream.at(i) ^ encodedBitstream.at(i+1);
decodedBits->push_back(b2k);
decodedBits->push_back(b2k_plus_1);
}
return *decodedBits;
}
/**
* Runs the decoder on the GPU
*/
vector<unsigned short>& DifferentialDecoder::decodeBitstreamCuda(vector<unsigned short> encodedBitstream)
{
if(encodedBitstream.size() % 2) {
cout << "Error -- encodedBitstream has odd size." << endl;
throw exception();
}
unsigned short b2k, b2k_plus_1;
b2k = encodedBitstream[0] ^ initialDelta;
b2k_plus_1 = encodedBitstream[0] ^ encodedBitstream[1];
// Initialization
int size_bits = encodedBitstream.size();
int size = size_bits * sizeof(unsigned short);
unsigned short* encoded_bits, *dec_bits;
unsigned short* decoded_bits = new unsigned short[size_bits];
decoded_bits[0] = b2k;
decoded_bits[1] = b2k_plus_1;
int num_threads = 192;
int num_blocks = size / num_threads;
if(size % num_threads)
num_blocks++;
// Allocate memory on GPU
cudaMalloc(&encoded_bits, size);
cudaMalloc(&dec_bits, size);
// Copy data to GPU
cudaMemcpy(encoded_bits, encodedBitstream.data(), size, cudaMemcpyHostToDevice);
cudaMemcpy(dec_bits, decoded_bits, 2 * sizeof(unsigned short), cudaMemcpyHostToDevice);
// Run on GPU
cudaDecodeBitstream<<<num_blocks, num_threads>>>(encoded_bits, dec_bits, size_bits);
// Copy data from GPU
cudaMemcpy(decoded_bits, dec_bits, size, cudaMemcpyDeviceToHost);
// Free GPU memory
cudaFree(encoded_bits);
cudaFree(dec_bits);
vector<unsigned short>* ret_vector = new vector<unsigned short>(decoded_bits, decoded_bits + size_bits);
return *ret_vector;
}
/**
* Wrapper function to decode bits.
*/
vector<unsigned short>& DifferentialDecoder::decode(vector<int>& bitDecisionArray) {
//Convert decisions to bits
vector<unsigned short> encodedBitstream = convertDecisionsToBits(bitDecisionArray);
//Decode bits
return decodeBitstream(encodedBitstream);
}
vector<unsigned short>& DifferentialDecoder::decodeCuda(vector<int>& bitDecisionArray)
{
vector<unsigned short> encoded = convertDecisionsToBitsCuda(bitDecisionArray);
// Decode CUDA
return decodeBitstreamCuda(encoded);
}
} /* namespace SOQPSK_Demod */
|
9e9945a8e9594e43d673c5d8cb58b05fbbf175fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPBlas.h>
#include <c10/hip/HIPException.h>
#include <c10/test/util/complex_test_common.h>
__global__ void test_thrust_kernel() {
// thrust conversion
{
constexpr float num1 = float(1.23);
constexpr float num2 = float(4.56);
assert(c10::complex<float>(thrust::complex<float>(num1, num2)).real() == num1);
assert(c10::complex<float>(thrust::complex<float>(num1, num2)).imag() == num2);
}
{
constexpr double num1 = double(1.23);
constexpr double num2 = double(4.56);
assert(c10::complex<double>(thrust::complex<double>(num1, num2)).real() == num1);
assert(c10::complex<double>(thrust::complex<double>(num1, num2)).imag() == num2);
}
// thrust assignment
auto tup = assignment::one_two_thrust();
assert(std::get<c10::complex<double>>(tup).real() == double(1));
assert(std::get<c10::complex<double>>(tup).imag() == double(2));
assert(std::get<c10::complex<float>>(tup).real() == float(1));
assert(std::get<c10::complex<float>>(tup).imag() == float(2));
}
__global__ void test_std_functions_kernel() {
assert(std::abs(c10::complex<float>(3, 4)) == float(5));
assert(std::abs(c10::complex<double>(3, 4)) == double(5));
assert(std::abs(std::arg(c10::complex<float>(0, 1)) - PI / 2) < 1e-6);
assert(std::abs(std::arg(c10::complex<double>(0, 1)) - PI / 2) < 1e-6);
assert(std::abs(c10::polar(float(1), float(PI / 2)) - c10::complex<float>(0, 1)) < 1e-6);
assert(std::abs(c10::polar(double(1), double(PI / 2)) - c10::complex<double>(0, 1)) < 1e-6);
}
__global__ void test_reinterpret_cast() {
std::complex<float> z(1, 2);
c10::complex<float> zz = *reinterpret_cast<c10::complex<float>*>(&z);
assert(zz.real() == float(1));
assert(zz.imag() == float(2));
std::complex<double> zzz(1, 2);
c10::complex<double> zzzz = *reinterpret_cast<c10::complex<double>*>(&zzz);
assert(zzzz.real() == double(1));
assert(zzzz.imag() == double(2));
hipComplex cuComplex_zz = *reinterpret_cast<hipComplex*>(&zz);
assert(cuComplex_zz.x == float(1));
assert(cuComplex_zz.y == float(2));
hipDoubleComplex cuDoubleComplex_zzzz = *reinterpret_cast<hipDoubleComplex*>(&zzzz);
assert(cuDoubleComplex_zzzz.x == double(1));
assert(cuDoubleComplex_zzzz.y == double(2));
}
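// Illustrative host-side sketch (not part of the original tests): the casts above rely on
// c10::complex<T> sharing the {real, imag} layout of std::complex<T>; the reverse direction
// works under the same assumption.
inline void reverse_cast_sketch() {
  c10::complex<float> z(3, 4);
  std::complex<float> s = *reinterpret_cast<std::complex<float>*>(&z);
  (void)s;  // s.real() == 3.f and s.imag() == 4.f given the shared layout
}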
int safeDeviceCount() {
int count;
hipError_t err = hipGetDeviceCount(&count);
if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
TEST(DeviceTests, ThrustConversion) {
SKIP_IF_NO_GPU();
ASSERT_EQ(hipGetLastError(), hipSuccess);
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_thrust_kernel), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
}
TEST(DeviceTests, StdFunctions) {
SKIP_IF_NO_GPU();
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_std_functions_kernel), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
}
TEST(DeviceTests, ReinterpretCast) {
SKIP_IF_NO_GPU();
hipDeviceSynchronize();
hipLaunchKernelGGL(( test_reinterpret_cast), dim3(1), dim3(1), 0, 0, );
C10_HIP_KERNEL_LAUNCH_CHECK();
hipDeviceSynchronize();
ASSERT_EQ(hipGetLastError(), hipSuccess);
}
| 9e9945a8e9594e43d673c5d8cb58b05fbbf175fe.cu | #include <ATen/cuda/CUDABlas.h>
#include <c10/cuda/CUDAException.h>
#include <c10/test/util/complex_test_common.h>
__global__ void test_thrust_kernel() {
// thrust conversion
{
constexpr float num1 = float(1.23);
constexpr float num2 = float(4.56);
assert(c10::complex<float>(thrust::complex<float>(num1, num2)).real() == num1);
assert(c10::complex<float>(thrust::complex<float>(num1, num2)).imag() == num2);
}
{
constexpr double num1 = double(1.23);
constexpr double num2 = double(4.56);
assert(c10::complex<double>(thrust::complex<double>(num1, num2)).real() == num1);
assert(c10::complex<double>(thrust::complex<double>(num1, num2)).imag() == num2);
}
// thrust assignment
auto tup = assignment::one_two_thrust();
assert(std::get<c10::complex<double>>(tup).real() == double(1));
assert(std::get<c10::complex<double>>(tup).imag() == double(2));
assert(std::get<c10::complex<float>>(tup).real() == float(1));
assert(std::get<c10::complex<float>>(tup).imag() == float(2));
}
__global__ void test_std_functions_kernel() {
assert(std::abs(c10::complex<float>(3, 4)) == float(5));
assert(std::abs(c10::complex<double>(3, 4)) == double(5));
assert(std::abs(std::arg(c10::complex<float>(0, 1)) - PI / 2) < 1e-6);
assert(std::abs(std::arg(c10::complex<double>(0, 1)) - PI / 2) < 1e-6);
assert(std::abs(c10::polar(float(1), float(PI / 2)) - c10::complex<float>(0, 1)) < 1e-6);
assert(std::abs(c10::polar(double(1), double(PI / 2)) - c10::complex<double>(0, 1)) < 1e-6);
}
__global__ void test_reinterpret_cast() {
std::complex<float> z(1, 2);
c10::complex<float> zz = *reinterpret_cast<c10::complex<float>*>(&z);
assert(zz.real() == float(1));
assert(zz.imag() == float(2));
std::complex<double> zzz(1, 2);
c10::complex<double> zzzz = *reinterpret_cast<c10::complex<double>*>(&zzz);
assert(zzzz.real() == double(1));
assert(zzzz.imag() == double(2));
cuComplex cuComplex_zz = *reinterpret_cast<cuComplex*>(&zz);
assert(cuComplex_zz.x == float(1));
assert(cuComplex_zz.y == float(2));
cuDoubleComplex cuDoubleComplex_zzzz = *reinterpret_cast<cuDoubleComplex*>(&zzzz);
assert(cuDoubleComplex_zzzz.x == double(1));
assert(cuDoubleComplex_zzzz.y == double(2));
}
int safeDeviceCount() {
int count;
cudaError_t err = cudaGetDeviceCount(&count);
if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
TEST(DeviceTests, ThrustConversion) {
SKIP_IF_NO_GPU();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
cudaDeviceSynchronize();
test_thrust_kernel<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
TEST(DeviceTests, StdFunctions) {
SKIP_IF_NO_GPU();
cudaDeviceSynchronize();
test_std_functions_kernel<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
TEST(DeviceTests, ReinterpretCast) {
SKIP_IF_NO_GPU();
cudaDeviceSynchronize();
test_reinterpret_cast<<<1, 1>>>();
C10_CUDA_KERNEL_LAUNCH_CHECK();
cudaDeviceSynchronize();
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
|
cfadf67a16b54cdd72a128b73300042c640b48fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
using namespace at;
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
// Cuda tensor accessor definitions
// restrict pointer traits piroritize speed over memory consumption
#define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t>
#define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t>
#define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W)
#define THREADS_FORWARD 32
#define THREADS_BACKWARD 5
namespace {
template <typename scalar_t>
__global__ void correlation_cuda_forward_kernel(
const TensorAcc4R rInput1,
const TensorAcc4R rInput2,
TensorAcc5R output,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
const int iH = rInput1.size(1);
const int iW = rInput1.size(2);
const int C = rInput1.size(3);
const int n = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int thread = threadIdx.x;
const int start_i = -padH + h * dH;
const int start_j = -padW + w * dW;
const int patchRadH = dilation_patchH * (patchH - 1) / 2;
const int patchRadW = dilation_patchW * (patchW - 1) / 2;
__shared__ scalar_t prod_sum[THREADS_FORWARD];
for(int ph = 0; ph < patchH; ++ph){
int ph_dilated = ph * dilation_patchH - patchRadH;
for(int pw = 0; pw < patchW; ++pw){
int pw_dilated = pw * dilation_patchW - patchRadW;
prod_sum[thread] = 0;
for (int i=0; i<kH; ++i){
int i1 = start_i + i;
int i2 = i1 + ph_dilated;
if WITHIN_BOUNDS(i1, i2, iH, iH){
for (int j=0; j<kW; ++j){
int j1 = start_j + j;
int j2 = j1 + pw_dilated;
if WITHIN_BOUNDS(j1, j2, iW, iW){
for (int c=thread; c<C; c += THREADS_FORWARD){
scalar_t v1 = rInput1[n][i1][j1][c];
scalar_t v2 = rInput2[n][i2][j2][c];
prod_sum[thread] += v1 * v2;
}
}
}
}
}
// accumulate
__syncthreads();
if (thread == 0) {
scalar_t reduce_sum = 0;
for (int index = 0; index < THREADS_FORWARD; ++index) {
reduce_sum += prod_sum[index];
}
output[n][ph][pw][h][w] = reduce_sum;
}
}
}
}
template <typename scalar_t>
__global__ void correlation_cuda_backward_kernel_input1(
const TensorAcc5R gradOutput,
const TensorAcc4R input2,
TensorAcc4R gradInput1,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW,
int batch) {
const int iH = input2.size(2);
const int iW = input2.size(3);
const int H = gradOutput.size(3);
const int W = gradOutput.size(4);
const int patchRadH = (patchH - 1) / 2;
const int patchRadW = (patchW - 1) / 2;
const int n = batch;
const int c = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int ph_off = threadIdx.x;
const int pw_off = threadIdx.y;
const int h_2 = h + padH;
const int w_2 = w + padW;
const int start_i2 = h_2 / dH;
const int start_j2 = w_2 / dW;
  /*we perform a modulo but since we have the quotient, we
  can cheat a bit*/
const int h_off = h_2 - start_i2 * dH;
const int w_off = w_2 - start_j2 * dW;
__shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD];
prod_sum[ph_off][pw_off] = 0;
for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) {
int i1 = h + dilation_patchH * (ph - patchRadH);
for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) {
int j1 = w + dilation_patchW * (pw - patchRadW);
if WITHIN_BOUNDS(i1, j1, iH, iW) {
scalar_t val = input2[n][c][i1][j1];
for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) {
int i2 = start_i2 - i;
for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) {
int j2 = start_j2 - j;
if WITHIN_BOUNDS(i2, j2, H, W) {
prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val;
}
}
}
}
}
}
__syncthreads();
if (ph_off == 0 && pw_off == 0){
scalar_t reduce_sum =0;
for (int ph = 0; ph < THREADS_BACKWARD; ++ph){
for (int pw = 0; pw < THREADS_BACKWARD; ++pw){
reduce_sum += prod_sum[ph][pw];
}
}
gradInput1[n][c][h][w] = reduce_sum;
}
}
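// Illustrative sketch (not part of the original kernels): the "cheap modulo" above. Since the
// quotient start_i2 = h_2 / dH is needed anyway, h_2 - start_i2 * dH equals h_2 % dH without
// paying for a second division.
__host__ __device__ inline int cheap_mod_sketch(const int h_2, const int dH) {
  const int q = h_2 / dH;  // quotient, also used as the starting output row
  return h_2 - q * dH;     // same value as h_2 % dH for non-negative h_2
}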
template <typename scalar_t>
__global__ void correlation_cuda_backward_kernel_input2(
const TensorAcc5R gradOutput,
const TensorAcc4R input1,
TensorAcc4R gradInput2,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW,
int batch) {
const int iH = input1.size(2);
const int iW = input1.size(3);
const int patchRadH = (patchH - 1) / 2;
const int patchRadW = (patchW - 1) / 2;
const int H = gradOutput.size(3);
const int W = gradOutput.size(4);
const int n = batch;
const int c = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int ph_off = threadIdx.x;
const int pw_off = threadIdx.y;
__shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD];
prod_sum[ph_off][pw_off] = 0;
for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) {
int i1 = h - dilation_patchH * (ph - patchRadH);
for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) {
int j1 = w - dilation_patchW * (pw - patchRadW);
if WITHIN_BOUNDS(i1, j1, iH, iW) {
scalar_t val = input1[n][c][i1][j1];
const int h_2 = i1 + padH;
const int w_2 = j1 + padW;
const int start_i2 = h_2 / dH;
const int start_j2 = w_2 / dW;
const int h_off = h_2 - start_i2 * dH;
const int w_off = w_2 - start_j2 * dW;
for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) {
int i2 = start_i2 - i;
for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) {
int j2 = start_j2 - j;
if WITHIN_BOUNDS(i2, j2, H, W) {
prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val;
}
}
}
}
}
}
__syncthreads();
if (ph_off == 0 && pw_off == 0){
scalar_t reduce_sum =0;
for (int ph = 0; ph < THREADS_BACKWARD; ++ph){
for (int pw = 0; pw < THREADS_BACKWARD; ++pw){
reduce_sum += prod_sum[ph][pw];
}
}
gradInput2[n][c][h][w] = reduce_sum;
}
}
}
torch::Tensor correlation_cuda_forward(
torch::Tensor input1,
torch::Tensor input2,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
const int batch_size = input1.size(0);
const int iH = input1.size(2);
const int iW = input1.size(3);
const auto oH = (iH + 2 * padH - kH) / dH + 1;
const auto oW = (iW + 2 * padW - kW) / dW + 1;
auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options());
auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous();
auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous();
const int threads = THREADS_FORWARD;
const dim3 blocks(batch_size, oH, oW);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] {
TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>();
hipLaunchKernelGGL(( correlation_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
trInput1_acc, trInput2_acc, output_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW);
}));
return output;
}
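// Illustrative sketch (not part of the original sources): the output spatial extent used
// above follows the usual sliding-window formula for kernel size k, padding and stride.
inline int out_size_sketch(const int in, const int pad, const int k, const int stride) {
  return (in + 2 * pad - k) / stride + 1;
}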
std::vector<torch::Tensor> correlation_cuda_backward(
torch::Tensor input1,
torch::Tensor input2,
torch::Tensor gradOutput,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
auto gradInput1 = torch::zeros_like(input1);
auto gradInput2 = torch::zeros_like(input2);
const int batch_size = input1.size(0);
const int iH = input1.size(2);
const int iW = input1.size(3);
const int C = input1.size(1);
const dim3 blocks(C, iH, iW);
const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD);
AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] {
TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>();
for (int n = 0; n < batch_size; ++n){
hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input1<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
gradOutput_acc, input2_acc, gradInput1_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW,
n);
}
for (int n = 0; n < batch_size; ++n){
hipLaunchKernelGGL(( correlation_cuda_backward_kernel_input2<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
gradOutput_acc, input1_acc, gradInput2_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW,
n);
}
}));
return {gradInput1, gradInput2};
}
| cfadf67a16b54cdd72a128b73300042c640b48fe.cu | #include <torch/extension.h>
#include <ATen/ATen.h>
using namespace at;
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
// Cuda tensor accessor definitions
// restrict pointer traits piroritize speed over memory consumption
#define TensorAcc4R PackedTensorAccessor<scalar_t,4,RestrictPtrTraits,int32_t>
#define TensorAcc5R PackedTensorAccessor<scalar_t,5,RestrictPtrTraits,int32_t>
#define WITHIN_BOUNDS(x, y, H, W) (x >= 0 && x < H && y >= 0 && y < W)
#define THREADS_FORWARD 32
#define THREADS_BACKWARD 5
namespace {
template <typename scalar_t>
__global__ void correlation_cuda_forward_kernel(
const TensorAcc4R rInput1,
const TensorAcc4R rInput2,
TensorAcc5R output,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
const int iH = rInput1.size(1);
const int iW = rInput1.size(2);
const int C = rInput1.size(3);
const int n = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int thread = threadIdx.x;
const int start_i = -padH + h * dH;
const int start_j = -padW + w * dW;
const int patchRadH = dilation_patchH * (patchH - 1) / 2;
const int patchRadW = dilation_patchW * (patchW - 1) / 2;
__shared__ scalar_t prod_sum[THREADS_FORWARD];
for(int ph = 0; ph < patchH; ++ph){
int ph_dilated = ph * dilation_patchH - patchRadH;
for(int pw = 0; pw < patchW; ++pw){
int pw_dilated = pw * dilation_patchW - patchRadW;
prod_sum[thread] = 0;
for (int i=0; i<kH; ++i){
int i1 = start_i + i;
int i2 = i1 + ph_dilated;
if WITHIN_BOUNDS(i1, i2, iH, iH){
for (int j=0; j<kW; ++j){
int j1 = start_j + j;
int j2 = j1 + pw_dilated;
if WITHIN_BOUNDS(j1, j2, iW, iW){
for (int c=thread; c<C; c += THREADS_FORWARD){
scalar_t v1 = rInput1[n][i1][j1][c];
scalar_t v2 = rInput2[n][i2][j2][c];
prod_sum[thread] += v1 * v2;
}
}
}
}
}
// accumulate
__syncthreads();
if (thread == 0) {
scalar_t reduce_sum = 0;
for (int index = 0; index < THREADS_FORWARD; ++index) {
reduce_sum += prod_sum[index];
}
output[n][ph][pw][h][w] = reduce_sum;
}
}
}
}
template <typename scalar_t>
__global__ void correlation_cuda_backward_kernel_input1(
const TensorAcc5R gradOutput,
const TensorAcc4R input2,
TensorAcc4R gradInput1,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW,
int batch) {
const int iH = input2.size(2);
const int iW = input2.size(3);
const int H = gradOutput.size(3);
const int W = gradOutput.size(4);
const int patchRadH = (patchH - 1) / 2;
const int patchRadW = (patchW - 1) / 2;
const int n = batch;
const int c = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int ph_off = threadIdx.x;
const int pw_off = threadIdx.y;
const int h_2 = h + padH;
const int w_2 = w + padW;
const int start_i2 = h_2 / dH;
const int start_j2 = w_2 / dW;
  /* we perform a modulo, but since we already have the quotient we can cheat a bit */
const int h_off = h_2 - start_i2 * dH;
const int w_off = w_2 - start_j2 * dW;
__shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD];
prod_sum[ph_off][pw_off] = 0;
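  // Each (ph_off, pw_off) thread accumulates the contributions of a strided
  // subset of patch displacements; thread (0, 0) reduces the partial sums below.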
for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) {
int i1 = h + dilation_patchH * (ph - patchRadH);
for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) {
int j1 = w + dilation_patchW * (pw - patchRadW);
if WITHIN_BOUNDS(i1, j1, iH, iW) {
scalar_t val = input2[n][c][i1][j1];
for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) {
int i2 = start_i2 - i;
for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) {
int j2 = start_j2 - j;
if WITHIN_BOUNDS(i2, j2, H, W) {
prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val;
}
}
}
}
}
}
__syncthreads();
if (ph_off == 0 && pw_off == 0){
scalar_t reduce_sum =0;
for (int ph = 0; ph < THREADS_BACKWARD; ++ph){
for (int pw = 0; pw < THREADS_BACKWARD; ++pw){
reduce_sum += prod_sum[ph][pw];
}
}
gradInput1[n][c][h][w] = reduce_sum;
}
}
template <typename scalar_t>
__global__ void correlation_cuda_backward_kernel_input2(
const TensorAcc5R gradOutput,
const TensorAcc4R input1,
TensorAcc4R gradInput2,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW,
int batch) {
const int iH = input1.size(2);
const int iW = input1.size(3);
const int patchRadH = (patchH - 1) / 2;
const int patchRadW = (patchW - 1) / 2;
const int H = gradOutput.size(3);
const int W = gradOutput.size(4);
const int n = batch;
const int c = blockIdx.x;
const int h = blockIdx.y;
const int w = blockIdx.z;
const int ph_off = threadIdx.x;
const int pw_off = threadIdx.y;
__shared__ scalar_t prod_sum[THREADS_BACKWARD][THREADS_BACKWARD];
prod_sum[ph_off][pw_off] = 0;
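  // Mirror of the input1 kernel: the patch displacement is applied in the
  // opposite direction (i1 = h - ...), and thread (0, 0) again reduces the partial sums.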
for (int ph = ph_off; ph < patchH; ph += THREADS_BACKWARD) {
int i1 = h - dilation_patchH * (ph - patchRadH);
for (int pw = pw_off; pw < patchW; pw += THREADS_BACKWARD) {
int j1 = w - dilation_patchW * (pw - patchRadW);
if WITHIN_BOUNDS(i1, j1, iH, iW) {
scalar_t val = input1[n][c][i1][j1];
const int h_2 = i1 + padH;
const int w_2 = j1 + padW;
const int start_i2 = h_2 / dH;
const int start_j2 = w_2 / dW;
const int h_off = h_2 - start_i2 * dH;
const int w_off = w_2 - start_j2 * dW;
for(int tmp1 = h_off, i = 0; tmp1 < kH; tmp1 += dH, ++i) {
int i2 = start_i2 - i;
for(int tmp2 = w_off, j = 0; tmp2 < kW; tmp2 += dW, ++j) {
int j2 = start_j2 - j;
if WITHIN_BOUNDS(i2, j2, H, W) {
prod_sum[ph_off][pw_off] += gradOutput[n][ph][pw][i2][j2] * val;
}
}
}
}
}
}
__syncthreads();
if (ph_off == 0 && pw_off == 0){
scalar_t reduce_sum =0;
for (int ph = 0; ph < THREADS_BACKWARD; ++ph){
for (int pw = 0; pw < THREADS_BACKWARD; ++pw){
reduce_sum += prod_sum[ph][pw];
}
}
gradInput2[n][c][h][w] = reduce_sum;
}
}
}
torch::Tensor correlation_cuda_forward(
torch::Tensor input1,
torch::Tensor input2,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
const int batch_size = input1.size(0);
const int iH = input1.size(2);
const int iW = input1.size(3);
const auto oH = (iH + 2 * padH - kH) / dH + 1;
const auto oW = (iW + 2 * padW - kW) / dW + 1;
auto output = torch::zeros({batch_size, patchH, patchW, oH, oW}, input1.options());
auto trInput1 = input1.permute({0, 2, 3, 1}).contiguous();
auto trInput2 = input2.permute({0, 2, 3, 1}).contiguous();
const int threads = THREADS_FORWARD;
const dim3 blocks(batch_size, oH, oW);
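  // one block per (batch, output row, output col); THREADS_FORWARD threads cooperate over the channel dimension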
AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_forward_cuda", ([&] {
TensorAcc4R trInput1_acc = trInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R trInput2_acc = trInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc5R output_acc = output.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>();
correlation_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
trInput1_acc, trInput2_acc, output_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW);
}));
return output;
}
std::vector<torch::Tensor> correlation_cuda_backward(
torch::Tensor input1,
torch::Tensor input2,
torch::Tensor gradOutput,
int kH, int kW,
int patchH, int patchW,
int padH, int padW,
int dilation_patchH, int dilation_patchW,
int dH, int dW) {
auto gradInput1 = torch::zeros_like(input1);
auto gradInput2 = torch::zeros_like(input2);
const int batch_size = input1.size(0);
const int iH = input1.size(2);
const int iW = input1.size(3);
const int C = input1.size(1);
const dim3 blocks(C, iH, iW);
const dim3 threads(THREADS_BACKWARD, THREADS_BACKWARD);
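  // one block per (channel, row, col) of the input gradients; the batch dimension
  // is handled by launching the backward kernels once per sample below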
AT_DISPATCH_FLOATING_TYPES(input1.type(), "correlation_backward_cuda", ([&] {
TensorAcc4R input1_acc = input1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R input2_acc = input2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R gradInput1_acc = gradInput1.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc4R gradInput2_acc = gradInput2.packed_accessor<scalar_t,4,RestrictPtrTraits,int32_t>();
TensorAcc5R gradOutput_acc = gradOutput.packed_accessor<scalar_t,5,RestrictPtrTraits,int32_t>();
for (int n = 0; n < batch_size; ++n){
correlation_cuda_backward_kernel_input1<scalar_t><<<blocks, threads>>>(
gradOutput_acc, input2_acc, gradInput1_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW,
n);
}
for (int n = 0; n < batch_size; ++n){
correlation_cuda_backward_kernel_input2<scalar_t><<<blocks, threads>>>(
gradOutput_acc, input1_acc, gradInput2_acc,
kH, kW, patchH, patchW, padH, padW,
dilation_patchH, dilation_patchW, dH, dW,
n);
}
}));
return {gradInput1, gradInput2};
}
|
0fbf941e7cbe30fbc222eec0da1f9a8eb38dc471.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
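// Simple element-wise zeroing kernel; CUDA_KERNEL_LOOP is presumably a
// grid-stride loop macro from cuda_helper.h. Note that TensorAccessorW below
// zero-initializes through assign_kernel instead.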
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
hipLaunchKernelGGL(( assign_kernel<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0,
ptr, rect.volume(), 0.0f);
checkCUDA(hipDeviceSynchronize());
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<int, 1>;
template class TensorAccessorR<int, 2>;
template class TensorAccessorR<int, 3>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<int, 1>;
template class TensorAccessorW<int, 2>;
template class TensorAccessorW<int, 3>;
| 0fbf941e7cbe30fbc222eec0da1f9a8eb38dc471.cu | #include "accessor.h"
#include "model.h"
#include "cuda_helper.h"
template<typename DT, int dim>
TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime)
{
const AccessorRO<DT, dim> acc(region, fid);
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
}
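// Simple element-wise zeroing kernel; CUDA_KERNEL_LOOP is presumably a
// grid-stride loop macro from cuda_helper.h. Note that TensorAccessorW below
// zero-initializes through assign_kernel instead.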
template<typename DT>
__global__
void zero_array(DT* ptr, coord_t size)
{
CUDA_KERNEL_LOOP(i, size)
{
ptr[i] = 0;
}
}
template<typename DT, int dim>
TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region,
RegionRequirement req,
FieldID fid,
Context ctx,
Runtime* runtime,
bool readOutput)
{
rect = runtime->get_index_space_domain(
ctx, req.region.get_index_space());
if (readOutput) {
const AccessorRW<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
} else {
const AccessorWO<DT, dim> acc(region, fid);
assert(acc.accessor.is_dense_arbitrary(rect));
ptr = acc.ptr(rect);
// FIXME: currently we zero init the region if not read output
assign_kernel<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>(
ptr, rect.volume(), 0.0f);
checkCUDA(cudaDeviceSynchronize());
}
}
template class TensorAccessorR<float, 1>;
template class TensorAccessorR<float, 2>;
template class TensorAccessorR<float, 3>;
template class TensorAccessorR<int, 1>;
template class TensorAccessorR<int, 2>;
template class TensorAccessorR<int, 3>;
template class TensorAccessorW<float, 1>;
template class TensorAccessorW<float, 2>;
template class TensorAccessorW<float, 3>;
template class TensorAccessorW<int, 1>;
template class TensorAccessorW<int, 2>;
template class TensorAccessorW<int, 3>;
|
acef4e23504c5a46122b234450dcc7c02e58c647.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
author : Anshul Farkya
created on : 12-feb-2021
*/
#include <stdio.h>
#include <stdlib.h>
#include "kernels.h"
// Print the matrix
void printMatrix(int *mat, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++)
{
printf("%d ", mat[i*cols+j]);
}
printf("\n");
}
printf("\n");
}
// Verify the correctness by comparing the sequential output with parallel output
bool compareMatrices(int *gpu, int *ref, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++)
{
if (ref[i*cols + j] != gpu[i*cols +j])
{
return false;
}
}
}
return true; // generated output matches expected output
}
// Generating the expected output
void add_matrices(int A[], int B[], int out[], int rows, int cols)
{
for(int i=0; i<rows; i++)
{
for(int j=0; j<cols; j++)
{
out[i*cols + j] = A[i*cols + j] + B[i*cols + j];
}
}
}
int main(int argc, char** argv)
{
FILE *fp;
fp = fopen(argv[1], "r");
if(fp == NULL)
{
printf("Failed to open input file %s\n", argv[1]);
exit(0);
}
// Reading the input matrix from file
int rows, cols; // holds the dimensions of the input matrices
fscanf(fp, "%d", &rows);
fscanf(fp, "%d", &cols);
//printf("rows = %d\n",rows);
//printf("cols = %d\n",cols);
int numBytes = rows * cols * sizeof(int);
long long k1_blkDimX, k1_gridDimX, k2_blkDimX, k2_blkDimY, k2_gridDimX, k3_blkDimX, k3_blkDimY, k3_gridDimX, k3_gridDimY;
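  // launch configurations: kernel 1 uses a 1D grid of 1D blocks, kernel 2 a 1D grid
  // of 2D blocks, and kernel 3 a 2D grid of 2D blocks (the kernels themselves are
  // presumably declared in kernels.h)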
k1_blkDimX = 512;
k1_gridDimX = ceil(float(rows*cols)/k1_blkDimX);
//printf("k1_gridDimX = %d\n",k1_gridDimX);
int *A = (int*) malloc(numBytes);
for(int i = 0; i < rows*cols; i++)
{
fscanf(fp, "%d", &A[i]);
}
int *B = (int*) malloc(numBytes);
for(int i = 0; i < rows*cols; i++)
{
fscanf(fp, "%d", &B[i]);
}
// computing the addition on CPU to generate expected output
int *cpuOut = (int*) malloc(numBytes);
add_matrices(A, B, cpuOut, rows, cols);
//printMatrix(cpuOut, rows,cols);
int *d_A;
hipMalloc(&d_A, numBytes);
hipMemcpy(d_A, A, numBytes, hipMemcpyHostToDevice);
int *d_B;
hipMalloc(&d_B, numBytes);
hipMemcpy(d_B, B, numBytes, hipMemcpyHostToDevice);
int *d_C;
hipMalloc(&d_C, numBytes);
// Launching the first kernel
hipLaunchKernelGGL(( per_row_kernel), dim3(k1_gridDimX), dim3(k1_blkDimX), 0, 0, rows,cols,d_A, d_B, d_C);
hipDeviceSynchronize();
// Copying back from the device
int *out1 = (int*) malloc(numBytes);
hipMemcpy(out1, d_C, numBytes, hipMemcpyDeviceToHost);
//printMatrix(out1, rows,cols);
printf("%s,%s\n", argv[1], compareMatrices(out1, cpuOut, rows, cols) ? "Success" : "Failure");
hipMemset(d_C, 0, numBytes);
k2_blkDimX = 32;
k2_blkDimY = 32;
k2_gridDimX = ceil(float(rows*cols)/(k2_blkDimX*k2_blkDimY));
// Launching the second kernel
dim3 block2D(k2_blkDimX, k2_blkDimY,1);
hipLaunchKernelGGL(( per_column_kernel), dim3(k2_gridDimX), dim3(block2D), 0, 0, rows,cols,d_A, d_B, d_C);
hipDeviceSynchronize();
// Copying back from the device
int *out2 = (int*) malloc(numBytes);
hipMemcpy(out2, d_C, numBytes, hipMemcpyDeviceToHost);
//printMatrix(out2, rows,cols);
printf("%s,%s\n", argv[1],compareMatrices(out2, cpuOut, rows, cols) ? "Success" : "Failure");
hipMemset(d_C, 0, numBytes);
k3_blkDimX = 32;
k3_blkDimY = 32;
k3_gridDimX = 32;
k3_gridDimY = ceil(float(rows*cols)/(k3_blkDimX*k3_blkDimY*k3_gridDimX));
// Launching the third kernel
dim3 grid2D(k3_gridDimX, k3_gridDimY);
dim3 blk2D(k3_blkDimX,k3_blkDimY);
hipLaunchKernelGGL(( per_element_kernel), dim3(grid2D), dim3(blk2D), 0, 0, rows,cols,d_A, d_B, d_C);
hipDeviceSynchronize();
// Copying back from the device
int *out3 = (int*) malloc(numBytes);
hipMemcpy(out3, d_C, numBytes, hipMemcpyDeviceToHost);
printMatrix(out3, rows,cols);
printf("%s,%s\n", argv[1],compareMatrices(out3, cpuOut, rows,cols) ? "Success" : "Failure");
// Deallocating the memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(cpuOut);
free(out1);
free(out2);
free(out3);
return(0);
}
| acef4e23504c5a46122b234450dcc7c02e58c647.cu | /*
author : Anshul Farkya
created on : 12-feb-2021
*/
#include <stdio.h>
#include <stdlib.h>
#include "kernels.h"
// Print the matrix
void printMatrix(int *mat, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++)
{
printf("%d ", mat[i*cols+j]);
}
printf("\n");
}
printf("\n");
}
// Verify the correctness by comparing the sequential output with parallel output
bool compareMatrices(int *gpu, int *ref, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++)
{
if (ref[i*cols + j] != gpu[i*cols +j])
{
return false;
}
}
}
return true; // generated output matches expected output
}
// Generating the expected output
void add_matrices(int A[], int B[], int out[], int rows, int cols)
{
for(int i=0; i<rows; i++)
{
for(int j=0; j<cols; j++)
{
out[i*cols + j] = A[i*cols + j] + B[i*cols + j];
}
}
}
int main(int argc, char** argv)
{
FILE *fp;
fp = fopen(argv[1], "r");
if(fp == NULL)
{
printf("Failed to open input file %s\n", argv[1]);
exit(0);
}
// Reading the input matrix from file
int rows, cols; // holds the dimensions of the input matrices
fscanf(fp, "%d", &rows);
fscanf(fp, "%d", &cols);
//printf("rows = %d\n",rows);
//printf("cols = %d\n",cols);
int numBytes = rows * cols * sizeof(int);
long long k1_blkDimX, k1_gridDimX, k2_blkDimX, k2_blkDimY, k2_gridDimX, k3_blkDimX, k3_blkDimY, k3_gridDimX, k3_gridDimY;
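  // launch configurations: kernel 1 uses a 1D grid of 1D blocks, kernel 2 a 1D grid
  // of 2D blocks, and kernel 3 a 2D grid of 2D blocks (the kernels themselves are
  // presumably declared in kernels.h)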
k1_blkDimX = 512;
k1_gridDimX = ceil(float(rows*cols)/k1_blkDimX);
//printf("k1_gridDimX = %d\n",k1_gridDimX);
int *A = (int*) malloc(numBytes);
for(int i = 0; i < rows*cols; i++)
{
fscanf(fp, "%d", &A[i]);
}
int *B = (int*) malloc(numBytes);
for(int i = 0; i < rows*cols; i++)
{
fscanf(fp, "%d", &B[i]);
}
// computing the addition on CPU to generate expected output
int *cpuOut = (int*) malloc(numBytes);
add_matrices(A, B, cpuOut, rows, cols);
//printMatrix(cpuOut, rows,cols);
int *d_A;
cudaMalloc(&d_A, numBytes);
cudaMemcpy(d_A, A, numBytes, cudaMemcpyHostToDevice);
int *d_B;
cudaMalloc(&d_B, numBytes);
cudaMemcpy(d_B, B, numBytes, cudaMemcpyHostToDevice);
int *d_C;
cudaMalloc(&d_C, numBytes);
// Launching the first kernel
per_row_kernel<<<k1_gridDimX, k1_blkDimX>>>(rows,cols,d_A, d_B, d_C);
cudaDeviceSynchronize();
// Copying back from the device
int *out1 = (int*) malloc(numBytes);
cudaMemcpy(out1, d_C, numBytes, cudaMemcpyDeviceToHost);
//printMatrix(out1, rows,cols);
printf("%s,%s\n", argv[1], compareMatrices(out1, cpuOut, rows, cols) ? "Success" : "Failure");
cudaMemset(d_C, 0, numBytes);
k2_blkDimX = 32;
k2_blkDimY = 32;
k2_gridDimX = ceil(float(rows*cols)/(k2_blkDimX*k2_blkDimY));
// Launching the second kernel
dim3 block2D(k2_blkDimX, k2_blkDimY,1);
per_column_kernel<<<k2_gridDimX, block2D>>>(rows,cols,d_A, d_B, d_C);
cudaDeviceSynchronize();
// Copying back from the device
int *out2 = (int*) malloc(numBytes);
cudaMemcpy(out2, d_C, numBytes, cudaMemcpyDeviceToHost);
//printMatrix(out2, rows,cols);
printf("%s,%s\n", argv[1],compareMatrices(out2, cpuOut, rows, cols) ? "Success" : "Failure");
cudaMemset(d_C, 0, numBytes);
k3_blkDimX = 32;
k3_blkDimY = 32;
k3_gridDimX = 32;
k3_gridDimY = ceil(float(rows*cols)/(k3_blkDimX*k3_blkDimY*k3_gridDimX));
// Launching the third kernel
dim3 grid2D(k3_gridDimX, k3_gridDimY);
dim3 blk2D(k3_blkDimX,k3_blkDimY);
per_element_kernel<<<grid2D, blk2D>>>(rows,cols,d_A, d_B, d_C);
cudaDeviceSynchronize();
// Copying back from the device
int *out3 = (int*) malloc(numBytes);
cudaMemcpy(out3, d_C, numBytes, cudaMemcpyDeviceToHost);
printMatrix(out3, rows,cols);
printf("%s,%s\n", argv[1],compareMatrices(out3, cpuOut, rows,cols) ? "Success" : "Failure");
// Deallocating the memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(cpuOut);
free(out1);
free(out2);
free(out3);
return(0);
}
|
a29c2605380217e0cde1ea36a92ec58fb1c5a9c1.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define cfd_NBLOCKS 16*65*K
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
const int cfd_maxNeighbors = 8;
texture<float,1,hipReadModeElementType> tex_mx;
texture<float,1,hipReadModeElementType> tex_my;
texture<float,1,hipReadModeElementType> tex_mz;
texture<float,1,hipReadModeElementType> tex_energy;
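// read-only per-atom quantities (momentum components and density_energy) are
// fetched in the kernel through these 1D textures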
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
  //create non-uniform data sharing,
  //but randomize the block order so that tasks sharing the same data are not neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
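  // each atom's cfd_maxNeighbors neighbors are drawn at random from its super-block
  // (cfd_nBlksPerCluster blocks) of the shuffled atom ordering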
for (int i = 0; i < nAtom; i++)
{
    int start = i - i%superBlockSz; // align to the start of this atom's super-block (the sharing pattern differs here)
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
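  // CPU reference implementation: recomputes the fluxes serially and compares
  // them against the GPU results within a tolerance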
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, const float* __restrict__ mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
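  // one thread per element i; its flux contributions are accumulated over the
  // cfd_maxNeighbors neighbors read from elements_surrounding_elements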
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);// mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = tex1Dfetch(tex_mx,nb);
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
hipSetDeviceFlags(hipDeviceMapHost);
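  // mapped (zero-copy) host flag; the kernel receives a device pointer to it,
  // though the atomicAdd that would update it is currently commented out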
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
hipBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
hipBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(cfd_BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| a29c2605380217e0cde1ea36a92ec58fb1c5a9c1.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
#define cfd_NBLOCKS 16*65*K
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
const int cfd_maxNeighbors = 8;
texture<float,1,cudaReadModeElementType> tex_mx;
texture<float,1,cudaReadModeElementType> tex_my;
texture<float,1,cudaReadModeElementType> tex_mz;
texture<float,1,cudaReadModeElementType> tex_energy;
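// read-only per-atom quantities (momentum components and density_energy) are
// fetched in the kernel through these 1D textures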
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
  //create non-uniform data sharing,
  //but randomize the block order so that tasks sharing the same data are not neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
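  // each atom's cfd_maxNeighbors neighbors are drawn at random from its super-block
  // (cfd_nBlksPerCluster blocks) of the shuffled atom ordering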
for (int i = 0; i < nAtom; i++)
{
    int start = i - i%superBlockSz; // align to the start of this atom's super-block (the sharing pattern differs here)
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
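  // CPU reference implementation: recomputes the fluxes serially and compares
  // them against the GPU results within a tolerance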
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr,int* elements_surrounding_elements, const float*
normals, const float* __restrict__ density, const float* __restrict__ mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
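  // one thread per element i; its flux contributions are accumulated over the
  // cfd_maxNeighbors neighbors read from elements_surrounding_elements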
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);// mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = tex1Dfetch(tex_energy,i);//density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = tex1Dfetch(tex_mx,nb);
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z = tex1Dfetch(tex_mz,nb);//mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = tex1Dfetch(tex_energy,nb);//density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
// Copy data to GPU
cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
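// Enable mapped (zero-copy) pinned memory, then create a host flag whose device alias d_flag_cfd is passed to the kernel.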
cudaSetDeviceFlags(cudaDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
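// Bind the momentum and energy arrays to texture references (tex_mx/my/mz, tex_energy), presumably for cached reads inside cfd_kernel.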
cudaBindTexture(0,tex_mx,d_mx,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_my,d_my,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_mz,d_mz,cfd_nAtom*sizeof(float));
cudaBindTexture(0,tex_energy,d_density_energy,cfd_nAtom*sizeof(float));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
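// Ceiling division: launch enough blocks so every one of the cfd_nAtom cells gets a thread.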
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
cfd_kernel<<<cfd_gridSize, cfd_BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO: verified only on small inputs so far
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
|
1bf5795cdd9fc313407cb16ff820b0e98047f592.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
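// The state vector is a pitched 2D allocation: NEQ rows, one entry per volume along each row.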
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
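// Time-marching loop: each step evaluates the RHS for this cell's type (myocardium or epicardium) and updates all NEQ state variables.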
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
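// FCa and G gates may only relax downward while the membrane is depolarized (V > -37 mV).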
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
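// Fitted parameter set: the assignments below override the default conductances, and the last three entries set arel, crel and Vleak.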
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 1bf5795cdd9fc313407cb16ff820b0e98047f592.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S1_1.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
977ed52908b3518ed86dd0320ffc752995aa25cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include "../configuration.h"
#ifdef OPTION_RENDER
#include "../rendering.h"
#endif // OPTION_RENDER
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
static const int kCudaBlockSize = 256;
// Arrays containing all Body objects on device.
__device__ float* dev_Body_pos_x;
__device__ float* dev_Body_pos_y;
__device__ float* dev_Body_vel_x;
__device__ float* dev_Body_vel_y;
__device__ float* dev_Body_mass;
__device__ float* dev_Body_force_x;
__device__ float* dev_Body_force_y;
__device__ float device_checksum;
__device__ void new_Body(int id, float pos_x, float pos_y,
float vel_x, float vel_y, float mass) {
dev_Body_pos_x[id] = pos_x;
dev_Body_pos_y[id] = pos_y;
dev_Body_vel_x[id] = vel_x;
dev_Body_vel_y[id] = vel_y;
dev_Body_mass[id] = mass;
}
__device__ void Body_compute_force(int id) {
dev_Body_force_x[id] = 0.0f;
dev_Body_force_y[id] = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
// Do not compute force with the body itself.
if (id != i) {
float dx = dev_Body_pos_x[i] - dev_Body_pos_x[id];
float dy = dev_Body_pos_y[i] - dev_Body_pos_y[id];
float dist = sqrt(dx*dx + dy*dy);
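// Softened gravity: kDampeningFactor in the denominator avoids the singularity when two bodies nearly coincide.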
float F = kGravityConstant * dev_Body_mass[id] * dev_Body_mass[i]
/ (dist * dist + kDampeningFactor);
dev_Body_force_x[id] += F*dx / dist;
dev_Body_force_y[id] += F*dy / dist;
}
}
}
__device__ void Body_update(int id) {
dev_Body_vel_x[id] += dev_Body_force_x[id]*kDt / dev_Body_mass[id];
dev_Body_vel_y[id] += dev_Body_force_y[id]*kDt / dev_Body_mass[id];
dev_Body_pos_x[id] += dev_Body_vel_x[id]*kDt;
dev_Body_pos_y[id] += dev_Body_vel_y[id]*kDt;
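// Reflect the velocity whenever a body leaves the [-1, 1] range in either coordinate.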
if (dev_Body_pos_x[id] < -1 || dev_Body_pos_x[id] > 1) {
dev_Body_vel_x[id] = -dev_Body_vel_x[id];
}
if (dev_Body_pos_y[id] < -1 || dev_Body_pos_y[id] > 1) {
dev_Body_vel_y[id] = -dev_Body_vel_y[id];
}
}
__device__ void Body_add_checksum(int id) {
atomicAdd(&device_checksum, dev_Body_pos_x[id] + dev_Body_pos_y[id]*2
+ dev_Body_vel_x[id]*3 + dev_Body_vel_y[id]*4);
}
__global__ void kernel_initialize_bodies(float* pos_x, float* pos_y,
float* vel_x, float* vel_y,
float* mass, float* force_x,
float* force_y) {
dev_Body_pos_x = pos_x;
dev_Body_pos_y = pos_y;
dev_Body_vel_x = vel_x;
dev_Body_vel_y = vel_y;
dev_Body_mass = mass;
dev_Body_force_x = force_x;
dev_Body_force_y = force_y;
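// Grid-stride loop: each thread initializes multiple bodies when kNumBodies exceeds the total thread count.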
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
// Initialize random state.
hiprandState_t rand_state;
hiprand_init(kSeed, i, 0, &rand_state);
// Create new Body object.
new_Body(/*id=*/ i,
/*pos_x=*/ 2 * hiprand_uniform(&rand_state) - 1,
/*pos_y=*/ 2 * hiprand_uniform(&rand_state) - 1,
/*vel_x=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/ (hiprand_uniform(&rand_state)/2 + 0.5) * kMaxMass);
}
}
__global__ void kernel_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
Body_compute_force(i);
}
}
__global__ void kernel_update() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
Body_update(i);
}
}
__global__ void kernel_compute_checksum() {
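// Launched with a single block of a single thread; walks every body sequentially to accumulate a deterministic checksum.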
device_checksum = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
Body_add_checksum(i);
}
}
int main(int /*argc*/, char** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
float* host_Body_pos_x;
float* host_Body_pos_y;
float* host_Body_vel_x;
float* host_Body_vel_y;
float* host_Body_mass;
float* host_Body_force_x;
float* host_Body_force_y;
// Allocate and create Body objects.
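// Note: despite the host_ prefix these are device buffers (hipMalloc); kernel_initialize_bodies copies the pointers into the dev_Body_* globals.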
hipMalloc(&host_Body_pos_x, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_pos_y, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_vel_x, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_vel_y, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_mass, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_force_x, sizeof(float)*kNumBodies);
hipMalloc(&host_Body_force_y, sizeof(float)*kNumBodies);
#ifdef OPTION_RENDER
float Body_pos_x[kNumBodies];
float Body_pos_y[kNumBodies];
float Body_mass[kNumBodies];
#endif // OPTION_RENDER
auto time_start = std::chrono::system_clock::now();
hipLaunchKernelGGL(( kernel_initialize_bodies), dim3(128), dim3(128), 0, 0,
host_Body_pos_x, host_Body_pos_y,
host_Body_vel_x, host_Body_vel_y,
host_Body_mass, host_Body_force_x,
host_Body_force_y);
gpuErrchk(hipDeviceSynchronize());
for (int i = 0; i < kNumIterations; ++i) {
hipLaunchKernelGGL(( kernel_compute_force),
dim3((kNumBodies + kCudaBlockSize - 1)/kCudaBlockSize),
dim3(kCudaBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_update),
dim3((kNumBodies + kCudaBlockSize - 1)/kCudaBlockSize),
dim3(kCudaBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
#ifdef OPTION_RENDER
hipMemcpy(Body_pos_x, host_Body_pos_x, sizeof(float)*kNumBodies,
hipMemcpyDeviceToHost);
hipMemcpy(Body_pos_y, host_Body_pos_y, sizeof(float)*kNumBodies,
hipMemcpyDeviceToHost);
hipMemcpy(Body_mass, host_Body_mass, sizeof(float)*kNumBodies,
hipMemcpyDeviceToHost);
init_frame();
for (int i = 0; i < kNumBodies; ++i) {
draw_body(Body_pos_x[i], Body_pos_y[i], Body_mass[i]);
}
show_frame();
#endif // OPTION_RENDER
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed)
.count();
printf("%lu\n", micros);
#ifndef NDEBUG
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(1), dim3(1), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
float checksum;
hipMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
hipMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
hipFree(host_Body_pos_x);
hipFree(host_Body_pos_y);
hipFree(host_Body_vel_x);
hipFree(host_Body_vel_y);
hipFree(host_Body_mass);
hipFree(host_Body_force_x);
hipFree(host_Body_force_y);
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
| 977ed52908b3518ed86dd0320ffc752995aa25cf.cu | #include <chrono>
#include <curand_kernel.h>
#include <stdio.h>
#include "../configuration.h"
#ifdef OPTION_RENDER
#include "../rendering.h"
#endif // OPTION_RENDER
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
static const int kCudaBlockSize = 256;
// Arrays containing all Body objects on device.
__device__ float* dev_Body_pos_x;
__device__ float* dev_Body_pos_y;
__device__ float* dev_Body_vel_x;
__device__ float* dev_Body_vel_y;
__device__ float* dev_Body_mass;
__device__ float* dev_Body_force_x;
__device__ float* dev_Body_force_y;
__device__ float device_checksum;
__device__ void new_Body(int id, float pos_x, float pos_y,
float vel_x, float vel_y, float mass) {
dev_Body_pos_x[id] = pos_x;
dev_Body_pos_y[id] = pos_y;
dev_Body_vel_x[id] = vel_x;
dev_Body_vel_y[id] = vel_y;
dev_Body_mass[id] = mass;
}
__device__ void Body_compute_force(int id) {
dev_Body_force_x[id] = 0.0f;
dev_Body_force_y[id] = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
// Do not compute force with the body itself.
if (id != i) {
float dx = dev_Body_pos_x[i] - dev_Body_pos_x[id];
float dy = dev_Body_pos_y[i] - dev_Body_pos_y[id];
float dist = sqrt(dx*dx + dy*dy);
float F = kGravityConstant * dev_Body_mass[id] * dev_Body_mass[i]
/ (dist * dist + kDampeningFactor);
dev_Body_force_x[id] += F*dx / dist;
dev_Body_force_y[id] += F*dy / dist;
}
}
}
__device__ void Body_update(int id) {
dev_Body_vel_x[id] += dev_Body_force_x[id]*kDt / dev_Body_mass[id];
dev_Body_vel_y[id] += dev_Body_force_y[id]*kDt / dev_Body_mass[id];
dev_Body_pos_x[id] += dev_Body_vel_x[id]*kDt;
dev_Body_pos_y[id] += dev_Body_vel_y[id]*kDt;
if (dev_Body_pos_x[id] < -1 || dev_Body_pos_x[id] > 1) {
dev_Body_vel_x[id] = -dev_Body_vel_x[id];
}
if (dev_Body_pos_y[id] < -1 || dev_Body_pos_y[id] > 1) {
dev_Body_vel_y[id] = -dev_Body_vel_y[id];
}
}
__device__ void Body_add_checksum(int id) {
atomicAdd(&device_checksum, dev_Body_pos_x[id] + dev_Body_pos_y[id]*2
+ dev_Body_vel_x[id]*3 + dev_Body_vel_y[id]*4);
}
__global__ void kernel_initialize_bodies(float* pos_x, float* pos_y,
float* vel_x, float* vel_y,
float* mass, float* force_x,
float* force_y) {
dev_Body_pos_x = pos_x;
dev_Body_pos_y = pos_y;
dev_Body_vel_x = vel_x;
dev_Body_vel_y = vel_y;
dev_Body_mass = mass;
dev_Body_force_x = force_x;
dev_Body_force_y = force_y;
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
// Initialize random state.
curandState rand_state;
curand_init(kSeed, i, 0, &rand_state);
// Create new Body object.
new_Body(/*id=*/ i,
/*pos_x=*/ 2 * curand_uniform(&rand_state) - 1,
/*pos_y=*/ 2 * curand_uniform(&rand_state) - 1,
/*vel_x=*/ (curand_uniform(&rand_state) - 0.5) / 1000,
/*vel_y=*/ (curand_uniform(&rand_state) - 0.5) / 1000,
/*mass=*/ (curand_uniform(&rand_state)/2 + 0.5) * kMaxMass);
}
}
__global__ void kernel_compute_force() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
Body_compute_force(i);
}
}
__global__ void kernel_update() {
for (int i = threadIdx.x + blockDim.x * blockIdx.x;
i < kNumBodies; i += blockDim.x * gridDim.x) {
Body_update(i);
}
}
__global__ void kernel_compute_checksum() {
device_checksum = 0.0f;
for (int i = 0; i < kNumBodies; ++i) {
Body_add_checksum(i);
}
}
int main(int /*argc*/, char** /*argv*/) {
#ifdef OPTION_RENDER
init_renderer();
#endif // OPTION_RENDER
float* host_Body_pos_x;
float* host_Body_pos_y;
float* host_Body_vel_x;
float* host_Body_vel_y;
float* host_Body_mass;
float* host_Body_force_x;
float* host_Body_force_y;
// Allocate and create Body objects.
cudaMalloc(&host_Body_pos_x, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_pos_y, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_vel_x, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_vel_y, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_mass, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_force_x, sizeof(float)*kNumBodies);
cudaMalloc(&host_Body_force_y, sizeof(float)*kNumBodies);
#ifdef OPTION_RENDER
float Body_pos_x[kNumBodies];
float Body_pos_y[kNumBodies];
float Body_mass[kNumBodies];
#endif // OPTION_RENDER
auto time_start = std::chrono::system_clock::now();
kernel_initialize_bodies<<<128, 128>>>(
host_Body_pos_x, host_Body_pos_y,
host_Body_vel_x, host_Body_vel_y,
host_Body_mass, host_Body_force_x,
host_Body_force_y);
gpuErrchk(cudaDeviceSynchronize());
for (int i = 0; i < kNumIterations; ++i) {
kernel_compute_force<<<
(kNumBodies + kCudaBlockSize - 1)/kCudaBlockSize,
kCudaBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_update<<<
(kNumBodies + kCudaBlockSize - 1)/kCudaBlockSize,
kCudaBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
#ifdef OPTION_RENDER
cudaMemcpy(Body_pos_x, host_Body_pos_x, sizeof(float)*kNumBodies,
cudaMemcpyDeviceToHost);
cudaMemcpy(Body_pos_y, host_Body_pos_y, sizeof(float)*kNumBodies,
cudaMemcpyDeviceToHost);
cudaMemcpy(Body_mass, host_Body_mass, sizeof(float)*kNumBodies,
cudaMemcpyDeviceToHost);
init_frame();
for (int i = 0; i < kNumBodies; ++i) {
draw_body(Body_pos_x[i], Body_pos_y[i], Body_mass[i]);
}
show_frame();
#endif // OPTION_RENDER
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto micros = std::chrono::duration_cast<std::chrono::microseconds>(elapsed)
.count();
printf("%lu\n", micros);
#ifndef NDEBUG
kernel_compute_checksum<<<1, 1>>>();
gpuErrchk(cudaDeviceSynchronize());
float checksum;
cudaMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0,
cudaMemcpyDeviceToHost);
printf("Checksum: %f\n", checksum);
#endif // NDEBUG
cudaFree(host_Body_pos_x);
cudaFree(host_Body_pos_y);
cudaFree(host_Body_vel_x);
cudaFree(host_Body_vel_y);
cudaFree(host_Body_mass);
cudaFree(host_Body_force_x);
cudaFree(host_Body_force_y);
#ifdef OPTION_RENDER
close_renderer();
#endif // OPTION_RENDER
return 0;
}
|
672878313c6d20fa789b564c5f73d51da19a1b8e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_mutualinformation_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_GPU_CU
#define _REG_MUTUALINFORMATION_GPU_CU
#include "_reg_blocksize_gpu.h"
#include "_reg_mutualinformation_gpu.h"
#include "_reg_mutualinformation_gpu_kernels.cu"
void reg_getVoxelBasedNMIGradientUsingPW_gpu( nifti_image *targetImage,
nifti_image *resultImage,
float **targetImageArray_d,
float **resultImageArray_d,
float4 **resultGradientArray_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
int binning)
{
const int voxelNumber = targetImage->nvox;
const int binNumber = binning*(binning+2);
const float4 entropies_h=make_float4(entropies[0],entropies[1],entropies[2],entropies[3]);
const float NMI = (entropies[0]+entropies[1])/entropies[2];
// Bind Symbols
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Binning,&binning,sizeof(int)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture binding
CUDA_SAFE_CALL(hipBindTexture(0, targetImageTexture, *targetImageArray_d, voxelNumber*sizeof(float)));
CUDA_SAFE_CALL(hipBindTexture(0, resultImageTexture, *resultImageArray_d, voxelNumber*sizeof(float)));
CUDA_SAFE_CALL(hipBindTexture(0, resultImageGradientTexture, *resultGradientArray_d, voxelNumber*sizeof(float4)));
CUDA_SAFE_CALL(hipBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
CUDA_SAFE_CALL(hipMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW =
(unsigned int)ceil((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW);
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW,1,1);
hipLaunchKernelGGL(( reg_getVoxelBasedNMIGradientUsingPW_kernel) , dim3(G1), dim3(B1) , 0, 0, *voxelNMIGradientArray_d);
CUDA_SAFE_CALL(hipDeviceSynchronize());
#if _VERBOSE
printf("[VERBOSE] reg_getVoxelBasedNMIGradientUsingPW_kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
hipGetErrorString(hipGetLastError()),G1.x,G1.y,G1.z,B1.x,B1.y,B1.z);
#endif
}
#endif
| 672878313c6d20fa789b564c5f73d51da19a1b8e.cu | /*
* _reg_mutualinformation_gpu.cu
*
*
* Created by Marc Modat on 24/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifndef _REG_MUTUALINFORMATION_GPU_CU
#define _REG_MUTUALINFORMATION_GPU_CU
#include "_reg_blocksize_gpu.h"
#include "_reg_mutualinformation_gpu.h"
#include "_reg_mutualinformation_gpu_kernels.cu"
void reg_getVoxelBasedNMIGradientUsingPW_gpu( nifti_image *targetImage,
nifti_image *resultImage,
float **targetImageArray_d,
float **resultImageArray_d,
float4 **resultGradientArray_d,
float **logJointHistogram_d,
float4 **voxelNMIGradientArray_d,
int **mask_d,
int activeVoxelNumber,
double *entropies,
int binning)
{
const int voxelNumber = targetImage->nvox;
const int binNumber = binning*(binning+2);
const float4 entropies_h=make_float4(entropies[0],entropies[1],entropies[2],entropies[3]);
const float NMI = (entropies[0]+entropies[1])/entropies[2];
// Bind Symbols
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Binning,&binning,sizeof(int)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Entropies,&entropies_h,sizeof(float4)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_NMI,&NMI,sizeof(float)));
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int)));
// Texture binding
CUDA_SAFE_CALL(cudaBindTexture(0, targetImageTexture, *targetImageArray_d, voxelNumber*sizeof(float)));
CUDA_SAFE_CALL(cudaBindTexture(0, resultImageTexture, *resultImageArray_d, voxelNumber*sizeof(float)));
CUDA_SAFE_CALL(cudaBindTexture(0, resultImageGradientTexture, *resultGradientArray_d, voxelNumber*sizeof(float4)));
CUDA_SAFE_CALL(cudaBindTexture(0, histogramTexture, *logJointHistogram_d, binNumber*sizeof(float)));
CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int)));
CUDA_SAFE_CALL(cudaMemset(*voxelNMIGradientArray_d, 0, voxelNumber*sizeof(float4)));
const unsigned int Grid_reg_getVoxelBasedNMIGradientUsingPW =
(unsigned int)ceil((float)activeVoxelNumber/(float)Block_reg_getVoxelBasedNMIGradientUsingPW);
dim3 B1(Block_reg_getVoxelBasedNMIGradientUsingPW,1,1);
dim3 G1(Grid_reg_getVoxelBasedNMIGradientUsingPW,1,1);
reg_getVoxelBasedNMIGradientUsingPW_kernel <<< G1, B1 >>> (*voxelNMIGradientArray_d);
CUDA_SAFE_CALL(cudaThreadSynchronize());
#if _VERBOSE
printf("[VERBOSE] reg_getVoxelBasedNMIGradientUsingPW_kernel: %s - Grid size [%i %i %i] - Block size [%i %i %i]\n",
cudaGetErrorString(cudaGetLastError()),G1.x,G1.y,G1.z,B1.x,B1.y,B1.z);
#endif
}
#endif
|
c0c3ce07432ab1ffb877c3c06396f09cbcce729d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "ds_kernel_utils.h"
#include "memory_access_utils.h"
#include "quantization.h"
#include "quantization_utils.h"
#include "reduction_utils.h"
namespace cg = cooperative_groups;
/*
Pure quantization kernel with no fusion.
*/
template <int q_bits,
quantize::Type quant_type,
int UNROLL,
int internal_unroll,
int threads_per_group,
int max_threads>
__global__ void cached_quantization(int8_t* __restrict__ output_data,
float* __restrict__ params,
const __half* __restrict__ input_data,
int groups,
int elems_per_group)
{
cg::thread_block tb = cg::this_thread_block();
cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
// Indexing offsets
const int block_offset =
(tb.group_index().x * (max_threads / threads_per_group) * elems_per_group) +
(tb.thread_index().y * elems_per_group);
const int elem_offset = tb.thread_index().x * quantize::h_per_load;
const int base_offset = block_offset + elem_offset;
const int stride = tb.size() * quantize::h_per_load;
const __half* input_base = input_data + base_offset; //..
__half2 local_buffer[UNROLL * internal_unroll * quantize::h2_per_load];
#pragma unroll
for (int i = 0; i < UNROLL; i++) {
// Convenience helper, should resolve to register indices and not realize.
__half2* iteration_buffer = local_buffer + i * internal_unroll * quantize::h2_per_load;
#pragma unroll
for (int j = 0; j < internal_unroll; j++) {
const int iteration = i * internal_unroll + j;
mem_access::load_global<quantize::granularity>(
iteration_buffer + j * quantize::h2_per_load,
input_base + iteration * stride,
elem_offset + iteration * stride < elems_per_group);
}
}
quantize::
local_array<quant_type, q_bits, UNROLL * internal_unroll, threads_per_group, max_threads>(
local_buffer, params, output_data, elems_per_group, groups);
}
/********* Launcher methods ***********/
#define LAUNCH_CACHED_QUANT_CALL(q_bits, quant_type) \
hipLaunchKernelGGL(( cached_quantization<q_bits, \
quant_type, \
unroll_factor, \
internal_unroll_l, \
threads_per_group, \
max_threads>) \
, dim3(grid), dim3(block), 0, stream, output_data, params, input_data, groups, elems_per_group);
#define LAUNCH_CACHED_QUANT( \
q_bits, quant_type, unroll_factor_in, internal_unroll_in, threads_per_group_in) \
const int unroll_factor = unroll_factor_in; \
const int internal_unroll_l = internal_unroll_in; \
const int threads_per_group = threads_per_group_in; \
if (q_bits == 4) { \
if (quant_type == quantize::Type::Asymmetric) { \
LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Asymmetric) \
} else { \
LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Symmetric) \
} \
} else { \
if (quant_type == quantize::Type::Asymmetric) { \
LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Asymmetric) \
} else { \
LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Symmetric) \
} \
}
void launch_quant(int8_t* output_data,
float* params,
const __half* input_data,
const int groups,
const int elems_per_group,
const int num_bits,
const quantize::Type quant_type,
hipStream_t stream)
{
constexpr int max_threads = 256;
constexpr int internal_unroll = 2;
const bool is_subblock_schedule = (elems_per_group <= 128) ? true : false;
const int h_per_step = is_subblock_schedule ? quantize::h_per_load
: quantize::h_per_load * internal_unroll;
// Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
// warp-sized blocks rather than stepping up to 64/96 threads
const int one_step_threads = next_pow2((elems_per_group + h_per_step - 1) / h_per_step);
const int threads_per_group = (one_step_threads < max_threads) ? one_step_threads : max_threads;
const int groups_per_block =
is_subblock_schedule ? (max_threads + threads_per_group - 1) / threads_per_group : 1;
const int groups_launch = (groups_per_block + groups - 1) / groups_per_block;
dim3 block(threads_per_group, groups_per_block);
dim3 grid(groups_launch);
const int elems_per_step = threads_per_group * h_per_step;
const int external_unroll = (elems_per_group + elems_per_step - 1) / elems_per_step;
if (is_subblock_schedule) {
// <=128
if (threads_per_group == 1) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 1);
} else if (threads_per_group == 2) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 2);
} else if (threads_per_group == 4) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 4);
} else if (threads_per_group == 8) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 8);
} else if (threads_per_group == 16) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 16);
}
} else if (external_unroll == 1) {
// 129 - 4096 elems
// (this can launch with 1-7 warps as well)
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, internal_unroll, max_threads);
} else if (external_unroll == 2) {
// 4097 - 8192 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 2, internal_unroll, max_threads);
} else if (external_unroll == 3) {
// 8193 - 12288 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 3, internal_unroll, max_threads);
} else if (external_unroll == 4) {
// 12289 - 16384 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 4, internal_unroll, max_threads);
}
}
| c0c3ce07432ab1ffb877c3c06396f09cbcce729d.cu | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0
// DeepSpeed Team
#include "ds_kernel_utils.h"
#include "memory_access_utils.h"
#include "quantization.h"
#include "quantization_utils.h"
#include "reduction_utils.h"
namespace cg = cooperative_groups;
/*
Pure quantization kernel with no fusion.
*/
template <int q_bits,
quantize::Type quant_type,
int UNROLL,
int internal_unroll,
int threads_per_group,
int max_threads>
__global__ void cached_quantization(int8_t* __restrict__ output_data,
float* __restrict__ params,
const __half* __restrict__ input_data,
int groups,
int elems_per_group)
{
cg::thread_block tb = cg::this_thread_block();
cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
// Indexing offsets
const int block_offset =
(tb.group_index().x * (max_threads / threads_per_group) * elems_per_group) +
(tb.thread_index().y * elems_per_group);
const int elem_offset = tb.thread_index().x * quantize::h_per_load;
const int base_offset = block_offset + elem_offset;
const int stride = tb.size() * quantize::h_per_load;
const __half* input_base = input_data + base_offset; //..
__half2 local_buffer[UNROLL * internal_unroll * quantize::h2_per_load];
#pragma unroll
for (int i = 0; i < UNROLL; i++) {
// Convenience helper, should resolve to register indices and not realize.
__half2* iteration_buffer = local_buffer + i * internal_unroll * quantize::h2_per_load;
#pragma unroll
for (int j = 0; j < internal_unroll; j++) {
const int iteration = i * internal_unroll + j;
mem_access::load_global<quantize::granularity>(
iteration_buffer + j * quantize::h2_per_load,
input_base + iteration * stride,
elem_offset + iteration * stride < elems_per_group);
}
}
quantize::
local_array<quant_type, q_bits, UNROLL * internal_unroll, threads_per_group, max_threads>(
local_buffer, params, output_data, elems_per_group, groups);
}
/********* Launcher methods ***********/
#define LAUNCH_CACHED_QUANT_CALL(q_bits, quant_type) \
cached_quantization<q_bits, \
quant_type, \
unroll_factor, \
internal_unroll_l, \
threads_per_group, \
max_threads> \
<<<grid, block, 0, stream>>>(output_data, params, input_data, groups, elems_per_group);
#define LAUNCH_CACHED_QUANT( \
q_bits, quant_type, unroll_factor_in, internal_unroll_in, threads_per_group_in) \
const int unroll_factor = unroll_factor_in; \
const int internal_unroll_l = internal_unroll_in; \
const int threads_per_group = threads_per_group_in; \
if (q_bits == 4) { \
if (quant_type == quantize::Type::Asymmetric) { \
LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Asymmetric) \
} else { \
LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Symmetric) \
} \
} else { \
if (quant_type == quantize::Type::Asymmetric) { \
LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Asymmetric) \
} else { \
LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Symmetric) \
} \
}
void launch_quant(int8_t* output_data,
float* params,
const __half* input_data,
const int groups,
const int elems_per_group,
const int num_bits,
const quantize::Type quant_type,
cudaStream_t stream)
{
constexpr int max_threads = 256;
constexpr int internal_unroll = 2;
const bool is_subblock_schedule = (elems_per_group <= 128) ? true : false;
const int h_per_step = is_subblock_schedule ? quantize::h_per_load
: quantize::h_per_load * internal_unroll;
// Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
// warp-sized blocks rather than stepping up to 64/96 threads
const int one_step_threads = next_pow2((elems_per_group + h_per_step - 1) / h_per_step);
const int threads_per_group = (one_step_threads < max_threads) ? one_step_threads : max_threads;
const int groups_per_block =
is_subblock_schedule ? (max_threads + threads_per_group - 1) / threads_per_group : 1;
const int groups_launch = (groups_per_block + groups - 1) / groups_per_block;
dim3 block(threads_per_group, groups_per_block);
dim3 grid(groups_launch);
const int elems_per_step = threads_per_group * h_per_step;
const int external_unroll = (elems_per_group + elems_per_step - 1) / elems_per_step;
if (is_subblock_schedule) {
// <=128
if (threads_per_group == 1) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 1);
} else if (threads_per_group == 2) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 2);
} else if (threads_per_group == 4) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 4);
} else if (threads_per_group == 8) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 8);
} else if (threads_per_group == 16) {
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 16);
}
} else if (external_unroll == 1) {
// 129 - 4096 elems
// (this can launch with 1-7 warps as well)
LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, internal_unroll, max_threads);
} else if (external_unroll == 2) {
// 4097 - 8192 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 2, internal_unroll, max_threads);
} else if (external_unroll == 3) {
// 8193 - 12288 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 3, internal_unroll, max_threads);
} else if (external_unroll == 4) {
// 12289 - 16384 elems
LAUNCH_CACHED_QUANT(num_bits, quant_type, 4, internal_unroll, max_threads);
}
}
|
905e4f88c97a2d2c7a08576368afdb8d5f4404d1.hip | // !!! This is a file automatically generated by hipify!!!
//===============================================================================================================================================================================================================200
// INCLUDE/DEFINE
//===============================================================================================================================================================================================================200
#include "hip/hip_runtime.h" // (in library path specified to compiler)
//===============================================================================================================================================================================================================200
// SET DEVICE FUNCTION
//===============================================================================================================================================================================================================200
void setdevice(void)
{
// variables
int num_devices;
int device;
// work
hipGetDeviceCount(&num_devices);
if (num_devices > 1) {
// variables
int max_multiprocessors;
int max_device;
hipDeviceProp_t properties;
// initialize variables
max_multiprocessors = 0;
max_device = 0;
for (device = 0; device < num_devices; device++) {
hipGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
hipSetDevice(max_device);
}
}
//===============================================================================================================================================================================================================200
// GET LAST ERROR FUNCTION
//===============================================================================================================================================================================================================200
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
// fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
printf("Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
fflush(NULL);
exit(EXIT_FAILURE);
}
}
//===============================================================================================================================================================================================================200
// END
//===============================================================================================================================================================================================================200
| 905e4f88c97a2d2c7a08576368afdb8d5f4404d1.cu | //===============================================================================================================================================================================================================200
// INCLUDE/DEFINE
//===============================================================================================================================================================================================================200
#include "cuda.h" // (in library path specified to compiler)
//===============================================================================================================================================================================================================200
// SET DEVICE FUNCTION
//===============================================================================================================================================================================================================200
void setdevice(void)
{
// variables
int num_devices;
int device;
// work
cudaGetDeviceCount(&num_devices);
if (num_devices > 1) {
// variables
int max_multiprocessors;
int max_device;
cudaDeviceProp properties;
// initialize variables
max_multiprocessors = 0;
max_device = 0;
for (device = 0; device < num_devices; device++) {
cudaGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
cudaSetDevice(max_device);
}
}
//===============================================================================================================================================================================================================200
// GET LAST ERROR FUNCTION
//===============================================================================================================================================================================================================200
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
// fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
printf("Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
fflush(NULL);
exit(EXIT_FAILURE);
}
}
//===============================================================================================================================================================================================================200
// END
//===============================================================================================================================================================================================================200
|
3a19d3cd0b4f20a7b2f7989ca29224b6ecd22c7c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#define IDX(w, t, n_walkers) ((w) + ((t)*(n_walkers)))
/***************************************************************/
__global__ void init_curand_states(int seed,
unsigned int size, hiprandState_t *states);
__global__ void init_walkers(float *walkers, unsigned int
n_walkers, unsigned int n_theta, unsigned int r,
hiprandState_t *states);
__global__ void step_walkers(float *walkers, unsigned int
n_walkers, unsigned int s1_sz, unsigned int
offset, unsigned int n_theta, float a,
hiprandState_t *states);
void walkers_to_file(float* walkers, unsigned int n_walkers,
unsigned int n_theta, const char *f_name);
void get_means(float *walkers, double *means, unsigned int n_walkers,
unsigned int n_theta, int step);
void means_to_file(double* means, unsigned int steps,
unsigned int n_theta, const char *f_name);
/***************************************************************/
unsigned int get_block_size(unsigned int n_walkers)
{
unsigned int factor = ceil((double)n_walkers / 4800);
unsigned int blocksize = factor*32;
if(blocksize > 256)
{
blocksize = 256;
}
return blocksize;
}
int main(int argc, char*argv[])
{
hiprandState_t *states;
float *walkers_d;
double *means;
int seed = 10;
int a = 2;
int r = 2;
if(argc !=4)
{
fprintf(stderr, "usage emcee_emcee_gpu "
"n_walkers, n_theta, steps\n");
fprintf(stderr, "n_walkers: number of walkers "
"to use in ensemble\n");
fprintf(stderr, "n_theta: the dimension of the "
"probability space to sample "
"from \n");
fprintf(stderr, "steps: number of steps each "
"walker will take in the "
"simulation\n");
return 1;
}
unsigned int n_walkers = atoi(argv[1]);
unsigned int n_theta = atoi(argv[2]);
unsigned int steps = atoi(argv[3]);
unsigned int s1_sz = ceil((float) n_walkers / 2);
unsigned int s2_sz = n_walkers - s1_sz;
unsigned int block_sz = get_block_size(n_walkers);
unsigned int n_blocks = ceil((float) n_walkers
/ block_sz);
long states_byte_sz = n_walkers*sizeof(hiprandState_t);
long walker_byte_sz = n_walkers*n_theta*sizeof(float);
unsigned int s_mem_sz = 2*n_theta*sizeof(float);
long means_sz = n_theta*steps*sizeof(double);
means = (double*) malloc(means_sz);
fprintf(stdout,"LAUNCHING %d BLOCKS OF SIZE %d\n",
n_blocks, block_sz);
// allocate and init individual random number seeds
hipMalloc((void**) &states, states_byte_sz);
hipLaunchKernelGGL(( init_curand_states), dim3(2*n_blocks),dim3(block_sz), 0, 0,
seed, n_walkers, states);
// allocate and init each walker.
hipMalloc((void**) &walkers_d, walker_byte_sz);
hipLaunchKernelGGL(( init_walkers), dim3(2*n_blocks),dim3(block_sz), 0, 0, walkers_d, n_walkers,
n_theta, r, states);
for(int s = 0; s < steps; s++)
{
//step with first half of walkers
hipLaunchKernelGGL(( step_walkers), dim3(n_blocks), dim3(block_sz), s_mem_sz, 0,
walkers_d, n_walkers, s1_sz, 0,
n_theta, a, states);
//step with second half of walkers
hipLaunchKernelGGL(( step_walkers), dim3(n_blocks), dim3(block_sz), s_mem_sz, 0,
walkers_d, n_walkers, s2_sz, s1_sz,
n_theta, a, states);
get_means(walkers_d, means, n_walkers,
n_theta, s);
}
const char* f_means = "means_gpu.out";
means_to_file(means, steps, n_theta, f_means);
return 0;
}
/***************************************************************/
__global__ void init_curand_states(int seed,
unsigned int size, hiprandState_t *states)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if(id < size)
{
hiprand_init(seed, id, 0, &states[id]);
}
}
/***************************************************************/
__global__ void init_walkers(float *walkers, unsigned int
n_walkers, unsigned int n_theta, unsigned int r,
hiprandState_t *states)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if(id < n_walkers)
{
for(int t = 0; t < n_theta; t++)
{
walkers[IDX(id,t,n_walkers)] =
2*r*(hiprand_uniform(&states[id])
-.5);
}
}
}
/***************************************************************/
/* this is inverse CDF of the proposal distribution suggested
in Weare and Goodman 2010.
Parameter a is scaling value nominally set to 2.0.
Parameter u is a random uniform drawn from [0, 1].
The return value is random draw from the proposal
distribution.
*/
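/* Derivation sketch (assuming the g(z) ~ 1/sqrt(z) stretch-move proposal
   on [1/a, a] from that paper): integrating and inverting the CDF gives
   z = (u*(a-1) + 1)^2 / a, which is exactly what G() below computes. */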
__device__ float G(float a, float u)
{
return powf((u*(a-1)+1) / sqrtf(a), 2);
}
/***************************************************************/
/* The Rosenbrock distribution is the test distribution we
wish to approximate expected values from.
See https://en.wikipedia.org/wiki/Rosenbrock_function for
details.
*/
__device__ double Rosenbrock(float *walker)
{
return ((double) exp(-((100*pow(walker[1]
- pow(walker[0],2),2)) +
pow(1 - walker[0],2))
/ 20));
}
/***************************************************************/
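/* One ensemble stretch-move update (see Weare and Goodman 2010): each
   walker w1 in the active half pairs with a random walker w2 from the
   other half, proposes w1' = w2 + z*(w1 - w2) with z drawn via G(a, u),
   and accepts with probability min(1, z^(n_theta-1) * p(w1')/p(w1)),
   where p is the (unnormalized) Rosenbrock target defined above. */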
__global__ void step_walkers(float *walkers, unsigned int
n_walkers, unsigned int s1_sz, unsigned int
offset, unsigned int n_theta, float a,
hiprandState_t *states)
{
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if(id < s1_sz)
{
extern __shared__ float w1[];
float *w1_prime = &w1[n_theta];
hiprandState_t localState = states[id];
int w1_idx = id + offset;
int w2_idx = s1_sz + ceil((n_walkers - s1_sz) *
hiprand_uniform(&localState)) -
1 - offset;
float z = G(a,hiprand_uniform(&localState));
for(int t=0; t<n_theta; t++)
{
w1[t] = walkers[IDX(w1_idx, t, n_walkers)];
w1_prime[t] = walkers[IDX(w2_idx, t, n_walkers)]
+ z*(w1[t] - walkers[IDX(w2_idx,
t, n_walkers)]);
}
if (hiprand_uniform(&localState) <
(powf(z,n_theta-1)*(
Rosenbrock(w1_prime) /
Rosenbrock(w1))
))
{
for(int t=0; t<n_theta; t++)
{
walkers[IDX(w1_idx, t, n_walkers)] =
w1_prime[t];
}
}
states[id] = localState;
}
}
/***************************************************************/
void walkers_to_file(float* walkers, unsigned int n_walkers,
unsigned int n_theta, const char *f_name)
{
FILE *fp = fopen(f_name,"w");
for(int w = 0; w < n_walkers; w++)
{
for(int t = 0; t < n_theta; t++){
fprintf(fp, "%f\t", walkers[
IDX(w,t,n_walkers)]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
/***************************************************************/
void get_means(float *walkers, double *means, unsigned int n_walkers,
unsigned int n_theta, int step)
{
float *start_ind, *stop_ind;
for(int t =0; t < n_theta; t++)
{
start_ind = walkers + t*n_walkers;
stop_ind = walkers + (t+1)*n_walkers;
thrust::device_vector<float> vec(
start_ind, stop_ind);
means[t + n_theta*step] = thrust::reduce(
vec.begin(), vec.end()) / n_walkers;
}
}
/***************************************************************/
void means_to_file(double* means, unsigned int steps,
unsigned int n_theta, const char *f_name)
{
FILE *fp = fopen(f_name,"w");
for(int s = 0; s < steps; s++)
{
for(int t = 0; t < n_theta; t++){
fprintf(fp, "%f\t", means[t + n_theta*s]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
| 3a19d3cd0b4f20a7b2f7989ca29224b6ecd22c7c.cu | #include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#define IDX(w, t, n_walkers) ((w) + ((t)*(n_walkers)))
/***************************************************************/
__global__ void init_curand_states(int seed,
unsigned int size, curandState_t *states);
__global__ void init_walkers(float *walkers, unsigned int
n_walkers, unsigned int n_theta, unsigned int r,
curandState_t *states);
__global__ void step_walkers(float *walkers, unsigned int
n_walkers, unsigned int s1_sz, unsigned int
offset, unsigned int n_theta, float a,
curandState_t *states);
void walkers_to_file(float* walkers, unsigned int n_walkers,
unsigned int n_theta, const char *f_name);
void get_means(float *walkers, double *means, unsigned int n_walkers,
unsigned int n_theta, int step);
void means_to_file(double* means, unsigned int steps,
unsigned int n_theta, const char *f_name);
/***************************************************************/
unsigned int get_block_size(unsigned int n_walkers)
{
unsigned int factor = ceil((double)n_walkers / 4800);
unsigned int blocksize = factor*32;
if(blocksize > 256)
{
blocksize = 256;
}
return blocksize;
}
int main(int argc, char*argv[])
{
curandState_t *states;
float *walkers_d;
double *means;
int seed = 10;
int a = 2;
int r = 2;
if(argc !=4)
{
fprintf(stderr, "usage emcee_emcee_gpu "
"n_walkers, n_theta, steps\n");
fprintf(stderr, "n_walkers: number of walkers "
"to use in ensemble\n");
fprintf(stderr, "n_theta: the dimension of the "
"probability space to sample "
"from \n");
fprintf(stderr, "steps: number of steps each "
"walker will take in the "
"simulation\n");
return 1;
}
unsigned int n_walkers = atoi(argv[1]);
unsigned int n_theta = atoi(argv[2]);
unsigned int steps = atoi(argv[3]);
unsigned int s1_sz = ceil((float) n_walkers / 2);
unsigned int s2_sz = n_walkers - s1_sz;
unsigned int block_sz = get_block_size(n_walkers);
unsigned int n_blocks = ceil((float) n_walkers
/ block_sz);
long states_byte_sz = n_walkers*sizeof(curandState_t);
long walker_byte_sz = n_walkers*n_theta*sizeof(float);
unsigned int s_mem_sz = 2*n_theta*sizeof(float);
long means_sz = n_theta*steps*sizeof(double);
means = (double*) malloc(means_sz);
fprintf(stdout,"LAUNCHING %d BLOCKS OF SIZE %d\n",
n_blocks, block_sz);
// allocate and init individual random number seeds
cudaMalloc((void**) &states, states_byte_sz);
init_curand_states<<<2*n_blocks,block_sz>>>(
seed, n_walkers, states);
// allocate and init each walker.
cudaMalloc((void**) &walkers_d, walker_byte_sz);
init_walkers<<<2*n_blocks,block_sz>>>(walkers_d, n_walkers,
n_theta, r, states);
for(int s = 0; s < steps; s++)
{
//step with first half of walkers
step_walkers<<<n_blocks, block_sz, s_mem_sz>>>(
walkers_d, n_walkers, s1_sz, 0,
n_theta, a, states);
//step with second half of walkers
step_walkers<<<n_blocks, block_sz, s_mem_sz>>>(
walkers_d, n_walkers, s2_sz, s1_sz,
n_theta, a, states);
get_means(walkers_d, means, n_walkers,
n_theta, s);
}
const char* f_means = "means_gpu.out";
means_to_file(means, steps, n_theta, f_means);
return 0;
}
/***************************************************************/
__global__ void init_curand_states(int seed,
unsigned int size, curandState_t *states)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if(id < size)
{
curand_init(seed, id, 0, &states[id]);
}
}
/***************************************************************/
__global__ void init_walkers(float *walkers, unsigned int
n_walkers, unsigned int n_theta, unsigned int r,
curandState_t *states)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if(id < n_walkers)
{
for(int t = 0; t < n_theta; t++)
{
walkers[IDX(id,t,n_walkers)] =
2*r*(curand_uniform(&states[id])
-.5);
}
}
}
/***************************************************************/
/* this is inverse CDF of the proposal distribution suggested
in Weare and Goodman 2010.
Parameter a is scaling value nominally set to 2.0.
Parameter u is a random uniform drawn from [0, 1].
The return value is random draw from the proposal
distribution.
*/
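/* Derivation sketch (assuming the g(z) ~ 1/sqrt(z) stretch-move proposal
   on [1/a, a] from that paper): integrating and inverting the CDF gives
   z = (u*(a-1) + 1)^2 / a, which is exactly what G() below computes. */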
__device__ float G(float a, float u)
{
return powf((u*(a-1)+1) / sqrtf(a), 2);
}
/***************************************************************/
/* The Rosenbrock distribution is the test distribution we
wish to approximate expected values from.
See https://en.wikipedia.org/wiki/Rosenbrock_function for
details.
*/
__device__ double Rosenbrock(float *walker)
{
return ((double) exp(-((100*pow(walker[1]
- pow(walker[0],2),2)) +
pow(1 - walker[0],2))
/ 20));
}
/***************************************************************/
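/* One ensemble stretch-move update (see Weare and Goodman 2010): each
   walker w1 in the active half pairs with a random walker w2 from the
   other half, proposes w1' = w2 + z*(w1 - w2) with z drawn via G(a, u),
   and accepts with probability min(1, z^(n_theta-1) * p(w1')/p(w1)),
   where p is the (unnormalized) Rosenbrock target defined above. */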
__global__ void step_walkers(float *walkers, unsigned int
n_walkers, unsigned int s1_sz, unsigned int
offset, unsigned int n_theta, float a,
curandState_t *states)
{
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if(id < s1_sz)
{
extern __shared__ float w1[];
float *w1_prime = &w1[n_theta];
curandState_t localState = states[id];
int w1_idx = id + offset;
int w2_idx = s1_sz + ceil((n_walkers - s1_sz) *
curand_uniform(&localState)) -
1 - offset;
float z = G(a,curand_uniform(&localState));
for(int t=0; t<n_theta; t++)
{
w1[t] = walkers[IDX(w1_idx, t, n_walkers)];
w1_prime[t] = walkers[IDX(w2_idx, t, n_walkers)]
+ z*(w1[t] - walkers[IDX(w2_idx,
t, n_walkers)]);
}
if (curand_uniform(&localState) <
(powf(z,n_theta-1)*(
Rosenbrock(w1_prime) /
Rosenbrock(w1))
))
{
for(int t=0; t<n_theta; t++)
{
walkers[IDX(w1_idx, t, n_walkers)] =
w1_prime[t];
}
}
states[id] = localState;
}
}
/***************************************************************/
void walkers_to_file(float* walkers, unsigned int n_walkers,
unsigned int n_theta, const char *f_name)
{
FILE *fp = fopen(f_name,"w");
for(int w = 0; w < n_walkers; w++)
{
for(int t = 0; t < n_theta; t++){
fprintf(fp, "%f\t", walkers[
IDX(w,t,n_walkers)]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
/***************************************************************/
void get_means(float *walkers, double *means, unsigned int n_walkers,
unsigned int n_theta, int step)
{
float *start_ind, *stop_ind;
for(int t =0; t < n_theta; t++)
{
start_ind = walkers + t*n_walkers;
stop_ind = walkers + (t+1)*n_walkers;
thrust::device_vector<float> vec(
start_ind, stop_ind);
means[t + n_theta*step] = thrust::reduce(
vec.begin(), vec.end()) / n_walkers;
}
}
/***************************************************************/
void means_to_file(double* means, unsigned int steps,
unsigned int n_theta, const char *f_name)
{
FILE *fp = fopen(f_name,"w");
for(int s = 0; s < steps; s++)
{
for(int t = 0; t < n_theta; t++){
fprintf(fp, "%f\t", means[t + n_theta*s]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
|
59a0550936ddaf73d67123b95654da785ac701ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudaUtils.hpp"
void __CUDAErrorCheck(const char *file,
const int line) {
hipError_t status = hipGetLastError();
if (hipSuccess != status) {
printf("!!!Error, CUDAErrorCheck failed, %s:%i : %s\n",
file, line, hipGetErrorString(status));
exit(-1);
}
status = hipDeviceSynchronize();
if(hipSuccess != status) {
printf("!!!Error, CUDAErrorCheck sync failed, %s:%i : %s\n",
file, line, hipGetErrorString(status));
exit(-1);
}
}
| 59a0550936ddaf73d67123b95654da785ac701ce.cu | #include "cudaUtils.hpp"
void __CUDAErrorCheck(const char *file,
const int line) {
cudaError status = cudaGetLastError();
if (cudaSuccess != status) {
printf("!!!Error, CUDAErrorCheck failed, %s:%i : %s\n",
file, line, cudaGetErrorString(status));
exit(-1);
}
status = cudaDeviceSynchronize();
if(cudaSuccess != status) {
printf("!!!Error, CUDAErrorCheck sync failed, %s:%i : %s\n",
file, line, cudaGetErrorString(status));
exit(-1);
}
}
|
14f287a9fd5e58dfc387530c29c72f0e176af0de.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "utils.h"
#include "kernel.h"
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
float *th, *pii, *q;
float *qc, *qi, *qr, *qs;
float *den, *p, *delz;
float *rain,*rainncv;
float *sr;
float *snow, *snowncv;
float delt = 10.f;
int ims = 0, ime = 59, jms = 0, jme = 45, kms = 0, kme = 2;
int ips = 0, ipe = 59, jps = 0, jpe = 45, kps = 0, kpe = 2;
int d3 = (ime-ims+1) * (jme-jms+1) * (kme-kms+1) ;
int d2 = (ime-ims+1) * (jme-jms+1) ;
int dips = 0 ; int dipe = (ipe-ips+1) ;
int djps = 0 ; int djpe = (jpe-jps+1) ;
int dkps = 0 ; int dkpe = (kpe-kps+1) ;
int remx = (ipe-ips+1) % XXX != 0 ? 1 : 0 ;
int remy = (jpe-jps+1) % YYY != 0 ? 1 : 0 ;
dim3 dimBlock( XXX , YYY ) ;
dim3 dimGrid ( (ipe-ips+1) / XXX + remx , (jpe-jps+1) / YYY + remy ) ;
float rain_sum = 0, snow_sum = 0;
long time = 0;
for (int i = 0; i < repeat; i++) {
// read-only in the kernel
TODEV3(pii) ;
TODEV3(den) ;
TODEV3(p) ;
TODEV3(delz) ;
TODEV3(th) ;
TODEV3(q) ;
TODEV3(qc) ;
TODEV3(qi) ;
TODEV3(qr) ;
TODEV3(qs) ;
TODEV2(rain) ;
TODEV2(rainncv) ;
TODEV2(sr) ;
TODEV2(snow) ;
TODEV2(snowncv) ;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( wsm) , dim3(dimGrid), dim3(dimBlock) , 0, 0,
th_d, pii_d, q_d, qc_d, qi_d, qr_d, qs_d, den_d, p_d, delz_d,
rain_d, rainncv_d,
sr_d,
snow_d, snowncv_d,
delt,
dips+1 , (ipe-ips+1) , // ids, ide
djps+1 , (jpe-jps+1) , // jds, jde
dkps+1 , (kpe-kps+1), // kds, kde
dips+1 , dipe , // ims, ime
djps+1 , djpe , // jms, jme
dkps+1 , dkpe, // kms, kme
dips+1 , dipe , // ips, ipe
djps+1 , djpe , // jps, jpe
dkps+1 , dkpe) ; // kps, kpe
hipDeviceSynchronize() ;
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
FROMDEV2(rain) ;
FROMDEV2(snow) ;
rain_sum = snow_sum = 0;
for (int i = 0; i < d2; i++) {
rain_sum += rain[i];
snow_sum += snow[i];
}
FREE(pii) ;
FREE(den) ;
FREE(p) ;
FREE(delz) ;
FREE(th) ;
FREE(q) ;
FREE(qc) ;
FREE(qi) ;
FREE(qr) ;
FREE(qs) ;
FREE(rain) ;
FREE(rainncv) ;
FREE(sr) ;
FREE(snow) ;
FREE(snowncv) ;
}
printf("Average kernel execution time: %lf (ms)\n", (time * 1e-6) / repeat);
printf("Checksum: rain = %f snow = %f\n", rain_sum, snow_sum);
return(0) ;
}
| 14f287a9fd5e58dfc387530c29c72f0e176af0de.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "utils.h"
#include "kernel.h"
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
float *th, *pii, *q;
float *qc, *qi, *qr, *qs;
float *den, *p, *delz;
float *rain,*rainncv;
float *sr;
float *snow, *snowncv;
float delt = 10.f;
int ims = 0, ime = 59, jms = 0, jme = 45, kms = 0, kme = 2;
int ips = 0, ipe = 59, jps = 0, jpe = 45, kps = 0, kpe = 2;
int d3 = (ime-ims+1) * (jme-jms+1) * (kme-kms+1) ;
int d2 = (ime-ims+1) * (jme-jms+1) ;
int dips = 0 ; int dipe = (ipe-ips+1) ;
int djps = 0 ; int djpe = (jpe-jps+1) ;
int dkps = 0 ; int dkpe = (kpe-kps+1) ;
int remx = (ipe-ips+1) % XXX != 0 ? 1 : 0 ;
int remy = (jpe-jps+1) % YYY != 0 ? 1 : 0 ;
dim3 dimBlock( XXX , YYY ) ;
dim3 dimGrid ( (ipe-ips+1) / XXX + remx , (jpe-jps+1) / YYY + remy ) ;
float rain_sum = 0, snow_sum = 0;
long time = 0;
for (int i = 0; i < repeat; i++) {
// read-only in the kernel
TODEV3(pii) ;
TODEV3(den) ;
TODEV3(p) ;
TODEV3(delz) ;
TODEV3(th) ;
TODEV3(q) ;
TODEV3(qc) ;
TODEV3(qi) ;
TODEV3(qr) ;
TODEV3(qs) ;
TODEV2(rain) ;
TODEV2(rainncv) ;
TODEV2(sr) ;
TODEV2(snow) ;
TODEV2(snowncv) ;
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
wsm <<< dimGrid, dimBlock >>> (
th_d, pii_d, q_d, qc_d, qi_d, qr_d, qs_d, den_d, p_d, delz_d,
rain_d, rainncv_d,
sr_d,
snow_d, snowncv_d,
delt,
dips+1 , (ipe-ips+1) , // ids, ide
djps+1 , (jpe-jps+1) , // jds, jde
dkps+1 , (kpe-kps+1), // kds, kde
dips+1 , dipe , // ims, ime
djps+1 , djpe , // jms, jme
dkps+1 , dkpe, // kms, kme
dips+1 , dipe , // ips, ipe
djps+1 , djpe , // jps, jpe
dkps+1 , dkpe) ; // kps, kpe
hipDeviceSynchronize() ;
auto end = std::chrono::steady_clock::now();
time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
FROMDEV2(rain) ;
FROMDEV2(snow) ;
rain_sum = snow_sum = 0;
for (int i = 0; i < d2; i++) {
rain_sum += rain[i];
snow_sum += snow[i];
}
FREE(pii) ;
FREE(den) ;
FREE(p) ;
FREE(delz) ;
FREE(th) ;
FREE(q) ;
FREE(qc) ;
FREE(qi) ;
FREE(qr) ;
FREE(qs) ;
FREE(rain) ;
FREE(rainncv) ;
FREE(sr) ;
FREE(snow) ;
FREE(snowncv) ;
}
printf("Average kernel execution time: %lf (ms)\n", (time * 1e-6) / repeat);
printf("Checksum: rain = %f snow = %f\n", rain_sum, snow_sum);
return(0) ;
}
|
ac9678c920a98e95c4752722be72958fab1e4901.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants two separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(hipGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
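// A minimal (hypothetical) usage sketch: run one of the MAGMA wrappers defined
// above and forward the returned info to checkMagmaInternalError, which throws
// only for the negative, argument-related codes listed in the table:
//   magma_int_t info = 0;
//   magmaCholesky<double>(MagmaLower, n, dA, ldda, &info);
//   checkMagmaInternalError(info, "cholesky");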
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
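// For reference, a call such as
//   scalar_t** A_array;
//   ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
// expands to roughly
//   auto storage_A_array = pin_memory<scalar_t*>(batch_size);
//   A_array = static_cast<scalar_t**>(storage_A_array.data());
// i.e. the pointer array lives in pinned host memory and stays alive as long
// as the local storage_##name object does.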
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max<magma_int_t>(1, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
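// For example, with batch_size = 70000 and batch_limit = 65535 this loop
// runs once (covering 65535 solves) and the call after the loop handles the
// remaining 70000 % 65535 = 4465 matrices.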
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain data of the original 'input' tensor (left-hand-side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
'infos_lu' is for holding magmaLU errors, and 'infos_getri' is for holding magmaGetri errors
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
// magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run
// without violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
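// For example, a real-valued input with batch_size = 600000 is processed in
// chunks of 262140, 262140 and 600000 - 2 * 262140 = 75720 matrices.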
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing IMA
// (see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the cloneBatchedColumnMajor function; however, it pads the input with
// one extra element utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way if MAGMA
// reads off bounds it will still be valid user memory.
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
// if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_ || use_cusolver_potrf_batched_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite n-by-n matrix 'input' using the Cholesky solver.
This is an in-place routine; the content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls a LAPACK routine on the CPU
// using magmaCholeskySolveBatched is a lot faster
// note that magmaCholeskySolve is also slow
// 'input' is modified in-place; we need to clone it and replace it with a diagonal matrix
// for apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// the 'input' tensor has to be a batch of diagonal matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
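// At this point 'input' holds a batch of identity matrices, so the
// cholesky_solve call below effectively solves A X = I against the Cholesky
// factor stored in 'input_working_copy' and leaves X = A^{-1} in 'input'.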
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take a matrix-wise array of ints as an 'info' argument;
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
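// e.g. info = -3 would indicate that the third argument (nrhs) had an illegal value.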
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant that calls the single-input MAGMA function on each matrix of a batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = ::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
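// Worked example for the no-pivot branch above (hypothetical sizes): for a batch of 3x5 inputs
// with compute_pivots == false, k = min(m, n) = 3 and every matrix in the batch gets
//
//   pivots = [1, 2, 3]   // 1-based (Fortran) identity permutation
//
// which matches what getrf reports when no row interchanges are performed.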
/*
Computes the LU decomposition of an m-by-n matrix or batch of matrices in 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = ::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
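// Conceptual note (sketch only, ignoring where pivots/infos physically live): the single batched
// call above does what the looped variant does one matrix at a time,
//
//   // for (int64_t i = 0; i < batch_size; i++)
//   //   magmaLu<scalar_t>(m, n, input_array[i], leading_dimension, pivots_array[i], &infos_data[i]);
//
// but in one launch over the pointer arrays, which is why the batched path is expected to win
// only for many small matrices (see the doc comment above).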
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many full batches of 65535 as possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
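// Worked example for the chunking above (hypothetical numbers): with batch_size = 150000 and
// batch_limit = 65535 the main loop issues floor(150000 / 65535) = 2 full calls covering
// 2 * 65535 = 131070 matrices, and the tail call handles 150000 % 65535 = 18930 matrices.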
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batch counts up to 8 and matrix sizes of at least 64x64 the cuBLAS for-loop is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used, this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if the number of rows is smaller than 32, the batched version is always faster for batch size > 1
// for a larger number of rows the choice also depends on the batch count (see the condition below)
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
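// Worked example for the heuristic above (hypothetical shapes): an input of shape (8, 128, 64)
// satisfies 128 <= 256 and batchCount = 8 >= max(2, 128 / 16) = 8, so the batched path is taken;
// an input of shape (2, 512, 512) fails 512 <= 256, so the looped path is used.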
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to run ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, c10::string_view mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, c10::string_view mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for linear algebra functions is to raise an error if something goes wrong
// or the input doesn't satisfy some requirement,
// therefore we return early since further computations would be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
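// Note on the workspace query above (standard LAPACK/MAGMA convention, shown as a sketch):
// calling the driver with lwork == -1 performs no computation and only reports optimal sizes,
//
//   // magma_int_t lwork = -1; scalar_t wkopt;
//   // magmaSyevd<scalar_t, value_t>(..., &wkopt, lwork, ...);                        // query only
//   // lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");      // allocate once
//
// after which the work buffers are allocated a single time and reused for every matrix in the batch.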
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syevd_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, which breaks 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer the results computed by MAGMA from CPU back to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and returns CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
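// Layout note (illustrative): at::empty_strided({n, n}, {1, n}, ...) above allocates a
// column-major (Fortran-ordered) buffer, as MAGMA/LAPACK expect; e.g. for n = 3 the element at
// (row i, col j) lives at offset i + 3 * j, so columns are contiguous in memory.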
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy have a Fortran-contiguous (column-major) memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
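// Illustrative sketch of the copy above (valid for the square matrices handled here): allocating
// with at::empty and then calling transpose_(-2, -1) leaves the shape unchanged but makes the
// strides column-major, so the subsequent copy_(input) produces a Fortran-contiguous CPU copy,
// which is the layout magmaEig expects.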
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = ::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
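// Worked shape example for jobz above (hypothetical 5x3 input, gesdd convention):
//   jobchar 'A' (MagmaAllVec) : U is 5x5, S has 3 entries, VT is 3x3
//   jobchar 'S' (MagmaSomeVec): U is 5x3, S has 3 entries, VT is 3x3
//   jobchar 'N' (MagmaNoVec)  : only the 3 singular values are computed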
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = ::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
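// Note on the final conversion above (sketch): torch.svd returns V rather than LAPACK's VT, and
//   V = VT.conj().transpose(-2, -1)
// recovers it, i.e. V = VT^H for complex inputs, which reduces to VT^T for real inputs.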
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = ::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, ::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = ::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
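// Math sketch for the routine above: in the overdetermined branch (m >= n), with A = Q R,
//   min_x ||A x - B||  is solved by  x = R^{-1} (Q^H B),
// which is exactly geqrf (factor), ormqr (B <- Q^H B), triangular_solve (R x = Q^H B).
// In the underdetermined branch the same idea is applied to A^H = Q R, giving the minimum-norm
// solution via  R^H z = B,  x = Q z,  matching steps 1-3 of the else-branch.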
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can use either cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would either call cuSOLVER or MAGMA,
// if MAGMA is called a runtime error is thrown about not finding MAGMA in compilation
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
| ac9678c920a98e95c4752722be72958fab1e4901.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma_types.h>
#include <magma_v2.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSyevd(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *w, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork,
value_t *rwork,
magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaGels(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb,
scalar_t* hwork, magma_int_t lwork, magma_int_t* info);
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesv_gpu(n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesv_gpu(n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_zgesv_batched(n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_cgesv_batched(n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
magma_int_t n) {
return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
magma_int_t n) {
return magma_get_cgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<double>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetri_gpu(
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaDoubleComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
magma_int_t* ipiv,
c10::complex<float>* dwork,
magma_int_t lwork,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetri_gpu(
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
ipiv,
reinterpret_cast<magmaFloatComplex*>(dwork),
lwork,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
magma_int_t n,
c10::complex<double>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<double>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_zgetri_outofplace_batched(
n,
reinterpret_cast<magmaDoubleComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
magma_int_t n,
c10::complex<float>** dA_array,
magma_int_t ldda,
magma_int_t** ipiv_array,
c10::complex<float>** dinvA_array,
magma_int_t lddia,
magma_int_t* info_array,
magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_cgetri_outofplace_batched(
n,
reinterpret_cast<magmaFloatComplex**>(dA_array),
ldda,
ipiv_array,
reinterpret_cast<magmaFloatComplex**>(dinvA_array),
lddia,
info_array,
batchsize,
magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrs_gpu(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cpotrs_batched(uplo, n, nrhs,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_spotrf_gpu(uplo, n, dA, ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
magma_int_t ldda, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaDoubleComplex alpha({1, 0});
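// BLAS trsm solves op(A) * X = alpha * B (here side = MagmaLeft); alpha = 1 leaves B unscaled.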
magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmaFloatComplex alpha({1, 0});
magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
magma_int_t m,
magma_int_t n) {
return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
magma_int_t m,
magma_int_t n) {
return magma_get_cgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_zgeqrf_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
info);
} else {
magma_zgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t* info,
bool is_v2) {
MagmaStreamSyncGuard guard;
if (!is_v2) {
magma_cgeqrf_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
info);
} else {
magma_cgeqrf2_gpu(
m,
n,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
info);
}
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<double>* dA,
magma_int_t ldda,
c10::complex<double>* tau,
c10::complex<double>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaDoubleComplex*>(dA),
ldda,
reinterpret_cast<magmaDoubleComplex*>(tau),
reinterpret_cast<magmaDoubleComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
magma_int_t m,
magma_int_t n,
magma_int_t k,
c10::complex<float>* dA,
magma_int_t ldda,
c10::complex<float>* tau,
c10::complex<float>* dT,
magma_int_t nb,
magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cungqr_gpu(
m,
n,
k,
reinterpret_cast<magmaFloatComplex*>(dA),
ldda,
reinterpret_cast<magmaFloatComplex*>(tau),
reinterpret_cast<magmaFloatComplex*>(dT),
nb,
info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
(void)rwork; // unused
(void)lrwork; // unused
MagmaStreamSyncGuard guard;
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<double>, double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSyevd<c10::complex<float>, float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cheevd_gpu(
jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
double *A, magma_int_t lda,
double *w,
double *VL, magma_int_t ldvl,
double *VR, magma_int_t ldvr,
double *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
// magma [sd]geev wants two separate output arrays: wr and wi for the real
// and imaginary parts
double *wr = w;
double *wi = w + n;
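// 'w' is therefore expected to provide 2 * n entries: real parts in w[0..n), imaginary parts in w[n..2n)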
(void)rwork; // unused
magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
float *A, magma_int_t lda,
float *w,
float *VL, magma_int_t ldvl,
float *VR, magma_int_t ldvr,
float *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
float *wr = w;
float *wi = w + n;
(void)rwork; // unused
magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<double>, double>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<double> *A, magma_int_t lda,
c10::complex<double> *w,
c10::complex<double> *VL, magma_int_t ldvl,
c10::complex<double> *VR, magma_int_t ldvr,
c10::complex<double> *work, magma_int_t lwork,
double *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_zgeev(jobvl, jobvr, n,
reinterpret_cast<magmaDoubleComplex*>(A), lda,
reinterpret_cast<magmaDoubleComplex*>(w),
reinterpret_cast<magmaDoubleComplex*>(VL), ldvl,
reinterpret_cast<magmaDoubleComplex*>(VR), ldvr,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaEig<c10::complex<float>, float>(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n,
c10::complex<float> *A, magma_int_t lda,
c10::complex<float> *w,
c10::complex<float> *VL, magma_int_t ldvl,
c10::complex<float> *VR, magma_int_t ldvr,
c10::complex<float> *work, magma_int_t lwork,
float *rwork,
magma_int_t *info) {
MagmaStreamSyncGuard guard;
magma_cgeev(jobvl, jobvr, n,
reinterpret_cast<magmaFloatComplex*>(A), lda,
reinterpret_cast<magmaFloatComplex*>(w),
reinterpret_cast<magmaFloatComplex*>(VL), ldvl,
reinterpret_cast<magmaFloatComplex*>(VR), ldvr,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
float* rwork, magma_int_t* iwork, magma_int_t* info) {
(void)rwork; // unused
MagmaStreamSyncGuard guard;
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
float *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
reinterpret_cast<magmaFloatComplex*>(U), ldu,
reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
reinterpret_cast<magmaFloatComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
double *rwork, magma_int_t* iwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
reinterpret_cast<magmaDoubleComplex*>(U), ldu,
reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
reinterpret_cast<magmaDoubleComplex*>(work), lwork,
rwork, iwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
double* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
float* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
double** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
float** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<float>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb,
float* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_sgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<double>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb,
double* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_dgels_gpu(trans, m, n, nrhs,
dA, ldda, dB, lddb,
hwork, lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<float>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb,
c10::complex<float>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_cgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaFloatComplex*>(dA), ldda,
reinterpret_cast<magmaFloatComplex*>(dB), lddb,
reinterpret_cast<magmaFloatComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
template<>
void magmaGels<c10::complex<double>>(
magma_trans_t trans, magma_int_t m, magma_int_t n, magma_int_t nrhs,
c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb,
c10::complex<double>* hwork, magma_int_t lwork, magma_int_t* info) {
MagmaStreamSyncGuard guard;
magma_zgels_gpu(trans, m, n, nrhs,
reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
reinterpret_cast<magmaDoubleComplex*>(dB), lddb,
reinterpret_cast<magmaDoubleComplex*>(hwork), lwork, info);
AT_CUDA_CHECK(cudaGetLastError());
}
namespace {
/*
MAGMA can return errors both as a return value and in the info argument.
The return value and info should always be identical.
In general, the meaning is as given in this table.
Predefined error codes are large negative numbers. Using the symbolic
constants below is preferred, but the numeric values can be found in
include/magma_types.h.
Info | Description
----------- | -----------
info = 0 (MAGMA_SUCCESS) | Successful exit
info < 0, but small | For info = -i, the i-th argument had an illegal value
info > 0 | Function-specific error such as singular matrix
MAGMA_ERR_DEVICE_ALLOC | Could not allocate GPU device memory
MAGMA_ERR_HOST_ALLOC | Could not allocate CPU host memory
MAGMA_ERR_ILLEGAL_VALUE | An argument had an illegal value (deprecated; instead it should return -i to say the i-th argument was bad)
MAGMA_ERR_INVALID_PTR | Can't free pointer
MAGMA_ERR_NOT_IMPLEMENTED | Function or option not implemented
MAGMA_ERR_NOT_SUPPORTED | Function or option not supported on the current architecture
*/
void checkMagmaInternalError(magma_int_t info, const std::string& magma_function_name) {
// if info > 0 the error is function-specific, do nothing in this case
TORCH_CHECK(info >= 0,
"MAGMA error: ",
magma_strerror(info),
", info = ", info,
", when calling ", magma_function_name);
}
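// Illustrative usage (a hypothetical sketch, not an actual call site in this file):
//   magma_int_t info = 0;
//   magmaCholesky<double>(MagmaLower, n, dA, ldda, &info);
//   checkMagmaInternalError(info, "cholesky");  // throws only for info < 0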
} // anonymous namespace
#endif // USE_MAGMA
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
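// ALLOCATE_ARRAY allocates a buffer of `size` elements of `type` via pin_memory<type>(size)
// (pinned host memory) and keeps it alive through the local storage_##name variable for the
// rest of the enclosing scope. Typical usage, as in apply_solve below:
//   scalar_t** A_array;
//   ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);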
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, Tensor& infos_out) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t lda = std::max(magma_int_t{1}, n);
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
// magmaSolve requires infos tensor to live on CPU
Tensor infos = at::empty(infos_out.sizes(), infos_out.options().device(kCPU));
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, infos.data_ptr<magma_int_t>());
infos_out.copy_(infos);
} else {
auto infos_data = infos_out.data_ptr<magma_int_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of batch_limit (65535) as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
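// (65535 is presumably tied to the maximum CUDA grid y/z dimension used by MAGMA's
// batched kernels; see the analogous comment in apply_cholesky below.)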
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &infos_data[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&infos_data[mini_idx], batch_size % batch_limit, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
// infos might not get filled for empty inputs therefore at::zeros is used instead of at::empty
auto infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// This is a type dispatching helper function for 'apply_solve'
Tensor& _linalg_solve_out_helper_cuda(Tensor& result, Tensor& input, Tensor& infos) {
// 'result' and 'input' should be in column major order (it should be checked before calling this function)
// the content of 'result', 'input' and 'infos' is overwritten by 'apply_solve'
// 'result' should contain data of 'other' tensor (right-hand-side of the linear system of equations)
// 'input' should contain data of the original 'input' tensor (left-hand side of the linear system)
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_solve_out_cuda", [&]{
apply_solve<scalar_t>(result, input, infos);
});
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of the n-by-n matrix 'self'; the result is saved to 'self_inv'.
'infos_lu' and 'infos_getri' are int Tensors containing error codes for each matrix in the batched input:
'infos_lu' holds magmaLu errors, and 'infos_getri' holds magmaGetri errors.
For more information see MAGMA's documentation for GETRI and GETRF routines.
*/
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
auto infos_lu_data = infos_lu.data_ptr<magma_int_t>();
auto infos_getri_data = infos_getri.data_ptr<magma_int_t>();
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
// MAGMA does not work with batch_size == 0, let's return early in this case
if (batch_size == 0) {
return;
}
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * lda);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
// magmaLuBatched leaves ipiv_data values unwritten for singular matrices.
// Initialize to avoid memory access violations inside magma kernels (gh-51930).
std::fill_n(ipiv_data, batch_size * n, 1);
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, lda, ipiv_array, infos_lu_data,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of batch_limit (65535) as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix inversions
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur_getri = &infos_getri_data[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, lda, ipiv_array_cur, self_inv_array_cur,
lda, info_array_cur_getri, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], lda, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
lda, &infos_getri_data[mini_idx], batch_size % batch_limit, magma_queue);
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, Tensor& infos_lu, Tensor& infos_getri) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
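// getri's optimal workspace size is n * nb, where nb is the block size reported by
// magma_get_[sdzc]getri_nb (see magmaGetriOptimalBlocksize above).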
// magmaLu and magmaGetri require the infos tensor to live on CPU
infos_lu = infos_lu.to(at::kCPU);
infos_getri = infos_getri.to(at::kCPU);
Tensor ipiv = at::empty({lda}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, lda, ipiv.data_ptr<magma_int_t>(), infos_lu.data_ptr<magma_int_t>());
magmaGetri<scalar_t>(
n, self_data, lda, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, infos_getri.data_ptr<magma_int_t>());
#endif
}
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
auto infos_lu = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto infos_getri = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt));
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos_lu, infos_getri);
});
batchCheckErrors(infos_lu, "inverse_cuda");
batchCheckErrors(infos_getri, "inverse_cuda");
} else {
// magmaLu and magmaGetri require the infos tensor to live on CPU
auto infos_lu = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
auto infos_getri = at::zeros({1}, self.options().dtype(kInt).device(kCPU));
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, infos_lu, infos_getri);
});
singleCheckErrors(infos_lu.item().toInt(), "inverse_cuda");
singleCheckErrors(infos_getri.item().toInt(), "inverse_cuda");
}
return self_inv_working_copy;
}
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// This is a type dispatching helper function for 'apply_batched_inverse' and 'singleCheckErrors'
Tensor& _linalg_inv_out_helper_cuda_legacy(Tensor& result, Tensor& infos_lu, Tensor& infos_getri) {
// assuming result is in column major order and contains the matrices to invert
if (result.dim() > 2) {
auto input_working_copy = cloneBatchedColumnMajor(result);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_batched_inverse<scalar_t>(
input_working_copy, result, infos_lu, infos_getri);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "linalg_inv_out_cuda", [&]{
apply_single_inverse<scalar_t>(result, infos_lu, infos_getri);
});
}
return result;
}
// This is a MAGMA/cuSOLVER dispatching helper function
Tensor& _linalg_inv_out_helper_cuda(Tensor &result, Tensor& infos_lu, Tensor& infos_getri) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
#ifdef USE_CUSOLVER
if ((result.dim() == 2) || (/* result.dim() > 2 && */ batchCount(result) <= 2) || !use_magma_) {
return _linalg_inv_out_helper_cuda_lib(result, infos_lu, infos_getri); // cusolver or cublas
} else {
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
}
#else
return _linalg_inv_out_helper_cuda_legacy(result, infos_lu, infos_getri); // magma-cuda
#endif
return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t lda = std::max<magma_int_t>(1, n);
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, lda,
b_data, lda, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of batch_limit (65535) as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, lda, b_array_cur, lda,
info_tmp, batch_limit, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], lda, &b_array[mini_idx], lda,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda_magma(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// Todo: cusolverDn<T>potrsBatched only supports nrhs == 1 and does not have good performance.
// Batched cholesky_solve is dispatched to magma.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(self) == 1 || !use_magma_) {
return _cholesky_solve_helper_cuda_cusolver(self, A, upper);
} else {
return _cholesky_solve_helper_cuda_magma(self, A, upper);
}
#else
return _cholesky_solve_helper_cuda_magma(self, A, upper);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(const Tensor& self, bool upper, const Tensor& info) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.cholesky on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// magmaCholesky requires info to be on CPU
magma_int_t info_cpu = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info_cpu);
info.fill_(info_cpu);
} else {
TORCH_INTERNAL_ASSERT(info.is_cuda());
auto info_data = info.data_ptr<magma_int_t>();
// magmaCholeskyBatched supports only upper=false
uplo = MagmaLower;
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Compute as many batches of 262140 as possible
// 262140 is the size of the largest batch of matrices that can be run without
// violating the maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_data[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, nbatches, magma_queue);
}
}
#endif
}
void cholesky_helper_magma(const Tensor& input, bool upper, const Tensor& info) {
Tensor result = input;
if (input.dim() > 2) {
// MAGMA's batched cholesky operator has an off-by-one error causing an illegal
// memory access (IMA, see https://github.com/pytorch/pytorch/issues/42666). This code is based
// on the cloneBatchedColumnMajor function; however, it pads the input with
// one extra element, utilizing the fact that the resize_as_ method preserves
// the storage even if it's larger than the new sizes. This way, if MAGMA
// reads out of bounds it will still be valid user memory.
result = at::empty(input.numel() + 1, input.options());
result.resize_as_(input).transpose_(-2, -1);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(result.transpose(-2, -1).is_contiguous());
// batched MAGMA doesn't support upper=true
// we transpose and conjugate the input as a workaround
result.copy_(upper ? input.conj().transpose(-2, -1) : input);
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
input.scalar_type(), "cholesky_cuda", [&] {
apply_cholesky<scalar_t>(result, upper, info);
});
if (input.dim() > 2) {
// if upper=true we need to transpose and conjugate the result tensor
// because the cholesky decomposition is stored in the lower triangular part
if (upper) {
input.copy_(result.conj().transpose(-2, -1));
} else {
input.copy_(result);
}
}
}
static void cholesky_kernel(const Tensor& input, const Tensor& info, bool upper) {
#ifdef USE_CUSOLVER
if (batchCount(input) == 1 || !use_magma_ || use_cusolver_potrf_batched_) {
cholesky_helper_cusolver(input, upper, info);
} else {
cholesky_helper_magma(input, upper, info);
}
#else
cholesky_helper_magma(input, upper, info);
#endif // USE_CUSOLVER
}
REGISTER_DISPATCH(cholesky_stub, &cholesky_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the inverse of a symmetric (Hermitian) positive-definite n-by-n matrix 'input' using the Cholesky solver.
This is an in-place routine; the content of 'input' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
MAGMA requires 'infos' to reside in CPU memory.
For more information see MAGMA's documentation for POTRS routine.
*/
template <typename scalar_t>
static void apply_cholesky_inverse(Tensor& input, Tensor& infos, bool upper) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "cholesky_inverse: MAGMA library not found in compilation. Please rebuild with MAGMA.");
#else
// magmaCholeskyInverse (magma_dpotri_gpu) is slow because internally
// it transfers data several times between GPU and CPU and calls a LAPACK routine on the CPU;
// using magmaCholeskySolveBatched is a lot faster
// (note that magmaCholeskySolve is also slow)
// 'input' is modified in-place, so we need to clone it and replace 'input' with an identity matrix
// before calling apply_cholesky_solve
auto input_working_copy = cloneBatchedColumnMajor(input);
// the 'input' tensor has to be a batch of identity matrices
input.fill_(0);
input.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
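// 'input' now holds (a batch of) identity matrices, so solving A * X = I via the Cholesky
// factor stored in 'input_working_copy' yields X = A^{-1} in place of 'input'.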
Tensor result_u, input_u;
if (input.dim() == 2) {
// unsqueezing here so that the batched version is used
result_u = input.unsqueeze(0);
input_u = input_working_copy.unsqueeze(0);
} else {
result_u = input;
input_u = input_working_copy;
}
// magma's potrs_batched doesn't take matrix-wise array of ints as an 'info' argument
// it returns a single 'magma_int_t'
// if info = 0 the operation is successful, if info = -i, the i-th parameter had an illegal value.
int64_t info_tmp = 0;
apply_cholesky_solve<scalar_t>(result_u, input_u, upper, info_tmp);
infos.fill_(info_tmp);
#endif
}
// This is a type dispatching helper function for 'apply_cholesky_inverse'
Tensor& cholesky_inverse_kernel_impl_magma(Tensor &result, Tensor& infos, bool upper) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(result.scalar_type(), "cholesky_inverse_out_cuda", [&]{
apply_cholesky_inverse<scalar_t>(result, infos, upper);
});
return result;
}
Tensor& cholesky_inverse_kernel_impl(Tensor &result, Tensor& infos, bool upper) {
// This function calculates the inverse matrix in-place
// result should be in column major order and contain matrices to invert
// the content of result is overwritten by 'apply_cholesky_inverse'
#ifdef USE_CUSOLVER
if (batchCount(result) == 1 || !use_magma_) {
return cholesky_inverse_kernel_impl_cusolver(result, infos, upper);
} else {
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
}
#else
return cholesky_inverse_kernel_impl_magma(result, infos, upper);
#endif
}
REGISTER_DISPATCH(cholesky_inverse_stub, &cholesky_inverse_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine; the content of 'input', 'pivots', and 'infos' is overwritten.
This is a "looped" variant that calls the single-matrix MAGMA function once per matrix of a batched input.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_gpu.
*/
template <typename scalar_t>
static void apply_lu_looped_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
// magmaLu and magmaLuNoPiv require infos and pivots tensor to be on CPU
// the data is later copied back to the appropriate output tensor
Tensor infos_cpu = at::empty_like(infos, infos.options().device(kCPU).pinned_memory(true));
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos_cpu.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
auto pivots_stride = pivots.size(-1);
auto batch_size = batchCount(input);
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
if (compute_pivots) {
Tensor pivots_cpu = at::empty_like(pivots, pivots.options().device(kCPU).pinned_memory(true));
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
int* infos_working_ptr = &infos_data[i];
magmaLu<scalar_t>(m, n, input_working_ptr, leading_dimension, pivots_working_ptr, infos_working_ptr);
}
pivots.copy_(pivots_cpu, /*non_blocking=*/true);
} else {
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
int* infos_working_ptr = &infos_data[i];
magmaLuNoPiv<scalar_t>(m, n, input_working_ptr, leading_dimension, infos_working_ptr);
}
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
infos.copy_(infos_cpu, /*non_blocking=*/true);
#endif
}
/*
Computes the LU decomposition of an m×n matrix or batch of matrices in the 'input' tensor.
This is an in-place routine, content of 'input', 'pivots', and 'infos' is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `input` - [in] the input matrix for LU decomposition
[out] the LU decomposition
* `pivots` - [out] the pivot indices
* `infos` - [out] error codes, positive values indicate singular matrices
* `compute_pivots` - controls whether LU is computed with or without pivoting
For further details, please see the MAGMA documentation for magma_dgetrf_batched.
*/
template <typename scalar_t>
static void apply_lu_batched_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto input_data = input.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto input_matrix_stride = matrixStride(input);
magma_int_t batch_size = magma_int_cast(batchCount(input), "batchCount");
// magmaLuBatched doesn't work with zero batch dimensions
// it gives CUDA error: invalid configuration argument
if (batch_size == 0) {
infos.fill_(0);
return;
}
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto leading_dimension = std::max<magma_int_t>(1, m);
scalar_t** input_array;
ALLOCATE_ARRAY(input_array, scalar_t*, batch_size);
// Set up array of pointers to matrices
for (int64_t i = 0; i < batch_size; i++) {
input_array[i] = &input_data[i * input_matrix_stride];
}
MAGMAQueue magma_queue(input.get_device());
if (compute_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
}
magmaLuBatched<scalar_t>(m, n, input_array, leading_dimension, pivots_array, infos_data, batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(m, n, input_array, leading_dimension, infos_data, batch_size, magma_queue);
// fill the pivots tensor with indices using 1-based (Fortran) indexing
auto k = std::min(m, n);
Tensor pivots_tmp = at::arange(1, k + 1, input.options().dtype(at::kInt)).expand_as(pivots);
pivots.copy_(pivots_tmp);
}
#endif
}
static void lu_magma(const Tensor& input, const Tensor& pivots, const Tensor& infos, bool compute_pivots) {
// TODO: compare performance and use the best performing option based on input's sizes
if (input.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_looped_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "lu_magma", [&]{
apply_lu_batched_magma<scalar_t>(input, pivots, infos, compute_pivots);
});
}
}
REGISTER_DISPATCH(lu_stub, &lu_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_triangular_solve_batched(Tensor& A, Tensor& b, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
trans = conjugate_transpose ? MagmaConjTrans : trans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t n = magma_int_cast(A.size(-1), "A.size(-1)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// magma returns early if m <= 0 || n <= 0 for magmaTriangularSolveBatched
// magmaTriangularSolve is calling cuBLAS and it prints
// ** On entry to DTRSM parameter number 9 had an illegal value
// so let's use proper lda parameter here
magma_int_t lda = std::max<magma_int_t>(1, m);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 as possible
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit;
int64_t mini_idx; // this is outside the loop because it is used for the case batch_size % batch_limit != 0
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
lda, b_array_cur, lda, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
lda, &b_array[mini_idx], lda, batch_size % batch_limit, magma_queue);
}
#endif
}
void triangular_solve_batched_magma(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
(void)infos; // unused
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(A.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve_batched<scalar_t>(A, B, upper, transpose, conjugate_transpose, unitriangular);
});
}
void triangular_solve_kernel(Tensor& A, Tensor& B, Tensor& infos, bool upper, bool transpose, bool conjugate_transpose, bool unitriangular) {
// For batch sizes smaller than 8 and matrix sizes larger than 64x64, the looped cuBLAS version is faster than the batched version
if (batchCount(A) <= 8 && A.size(-1) >= 64) {
triangular_solve_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
#ifndef USE_MAGMA
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
#else
// cuBLAS batched is faster than MAGMA batched up until 512x512, after that MAGMA is faster
if (A.size(-1) <= 512) {
triangular_solve_batched_cublas(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
} else {
triangular_solve_batched_magma(A, B, infos, upper, transpose, conjugate_transpose, unitriangular);
}
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(triangular_solve_stub, &triangular_solve_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ orgqr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor& orgqr_kernel_impl(Tensor& result, const Tensor& tau) {
// TODO: It is possible to implement efficient batched orgqr for small tau (tau.size(-1) <= 32)
// using MAGMA, however it fails on Windows because of some illegal memory reads inside MAGMA.
// See discussions in https://github.com/pytorch/pytorch/pull/51348 for comparison of cuSOLVER-MAGMA
// and Windows failure.
// For reference here is the MAGMA-based implementation: https://gist.github.com/IvanYashchuk/2db50002c9d3c1462ff769e6410ad983
#if defined(USE_CUSOLVER)
return orgqr_helper_cusolver(result, tau); // cusolver
#else
TORCH_CHECK(false, "Calling torch.orgqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(orgqr_stub, &orgqr_kernel_impl);
void ormqr_kernel(const Tensor& input, const Tensor& tau, const Tensor& other, bool left, bool transpose) {
#if defined(USE_CUSOLVER)
ormqr_cusolver(input, tau, other, left, transpose);
#else
TORCH_CHECK(false,
"Calling torch.ormqr on a CUDA tensor requires compiling ",
"PyTorch with cuSOLVER. Please use PyTorch built with cuSOLVER support.");
#endif
}
REGISTER_DISPATCH(ormqr_stub, &ormqr_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_geqrf(const Tensor& input, const Tensor& tau) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.geqrf on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
magma_int_t m = magma_int_cast(input.size(-2), "m");
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto input_data = input.data_ptr<scalar_t>();
auto input_matrix_stride = matrixStride(input);
auto tau_stride = tau.size(-1);
auto batch_size = batchCount(input);
auto lda = std::max<int>(1, m);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau_cpu = at::empty(tau.sizes(), tau.options().device(at::kCPU).pinned_memory(true));
scalar_t* tau_data = tau_cpu.data_ptr<scalar_t>();
scalar_t* work_data = nullptr; // workspace is not needed for geqrf2_gpu
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* tau_working_ptr = &tau_data[i * tau_stride];
// now compute the actual QR and tau
// MAGMA's geqrf2_gpu function is used; this version has LAPACK-compliant arguments.
magmaGeqrf<scalar_t>(m, n, input_working_ptr, lda, tau_working_ptr, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
tau.copy_(tau_cpu, /*non_blocking=*/true);
#endif
}
// This is a type dispatching helper function for 'apply_geqrf'
void geqrf_magma(const Tensor& input, const Tensor& tau) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "geqrf_magma", [&]{
apply_geqrf<scalar_t>(input, tau);
});
}
// This is a backend library dispatching helper function for calling looped batch implementation
void geqrf_looped(const Tensor& input, const Tensor& tau) {
#if defined(USE_CUSOLVER)
return geqrf_cusolver(input, tau);
#else
return geqrf_magma(input, tau);
#endif
}
// This is a backend library dispatching helper function for calling specialized batched implementation
void geqrf_batched(const Tensor& input, const Tensor& tau) {
#ifdef CUDART_VERSION
// if cuBLAS is available
return geqrf_batched_cublas(input, tau);
#else
// TODO: implement MAGMA-based path using magma_zgeqrf_expert_batched
return geqrf_looped(input, tau);
#endif
}
void geqrf_kernel(const Tensor& input, const Tensor& tau) {
// if the number of rows is smaller than 32, the batched version is always faster for batch size > 1
// for a larger number of rows, the batched version is used only when the batch count satisfies the condition below
if (input.size(-2) <= 256 && batchCount(input) >= std::max<int64_t>(2, input.size(-2) / 16)) {
return geqrf_batched(input, tau);
} else {
return geqrf_looped(input, tau);
}
}
REGISTER_DISPATCH(geqrf_stub, &geqrf_kernel);
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t q_size_minus_2, int64_t r_size_minus_1, int64_t n_columns,
bool compute_q) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_int_t m = magma_int_cast(q_size_minus_2, "Q.size(-2)");
magma_int_t n = magma_int_cast(r_size_minus_1, "R.size(-1)");
auto r_data = R.data_ptr<scalar_t>();
auto r_matrix_stride = matrixStride(R);
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
checkMagmaInternalError(info, "geqrf");
}
if (!compute_q) {
// this is for mode='r'
return;
}
// This phase computes Q (the raw version)
// We need to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
auto q_data = Q.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
checkMagmaInternalError(info, "geqrf");
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
checkMagmaInternalError(info, "orgqr");
}
#endif
}
std::tuple<Tensor, Tensor> linalg_qr_helper_magma(const Tensor& self, c10::string_view mode) {
bool compute_q, reduced;
std::tie(compute_q, reduced) = _parse_qr_mode(mode);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, reduced);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
int64_t n = self.size(-1);
auto r_shape = self.sizes().vec();
r_shape.end()[-2] = n_columns_q;
r_shape.end()[-1] = n;
r_working_copy = at::empty(r_shape, self.options());
if (compute_q) {
auto q_shape = q_sizes;
q_shape.end()[-1] = n_columns_q;
q_working_copy = at::zeros(q_shape, self.options());
q_working_copy.diagonal(/*offset=*/0, /*dim1=*/-2, /*dim2=*/-1).fill_(1);
} else {
q_working_copy = at::empty({0}, self.options());
}
return std::make_tuple(q_working_copy, r_working_copy);
}
if (compute_q) {
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
} else {
q_working_copy = at::empty({0}, self.options());
}
r_working_copy = cloneBatchedColumnMajor(self);
int64_t m = q_sizes[self.dim() - 2];
int64_t n = r_working_copy.size(-1);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, m, n, n_columns_q, compute_q);
});
if (compute_q) {
q_working_copy = q_working_copy.narrow(-1, 0, n_columns_q);
}
r_working_copy = r_working_copy.narrow(-2, 0, n_columns_q).triu();
return std::make_tuple(q_working_copy, r_working_copy);
}
std::tuple<Tensor, Tensor> _linalg_qr_helper_cuda(const Tensor& input, c10::string_view mode) {
#if defined(USE_CUSOLVER)
// _linalg_qr_helper_default is a generic function that is implemented using
// geqrf_stub and orgqr_stub. It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return _linalg_qr_helper_default(input, mode);
#else
return linalg_qr_helper_magma(input, mode);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_magma_eigh(Tensor& values, Tensor& vectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.linalg.eigh/eigvalsh on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. Please use PyTorch built with MAGMA support.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == kCPU);
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(vectors.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(vectors);
auto vectors_stride = matrixStride(vectors);
auto values_stride = values.size(-1);
auto vectors_data = vectors.data_ptr<scalar_t>();
auto values_data = values.data_ptr<value_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, lda * lda);
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
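// calling magmaSyevd with lwork = lrwork = liwork = -1 performs a workspace query:
// the optimal sizes are returned in wkopt, rwkopt and iwkopt without computing the decomposition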
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_data, lda, values_data,
wA, lda, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, infos_data);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(std::max<int64_t>(1, real_impl<scalar_t, value_t>(wkopt)), "work_size");
liwork = magma_int_cast(std::max<int64_t>(1, iwkopt), "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (vectors.is_complex()) {
lrwork = magma_int_cast(std::max<int64_t>(1, rwkopt), "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* vectors_working_ptr = &vectors_data[i * vectors_stride];
value_t* values_working_ptr = &values_data[i * values_stride];
magma_int_t* info_working_ptr = &infos_data[i];
magmaSyevd<scalar_t, value_t>(jobz, uplo, n, vectors_working_ptr, lda, values_working_ptr,
wA, lda, work, lwork, rwork, lrwork, iwork, liwork, info_working_ptr);
// The current behaviour for linear algebra functions is to raise an error if something goes wrong
// or the input doesn't satisfy some requirement,
// therefore we return early since further computations would be wasted anyway
if (*info_working_ptr != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
Tensor infos = at::zeros({std::max<int64_t>(1, batchCount(self))}, self.options().dtype(kInt).device(at::kCPU));
auto eigvals_shape = IntArrayRef(self.sizes().data(), self.dim()-1); // self.shape[:-1]
ScalarType real_dtype = toValueType(self.scalar_type());
// magmaSyevd uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// the appropriate dimensions on the CUDA device (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(eigvals_shape, self.options().dtype(real_dtype))
: at::empty(eigvals_shape, self.options().dtype(real_dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_magma_eigh<scalar_t>(eigvals_working_copy, self_working_copy, infos, upper, eigenvectors);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos.item().toInt(), "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eigh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This is a type dispatch function for 'apply_magma_eigh'
// For small inputs result is computed on CPU
void linalg_eigh_magma(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
// MAGMA just calls LAPACK for eigenvectors.size(-1) <= 128
// See https://bitbucket.org/icl/magma/src/e6fdca447bd402693e8b0b950a898b6879bbcc41/src/zheevd_gpu.cpp?at=master#lines-258
// in addition, lda is ignored, breaking 0x0 inputs
if (eigenvectors.size(-1) > 128) {
// MAGMA requires eigenvalues and infos tensors to reside on CPU
Tensor eigenvalues_cpu = eigenvalues.to(kCPU);
infos = infos.to(kCPU);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
eigenvectors.scalar_type(), "linalg_eigh_cpu", [&] {
apply_magma_eigh<scalar_t>(
eigenvalues_cpu, eigenvectors, infos, upper, compute_eigenvectors);
});
// Transfer the results computed by MAGMA from CPU to GPU
eigenvalues.copy_(eigenvalues_cpu);
} else { // eigenvectors.size(-1) <= 128
// transfer to CPU, compute the result and copy back to GPU
// this is faster than going through MAGMA that does the same
Tensor eigenvalues_cpu = at::empty_like(eigenvalues, eigenvalues.options().device(kCPU));
if (compute_eigenvectors) {
Tensor eigenvectors_cpu = at::empty_like(eigenvectors, eigenvectors.options().device(kCPU));
at::linalg_eigh_out(eigenvalues_cpu, eigenvectors_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
eigenvectors.copy_(eigenvectors_cpu);
} else {
at::linalg_eigvalsh_out(eigenvalues_cpu, eigenvectors.to(kCPU), upper ? "U" : "L");
}
eigenvalues.copy_(eigenvalues_cpu);
}
}
void linalg_eigh_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, bool upper, bool compute_eigenvectors) {
#if defined(USE_CUSOLVER)
linalg_eigh_cusolver(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#else
linalg_eigh_magma(eigenvalues, eigenvectors, infos, upper, compute_eigenvectors);
#endif
}
REGISTER_DISPATCH(linalg_eigh_stub, &linalg_eigh_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
scalar_t *wr = out_eigvals_data;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
value_t *rwork_data = nullptr;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
ALLOCATE_ARRAY(rwork_data, value_t, n*2);
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, &wkopt, -1, rwork_data, &info);
magma_int_t lwork = static_cast<magma_int_t>(real_impl<scalar_t, value_t>(wkopt));
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t, value_t>(MagmaNoVec, jobvr, n, self_data, n, wr, NULL, 1, vr_data, ldvr, work_data, lwork, rwork_data, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
int64_t n = self.size(-1);
// copy self to pinned CPU memory
auto self_working_copy = at::empty_strided(
{n, n}, // square matrix
{1, n}, // column-ordered, as magmaEig expects
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
// tensors holding the results. We use empty_strided to make them column-ordered
auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor out_eigvals;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
out_eigvals = at::empty({n}, options);
} else {
out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
}
auto out_eigvecs = eigenvectors
? at::empty_strided({n, n}, {1, n}, options)
: Tensor();
int64_t info;
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "eig_cuda", [&]{
apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
});
singleCheckErrors(info, "eig_cuda");
return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ linalg_eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Computes the eigenvalues and eigenvectors of n-by-n matrix 'input'.
This is an in-place routine, content of 'input', 'values', 'vectors' is overwritten.
'infos' is an int Tensor containing error codes for each matrix in the batched input.
For more information see MAGMA's documentation for GEEV routine.
*/
template <typename scalar_t>
void apply_linalg_eig(Tensor& values, Tensor& vectors, Tensor& input, Tensor& infos, bool compute_eigenvectors) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.linalg.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.linalg.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.device() == at::kCPU);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(infos.device() == at::kCPU);
if (compute_eigenvectors) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(vectors.device() == at::kCPU);
}
using value_t = typename c10::scalar_value_type<scalar_t>::type;
magma_vec_t jobvr = compute_eigenvectors ? MagmaVec : MagmaNoVec;
magma_vec_t jobvl = MagmaNoVec; // only right eigenvectors are computed
magma_int_t n = magma_int_cast(input.size(-1), "n");
auto lda = std::max<magma_int_t>(1, n);
auto batch_size = batchCount(input);
auto input_matrix_stride = matrixStride(input);
auto values_stride = values.size(-1);
auto input_data = input.data_ptr<scalar_t>();
auto values_data = values.data_ptr<scalar_t>();
auto infos_data = infos.data_ptr<magma_int_t>();
auto rvectors_data = compute_eigenvectors ? vectors.data_ptr<scalar_t>() : nullptr;
scalar_t* lvectors_data = nullptr; // only right eigenvectors are computed
int64_t ldvr = compute_eigenvectors ? lda : 1;
int64_t ldvl = 1;
Tensor rwork;
value_t* rwork_data = nullptr;
if (input.is_complex()) {
ScalarType real_dtype = toValueType(input.scalar_type());
rwork = at::empty({lda * 2}, input.options().dtype(real_dtype));
rwork_data = rwork.data_ptr<value_t>();
}
// call magmaEig once to get the optimal size of work_data
scalar_t work_query;
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_data, lda, values_data,
lvectors_data, ldvl, rvectors_data, ldvr, &work_query, -1, rwork_data, &infos_data[0]);
magma_int_t lwork = std::max<magma_int_t>(1, static_cast<magma_int_t>(real_impl<scalar_t, value_t>(work_query)));
Tensor work = at::empty({lwork}, input.dtype());
auto work_data = work.data_ptr<scalar_t>();
for (auto i = decltype(batch_size){0}; i < batch_size; i++) {
scalar_t* input_working_ptr = &input_data[i * input_matrix_stride];
scalar_t* values_working_ptr = &values_data[i * values_stride];
scalar_t* rvectors_working_ptr = compute_eigenvectors ? &rvectors_data[i * input_matrix_stride] : nullptr;
int* info_working_ptr = &infos_data[i];
magmaEig<scalar_t, value_t>(jobvl, jobvr, n, input_working_ptr, lda, values_working_ptr,
lvectors_data, ldvl, rvectors_working_ptr, ldvr, work_data, lwork, rwork_data, info_working_ptr);
}
#endif
}
// This is a type dispatching helper function for 'apply_linalg_eig'
void linalg_eig_kernel(Tensor& eigenvalues, Tensor& eigenvectors, Tensor& infos, const Tensor& input, bool compute_eigenvectors) {
// This function calculates the non-symmetric eigendecomposition in-place
// tensors should be in batched column major memory format
// the content of eigenvalues, eigenvectors and infos is overwritten by 'apply_linalg_eig'
// apply_linalg_eig modifies the provided input matrix in-place, therefore we need a copy
// MAGMA doesn't have GPU interface for the eigendecomposition and it forces us to transfer 'input' to CPU
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.is_cuda());
Tensor input_working_copy = at::empty(input.sizes(), input.options().device(kCPU));
input_working_copy.transpose_(-2, -1); // make input_working_copy to have Fortran contiguous memory layout
input_working_copy.copy_(input);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(input.scalar_type(), "linalg_eig_out_cuda", [&]{
apply_linalg_eig<scalar_t>(eigenvalues, eigenvectors, input_working_copy, infos, compute_eigenvectors);
});
}
REGISTER_DISPATCH(linalg_eig_stub, &linalg_eig_kernel);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto lda = std::max<magma_int_t>(1, m);
auto ldvt = std::max<magma_int_t>(1, n);
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt = 1; // MAGMA might not set the value for the optimal workspace therefore use 1 as the default value
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, lda, S_data, U_data, lda, VT_data, ldvt, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, lda,
S_working_ptr, U_working_ptr, lda, VT_working_ptr, ldvt, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda_legacy(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, these requirements are taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (!compute_uv) {
VT_working_copy.zero_();
U_working_copy.zero_();
}
if (some) {
VT_working_copy = VT_working_copy.narrow(-2, 0, k);
}
// so far we have computed VT, but torch.svd returns V instead. Adjust accordingly.
// Note that the 'apply_svd' routine returns VT = V^T (for real inputs) or VT = V^H (for complex inputs), not V.
VT_working_copy = VT_working_copy.conj();
VT_working_copy.transpose_(-2, -1);
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
#ifdef USE_CUSOLVER
return _svd_helper_cuda_lib(self, some, compute_uv);
#else
return _svd_helper_cuda_legacy(self, some, compute_uv);
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a "looped" variant for calling single input MAGMA function on batched input.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_gpu.
*/
template <typename scalar_t>
static void apply_lu_solve_looped_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
// MAGMA requires pivots to be a CPU tensor
Tensor pivots_cpu = pivots.cpu();
auto pivots_data = pivots_cpu.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots_cpu.size(-1);
auto batch_size = batchCount(b);
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
int info = 0;
for (decltype(batch_size) i = 0; i < batch_size; i++) {
scalar_t* b_working_ptr = &b_data[i * b_stride];
scalar_t* lu_working_ptr = &lu_data[i * lu_stride];
int* pivots_working_ptr = &pivots_data[i * pivots_stride];
magmaLuSolve<scalar_t>(n, nrhs, lu_working_ptr, leading_dimension, pivots_working_ptr, b_working_ptr, leading_dimension, &info);
// info from magmaLuSolve only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
/*
Solves the matrix equation A X = B
X and B are n-by-nrhs matrices, A is represented using the LU factorization.
This is an in-place routine, content of `b` is overwritten.
This is a specialized batched variant, it is expected to be faster than the "looped" version only for small inputs.
Args:
* `b` - [in] the right hand side matrix B
[out] the solution matrix X
* `lu` - [in] the LU factorization of matrix A (see at::_lu_with_info)
* `pivots` - [in] the pivot indices (see at::_lu_with_info)
For further details, please see the MAGMA documentation for magma_dgetrs_batched.
*/
template <typename scalar_t>
static void apply_lu_solve_batched_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
#ifndef USE_MAGMA
TORCH_CHECK(
false,
"Calling torch.lu_solve on a CUDA tensor requires compiling ",
"PyTorch with MAGMA. lease rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(lu.size(-2), "n");
magma_int_t nrhs = magma_int_cast(b.size(-1), "nrhs");
auto leading_dimension = std::max<magma_int_t>(1, n);
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute the result in batches of 65535
// that is the maximum allowed number for batch_size in MAGMA
constexpr int64_t batch_limit = 65535;
for (int64_t mini_idx = 0; mini_idx < batch_size; mini_idx += batch_limit) {
int64_t nbatches = std::min(batch_limit, batch_size - mini_idx);
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
int info;
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, leading_dimension,
pivots_array_cur, b_array_cur, leading_dimension,
info, nbatches, magma_queue);
// info from magmaLuSolveBatched only reports if the i-th parameter is wrong
// so we don't need to check it all the time
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(info == 0);
}
#endif
}
static void lu_solve_magma(const Tensor& b, const Tensor& lu, const Tensor& pivots) {
// TODO: compare performance and use the best performing option based on lu's sizes
if (b.dim() == 2) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_looped_magma<scalar_t>(b, lu, pivots);
});
} else {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(b.scalar_type(), "lu_solve_magma", [&]{
apply_lu_solve_batched_magma<scalar_t>(b, lu, pivots);
});
}
}
REGISTER_DISPATCH(lu_solve_stub, &lu_solve_magma);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lstsq ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_gels(const Tensor& a, Tensor& b, Tensor& infos) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "torch.linalg.lstsq: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto trans = MagmaNoTrans;
auto m = magma_int_cast(a.size(-2), "m");
auto n = magma_int_cast(a.size(-1), "n");
TORCH_CHECK(
m >= n,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA");
auto nrhs = magma_int_cast(b.size(-1), "nrhs");
auto ldda = std::max<magma_int_t>(1, m);
auto lddb = std::max<magma_int_t>(1, std::max(m, n));
auto nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
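// workspace size used for the magmaGels calls below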
auto lwork = (m - n + nb) * (nrhs + nb) + nrhs * nb;
Tensor hwork = at::empty({static_cast<int64_t>(lwork)}, a.scalar_type());
auto* hwork_ptr = hwork.data_ptr<scalar_t>();
// MAGMA requires infos tensor to live on CPU
infos = infos.to(at::kCPU);
auto infos_data = infos.data_ptr<magma_int_t>();
batch_iterator_with_broadcasting<scalar_t>(a, b,
[&](scalar_t* a_working_ptr, scalar_t* b_working_ptr,
int64_t a_linear_batch_idx) {
magma_int_t* infos_working_ptr = &infos_data[a_linear_batch_idx];
magmaGels<scalar_t>(trans, m, n, nrhs,
a_working_ptr, ldda, b_working_ptr, lddb,
hwork_ptr, lwork, infos_working_ptr);
}
);
#endif
}
void gels_magma(const Tensor& a, Tensor& b, Tensor& infos) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(a.scalar_type(), "gels_magma", [&] {
apply_gels<scalar_t>(a, b, infos);
});
}
void linalg_lstsq_gels(const Tensor& A, const Tensor& B, const Tensor& infos) {
// The steps for using the QR decomposition for solving least squares problems
// are outlined here https://en.wikipedia.org/wiki/QR_decomposition#Using_for_solution_to_linear_inverse_problems
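// For the overdetermined case (m >= n): A = QR, so the least-squares solution is X = R^{-1} Q^H B.
// For the underdetermined case (m < n): A^H = QR, so the minimum-norm solution is X = Q (R^H)^{-1} B.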
auto m = A.size(-2);
auto n = A.size(-1);
auto mn = std::min(m, n);
// explicitly broadcast the batch dimensions of A
// TODO: revisit this later to use batch_iterator_with_broadcasting in triangular_solve
IntArrayRef A_batch_sizes(A.sizes().data(), A.dim() - 2);
IntArrayRef B_batch_sizes(B.sizes().data(), B.dim() - 2);
std::vector<int64_t> expand_batch_portion = at::infer_size(A_batch_sizes, B_batch_sizes);
auto tau_shape = A.sizes().vec();
tau_shape.pop_back();
tau_shape.back() = mn;
Tensor tau = at::empty(tau_shape, A.options());
if (m >= n) {
// Step 1: compute QR factorization using geqrf
geqrf_kernel(A, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {A.size(-2), A.size(-1)});
Tensor A_expanded = A.expand({A_expand_batch});
bool is_fortran_contiguous = A_expanded.transpose(-2, -1).is_contiguous();
Tensor A_broadcasted = is_fortran_contiguous ? A_expanded : cloneBatchedColumnMajor(A_expanded);
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 2: B <- Q^H B
ormqr_kernel(A_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/true);
// Step 3: solve R X = B
bool upper = true;
bool transpose = false;
bool conjugate_transpose = false;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(A_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
} else { // underdetermined case
Tensor Ah = cloneBatchedColumnMajor(A.conj().transpose(-2, -1));
// Step 1: compute QR factorization of conjugate transpose of A using geqrf
geqrf_kernel(Ah, tau);
// explicitly broadcast the batch dimensions of A
// we do it after geqrf so that we don't do redundant computations for the same input
auto A_expand_batch = expand_batch_portion;
A_expand_batch.insert(A_expand_batch.end(), {Ah.size(-2), Ah.size(-1)});
Tensor Ah_expanded = Ah.expand({A_expand_batch});
bool is_fortran_contiguous = Ah_expanded.transpose(-2, -1).is_contiguous();
Tensor Ah_broadcasted = is_fortran_contiguous ? Ah_expanded : cloneBatchedColumnMajor(Ah_expanded);
// Step 2: R^H Z = B
bool upper = true;
bool transpose = true;
bool conjugate_transpose = true;
bool unitriangular = false;
triangular_solve_kernel(
const_cast<Tensor&>(Ah_broadcasted),
const_cast<Tensor&>(B),
const_cast<Tensor&>(infos),
upper, transpose, conjugate_transpose, unitriangular);
// B matrix has the size max(m, n) x nrhs
// triangular_solve_kernel writes its output into the first m rows of B leaving the rest untouched
// we need to set the rest of the rows to zero so that the multiplication from step 3 is correct
B.narrow(-2, m, n - m).zero_();
auto tau_expand_batch = expand_batch_portion;
tau_expand_batch.push_back(tau.size(-1));
Tensor tau_broadcasted = tau.expand({tau_expand_batch}).contiguous();
// Step 3: X <- Q Z
ormqr_kernel(Ah_broadcasted, tau_broadcasted, B, /*left=*/true, /*transpose=*/false);
}
}
void gels_looped(const Tensor& a, Tensor& b, Tensor& infos) {
#if defined(USE_CUSOLVER)
// linalg_lstsq_gels is a generic function that is implemented using
// geqrf_stub, ormqr_stub, and triangular_solve_stub
// It dispatches to cuSOLVER for CUDA inputs if USE_CUSOLVER is defined
return linalg_lstsq_gels(a, b, infos);
#else
return gels_magma(a, b, infos);
#endif
}
void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singular_values*/, Tensor& infos, double /*rcond*/, std::string /*driver_name*/) {
auto m = a.size(-2);
auto n = a.size(-1);
// first handle the underdetermined case (m < n)
// this case is not supported by MAGMA or cuBLAS
if (m < n) {
#if defined(USE_CUSOLVER)
linalg_lstsq_gels(a, b, infos);
#else
TORCH_CHECK(
false,
"torch.linalg.lstsq: only overdetermined systems (input.size(-2) >= input.size(-1)) are allowed on CUDA. ",
"Please rebuild with cuSOLVER.");
#endif
} else { // m >= n
#ifndef USE_MAGMA
// MAGMA is not available, so we can either use cuBLAS or cuSOLVER here
// the batched vs looped dispatch is implemented based on the following performance results
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
if (m <= 256 && batchCount(b) >= std::max<int64_t>(2, m / 16)) {
// if CUDART_VERSION is defined then cuBLAS is available
#ifdef CUDART_VERSION
gels_batched_cublas(a, b, infos);
#else
// this would either call cuSOLVER or MAGMA;
// if MAGMA is called, a runtime error is thrown about MAGMA not being found at compile time
gels_looped(a, b, infos);
#endif // CUDART_VERSION
} else {
gels_looped(a, b, infos);
}
#else
// if both MAGMA and cuSOLVER are available this would call cuSOLVER
// MAGMA is called if cuSOLVER is not available
gels_looped(a, b, infos);
#endif // USE_MAGMA
}
}
REGISTER_DISPATCH(lstsq_stub, &lstsq_kernel);
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
a844720c02b482ee6d4eddf1033f9bb127df086e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <stdbool.h>
#include <math.h>
using namespace cv;
using namespace std;
#define BLOCKSIZE 32 // The number of threads per block should not be greater than 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef struct
{
float x,y,w,h,s;
}box;
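// computes the intersection-over-union (IoU) of two boxes given as (x, y, w, h):
// IoU = intersection / (area(b1) + area(b2) - intersection)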
__device__
float IOUcalc(box b1, box b2)
{
float ai = (float)(b1.w)*(b1.h);
float aj = (float)(b2.w)*(b2.h);
float x_inter, x2_inter, y_inter, y2_inter;
x_inter = max(b1.x,b2.x);
y_inter = max(b1.y,b2.y);
x2_inter = min((b1.x + b1.w),(b2.x + b2.w));
y2_inter = min((b1.y + b1.h),(b2.y + b2.h));
float w = (float)max((float)0, x2_inter - x_inter);
float h = (float)max((float)0, y2_inter - y_inter);
float inter = ((w*h)/(ai + aj - w*h));
return inter;
}
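// one thread per (abs_x, abs_y) pair of boxes: box abs_x is suppressed when a higher-scoring
// box abs_y overlaps it with IoU greater than the threshold theta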
__global__
void NMS_GPU(box *d_b, bool *d_res, int count)
{
int abs_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int abs_x = (blockIdx.x * blockDim.x) + threadIdx.x;
float theta = 0.6;
// discard out-of-range threads: the grid is rounded up to a multiple of BLOCKSIZE,
// so abs_x or abs_y may exceed the number of boxes
if(abs_x >= count || abs_y >= count) return;
if(d_b[abs_x].s < d_b[abs_y].s)
{
if(IOUcalc(d_b[abs_y],d_b[abs_x])>theta)
{
d_res[abs_x] = false;
}
}
}
int main()
{
int count = 6000;
bool *h_res =(bool *)malloc(sizeof(bool)*count);
for(int i=0; i<count; i++)
{
h_res[i] = true;
}
box b[count];
std::ifstream in;
std::string line;
in.open("../boxes.txt"); //y1, x1, y2, x2
if (in.is_open())
{
int i = 0;
while(getline(in, line))
{
istringstream iss(line);
iss >> b[i].y;
iss >> b[i].x;
iss >> b[i].h; //y2
iss >> b[i].w; //x2
b[i].h-=b[i].y; //y2 -> h
b[i].w-=b[i].x; //x2 -> w
i+=1;
if(i==count) break;
}
}
in.close();
in.open("../scores.txt");
if (in.is_open())
{
int i = 0;
while(in >> b[i].s)
{
i+=1;
if(i==count) break;
}
}
in.close();
box *d_b;
bool *d_res;
gpuErrchk(hipMalloc((void**)&d_res, count*sizeof(bool)));
gpuErrchk(hipMemcpy(d_res, h_res,sizeof(bool)*count, hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_b,sizeof(box)*count));
gpuErrchk(hipMemcpy(d_b, b,sizeof(box)*count, hipMemcpyHostToDevice));
//Setting 1: can only work when count <= 1024
//NMS_GPU<<<dim3(1,count,1),count>>>(d_b,d_res,count);
//Setting 2: work when count > 1024
//NMS_GPU<<<dim3(count,count,1), 1>>>(d_b,d_res,count);
//Setting 3: work when count > 1024, faster than Setting 2
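// one thread per (i, j) box pair; the grid is rounded up to a multiple of BLOCKSIZE,
// so the kernel discards threads with indices >= count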
dim3 gridSize(int(ceil(float(count)/BLOCKSIZE)), int(ceil(float(count)/BLOCKSIZE)),1);
dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1);
hipLaunchKernelGGL(( NMS_GPU), dim3(gridSize), dim3(blockSize), 0, 0, d_b, d_res, count);
hipDeviceSynchronize();
gpuErrchk(hipMemcpy(h_res, d_res, sizeof(bool)*count, hipMemcpyDeviceToHost));
printf("Suppressed box id:\n");
for(int i =0; i<count ; i++)
{
if(*(h_res+i) != true)
{
printf("%d ",i);
}
}
return 0;
} | a844720c02b482ee6d4eddf1033f9bb127df086e.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <stdbool.h>
#include <math.h>
using namespace cv;
using namespace std;
#define BLOCKSIZE 32 // The number of threads per block should not be greater than 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef struct
{
float x,y,w,h,s;
}box;
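// computes the intersection-over-union (IoU) of two boxes given as (x, y, w, h):
// IoU = intersection / (area(b1) + area(b2) - intersection)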
__device__
float IOUcalc(box b1, box b2)
{
float ai = (float)(b1.w)*(b1.h);
float aj = (float)(b2.w)*(b2.h);
float x_inter, x2_inter, y_inter, y2_inter;
x_inter = max(b1.x,b2.x);
y_inter = max(b1.y,b2.y);
x2_inter = min((b1.x + b1.w),(b2.x + b2.w));
y2_inter = min((b1.y + b1.h),(b2.y + b2.h));
float w = (float)max((float)0, x2_inter - x_inter);
float h = (float)max((float)0, y2_inter - y_inter);
float inter = ((w*h)/(ai + aj - w*h));
return inter;
}
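// one thread per (abs_x, abs_y) pair of boxes: box abs_x is suppressed when a higher-scoring
// box abs_y overlaps it with IoU greater than the threshold theta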
__global__
void NMS_GPU(box *d_b, bool *d_res, int count)
{
int abs_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int abs_x = (blockIdx.x * blockDim.x) + threadIdx.x;
float theta = 0.6;
// discard out-of-range threads: the grid is rounded up to a multiple of BLOCKSIZE,
// so abs_x or abs_y may exceed the number of boxes
if(abs_x >= count || abs_y >= count) return;
if(d_b[abs_x].s < d_b[abs_y].s)
{
if(IOUcalc(d_b[abs_y],d_b[abs_x])>theta)
{
d_res[abs_x] = false;
}
}
}
int main()
{
int count = 6000;
bool *h_res =(bool *)malloc(sizeof(bool)*count);
for(int i=0; i<count; i++)
{
h_res[i] = true;
}
box b[count];
std::ifstream in;
std::string line;
in.open("../boxes.txt"); //y1, x1, y2, x2
if (in.is_open())
{
int i = 0;
while(getline(in, line))
{
istringstream iss(line);
iss >> b[i].y;
iss >> b[i].x;
iss >> b[i].h; //y2
iss >> b[i].w; //x2
b[i].h-=b[i].y; //y2 -> h
b[i].w-=b[i].x; //x2 -> w
i+=1;
if(i==count) break;
}
}
in.close();
in.open("../scores.txt");
if (in.is_open())
{
int i = 0;
while(in >> b[i].s)
{
i+=1;
if(i==count) break;
}
}
in.close();
box *d_b;
bool *d_res;
gpuErrchk(cudaMalloc((void**)&d_res, count*sizeof(bool)));
gpuErrchk(cudaMemcpy(d_res, h_res,sizeof(bool)*count, cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_b,sizeof(box)*count));
gpuErrchk(cudaMemcpy(d_b, b,sizeof(box)*count, cudaMemcpyHostToDevice));
//Setting 1: can only work when count <= 1024
//NMS_GPU<<<dim3(1,count,1),count>>>(d_b,d_res,count);
//Setting 2: work when count > 1024
//NMS_GPU<<<dim3(count,count,1), 1>>>(d_b,d_res,count);
//Setting 3: work when count > 1024, faster than Setting 2
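// one thread per (i, j) box pair; the grid is rounded up to a multiple of BLOCKSIZE,
// so the kernel discards threads with indices >= count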
dim3 gridSize(int(ceil(float(count)/BLOCKSIZE)), int(ceil(float(count)/BLOCKSIZE)),1);
dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1);
NMS_GPU<<<gridSize, blockSize>>>(d_b, d_res, count);
cudaThreadSynchronize();
gpuErrchk(cudaMemcpy(h_res, d_res, sizeof(bool)*count, cudaMemcpyDeviceToHost));
printf("Suppressed box id:\n");
for(int i =0; i<count ; i++)
{
if(*(h_res+i) != true)
{
printf("%d ",i);
}
}
return 0;
} |
3e857203cb52e1ea95cc92f0cd6a2b6d758f260f.hip | // !!! This is a file automatically generated by hipify!!!
/*--------------------------------------------------------------------*/
/* CUDA utility Library */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static hipError_t crc;
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize(int nblock) {
/* set blocksize */
nblock_size = nblock;
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc() {
/* get major and minor computer capability */
return mmcc;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate global float memory on GPU, return pointer to C */
void *gptr;
crc = hipMalloc(&gptr,sizeof(float)*nsize);
if (crc) {
printf("hipMalloc float Error=%d:%s,l=%d\n",crc,
hipGetErrorString(crc),nsize);
*irc = 1;
}
*g_f = (float *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
/* allocate global integer memory on GPU, return pointer to C */
void *gptr;
crc = hipMalloc(&gptr,sizeof(int)*nsize);
if (crc) {
printf("hipMalloc int Error=%d:%s,l=%d\n",crc,
hipGetErrorString(crc),nsize);
*irc = 1;
}
*g_i = (int *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) {
/* allocate global float2 memory on GPU, return pointer to C */
void *gptr;
crc = hipMalloc(&gptr,sizeof(float2)*nsize);
if (crc) {
printf("hipMalloc float2 Error=%d:%s,l=%d\n",crc,
hipGetErrorString(crc),nsize);
*irc = 1;
}
*g_c = (float2 *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(void *g_d, int *irc) {
/* deallocate global memory on GPU */
crc = hipFree(g_d);
if (crc) {
printf("hipFree Error=%d:%s\n",crc,hipGetErrorString(crc));
*irc = 1;
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy float array from host memory to global GPU memory */
crc = hipMemcpy((void *)g_f,f,sizeof(float)*nsize,
hipMemcpyHostToDevice);
if (crc) {
printf("hipMemcpyHostToDevice float Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy float array from global GPU memory to host memory */
crc = hipMemcpy(f,(void *)g_f,sizeof(float)*nsize,
hipMemcpyDeviceToHost);
if (crc) {
printf("hipMemcpyDeviceToHost float Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin(int *f, int *g_f, int nsize) {
/* copy int array from host memory to global GPU memory */
crc = hipMemcpy((void *)g_f,f,sizeof(int)*nsize,
hipMemcpyHostToDevice);
if (crc) {
printf("hipMemcpyHostToDevice int Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout(int *f, int *g_f, int nsize) {
/* copy int array from global GPU memory to host memory */
crc = hipMemcpy(f,(void *)g_f,sizeof(int)*nsize,
hipMemcpyDeviceToHost);
if (crc) {
printf("hipMemcpyDeviceToHost int Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from host memory to global GPU memory */
crc = hipMemcpy((void *)g_f,f,sizeof(float2)*nsize,
hipMemcpyHostToDevice);
if (crc) {
printf("hipMemcpyHostToDevice float2 Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from global GPU memory to host memory */
crc = hipMemcpy(f,(void *)g_f,sizeof(float2)*nsize,
hipMemcpyDeviceToHost);
if (crc) {
printf("hipMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem(float *g_f, int nsize) {
/* initialize float array in global GPU memory to zero */
crc = hipMemset((void *)g_f,0,sizeof(float)*nsize);
if (crc) {
printf("hipMemset Error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zcmem(float2 *g_f, int nsize) {
/* initialize float2 array in global GPU memory to zero */
crc = hipMemset((void *)g_f,0,sizeof(float2)*nsize);
if (crc) {
printf("hipMemset Error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size(int nscache) {
/* request preferred cache size, requires CUDA 3.2 or higher */
/* nscache = (0,1,2) = (no,small,big) cache size */
hipFuncCache_t cpref;
if ((nscache < 0) || (nscache > 2))
return;
if (nscache==0)
cpref = hipFuncCachePreferNone;
else if (nscache==1)
cpref = hipFuncCachePreferShared;
else if (nscache==2)
cpref = hipFuncCachePreferL1;
crc = hipDeviceSetCacheConfig(cpref);
/* crc = hipDeviceSetCacheConfig(cpref); */
if (crc) {
printf("hipDeviceSetCacheConfig error=%d:%s\n",crc,
hipGetErrorString(crc));
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
int ngx, ngy;
ngx = nblock_size < 32768 ? nblock_size : 32768;
ngy = (ngrid_size - 1)/ngx + 1;
dim3 dimBlock(nblock_size,1);
dim3 dimGrid(ngx,ngy);
crc = hipGetLastError();
hipLaunchKernelGGL(( emptyKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, );
hipDeviceSynchronize();
crc = hipGetLastError();
if (crc) {
printf("emptyKernel error=%d:%s\n",crc,hipGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev or selects best GPU available */
/* searches through devices, selects the device with the most compute */
/* units, and saves the device id devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* error code is modified only if there is an error */
int maxcpus = 0, jm = -1;
int j, ndevs, maxunits;
unsigned long msize;
double z;
struct hipDeviceProp_t prop;
/* returns number of device */
crc = hipGetDeviceCount(&ndevs);
if (crc) {
printf("hipGetDeviceCount Error=%i:%s\n",crc,
hipGetErrorString(crc));
*irc = 1;
return;
}
/* get information about devices */
for (j = 0; j < ndevs; j++) {
crc = hipGetDeviceProperties(&prop,j);
if (crc) {
printf("hipGetDeviceProperties Error=%i:%s\n",crc,
hipGetErrorString(crc));
prop.name[0] = 0;
}
maxunits = prop.multiProcessorCount;
if (dev <= 0) {
printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
j,prop.name,maxunits);
msize = prop.totalGlobalMem;
z = ((double) msize)/1073741824.0;
mmcc = 10*prop.major + prop.minor;
printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
msize,(float) z,mmcc);
if (maxunits > maxcpus) {
maxcpus = maxunits;
jm = j;
}
}
}
devid = jm;
if (dev >= 0)
devid = dev % ndevs;
printf("using device j=%i\n",devid);
/* get properties for this device */
crc = hipGetDeviceProperties(&prop,devid);
maxgsx = prop.maxGridSize[0];
mmcc = 10*prop.major + prop.minor;
/* set device */
crc = hipSetDevice(devid);
if (crc) {
printf("hipSetDevice Error=%i:%s\n",crc,
hipGetErrorString(crc));
*irc = 1;
return;
}
/* run empty kernel */
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu() {
/* terminate CUDA */
crc = hipDeviceReset();
if (crc) {
printf("hipDeviceReset Error=%d:%s\n",crc,hipGetErrorString(crc));
}
return;
}
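/* Illustrative usage sketch (not part of the original library): a minimal C caller of the
   routines above. The names h_a, d_a, n and dev are hypothetical.
   int irc = 0, dev = 0, n = 1024;
   float *h_a = (float *)calloc(n, sizeof(float));
   float *d_a = NULL;
   init_cu(dev, &irc);                select device, run empty kernel
   gpu_fallocate(&d_a, n, &irc);      allocate n floats on the GPU
   gpu_fcopyin(h_a, d_a, n);          host -> device
   gpu_zfmem(d_a, n);                 zero the device buffer
   gpu_fcopyout(h_a, d_a, n);         device -> host
   gpu_deallocate((void *)d_a, &irc);
   end_cu();
   free(h_a);
*/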
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize_(int *nblock) {
gpu_setgbsize(*nblock);
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc_() {
/* get major and minor computer capability */
return getmmcc();
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
float *fptr;
gpu_fallocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
int *irc) {
/* allocate global integer memory on GPU, return pointer to Fortran */
int *iptr;
gpu_iallocate(&iptr,*nsize,irc);
*gp_i = (long )iptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float2 memory on GPU, return pointer */
/* to Fortran */
float2 *fptr;
gpu_callocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
/* deallocate global memory on GPU, return pointer to Fortran */
void *d;
d = (void *)*gp_d;
gpu_deallocate(d,irc);
*gp_d = 0;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from main memory to global GPU memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from global GPU memory to main memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from main memory to global GPU memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from global GPU memory to main memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from main memory to global GPU memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from global GPU memory to main memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zcmem_(unsigned long *gp_f, int *nsize) {
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_zcmem(g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem_(unsigned long *gp_f, int *nsize) {
float *g_f;
g_f = (float *)*gp_f;
gpu_zfmem(g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size_(int *nscache) {
gpu_set_cache_size(*nscache);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel_() {
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
init_cu(*dev,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
end_cu();
return;
}
| 3e857203cb52e1ea95cc92f0cd6a2b6d758f260f.cu | /*--------------------------------------------------------------------*/
/* CUDA utility Library */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize(int nblock) {
/* set blocksize */
nblock_size = nblock;
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc() {
/* get major and minor computer capability */
return mmcc;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate global float memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(float)*nsize);
if (crc) {
printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_f = (float *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
/* allocate global integer memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(int)*nsize);
if (crc) {
printf("cudaMalloc int Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_i = (int *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) {
/* allocate global float2 memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(float2)*nsize);
if (crc) {
printf("cudaMalloc float2 Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_c = (float2 *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(void *g_d, int *irc) {
/* deallocate global memory on GPU */
crc = cudaFree(g_d);
if (crc) {
printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
*irc = 1;
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy float array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy float array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin(int *f, int *g_f, int nsize) {
/* copy int array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(int)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice int Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout(int *f, int *g_f, int nsize) {
/* copy int array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(int)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost int Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(float2)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(float2)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem(float *g_f, int nsize) {
/* initialize float array in global GPU memory to zero */
crc = cudaMemset((void *)g_f,0,sizeof(float)*nsize);
if (crc) {
printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zcmem(float2 *g_f, int nsize) {
/* initialize float2 array in global GPU memory to zero */
crc = cudaMemset((void *)g_f,0,sizeof(float2)*nsize);
if (crc) {
printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size(int nscache) {
/* request preferred cache size, requires CUDA 3.2 or higher */
/* nscache = (0,1,2) = (no,small,big) cache size */
cudaFuncCache cpref;
if ((nscache < 0) || (nscache > 2))
return;
if (nscache==0)
cpref = cudaFuncCachePreferNone;
else if (nscache==1)
cpref = cudaFuncCachePreferShared;
else if (nscache==2)
cpref = cudaFuncCachePreferL1;
crc = cudaThreadSetCacheConfig(cpref);
/* crc = cudaDeviceSetCacheConfig(cpref); */
if (crc) {
printf("cudaThreadSetCacheConfig error=%d:%s\n",crc,
cudaGetErrorString(crc));
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
int ngx, ngy;
ngx = nblock_size < 32768 ? nblock_size : 32768;
ngy = (ngrid_size - 1)/ngx + 1;
dim3 dimBlock(nblock_size,1);
dim3 dimGrid(ngx,ngy);
crc = cudaGetLastError();
emptyKernel<<<dimGrid,dimBlock>>>();
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev or selects best GPU available */
/* searches through devices, selects the device with the most compute */
/* units, and saves the device id devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* error code is modified only if there is an error */
int maxcpus = 0, jm = -1;
int j, ndevs, maxunits;
unsigned long msize;
double z;
struct cudaDeviceProp prop;
/* returns number of device */
crc = cudaGetDeviceCount(&ndevs);
if (crc) {
printf("cudaGetDeviceCount Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* get information about devices */
for (j = 0; j < ndevs; j++) {
crc = cudaGetDeviceProperties(&prop,j);
if (crc) {
printf("cudaGetDeviceProperties Error=%i:%s\n",crc,
cudaGetErrorString(crc));
prop.name[0] = 0;
}
maxunits = prop.multiProcessorCount;
if (dev <= 0) {
printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
j,prop.name,maxunits);
msize = prop.totalGlobalMem;
z = ((double) msize)/1073741824.0;
mmcc = 10*prop.major + prop.minor;
printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
msize,(float) z,mmcc);
if (maxunits > maxcpus) {
maxcpus = maxunits;
jm = j;
}
}
}
devid = jm;
if (dev >= 0)
devid = dev % ndevs;
printf("using device j=%i\n",devid);
/* get properties for this device */
crc = cudaGetDeviceProperties(&prop,devid);
maxgsx = prop.maxGridSize[0];
mmcc = 10*prop.major + prop.minor;
/* set device */
crc = cudaSetDevice(devid);
if (crc) {
printf("cudaSetDevice Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* run empty kernel */
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu() {
/* terminate CUDA */
crc = cudaThreadExit();
if (crc) {
printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc));
}
return;
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize_(int *nblock) {
gpu_setgbsize(*nblock);
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc_() {
/* get major and minor computer capability */
return getmmcc();
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
float *fptr;
gpu_fallocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
int *irc) {
/* allocate global integer memory on GPU, return pointer to Fortran */
int *iptr;
gpu_iallocate(&iptr,*nsize,irc);
*gp_i = (long )iptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float2 memory on GPU, return pointer */
/* to Fortran */
float2 *fptr;
gpu_callocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
/* deallocate global memory on GPU, return pointer to Fortran */
void *d;
d = (void *)*gp_d;
gpu_deallocate(d,irc);
*gp_d = 0;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from main memory to global GPU memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from global GPU memory to main memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from main memory to global GPU memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from global GPU memory to main memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from main memory to global GPU memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from global GPU memory to main memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zcmem_(unsigned long *gp_f, int *nsize) {
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_zcmem(g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem_(unsigned long *gp_f, int *nsize) {
float *g_f;
g_f = (float *)*gp_f;
gpu_zfmem(g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size_(int *nscache) {
gpu_set_cache_size(*nscache);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel_() {
emptykernel();
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
init_cu(*dev,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
end_cu();
return;
}
|
120ee4c5616383d4a10f2c71d88cd2c7aff1e550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Cart <-> Polar
struct Nothing
{
static __device__ __forceinline__ void calc(int, int, float, float, float*, size_t, float)
{
}
};
struct Magnitude
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = ::sqrtf(x_data * x_data + y_data * y_data);
}
};
struct MagnitudeSqr
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = x_data * x_data + y_data * y_data;
}
};
struct Atan2
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
{
float angle = ::atan2f(y_data, x_data);
angle += (angle < 0) * 2.0 * CV_PI;
dst[y * dst_step + x] = scale * angle;
}
};
template <typename Mag, typename Angle>
__global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step,
float* mag, size_t mag_step, float* angle, size_t angle_step, float scale, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float x_data = xptr[y * x_step + x];
float y_data = yptr[y * y_step + x];
Mag::calc(x, y, x_data, y_data, mag, mag_step, scale);
Angle::calc(x, y, x_data, y_data, angle, angle_step, scale);
}
}
struct NonEmptyMag
{
static __device__ __forceinline__ float get(const float* mag, size_t mag_step, int x, int y)
{
return mag[y * mag_step + x];
}
};
struct EmptyMag
{
static __device__ __forceinline__ float get(const float*, size_t, int, int)
{
return 1.0f;
}
};
template <typename Mag>
__global__ void polarToCart(const float* mag, size_t mag_step, const float* angle, size_t angle_step, float scale,
float* xptr, size_t x_step, float* yptr, size_t y_step, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float mag_data = Mag::get(mag, mag_step, x, y);
float angle_data = angle[y * angle_step + x];
float sin_a, cos_a;
::sincosf(scale * angle_data, &sin_a, &cos_a);
xptr[y * x_step + x] = mag_data * cos_a;
yptr[y * y_step + x] = mag_data * sin_a;
}
}
template <typename Mag, typename Angle>
void cartToPolar_caller(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, PtrStepSzf angle, bool angleInDegrees, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(x.cols, threads.x);
grid.y = divUp(x.rows, threads.y);
const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;
hipLaunchKernelGGL(( cartToPolar<Mag, Angle>), dim3(grid), dim3(threads), 0, stream,
x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),
mag.data, mag.step/mag.elemSize(), angle.data, angle.step/angle.elemSize(), scale, x.cols, x.rows);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void cartToPolar_gpu(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, bool magSqr, PtrStepSzf angle, bool angleInDegrees, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, PtrStepSzf angle, bool angleInDegrees, hipStream_t stream);
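// The table below is indexed as callers[mag.data == 0][magSqr][angle.data == 0]:
// the first index is 0 when a magnitude output is requested, the second selects the
// plain vs. squared magnitude functor, and the third is 0 when an angle output is
// requested (otherwise the Nothing functor is substituted).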
static const caller_t callers[2][2][2] =
{
{
{
cartToPolar_caller<Magnitude, Atan2>,
cartToPolar_caller<Magnitude, Nothing>
},
{
cartToPolar_caller<MagnitudeSqr, Atan2>,
cartToPolar_caller<MagnitudeSqr, Nothing>,
}
},
{
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>
},
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>,
}
}
};
callers[mag.data == 0][magSqr][angle.data == 0](x, y, mag, angle, angleInDegrees, stream);
}
template <typename Mag>
void polarToCart_caller(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, hipStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(mag.cols, threads.x);
grid.y = divUp(mag.rows, threads.y);
const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;
hipLaunchKernelGGL(( polarToCart<Mag>), dim3(grid), dim3(threads), 0, stream, mag.data, mag.step/mag.elemSize(),
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void polarToCart_gpu(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, hipStream_t stream);
static const caller_t callers[2] =
{
polarToCart_caller<NonEmptyMag>,
polarToCart_caller<EmptyMag>
};
callers[mag.data == 0](mag, angle, x, y, angleInDegrees, stream);
}
} // namespace mathfunc
}}} // namespace cv { namespace gpu { namespace device
| 120ee4c5616383d4a10f2c71d88cd2c7aff1e550.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
namespace cv { namespace gpu { namespace device
{
namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Cart <-> Polar
struct Nothing
{
static __device__ __forceinline__ void calc(int, int, float, float, float*, size_t, float)
{
}
};
struct Magnitude
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = ::sqrtf(x_data * x_data + y_data * y_data);
}
};
struct MagnitudeSqr
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float)
{
dst[y * dst_step + x] = x_data * x_data + y_data * y_data;
}
};
struct Atan2
{
static __device__ __forceinline__ void calc(int x, int y, float x_data, float y_data, float* dst, size_t dst_step, float scale)
{
float angle = ::atan2f(y_data, x_data);
angle += (angle < 0) * 2.0 * CV_PI;
dst[y * dst_step + x] = scale * angle;
}
};
template <typename Mag, typename Angle>
__global__ void cartToPolar(const float* xptr, size_t x_step, const float* yptr, size_t y_step,
float* mag, size_t mag_step, float* angle, size_t angle_step, float scale, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float x_data = xptr[y * x_step + x];
float y_data = yptr[y * y_step + x];
Mag::calc(x, y, x_data, y_data, mag, mag_step, scale);
Angle::calc(x, y, x_data, y_data, angle, angle_step, scale);
}
}
struct NonEmptyMag
{
static __device__ __forceinline__ float get(const float* mag, size_t mag_step, int x, int y)
{
return mag[y * mag_step + x];
}
};
struct EmptyMag
{
static __device__ __forceinline__ float get(const float*, size_t, int, int)
{
return 1.0f;
}
};
template <typename Mag>
__global__ void polarToCart(const float* mag, size_t mag_step, const float* angle, size_t angle_step, float scale,
float* xptr, size_t x_step, float* yptr, size_t y_step, int width, int height)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float mag_data = Mag::get(mag, mag_step, x, y);
float angle_data = angle[y * angle_step + x];
float sin_a, cos_a;
::sincosf(scale * angle_data, &sin_a, &cos_a);
xptr[y * x_step + x] = mag_data * cos_a;
yptr[y * y_step + x] = mag_data * sin_a;
}
}
template <typename Mag, typename Angle>
void cartToPolar_caller(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, PtrStepSzf angle, bool angleInDegrees, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(x.cols, threads.x);
grid.y = divUp(x.rows, threads.y);
const float scale = angleInDegrees ? (float)(180.0f / CV_PI) : 1.f;
cartToPolar<Mag, Angle><<<grid, threads, 0, stream>>>(
x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(),
mag.data, mag.step/mag.elemSize(), angle.data, angle.step/angle.elemSize(), scale, x.cols, x.rows);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void cartToPolar_gpu(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, bool magSqr, PtrStepSzf angle, bool angleInDegrees, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSzf x, PtrStepSzf y, PtrStepSzf mag, PtrStepSzf angle, bool angleInDegrees, cudaStream_t stream);
static const caller_t callers[2][2][2] =
{
{
{
cartToPolar_caller<Magnitude, Atan2>,
cartToPolar_caller<Magnitude, Nothing>
},
{
cartToPolar_caller<MagnitudeSqr, Atan2>,
cartToPolar_caller<MagnitudeSqr, Nothing>,
}
},
{
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>
},
{
cartToPolar_caller<Nothing, Atan2>,
cartToPolar_caller<Nothing, Nothing>,
}
}
};
callers[mag.data == 0][magSqr][angle.data == 0](x, y, mag, angle, angleInDegrees, stream);
}
template <typename Mag>
void polarToCart_caller(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, cudaStream_t stream)
{
dim3 threads(32, 8, 1);
dim3 grid(1, 1, 1);
grid.x = divUp(mag.cols, threads.x);
grid.y = divUp(mag.rows, threads.y);
const float scale = angleInDegrees ? (float)(CV_PI / 180.0f) : 1.0f;
polarToCart<Mag><<<grid, threads, 0, stream>>>(mag.data, mag.step/mag.elemSize(),
angle.data, angle.step/angle.elemSize(), scale, x.data, x.step/x.elemSize(), y.data, y.step/y.elemSize(), mag.cols, mag.rows);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void polarToCart_gpu(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSzf mag, PtrStepSzf angle, PtrStepSzf x, PtrStepSzf y, bool angleInDegrees, cudaStream_t stream);
static const caller_t callers[2] =
{
polarToCart_caller<NonEmptyMag>,
polarToCart_caller<EmptyMag>
};
callers[mag.data == 0](mag, angle, x, y, angleInDegrees, stream);
}
} // namespace mathfunc
}}} // namespace cv { namespace gpu { namespace device
|
d15b85110f549b17305287f01a6c85f0317268e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is callable
from the host and, on compute capability 3.2 and above, also from the device; its return
type must be void; calls to such a function are asynchronous, returning before the device
has finished executing it; every call must supply an execution configuration, i.e. the
grid and block dimensions used on the device and the associated stream (inserted with the
<<< >>> operator);
"a kernel": a CUDA parallel function that runs on the GPU is called a kernel, and a kernel
must be declared with the __global__ qualifier; */
__global__ static void bgr2gray(const unsigned char* src, int B2Y, int G2Y, int R2Y, int shift, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
constant for all thread blocks and holds the number of blocks in each dimension of the
grid. A grid is three-dimensional and of type dim3;
blockDim: built-in variable giving the dimensions and size of each block; of type dim3,
it holds the number of threads in each dimension of a block and is a constant for all
blocks;
blockIdx: built-in variable holding the index of the block that is currently executing
the device code, i.e. the position of the current thread's block within the whole grid;
blockIdx.x ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type
uint3;
threadIdx: built-in variable holding the index of the thread that is currently executing
the device code, i.e. its position within its block; threadIdx.x is available for 1-D
blocks, threadIdx.y additionally for 2-D blocks and threadIdx.z for 3-D blocks; of type
uint3, giving the thread's index in each dimension of the block */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d, %d, %d, %d, %d\n", width, height, B2Y, G2Y, R2Y, shift);
//}
if (x < width && y < height) {
dst[y * width + x] = (unsigned char)((src[y*width * 3 + 3 * x + 0] * B2Y +
src[y*width * 3 + 3 * x + 1] * G2Y + src[y*width * 3 + 3 * x + 2] * R2Y) >> shift);
}
}
int bgr2gray_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int R2Y{ 4899 }, G2Y{ 9617 }, B2Y{ 1868 }, yuv_shift{ 14 };
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// hipMalloc: allocate memory on the device
hipMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
hipMalloc(&dev_dst, width * height * sizeof(unsigned char));
/* hipMemcpy: copies data between the host and the device; the fourth argument must be
one of the following:
(1). hipMemcpyHostToHost: copy from host to host
(2). hipMemcpyHostToDevice: copy from host to device
(3). hipMemcpyDeviceToHost: copy from device to host
(4). hipMemcpyDeviceToDevice: copy from device to device
(5). hipMemcpyDefault: infer the direction from the pointer values; requires unified
virtual addressing (CUDA 6.0 and later)
hipMemcpy is synchronous with respect to the host */
hipMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), hipMemcpyHostToDevice);
/* hipMemset: memory initialization function, executed on GPU memory; initializes or
sets device memory to the specified value */
hipMemset(dev_dst, 0, width * height * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned
ints; it can describe a three-dimensional array; any component left unset when a dim3
variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must not exceed 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
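// Worked example (illustrative, not from the original source): for a 1920x1080 image,
// blocks = dim3(60, 34), so 1920x1088 threads are launched and the kernel's
// (x < width && y < height) test discards the 8 surplus rows.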
/* <<< >>>: operator introduced by CUDA to specify the thread grid and block dimensions
and other execution parameters; it passes the execution configuration to the compiler
and runtime, describing how many threads run the kernel and how they are organized. The
values inside the angle brackets are not arguments passed to the device code; they tell
the runtime how to launch it. The arguments for the device code itself are passed inside
the parentheses, just like a normal function call. Devices of different compute
capability place different limits on the total number of threads and on how they may be
organized. All arrays and variables used in the kernel must be allocated with enough
space before the kernel is called, otherwise errors such as out-of-bounds accesses occur
during GPU computation.
With the runtime API, the execution configuration is written as <<<Dg,Db,Ns,S>>> between
the kernel name and its argument list, where: Dg is a dim3 value setting the grid
dimensions and sizes, so the grid contains Dg.x*Dg.y*Dg.z blocks; Db is a dim3 value
setting the block dimensions and sizes, so each block contains Db.x*Db.y*Db.z threads;
Ns is a size_t specifying the amount of dynamically allocated shared memory per block
for this call, available to variables declared as extern __shared__; Ns is optional and
defaults to 0; S is of type hipStream_t and sets the stream associated with the kernel;
S is optional and defaults to 0. */
// Note: a kernel cannot take a vector's data() pointer as an argument; hipMalloc and
// hipMemcpy are required because the vector lives in host memory
bgr2gray << <blocks, threads >> >(dev_src, B2Y, G2Y, R2Y, yuv_shift, width, height, dev_dst);
/* hipDeviceSynchronize: kernel launches are asynchronous; to locate a launch error it
is usually necessary to synchronize with hipDeviceSynchronize, which blocks until all
previously requested work has completed and returns an error if any of that earlier
work failed; it is also required when the program uses several streams that must
communicate at some point; asynchronous launch
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
hipDeviceSynchronize();
TIME_END_GPU
hipMemcpy(dst, dev_dst, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost);
// hipFree: frees device memory allocated by hipMalloc
hipFree(dev_dst);
hipFree(dev_src);
return 0;
}
| d15b85110f549b17305287f01a6c85f0317268e9.cu | #include "funset.hpp"
#include <iostream>
#include <chrono>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "common.hpp"
/* __global__: function type qualifier; the function runs on the device; it is callable
from the host and, on compute capability 3.2 and above, also from the device; its return
type must be void; calls to such a function are asynchronous, returning before the device
has finished executing it; every call must supply an execution configuration, i.e. the
grid and block dimensions used on the device and the associated stream (inserted with the
<<< >>> operator);
"a kernel": a CUDA parallel function that runs on the GPU is called a kernel, and a kernel
must be declared with the __global__ qualifier; */
__global__ static void bgr2gray(const unsigned char* src, int B2Y, int G2Y, int R2Y, int shift, int width, int height, unsigned char* dst)
{
/* gridDim: built-in variable describing the dimensions of the thread grid; it is a
constant for all thread blocks and holds the number of blocks in each dimension of the
grid. A grid is three-dimensional and of type dim3;
blockDim: built-in variable giving the dimensions and size of each block; of type dim3,
it holds the number of threads in each dimension of a block and is a constant for all
blocks;
blockIdx: built-in variable holding the index of the block that is currently executing
the device code, i.e. the position of the current thread's block within the whole grid;
blockIdx.x ranges over [0, gridDim.x-1] and blockIdx.y over [0, gridDim.y-1]; of type
uint3;
threadIdx: built-in variable holding the index of the thread that is currently executing
the device code, i.e. its position within its block; threadIdx.x is available for 1-D
blocks, threadIdx.y additionally for 2-D blocks and threadIdx.z for 3-D blocks; of type
uint3, giving the thread's index in each dimension of the block */
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
//if (x == 0 && y == 0) {
// printf("%d, %d, %d, %d, %d, %d\n", width, height, B2Y, G2Y, R2Y, shift);
//}
if (x < width && y < height) {
dst[y * width + x] = (unsigned char)((src[y*width * 3 + 3 * x + 0] * B2Y +
src[y*width * 3 + 3 * x + 1] * G2Y + src[y*width * 3 + 3 * x + 2] * R2Y) >> shift);
}
}
int bgr2gray_gpu(const unsigned char* src, int width, int height, unsigned char* dst, float* elapsed_time)
{
const int R2Y{ 4899 }, G2Y{ 9617 }, B2Y{ 1868 }, yuv_shift{ 14 };
unsigned char *dev_src{ nullptr }, *dev_dst{ nullptr };
// cudaMalloc: allocate memory on the device
cudaMalloc(&dev_src, width * height * 3 * sizeof(unsigned char));
cudaMalloc(&dev_dst, width * height * sizeof(unsigned char));
/* cudaMemcpy: copies data between the host and the device; the fourth argument must be
one of the following:
(1). cudaMemcpyHostToHost: copy from host to host
(2). cudaMemcpyHostToDevice: copy from host to device
(3). cudaMemcpyDeviceToHost: copy from device to host
(4). cudaMemcpyDeviceToDevice: copy from device to device
(5). cudaMemcpyDefault: infer the direction from the pointer values; requires unified
virtual addressing (CUDA 6.0 and later)
cudaMemcpy is synchronous with respect to the host */
cudaMemcpy(dev_src, src, width * height * 3 * sizeof(unsigned char), cudaMemcpyHostToDevice);
/* cudaMemset: memory initialization function, executed on GPU memory; initializes or
sets device memory to the specified value */
cudaMemset(dev_dst, 0, width * height * sizeof(unsigned char));
TIME_START_GPU
/* dim3: built-in vector type based on uint3, equivalent to a struct of three unsigned
ints; it can describe a three-dimensional array; any component left unset when a dim3
variable is defined defaults to 1 */
// Note: a thread block supports at most 1024 threads, i.e. threads.x*threads.y must not exceed 1024
dim3 threads(32, 32);
dim3 blocks((width + 31) / 32, (height + 31) / 32);
/* <<< >>>: operator introduced by CUDA to specify the thread grid and block dimensions
and other execution parameters; it passes the execution configuration to the compiler
and runtime, describing how many threads run the kernel and how they are organized. The
values inside the angle brackets are not arguments passed to the device code; they tell
the runtime how to launch it. The arguments for the device code itself are passed inside
the parentheses, just like a normal function call. Devices of different compute
capability place different limits on the total number of threads and on how they may be
organized. All arrays and variables used in the kernel must be allocated with enough
space before the kernel is called, otherwise errors such as out-of-bounds accesses occur
during GPU computation.
With the runtime API, the execution configuration is written as <<<Dg,Db,Ns,S>>> between
the kernel name and its argument list, where: Dg is a dim3 value setting the grid
dimensions and sizes, so the grid contains Dg.x*Dg.y*Dg.z blocks; Db is a dim3 value
setting the block dimensions and sizes, so each block contains Db.x*Db.y*Db.z threads;
Ns is a size_t specifying the amount of dynamically allocated shared memory per block
for this call, available to variables declared as extern __shared__; Ns is optional and
defaults to 0; S is of type cudaStream_t and sets the stream associated with the kernel;
S is optional and defaults to 0. */
// Note: a kernel cannot take a vector's data() pointer as an argument; cudaMalloc and
// cudaMemcpy are required because the vector lives in host memory
bgr2gray << <blocks, threads >> >(dev_src, B2Y, G2Y, R2Y, yuv_shift, width, height, dev_dst);
/* cudaDeviceSynchronize: kernel launches are asynchronous; to locate a launch error it
is usually necessary to synchronize with cudaDeviceSynchronize, which blocks until all
previously requested work has completed and returns an error if any of that earlier
work failed; it is also required when the program uses several streams that must
communicate at some point; asynchronous launch
reference: https://stackoverflow.com/questions/11888772/when-to-call-cudadevicesynchronize */
cudaDeviceSynchronize();
TIME_END_GPU
cudaMemcpy(dst, dev_dst, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost);
// cudaFree: frees device memory allocated by the cudaMalloc function
cudaFree(dev_dst);
cudaFree(dev_src);
return 0;
}
|
4a60759bea2033a718524244d2fd3690e0ffe81f.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// EXAMPLE OF BAD PREFIX-SCAN CHAPTER 8
// inclusive Kogge_Stone_scan
//
////////////////////////////////////////////////////////////////////////////
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//Assumption: the number of threads will be equal to section elements
#define SECTION_SIZE 1024
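// Note: the kernel is launched below as a single block of SECTION_SIZE threads, so this
// example scans at most SECTION_SIZE elements; main() uses arraySize == SECTION_SIZE.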
hipError_t Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime);
void sequential_scan(float *x, float *y, int Max_i);
void print_Array(float *A, int size);
int verify_result(float *Y, float *YS, int size);
////////////////////////////////////////////////////////////////////////////////
//! Simple bad prefix sum
//! @param X input data in global memory
//! @param Y output data in global memory
//! @param InputSize size of input and output data
////////////////////////////////////////////////////////////////////////////////
__global__ void Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize)
{
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < InputSize) {
XY[threadIdx.x] = X[i];
}
// Perform iterative scan on XY
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
float in;
__syncthreads();
if (threadIdx.x >= stride){
in = XY[threadIdx.x - stride];
}
__syncthreads();
if (threadIdx.x >= stride){
XY[threadIdx.x] += in;
}
}
__syncthreads();
// guard the write: threads with i >= InputSize hold no valid data
if (i < InputSize) {
	Y[i] = XY[threadIdx.x];
}
}
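// Why this is the "bad" scan of Chapter 8: the Kogge-Stone pattern performs roughly
// n*log2(n) additions per section versus the n-1 additions of the sequential scan, so it
// is work-inefficient; the two __syncthreads() calls around the staged read into `in`
// are what prevent the write-after-read hazard between neighbouring threads.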
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main()
{
const int arraySize = 1024;
//float X[arraySize] = { 3, 1, 7, 0, 4, 1, 6, 3 };
float *Y, *YS, *X;
float msTime, msTime_seq;
hipEvent_t startTimeCuda, stopTimeCuda;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
X = (float*)malloc(arraySize * sizeof(float));
Y = (float*)malloc(arraySize * sizeof(float));
YS = (float*)malloc(arraySize * sizeof(float));
//fill input vector
for (int i = 0; i < arraySize; i++) {
X[i] = (float)(i + 1.0);
}
//printf("Array input: ");
//print_Array(X, arraySize);
// ---------------------- PERFORM SEQUENTIAL SCAN ----------------
printf("Sequential scan...\n");
hipEventRecord(startTimeCuda, 0);
hipEventSynchronize(startTimeCuda);
sequential_scan(X, YS, arraySize);
hipEventRecord(stopTimeCuda, 0);
hipEventSynchronize(stopTimeCuda);
hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n\n", msTime_seq);
//print_Array(YS, arraySize);
//printf(" OK!\n");
// ---------------------- PERFORM PARALELL SCAN ------------------
printf("parallel scan...\n");
hipError_t cudaStatus = Kogge_Stone_scan(X, Y, arraySize, &msTime);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
goto Error;
}
//print_Array(Y, arraySize);
//printf(" OK!\n");
// ----------------------- VERIFY THE RESULT ---------------------
if (verify_result(Y, YS, arraySize)) {
goto Error;
}
printf("TEST PASSED!\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
goto Error;
}
printf("Speedup: %f\n", msTime_seq / msTime);
free(X);
free(Y);
free(YS);
#ifdef WIN32
system("pause");
#endif // WIN32
return 0;
Error:
free(X);
free(Y);
free(YS);
#ifdef WIN32
system("pause");
#endif // WIN32
return 1;
}
// Helper function for using CUDA to perform scan in parallel.
hipError_t Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime)
{
float *dev_X, *dev_Y;
hipError_t cudaStatus;
hipEvent_t startTimeCuda, stopTimeCuda;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors.
cudaStatus = hipMalloc((void**)&dev_X, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_Y, size * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vector from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_X, X, size * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipEventRecord(startTimeCuda, 0);
hipEventSynchronize(startTimeCuda);
Kogge_Stone_scan_kernel << < 1, SECTION_SIZE >> >(dev_X, dev_Y, size);
hipEventRecord(stopTimeCuda, 0);
hipEventSynchronize(stopTimeCuda);
hipEventElapsedTime(msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n\n", *msTime);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Y, dev_Y, size * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_X);
hipFree(dev_Y);
return cudaStatus;
}
void sequential_scan(float *x, float *y, int Max_i) {
float accumulator = x[0];
y[0] = accumulator;
for (int i = 1; i < Max_i; i++) {
accumulator += x[i];
y[i] = accumulator;
}
}
void print_Array(float *A, int size) {
for (int i = 0; i < size; i++) {
printf("%.2f ", A[i]);
}
printf("\n\n");
}
int verify_result(float *Y, float *YS, int size) {
for (int i = 0; i < size; i++) {
if (Y[i] != YS[i]) {
printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i);
return 1;
}
}
return 0;
}
| 4a60759bea2033a718524244d2fd3690e0ffe81f.cu | ////////////////////////////////////////////////////////////////////////////
//
// EXAMPLE OF BAD PREFIX-SCAN CHAPTER 8
// inclusive Kogge_Stone_scan
//
////////////////////////////////////////////////////////////////////////////
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
//Assumption: the number of threads will be equal to section elements
#define SECTION_SIZE 1024
cudaError_t Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime);
void sequential_scan(float *x, float *y, int Max_i);
void print_Array(float *A, int size);
int verify_result(float *Y, float *YS, int size);
////////////////////////////////////////////////////////////////////////////////
//! Simple bad prefix sum
//! @param X input data in global memory
//! @param Y output data in global memory
//! @param InputSize size of input and output data
////////////////////////////////////////////////////////////////////////////////
__global__ void Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize)
{
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < InputSize) {
XY[threadIdx.x] = X[i];
}
// Perform iterative scan on XY
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
float in;
__syncthreads();
if (threadIdx.x >= stride){
in = XY[threadIdx.x - stride];
}
__syncthreads();
if (threadIdx.x >= stride){
XY[threadIdx.x] += in;
}
}
__syncthreads();
// guard the write: threads with i >= InputSize hold no valid data
if (i < InputSize) {
	Y[i] = XY[threadIdx.x];
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main()
{
const int arraySize = 1024;
//float X[arraySize] = { 3, 1, 7, 0, 4, 1, 6, 3 };
float *Y, *YS, *X;
float msTime, msTime_seq;
cudaEvent_t startTimeCuda, stopTimeCuda;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
X = (float*)malloc(arraySize * sizeof(float));
Y = (float*)malloc(arraySize * sizeof(float));
YS = (float*)malloc(arraySize * sizeof(float));
//fill input vector
for (int i = 0; i < arraySize; i++) {
X[i] = (float)(i + 1.0);
}
//printf("Array input: ");
//print_Array(X, arraySize);
// ---------------------- PERFORM SEQUENTIAL SCAN ----------------
printf("Sequential scan...\n");
cudaEventRecord(startTimeCuda, 0);
cudaEventSynchronize(startTimeCuda);
sequential_scan(X, YS, arraySize);
cudaEventRecord(stopTimeCuda, 0);
cudaEventSynchronize(stopTimeCuda);
cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n\n", msTime_seq);
//print_Array(YS, arraySize);
//printf(" OK!\n");
// ---------------------- PERFORM PARALELL SCAN ------------------
printf("parallel scan...\n");
cudaError_t cudaStatus = Kogge_Stone_scan(X, Y, arraySize, &msTime);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
goto Error;
}
//print_Array(Y, arraySize);
//printf(" OK!\n");
// ----------------------- VERIFY THE RESULT ---------------------
if (verify_result(Y, YS, arraySize)) {
goto Error;
}
printf("TEST PASSED!\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
goto Error;
}
printf("Speedup: %f\n", msTime_seq / msTime);
free(X);
free(Y);
free(YS);
#ifdef WIN32
system("pause");
#endif // WIN32
return 0;
Error:
free(X);
free(Y);
free(YS);
#ifdef WIN32
system("pause");
#endif // WIN32
return 1;
}
// Helper function for using CUDA to perform scan in parallel.
cudaError_t Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime)
{
float *dev_X, *dev_Y;
cudaError_t cudaStatus;
cudaEvent_t startTimeCuda, stopTimeCuda;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors.
cudaStatus = cudaMalloc((void**)&dev_X, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_Y, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vector from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_X, X, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
cudaEventRecord(startTimeCuda, 0);
cudaEventSynchronize(startTimeCuda);
Kogge_Stone_scan_kernel << < 1, SECTION_SIZE >> >(dev_X, dev_Y, size);
cudaEventRecord(stopTimeCuda, 0);
cudaEventSynchronize(stopTimeCuda);
cudaEventElapsedTime(msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n\n", *msTime);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Y, dev_Y, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_X);
cudaFree(dev_Y);
return cudaStatus;
}
void sequential_scan(float *x, float *y, int Max_i) {
float accumulator = x[0];
y[0] = accumulator;
for (int i = 1; i < Max_i; i++) {
accumulator += x[i];
y[i] = accumulator;
}
}
void print_Array(float *A, int size) {
for (int i = 0; i < size; i++) {
printf("%.2f ", A[i]);
}
printf("\n\n");
}
int verify_result(float *Y, float *YS, int size) {
for (int i = 0; i < size; i++) {
if (Y[i] != YS[i]) {
printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i);
return 1;
}
}
return 0;
}
|
b9f73ffd52db31bbf4716f99cfae1d53872181c6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/impl/AuxIndexStructures.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/BroadcastSum.cuh>
#include <faiss/gpu/impl/BroadcastSumBurst.cuh>
#include <faiss/gpu/impl/BurstPatchDistance.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/MeshSearchSpace.cuh>
#include <faiss/gpu/impl/KmUtils.cuh>
#include <faiss/gpu/impl/ComputeModes.cuh>
#include <faiss/gpu/impl/KmBurstAve.cuh>
#include <faiss/gpu/impl/KmBurstAve4d.cuh>
#include <faiss/gpu/impl/KMeans.cuh>
#include <faiss/gpu/impl/KmBurstL2Norm.cuh>
#include <faiss/gpu/impl/KmBurstTopK.cuh>
#include <faiss/gpu/utils/BurstBlockSelectKernel.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh>
#include <faiss/gpu/utils/BlockIndices2Labels.cuh>
#include <faiss/gpu/impl/KmUtils.cuh>
#include <cstdio>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <algorithm>
#include <memory>
namespace faiss {
namespace gpu {
template <typename T>
void runKmBurstDistance(
GpuResources* res,
hipStream_t stream,
Tensor<T, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2) {
fprintf(stdout,"start of KmBurstDistance.\n");
// some local vars
int psHalf = ::floor(patchsize/2);
constexpr int nstreams = 1;
// Size of proposed image
auto nftrs = burst.getSize(0);
auto nframes = burst.getSize(1);
// auto heightPad = burst.getSize(2);
// auto widthPad = burst.getSize(3);
auto height_b = burst.getSize(2);
auto width_b = burst.getSize(3);
// Size of search ranges
int two_sr = search_ranges.getSize(0);
int nframes_sr = search_ranges.getSize(1);
int nsearch_sr = search_ranges.getSize(2);
int height_sr = search_ranges.getSize(3);
int width_sr = search_ranges.getSize(4);
// Size of vals image
auto kOut = outDistances.getSize(0);
auto height = outDistances.getSize(1);
auto width = outDistances.getSize(2);
// Size of indices image
auto two_ind = outIndices.getSize(0);
auto nframes_ind = outIndices.getSize(1);
auto kOut_ind = outIndices.getSize(2);
auto height_ind = outIndices.getSize(3);
auto width_ind = outIndices.getSize(4);
// Assert same size
FAISS_ASSERT(nframes == nframes_ind);
FAISS_ASSERT(nframes == nframes_sr);
FAISS_ASSERT(nsearch == nsearch_sr);
// FAISS_ASSERT(height == (heightPad-2*psHalf));
// FAISS_ASSERT(width == (widthPad-2*psHalf));
FAISS_ASSERT(height == height_b);
FAISS_ASSERT(width == width_b);
FAISS_ASSERT(height == height_ind);
FAISS_ASSERT(width == width_ind);
FAISS_ASSERT(height == height_sr);
FAISS_ASSERT(width == width_sr);
FAISS_ASSERT(k == kOut);
FAISS_ASSERT(k == kOut_ind);
FAISS_ASSERT(2 == two_ind);
FAISS_ASSERT(2 == two_sr);
fprintf(stdout,"post asserts from KmBurstDistance.\n");
// Algorithm vars
int niters = 1;//nframes/2;
int nframes_search = 3;
int ref = nframes/2;
int nclusters = -1;
float mode = 0;
int nblocks = utils::pow(nsearch,nframes_search);
DeviceTensor<int, 2, true> search_frames(res,
makeTempAlloc(AllocType::Other, stream),{niters,nframes_search});
DeviceTensor<int, 3, true> curr_blocks(res,
makeTempAlloc(AllocType::Other, stream),{nframes,height,width});
thrust::copy(thrust::hip::par.on(stream), init_blocks.data(),
init_blocks.end(), curr_blocks.data());
thrust::fill(thrust::hip::par.on(stream), search_frames.data(),
search_frames.end(),ref);
// default: fill without ref
// TODO: allow for random frames across "niters"
for( int i = 0; i < niters; ++i){
int s = 0;
for( int t = 0; t < nframes; ++t){
if (t == ref){ continue; }
hipMemcpy(search_frames[i][s].data(),&t,
sizeof(int),hipMemcpyHostToDevice);
s++;
if (s >= nframes_search){ break; }
}
}
// init for comparison
thrust::fill(thrust::hip::par.on(stream),
outDistances.data(),
outDistances.end(),
Limits<float>::getMax());
// If we're querying against a 0 sized set, just return empty results
if (height == 0 || width == 0 || nftrs == 0) {
thrust::fill(thrust::hip::par.on(stream),
outDistances.data(),
outDistances.end(),
Limits<float>::getMax());
thrust::fill(thrust::hip::par.on(stream),
outIndices.data(),
outIndices.end(),
-1);
return;
}
// By default, aim to use up to 512 MB of memory for the processing, with
// both number of queries and number of centroids being at least 512.
int tileHeight = 0; // batchsize across height
int tileWidth = 0; // batchsize across width
int tileBlocks = 0; // batchsize across blocks
chooseImageTileSize(
height, // image height
width, // image width
nftrs, // num of features per pixel
patchsize, // patchsize
nblocks, // number of image blocks to search
sizeof(T),
res->getTempMemoryAvailableCurrentDevice(),
tileHeight,
tileWidth,
tileBlocks);
// tileBlocks = 128;
int numHeightTiles = utils::divUp(height, tileHeight);
int numWidthTiles = utils::divUp(width, tileWidth);
int numBlockTiles = utils::divUp(nblocks, tileBlocks);
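// numHeightTiles / numWidthTiles / numBlockTiles are the ceil-divided tile
// counts (e.g. divUp(1000, 384) == 3); the loops below step by tileHeight,
// tileWidth and tileBlocks directly and shrink the last, partial tile with
// ::min(...) before narrowing the tensor views to the tile.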
// printf("(tileHeight,tileWidth,tileBlocks): (%d,%d,%d)\n",
// tileHeight,tileWidth,tileBlocks);
DeviceTensor<uint8_t, 4, true> sizes(res,
makeTempAlloc(AllocType::Other, stream),
{kmeansK,tileBlocks,tileHeight,tileWidth});
DeviceTensor<float, 4, true> modes4d(res,
makeTempAlloc(AllocType::Other, stream),
{kmeansK,tileBlocks,height,width});
DeviceTensor<float, 3, true> modes3d(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks,height,width});
thrust::fill(thrust::hip::par.on(stream),
modes3d.data(),modes3d.end(),0.);
// We can have any number of vectors to query against, even less than k, in
// which case we'll return -1 for the index
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation
fprintf(stdout,"post gpu check.\n");
//
// Temporary memory space to *execute* a single batch
//
DeviceTensor<float, 3, true> distanceBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true>* distanceBufs[16] = {&distanceBuf_1,
&distanceBuf_2,
&distanceBuf_3,
&distanceBuf_4,
&distanceBuf_5,
&distanceBuf_6,
&distanceBuf_7,
&distanceBuf_8,
&distanceBuf_9,
&distanceBuf_10,
&distanceBuf_11,
&distanceBuf_12,
&distanceBuf_13,
&distanceBuf_14,
&distanceBuf_15,
&distanceBuf_16};
// std::vector<DeviceTensor<float, 3, true>> distanceBufs(nstreams,
// DeviceTensor<float, 3, true>(res,
// makeTempAlloc(AllocType::Other, stream),
// {tileHeight, tileWidth, tileBlocks}));
// DeviceTensor<float, 3, true>** distanceBufs = new DeviceTensor<float, 3, true>*[nstreams];
// std::vector<DeviceTensor<float, 3, true>> distanceBufs;
// distanceBufs.resize(nstreams);
// #pragma unroll
// for (int i = 0; i < nstreams; ++i){
// auto distBuf_i = new DeviceTensor<float, 3, true>(res,
// makeTempAlloc(AllocType::Other, stream),
// {tileHeight, tileWidth, tileBlocks});
// distanceBufs[i] = distBuf_i;
// // distanceBufs.push_back(distBuf_i);
// }
// for (int i = 0; i < nstreams; ++i){
// for (int j = 0; j < distanceBufs[i].NumDim; ++j){
// printf("[%d]: getSize(%d): %d\n",i,j,distanceBufs[i].getSize(j));
// }
// //isContiguous
// }
DeviceTensor<int, 5, true> blockBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true>* blockBufs[16] = {&blockBuf_1,&blockBuf_2,
&blockBuf_3,&blockBuf_4,
&blockBuf_5,&blockBuf_6,
&blockBuf_7,&blockBuf_8,
&blockBuf_9,&blockBuf_10,
&blockBuf_11,&blockBuf_12,
&blockBuf_13,&blockBuf_14,
&blockBuf_15,&blockBuf_16};
//
// Temporary memory space for "clustering" and "centroid" vars
//
DeviceTensor<T, 5, true> kmDistBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nframes, nframes, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 5, true>* kmDistBufs[1] = {&kmDistBuf_1};
DeviceTensor<uint8_t, 4, true> clusterBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nframes, tileBlocks, tileHeight, tileWidth});
DeviceTensor<uint8_t, 4, true>* clusterBufs[1] = {&clusterBuf_1};
DeviceTensor<T, 5, true> centroidBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, kmeansK, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 5, true>* centroidBufs[1] = {&centroidBuf_1};
//
// Temporary memory space to *ave* a single batch of images
//
DeviceTensor<T, 4, true> aveBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true>* aveBufs[16];
aveBufs[0] = &aveBuf_1;
aveBufs[1] = &aveBuf_2;
aveBufs[2] = &aveBuf_3;
aveBufs[3] = &aveBuf_4;
aveBufs[4] = &aveBuf_5;
aveBufs[5] = &aveBuf_6;
aveBufs[6] = &aveBuf_7;
aveBufs[7] = &aveBuf_8;
aveBufs[8] = &aveBuf_9;
aveBufs[9] = &aveBuf_10;
aveBufs[10] = &aveBuf_11;
aveBufs[11] = &aveBuf_12;
aveBufs[12] = &aveBuf_13;
aveBufs[13] = &aveBuf_14;
aveBufs[14] = &aveBuf_15;
aveBufs[15] = &aveBuf_16;
// Streams allow for concurrent kernel execs.
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
bool interrupt = false;
// Tile HEIGHT pixels
for (int i = 0; i < height; i += tileHeight) {
if (interrupt || InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// create indices for height tiling
int curHeightSize = ::min(tileHeight, height - i);
// create views from height tile
auto curBlocksHeightView = curr_blocks.narrow(1, i, curHeightSize);
auto outDistanceHeightView = outDistances.narrow(1, i, curHeightSize);
auto outIndexHeightView = outIndices.narrow(3, i, curHeightSize);
auto srangesHeightView = search_ranges.narrow(3, i, curHeightSize);
// Tile WIDTH pixels
for (int j = 0; j < width; j += tileWidth) {
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// create indices for height tiling
int curWidthSize = ::min(tileWidth, width - j);
// view from width tiling
auto curBlocksView = curBlocksHeightView.narrow(2, j, curWidthSize);
auto outDistanceView = outDistanceHeightView.narrow(2, j, curWidthSize);
auto outIndexView = outIndexHeightView.narrow(4, j, curWidthSize);
auto srangesView = srangesHeightView.narrow(4, j, curWidthSize);
// Iterate over a subset "niters" times
for (int iter = 0; iter < niters; iter += 1){
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
auto blocks = blockBufs[curStream]
->narrow(3, 0, curHeightSize).narrow(4, 0, curWidthSize);
fprintf(stdout,"about to create search space.\n");
create_search_space(srangesView,blocks,curBlocksView,
search_frames,iter,streams[curStream]);
// Tile the Search-Space
for (int blk = 0; blk < nblocks; blk += tileBlocks) {
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// get batch of search space
auto curBlockSize = ::min(tileBlocks, nblocks - blk);
printf("(curHeightSize,curWidthSize,curBlockSize): (%d,%d,%d)\n",
curHeightSize,curWidthSize,curBlockSize);
//
// Views of Tensors
//
auto blockView = blocks.narrow(2, blk, curBlockSize);
auto aveView = aveBufs[curStream]
->narrow(1, 0, curBlockSize)
.narrow(2, 0, curHeightSize)
.narrow(3, 0, curWidthSize);
auto distanceBufView = distanceBufs[curStream]
->narrow(0, 0, curBlockSize)
.narrow(1, 0, curHeightSize)
.narrow(2, 0, curWidthSize);
auto kmDistView = kmDistBufs[curStream]
->narrow(2, 0, curBlockSize)
.narrow(3, 0, curHeightSize)
.narrow(4, 0, curWidthSize);
auto clusterView = clusterBufs[curStream]
->narrow(1, 0, curBlockSize)
.narrow(2, 0, curHeightSize)
.narrow(3, 0, curWidthSize);
auto centroidView = centroidBufs[curStream]
->narrow(2, 0, curBlockSize)
.narrow(3, 0, curHeightSize)
.narrow(4, 0, curWidthSize);
//
// Assert Shapes
//
// FAISS_ASSERT(aveView.getSize(0) == burstView.getSize(0));
// FAISS_ASSERT(aveView.getSize(2) == burstView.getSize(2));
// FAISS_ASSERT(aveView.getSize(3) == burstView.getSize(3));
//
// Compute Clusters using Patches
//
fprintf(stdout,"starting kmeans clustering\n");
float offset = 0;
kmeans_clustering(kmDistView,burst,blockView,
centroidView,clusterView,
sizes,patchsize,
kmeansK,offset,streams[curStream]);
//
// Compute Mode
//
// compute_mode_centroids(std,patchsize,nftrs,
// sizes,modes4d,streams[curStream]);
// kmb_ave4d(modes4d,modes3d,streams[curStream]);
//
// Compute Average of Clusters
//
kmb_ave(centroidView,aveView,streams[curStream]);
// thrust::fill(thrust::hip::par.on(stream),
// aveView.data(),
// aveView.end(),
// 0.);
//
// L2Norm over Patches
//
runKmBurstL2Norm(centroidView,
aveView,
blockView,
distanceBufView,
// outDistanceView,
patchsize,nsearch,true,
streams[curStream]);
//
// Top K Selection
//
kmb_topK(distanceBufView,
blockView,outIndexView,
outDistanceView,modes3d,
streams[curStream]);
} // batching over blockTiles
} // iterating over a subset of frames
curStream = (curStream + 1) % nstreams;
} // batching over widthTiles
} // batching over heightTiles
// Have the desired ordering stream wait on the multi-stream
streamWait({stream}, streams);
if (interrupt) {
FAISS_THROW_MSG("interrupted");
}
}
void runKmBurstDistance(
GpuResources* res,
hipStream_t stream,
Tensor<float, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2){
runKmBurstDistance<float>(
res,
stream,
burst,
search_ranges,
init_blocks,
kmeansK,k,t,h,w,c,
patchsize,
nsearch,
std,
outDistances,
outIndices,
computeL2);
}
void runKmBurstDistance(
GpuResources* res,
hipStream_t stream,
Tensor<half, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2){
runKmBurstDistance<half>(
res,
stream,
burst,
search_ranges,
init_blocks,
kmeansK,k,t,h,w,c,
patchsize,
nsearch,
std,
outDistances,
outIndices,
computeL2);
}
} // end namespace gpu
} // end namespace faiss
| b9f73ffd52db31bbf4716f99cfae1d53872181c6.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/impl/AuxIndexStructures.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/BroadcastSum.cuh>
#include <faiss/gpu/impl/BroadcastSumBurst.cuh>
#include <faiss/gpu/impl/BurstPatchDistance.cuh>
#include <faiss/gpu/impl/DistanceUtils.cuh>
#include <faiss/gpu/impl/MeshSearchSpace.cuh>
#include <faiss/gpu/impl/KmUtils.cuh>
#include <faiss/gpu/impl/ComputeModes.cuh>
#include <faiss/gpu/impl/KmBurstAve.cuh>
#include <faiss/gpu/impl/KmBurstAve4d.cuh>
#include <faiss/gpu/impl/KMeans.cuh>
#include <faiss/gpu/impl/KmBurstL2Norm.cuh>
#include <faiss/gpu/impl/KmBurstTopK.cuh>
#include <faiss/gpu/utils/BurstBlockSelectKernel.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/BurstNnfSimpleBlockSelect.cuh>
#include <faiss/gpu/utils/BlockIndices2Labels.cuh>
#include <faiss/gpu/impl/KmUtils.cuh>
#include <cstdio>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/for_each.h>
#include <algorithm>
#include <memory>
namespace faiss {
namespace gpu {
template <typename T>
void runKmBurstDistance(
GpuResources* res,
cudaStream_t stream,
Tensor<T, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2) {
fprintf(stdout,"start of KmBurstDistance.\n");
// some local vars
int psHalf = std::floor(patchsize/2);
constexpr int nstreams = 1;
// Size of proposed image
auto nftrs = burst.getSize(0);
auto nframes = burst.getSize(1);
// auto heightPad = burst.getSize(2);
// auto widthPad = burst.getSize(3);
auto height_b = burst.getSize(2);
auto width_b = burst.getSize(3);
// Size of search ranges
int two_sr = search_ranges.getSize(0);
int nframes_sr = search_ranges.getSize(1);
int nsearch_sr = search_ranges.getSize(2);
int height_sr = search_ranges.getSize(3);
int width_sr = search_ranges.getSize(4);
// Size of vals image
auto kOut = outDistances.getSize(0);
auto height = outDistances.getSize(1);
auto width = outDistances.getSize(2);
// Size of indices image
auto two_ind = outIndices.getSize(0);
auto nframes_ind = outIndices.getSize(1);
auto kOut_ind = outIndices.getSize(2);
auto height_ind = outIndices.getSize(3);
auto width_ind = outIndices.getSize(4);
// Assert same size
FAISS_ASSERT(nframes == nframes_ind);
FAISS_ASSERT(nframes == nframes_sr);
FAISS_ASSERT(nsearch == nsearch_sr);
// FAISS_ASSERT(height == (heightPad-2*psHalf));
// FAISS_ASSERT(width == (widthPad-2*psHalf));
FAISS_ASSERT(height == height_b);
FAISS_ASSERT(width == width_b);
FAISS_ASSERT(height == height_ind);
FAISS_ASSERT(width == width_ind);
FAISS_ASSERT(height == height_sr);
FAISS_ASSERT(width == width_sr);
FAISS_ASSERT(k == kOut);
FAISS_ASSERT(k == kOut_ind);
FAISS_ASSERT(2 == two_ind);
FAISS_ASSERT(2 == two_sr);
fprintf(stdout,"post asserts from KmBurstDistance.\n");
// Algorithm vars
int niters = 1;//nframes/2;
int nframes_search = 3;
int ref = nframes/2;
int nclusters = -1;
float mode = 0;
int nblocks = utils::pow(nsearch,nframes_search);
DeviceTensor<int, 2, true> search_frames(res,
makeTempAlloc(AllocType::Other, stream),{niters,nframes_search});
DeviceTensor<int, 3, true> curr_blocks(res,
makeTempAlloc(AllocType::Other, stream),{nframes,height,width});
thrust::copy(thrust::cuda::par.on(stream), init_blocks.data(),
init_blocks.end(), curr_blocks.data());
thrust::fill(thrust::cuda::par.on(stream), search_frames.data(),
search_frames.end(),ref);
// default: fill without ref
// TODO: allow for random frames across "niters"
for( int i = 0; i < niters; ++i){
int s = 0;
for( int t = 0; t < nframes; ++t){
if (t == ref){ continue; }
cudaMemcpy(search_frames[i][s].data(),&t,
sizeof(int),cudaMemcpyHostToDevice);
s++;
if (s >= nframes_search){ break; }
}
}
// init for comparison
thrust::fill(thrust::cuda::par.on(stream),
outDistances.data(),
outDistances.end(),
Limits<float>::getMax());
// If we're querying against a 0 sized set, just return empty results
if (height == 0 || width == 0 || nftrs == 0) {
thrust::fill(thrust::cuda::par.on(stream),
outDistances.data(),
outDistances.end(),
Limits<float>::getMax());
thrust::fill(thrust::cuda::par.on(stream),
outIndices.data(),
outIndices.end(),
-1);
return;
}
// By default, aim to use up to 512 MB of memory for the processing, with
// both number of queries and number of centroids being at least 512.
int tileHeight = 0; // batchsize across height
int tileWidth = 0; // batchsize across width
int tileBlocks = 0; // batchsize across blocks
chooseImageTileSize(
height, // image height
width, // image width
nftrs, // num of features per pixel
patchsize, // patchsize
nblocks, // number of image blocks to search
sizeof(T),
res->getTempMemoryAvailableCurrentDevice(),
tileHeight,
tileWidth,
tileBlocks);
// tileBlocks = 128;
int numHeightTiles = utils::divUp(height, tileHeight);
int numWidthTiles = utils::divUp(width, tileWidth);
int numBlockTiles = utils::divUp(nblocks, tileBlocks);
// printf("(tileHeight,tileWidth,tileBlocks): (%d,%d,%d)\n",
// tileHeight,tileWidth,tileBlocks);
DeviceTensor<uint8_t, 4, true> sizes(res,
makeTempAlloc(AllocType::Other, stream),
{kmeansK,tileBlocks,tileHeight,tileWidth});
DeviceTensor<float, 4, true> modes4d(res,
makeTempAlloc(AllocType::Other, stream),
{kmeansK,tileBlocks,height,width});
DeviceTensor<float, 3, true> modes3d(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks,height,width});
thrust::fill(thrust::cuda::par.on(stream),
modes3d.data(),modes3d.end(),0.);
// We can have any number of vectors to query against, even less than k, in
// which case we'll return -1 for the index
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation
fprintf(stdout,"post gpu check.\n");
//
// Temporary memory space to *execute* a single batch
//
DeviceTensor<float, 3, true> distanceBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true> distanceBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{tileBlocks, tileWidth, tileHeight});
DeviceTensor<float, 3, true>* distanceBufs[16] = {&distanceBuf_1,
&distanceBuf_2,
&distanceBuf_3,
&distanceBuf_4,
&distanceBuf_5,
&distanceBuf_6,
&distanceBuf_7,
&distanceBuf_8,
&distanceBuf_9,
&distanceBuf_10,
&distanceBuf_11,
&distanceBuf_12,
&distanceBuf_13,
&distanceBuf_14,
&distanceBuf_15,
&distanceBuf_16};
// std::vector<DeviceTensor<float, 3, true>> distanceBufs(nstreams,
// DeviceTensor<float, 3, true>(res,
// makeTempAlloc(AllocType::Other, stream),
// {tileHeight, tileWidth, tileBlocks}));
// DeviceTensor<float, 3, true>** distanceBufs = new DeviceTensor<float, 3, true>*[nstreams];
// std::vector<DeviceTensor<float, 3, true>> distanceBufs;
// distanceBufs.resize(nstreams);
// #pragma unroll
// for (int i = 0; i < nstreams; ++i){
// auto distBuf_i = new DeviceTensor<float, 3, true>(res,
// makeTempAlloc(AllocType::Other, stream),
// {tileHeight, tileWidth, tileBlocks});
// distanceBufs[i] = distBuf_i;
// // distanceBufs.push_back(distBuf_i);
// }
// for (int i = 0; i < nstreams; ++i){
// for (int j = 0; j < distanceBufs[i].NumDim; ++j){
// printf("[%d]: getSize(%d): %d\n",i,j,distanceBufs[i].getSize(j));
// }
// //isContiguous
// }
DeviceTensor<int, 5, true> blockBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true> blockBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{2, nframes, nblocks, tileHeight, tileWidth});
DeviceTensor<int, 5, true>* blockBufs[16] = {&blockBuf_1,&blockBuf_2,
&blockBuf_3,&blockBuf_4,
&blockBuf_5,&blockBuf_6,
&blockBuf_7,&blockBuf_8,
&blockBuf_9,&blockBuf_10,
&blockBuf_11,&blockBuf_12,
&blockBuf_13,&blockBuf_14,
&blockBuf_15,&blockBuf_16};
//
// Temporary memory space for "clustering" and "centroid" vars
//
DeviceTensor<T, 5, true> kmDistBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nframes, nframes, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 5, true>* kmDistBufs[1] = {&kmDistBuf_1};
DeviceTensor<uint8_t, 4, true> clusterBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nframes, tileBlocks, tileHeight, tileWidth});
DeviceTensor<uint8_t, 4, true>* clusterBufs[1] = {&clusterBuf_1};
DeviceTensor<T, 5, true> centroidBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, kmeansK, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 5, true>* centroidBufs[1] = {&centroidBuf_1};
//
// Temporary memory space to *ave* a single batch of images
//
DeviceTensor<T, 4, true> aveBuf_1(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_2(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_3(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_4(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_5(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_6(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_7(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_8(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_9(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_10(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_11(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_12(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_13(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_14(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_15(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true> aveBuf_16(res,
makeTempAlloc(AllocType::Other, stream),
{nftrs, tileBlocks, tileHeight, tileWidth});
DeviceTensor<T, 4, true>* aveBufs[16];
aveBufs[0] = &aveBuf_1;
aveBufs[1] = &aveBuf_2;
aveBufs[2] = &aveBuf_3;
aveBufs[3] = &aveBuf_4;
aveBufs[4] = &aveBuf_5;
aveBufs[5] = &aveBuf_6;
aveBufs[6] = &aveBuf_7;
aveBufs[7] = &aveBuf_8;
aveBufs[8] = &aveBuf_9;
aveBufs[9] = &aveBuf_10;
aveBufs[10] = &aveBuf_11;
aveBufs[11] = &aveBuf_12;
aveBufs[12] = &aveBuf_13;
aveBufs[13] = &aveBuf_14;
aveBufs[14] = &aveBuf_15;
aveBufs[15] = &aveBuf_16;
// Streams allow for concurrent kernel execs.
auto streams = res->getAlternateStreamsCurrentDevice();
streamWait(streams, {stream});
int curStream = 0;
bool interrupt = false;
// Tile HEIGHT pixels
for (int i = 0; i < height; i += tileHeight) {
if (interrupt || InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// create indices for height tiling
int curHeightSize = std::min(tileHeight, height - i);
// create views from height tile
auto curBlocksHeightView = curr_blocks.narrow(1, i, curHeightSize);
auto outDistanceHeightView = outDistances.narrow(1, i, curHeightSize);
auto outIndexHeightView = outIndices.narrow(3, i, curHeightSize);
auto srangesHeightView = search_ranges.narrow(3, i, curHeightSize);
// Tile WIDTH pixels
for (int j = 0; j < width; j += tileWidth) {
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// create indices for height tiling
int curWidthSize = std::min(tileWidth, width - j);
// view from width tiling
auto curBlocksView = curBlocksHeightView.narrow(2, j, curWidthSize);
auto outDistanceView = outDistanceHeightView.narrow(2, j, curWidthSize);
auto outIndexView = outIndexHeightView.narrow(4, j, curWidthSize);
auto srangesView = srangesHeightView.narrow(4, j, curWidthSize);
// Iterate over a subset "niters" times
for (int iter = 0; iter < niters; iter += 1){
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
auto blocks = blockBufs[curStream]
->narrow(3, 0, curHeightSize).narrow(4, 0, curWidthSize);
fprintf(stdout,"about to create search space.\n");
create_search_space(srangesView,blocks,curBlocksView,
search_frames,iter,streams[curStream]);
// Tile the Search-Space
for (int blk = 0; blk < nblocks; blk += tileBlocks) {
if (InterruptCallback::is_interrupted()) {
interrupt = true;
break;
}
// get batch of search space
auto curBlockSize = std::min(tileBlocks, nblocks - blk);
printf("(curHeightSize,curWidthSize,curBlockSize): (%d,%d,%d)\n",
curHeightSize,curWidthSize,curBlockSize);
//
// Views of Tensors
//
auto blockView = blocks.narrow(2, blk, curBlockSize);
auto aveView = aveBufs[curStream]
->narrow(1, 0, curBlockSize)
.narrow(2, 0, curHeightSize)
.narrow(3, 0, curWidthSize);
auto distanceBufView = distanceBufs[curStream]
->narrow(0, 0, curBlockSize)
.narrow(1, 0, curHeightSize)
.narrow(2, 0, curWidthSize);
auto kmDistView = kmDistBufs[curStream]
->narrow(2, 0, curBlockSize)
.narrow(3, 0, curHeightSize)
.narrow(4, 0, curWidthSize);
auto clusterView = clusterBufs[curStream]
->narrow(1, 0, curBlockSize)
.narrow(2, 0, curHeightSize)
.narrow(3, 0, curWidthSize);
auto centroidView = centroidBufs[curStream]
->narrow(2, 0, curBlockSize)
.narrow(3, 0, curHeightSize)
.narrow(4, 0, curWidthSize);
//
// Assert Shapes
//
// FAISS_ASSERT(aveView.getSize(0) == burstView.getSize(0));
// FAISS_ASSERT(aveView.getSize(2) == burstView.getSize(2));
// FAISS_ASSERT(aveView.getSize(3) == burstView.getSize(3));
//
// Compute Clusters using Patches
//
fprintf(stdout,"starting kmeans clustering\n");
float offset = 0;
kmeans_clustering(kmDistView,burst,blockView,
centroidView,clusterView,
sizes,patchsize,
kmeansK,offset,streams[curStream]);
//
// Compute Mode
//
// compute_mode_centroids(std,patchsize,nftrs,
// sizes,modes4d,streams[curStream]);
// kmb_ave4d(modes4d,modes3d,streams[curStream]);
//
// Compute Average of Clusters
//
kmb_ave(centroidView,aveView,streams[curStream]);
// thrust::fill(thrust::cuda::par.on(stream),
// aveView.data(),
// aveView.end(),
// 0.);
//
// L2Norm over Patches
//
runKmBurstL2Norm(centroidView,
aveView,
blockView,
distanceBufView,
// outDistanceView,
patchsize,nsearch,true,
streams[curStream]);
//
// Top K Selection
//
kmb_topK(distanceBufView,
blockView,outIndexView,
outDistanceView,modes3d,
streams[curStream]);
} // batching over blockTiles
} // iterating over a subset of frames
curStream = (curStream + 1) % nstreams;
} // batching over widthTiles
} // batching over heightTiles
// Have the desired ordering stream wait on the multi-stream
streamWait({stream}, streams);
if (interrupt) {
FAISS_THROW_MSG("interrupted");
}
}
void runKmBurstDistance(
GpuResources* res,
cudaStream_t stream,
Tensor<float, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2){
runKmBurstDistance<float>(
res,
stream,
burst,
search_ranges,
init_blocks,
kmeansK,k,t,h,w,c,
patchsize,
nsearch,
std,
outDistances,
outIndices,
computeL2);
}
void runKmBurstDistance(
GpuResources* res,
cudaStream_t stream,
Tensor<half, 4, true>& burst,
Tensor<int, 5, true>& search_ranges,
Tensor<int, 3, true>& init_blocks,
int kmeansK,
int k,
int t,
int h,
int w,
int c,
int patchsize,
int nsearch,
float std,
Tensor<float, 3, true>& outDistances,
Tensor<int, 5, true>& outIndices,
bool computeL2){
runKmBurstDistance<half>(
res,
stream,
burst,
search_ranges,
init_blocks,
kmeansK,k,t,h,w,c,
patchsize,
nsearch,
std,
outDistances,
outIndices,
computeL2);
}
} // end namespace gpu
} // end namespace faiss
|
9688387d322c2a7998882465bb296cbab67565c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* CUDA Project - HPC 2019 I
*
* This program demonstrates a simple simulation box which attempts to model
* traffic flow on the GPU and on the host.
* simulateFlowOnGPU advances the road state on the GPU with one thread per
* road cell, using as many thread blocks as the road length requires.
* simulateFlowOnHost performs the same update sequentially on the host.
* Host timers are used to measure and compare GPU and CPU performance.
*/
void checkResult(bool *hostRef, bool *gpuRef, const int N)
{
bool match = 1;
for (int i = 0; i < N; i++)
{
if (hostRef[i] ^ gpuRef[i])
{
match = 0;
printf("Arrays do not match!\n");
printf("host %d gpu %d at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void show(bool *ip, int size)
{
// show a bool array
for (int i = 0; i < size; i++)
{
printf("%d ", ip[i]);
}
printf("\n");
}
void initialData(bool *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (bool)(rand() & 1);
}
return;
}
void simulateFlowOnHost(bool *road_prev, bool *road_curr, const int N)
{
for (int idx = 0; idx < N; idx++)
{
if (road_prev[idx] == 1 && road_prev[(idx + 1) % N] == 0)
{
road_curr[idx] = 0;
road_curr[(idx + 1) % N] = 1;
}
}
}
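// Example of one update step of the rule above (a Rule-184-style traffic model:
// a 1 is a car that advances into an empty cell to its right, wrapping around
// at the end of the road):
// road_prev: 1 1 0 1 0 0
// road_curr: 1 0 1 0 1 0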
__global__ void simulateFlowOnGPU(bool *road_prev, bool *road_curr, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N && road_prev[i] == 1 && road_prev[(i + 1) % N] == 0)
{
road_curr[i] = 0;
road_curr[(i + 1) % N] = 1;
}
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem, nTimes;
scanf("%d %d", &nElem, &nTimes);
printf("Vector size %d\n", nElem);
printf("Number of times %d\n", nTimes);
// malloc host memory
size_t nBytes = nElem * sizeof(bool);
bool *h_road_init, *h_road_curr, *h_road_prev, *hostRef, *gpuRef;
h_road_init = (bool *)malloc(nBytes);
h_road_prev = (bool *)malloc(nBytes);
h_road_curr = (bool *)malloc(nBytes);
hostRef = (bool *)malloc(nBytes);
gpuRef = (bool *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = seconds();
initialData(h_road_init, nElem);
iElaps = seconds() - iStart;
printf("initialData Time elapsed %f sec\n", iElaps);
memcpy(h_road_curr, h_road_init, nBytes);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = seconds();
for (int i = 0; i < nTimes; i++)
{
memcpy(h_road_prev, h_road_curr, nBytes);
simulateFlowOnHost(h_road_prev, h_road_curr, nElem);
}
iElaps = seconds() - iStart;
printf("simulateFlowOnHost Time elapsed %f sec\n", iElaps);
memcpy(hostRef, h_road_curr, nBytes);
// malloc device global memory
bool *d_road_prev, *d_road_curr;
CHECK(hipMalloc((bool**)&d_road_prev, nBytes));
CHECK(hipMalloc((bool**)&d_road_curr, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_road_curr, h_road_init, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 block (iLen);
dim3 grid ((nElem + block.x - 1) / block.x);
iStart = seconds();
for (int i = 0; i < nTimes; i++)
{
CHECK(hipMemcpy(d_road_prev, d_road_curr, nBytes, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( simulateFlowOnGPU), dim3(grid), dim3(block), 0, 0, d_road_prev, d_road_curr, nElem);
CHECK(hipDeviceSynchronize());
}
iElaps = seconds() - iStart;
printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x,
block.x, iElaps);
// check kernel error
CHECK(hipGetLastError()) ;
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_road_curr, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(hipFree(d_road_prev));
CHECK(hipFree(d_road_curr));
// free host memory
free(h_road_init);
free(h_road_prev);
free(h_road_curr);
free(hostRef);
free(gpuRef);
return(0);
} | 9688387d322c2a7998882465bb296cbab67565c5.cu | #include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* CUDA Project - HPC 2019 I
*
* This program demonstrates a simple simulation box which attempts to model
* traffic flow on the GPU and on the host.
* simulateFlowOnGPU advances the road state with one CUDA thread per road
* cell, using as many thread blocks as the road length requires.
* simulateFlowOnHost performs the same update sequentially on the host.
* Host timers are used to measure and compare GPU and CPU performance.
*/
void checkResult(bool *hostRef, bool *gpuRef, const int N)
{
bool match = 1;
for (int i = 0; i < N; i++)
{
if (hostRef[i] ^ gpuRef[i])
{
match = 0;
printf("Arrays do not match!\n");
printf("host %d gpu %d at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void show(bool *ip, int size)
{
// show a bool array
for (int i = 0; i < size; i++)
{
printf("%d ", ip[i]);
}
printf("\n");
}
void initialData(bool *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (bool)(rand() & 1);
}
return;
}
void simulateFlowOnHost(bool *road_prev, bool *road_curr, const int N)
{
for (int idx = 0; idx < N; idx++)
{
if (road_prev[idx] == 1 && road_prev[(idx + 1) % N] == 0)
{
road_curr[idx] = 0;
road_curr[(idx + 1) % N] = 1;
}
}
}
__global__ void simulateFlowOnGPU(bool *road_prev, bool *road_curr, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N && road_prev[i] == 1 && road_prev[(i + 1) % N] == 0)
{
road_curr[i] = 0;
road_curr[(i + 1) % N] = 1;
}
}
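// Note on write conflicts: no two threads ever write the same element of
// road_curr. Thread i writes cell i only when road_prev[i] == 1, while thread
// i-1 writes cell i only when road_prev[i] == 0, so the conditions are mutually
// exclusive; all reads go to road_prev, which the kernel never modifies.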
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem, nTimes;
scanf("%d %d", &nElem, &nTimes);
printf("Vector size %d\n", nElem);
printf("Number of times %d\n", nTimes);
// malloc host memory
size_t nBytes = nElem * sizeof(bool);
bool *h_road_init, *h_road_curr, *h_road_prev, *hostRef, *gpuRef;
h_road_init = (bool *)malloc(nBytes);
h_road_prev = (bool *)malloc(nBytes);
h_road_curr = (bool *)malloc(nBytes);
hostRef = (bool *)malloc(nBytes);
gpuRef = (bool *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = seconds();
initialData(h_road_init, nElem);
iElaps = seconds() - iStart;
printf("initialData Time elapsed %f sec\n", iElaps);
memcpy(h_road_curr, h_road_init, nBytes);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = seconds();
for (int i = 0; i < nTimes; i++)
{
memcpy(h_road_prev, h_road_curr, nBytes);
simulateFlowOnHost(h_road_prev, h_road_curr, nElem);
}
iElaps = seconds() - iStart;
printf("simulateFlowOnHost Time elapsed %f sec\n", iElaps);
memcpy(hostRef, h_road_curr, nBytes);
// malloc device global memory
bool *d_road_prev, *d_road_curr;
CHECK(cudaMalloc((bool**)&d_road_prev, nBytes));
CHECK(cudaMalloc((bool**)&d_road_curr, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_road_curr, h_road_init, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 block (iLen);
dim3 grid ((nElem + block.x - 1) / block.x);
iStart = seconds();
for (int i = 0; i < nTimes; i++)
{
CHECK(cudaMemcpy(d_road_prev, d_road_curr, nBytes, cudaMemcpyDeviceToDevice));
simulateFlowOnGPU<<<grid, block>>>(d_road_prev, d_road_curr, nElem);
CHECK(cudaDeviceSynchronize());
}
iElaps = seconds() - iStart;
printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x,
block.x, iElaps);
// check kernel error
CHECK(cudaGetLastError()) ;
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_road_curr, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(cudaFree(d_road_prev));
CHECK(cudaFree(d_road_curr));
// free host memory
free(h_road_init);
free(h_road_prev);
free(h_road_curr);
free(hostRef);
free(gpuRef);
return(0);
} |
85f8cf7984ecb1b4b839351267cca3c234971887.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test showing huge access speed gap
* between aligned and misaligned structures
* (those having/missing __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
typedef struct{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct{
unsigned int l, a;
} LA32_misaligned;
typedef struct{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(4){
unsigned char r, g, b, a;
} RGBA8;
typedef unsigned int I32;
typedef struct __align__(8){
unsigned int l, a;
} LA32;
typedef struct __align__(16){
unsigned int r, g, b;
} RGB32;
typedef struct __align__(16){
unsigned int r, g, b, a;
} RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(16){
RGBA32 c1, c2;
} RGBA32_2;
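// Purely illustrative sketch of the "structure of arrays" layout recommended
// above (hypothetical type, not used by this test): instead of one array of
// 32-byte RGBA32_2 elements, each field gets its own contiguous array, so
// every global memory access is a naturally aligned 4-byte load/store.
typedef struct{
unsigned int *r1, *g1, *b1, *a1; // fields of c1, one array per field
unsigned int *r2, *g2, *b2, *a2; // fields of c2
} RGBA32_2_SoA;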
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b){
return a - a % b;
}
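// For example: iDivUp(10, 3) == 4, iDivDown(10, 3) == 3,
// iAlignUp(13, 8) == 16, iAlignDown(13, 8) == 8.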
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on per-element basis,
// so it's not per-byte in case of padded structures.
////////////////////////////////////////////////////////////////////////////////
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for(int pos = tid; pos < numElements; pos += numThreads)
d_odata[pos] = d_idata[pos];
}
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containig actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
){
for(int pos = 0; pos < numElements; pos++){
TData src = h_idata[pos];
TData dst = h_odata[pos];
for(int i = 0; i < packedElementSize; i++)
if( ((char *)&src)[i] != ((char *)&dst)[i] ) return 0;
}
return 1;
}
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
#ifdef __DEVICE_EMULATION__
const int MEM_SIZE = 4000000;
const int NUM_ITERATIONS = 1;
#else
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
#endif
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
unsigned int hTimer;
template<class TData> int runTest(int packedElementSize, int memory_size){
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
cutilSafeCall( hipMemset(d_odata, 0, memory_size) );
//Run test
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(int i = 0; i < NUM_ITERATIONS; i++){
hipLaunchKernelGGL(( testKernel<TData>), dim3(64), dim3(256), 0, 0,
(TData *)d_odata,
(TData *)d_idata,
numElements
);
cutilCheckMsg("testKernel() execution failed\n");
}
cutilSafeCall( hipDeviceSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
double gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
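// gpuTime is in milliseconds: multiplying by 0.001 converts it to seconds and
// dividing by 1073741824 (2^30) reports the copied bytes as GB per second.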
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
cutilSafeCall( hipMemcpy(h_odataGPU, d_odata, memory_size, hipMemcpyDeviceToHost) );
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n" );
return !flag;
}
int main(int argc, char **argv){
int i, nTotalFailures = 0;
int devID;
hipDeviceProp_t deviceProps;
printf("[alignedTypes]\n");
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
}
else {
devID = cutGetMaxGflopsDeviceId();
hipSetDevice( devID );
}
// get number of SMs on this GPU
cutilSafeCall(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
float scale_factor = max((24.0f / (float)deviceProps.multiProcessorCount), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("SM scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
cutilSafeCall(hipMalloc((void **)&d_idata, MemorySize));
cutilSafeCall(hipMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
for(i = 0; i < MemorySize; i++)
h_idataCPU[i] = (i & 0xFF) + 1;
printf("Uploading input data to GPU memory...\n");
cutilSafeCall(hipMemcpy(d_idata, h_idataCPU, MemorySize, hipMemcpyHostToDevice) );
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf((nTotalFailures==0) ? "PASSED\n" : "FAILED\n" );
printf("Shutting down...\n");
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
cutilExit(argc, argv);
hipDeviceReset();
}
| 85f8cf7984ecb1b4b839351267cca3c234971887.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
 * This is a simple test showing the huge access speed gap
 * between aligned and misaligned structures
 * (those having/missing the __align__ keyword).
* It measures per-element copy throughput for
* aligned and misaligned structures on
* big chunks of data.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil_inline.h>
////////////////////////////////////////////////////////////////////////////////
// Misaligned types
////////////////////////////////////////////////////////////////////////////////
typedef unsigned char uint8;
typedef unsigned short int uint16;
typedef struct{
unsigned char r, g, b, a;
} RGBA8_misaligned;
typedef struct{
unsigned int l, a;
} LA32_misaligned;
typedef struct{
unsigned int r, g, b;
} RGB32_misaligned;
typedef struct{
unsigned int r, g, b, a;
} RGBA32_misaligned;
////////////////////////////////////////////////////////////////////////////////
// Aligned types
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(4){
unsigned char r, g, b, a;
} RGBA8;
typedef unsigned int I32;
typedef struct __align__(8){
unsigned int l, a;
} LA32;
typedef struct __align__(16){
unsigned int r, g, b;
} RGB32;
typedef struct __align__(16){
unsigned int r, g, b, a;
} RGBA32;
////////////////////////////////////////////////////////////////////////////////
// Because G80 class hardware natively supports global memory operations
// only with data elements of 4, 8 and 16 bytes, if structure size
// exceeds 16 bytes, it can't be efficiently read or written,
// since more than one global memory non-coalescable load/store instructions
// will be generated, even if __align__ option is supplied.
// "Structure of arrays" storage strategy offers best performance
// in general case. See section 5.1.2 of the Programming Guide.
////////////////////////////////////////////////////////////////////////////////
typedef struct __align__(16){
RGBA32 c1, c2;
} RGBA32_2;
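////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch only (not part of this sample): the "structure of arrays"
// layout mentioned above would store each field in its own contiguous array, e.g.
//   typedef struct { unsigned int *r, *g, *b, *a; } RGBA32_SoA;
// so each thread issues separate, naturally coalesced 4-byte accesses per field
// instead of one oversized per-struct access. Names here are hypothetical.
////////////////////////////////////////////////////////////////////////////////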
////////////////////////////////////////////////////////////////////////////////
// Common host and device functions
////////////////////////////////////////////////////////////////////////////////
//Round a / b to nearest higher integer value
int iDivUp(int a, int b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Round a / b to nearest lower integer value
int iDivDown(int a, int b){
return a / b;
}
//Align a to nearest higher multiple of b
int iAlignUp(int a, int b){
return (a % b != 0) ? (a - a % b + b) : a;
}
//Align a to nearest lower multiple of b
int iAlignDown(int a, int b){
return a - a % b;
}
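// Worked examples for the helpers above (illustrative values, not used by the test):
// iDivUp(10, 4) == 3, iDivDown(10, 4) == 2,
// iAlignUp(10, 4) == 12, iAlignDown(10, 4) == 8.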
////////////////////////////////////////////////////////////////////////////////
// Simple CUDA kernel.
// Copy is carried out on a per-element basis,
// so it's not per-byte in the case of padded structures.
////////////////////////////////////////////////////////////////////////////////
template<class TData> __global__ void testKernel(
TData *d_odata,
TData *d_idata,
int numElements
){
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
for(int pos = tid; pos < numElements; pos += numThreads)
d_odata[pos] = d_idata[pos];
}
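// Note: testKernel is a grid-stride loop. With the fixed <<<64, 256>>> launch used in
// runTest() below, 64 * 256 = 16384 threads cooperatively cover all numElements,
// so the same configuration works for every element type tested here.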
////////////////////////////////////////////////////////////////////////////////
// Validation routine for simple copy kernel.
// We must know "packed" size of TData (number_of_fields * sizeof(simple_type))
// and compare only these "packed" parts of the structure,
// containig actual user data. The compiler behavior with padding bytes
// is undefined, since padding is merely a placeholder
// and doesn't contain any user data.
////////////////////////////////////////////////////////////////////////////////
template<class TData> int testCPU(
TData *h_odata,
TData *h_idata,
int numElements,
int packedElementSize
){
for(int pos = 0; pos < numElements; pos++){
TData src = h_idata[pos];
TData dst = h_odata[pos];
for(int i = 0; i < packedElementSize; i++)
if( ((char *)&src)[i] != ((char *)&dst)[i] ) return 0;
}
return 1;
}
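// Example of the packed/padded distinction: RGB32 holds three unsigned ints
// (12 bytes of user data) but is padded to 16 bytes by __align__(16), so
// runTest<RGB32>() below is called with packedElementSize = 12 while
// sizeof(RGB32) == 16 is what determines numElements and the kernel accesses.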
////////////////////////////////////////////////////////////////////////////////
// Data configuration
////////////////////////////////////////////////////////////////////////////////
//Memory chunk size in bytes. Reused for test
#ifdef __DEVICE_EMULATION__
const int MEM_SIZE = 4000000;
const int NUM_ITERATIONS = 1;
#else
const int MEM_SIZE = 50000000;
const int NUM_ITERATIONS = 32;
#endif
//GPU input and output data
unsigned char *d_idata, *d_odata;
//CPU input data and instance of GPU output data
unsigned char *h_idataCPU, *h_odataGPU;
unsigned int hTimer;
template<class TData> int runTest(int packedElementSize, int memory_size){
const int totalMemSizeAligned = iAlignDown(memory_size, sizeof(TData));
const int numElements = iDivDown(memory_size, sizeof(TData));
//Clean output buffer before current test
cutilSafeCall( cudaMemset(d_odata, 0, memory_size) );
//Run test
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutResetTimer(hTimer) );
cutilCheckError( cutStartTimer(hTimer) );
for(int i = 0; i < NUM_ITERATIONS; i++){
testKernel<TData><<<64, 256>>>(
(TData *)d_odata,
(TData *)d_idata,
numElements
);
cutilCheckMsg("testKernel() execution failed\n");
}
cutilSafeCall( cudaThreadSynchronize() );
cutilCheckError( cutStopTimer(hTimer) );
double gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS;
printf(
"Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime,
(double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0)
);
//Read back GPU results and run validation
cutilSafeCall( cudaMemcpy(h_odataGPU, d_odata, memory_size, cudaMemcpyDeviceToHost) );
int flag = testCPU(
(TData *)h_odataGPU,
(TData *)h_idataCPU,
numElements,
packedElementSize
);
printf(flag ? "\tTEST OK\n" : "\tTEST FAILURE\n" );
return !flag;
}
int main(int argc, char **argv){
int i, nTotalFailures = 0;
int devID;
cudaDeviceProp deviceProps;
printf("[alignedTypes]\n");
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
}
else {
devID = cutGetMaxGflopsDeviceId();
cudaSetDevice( devID );
}
// get number of SMs on this GPU
cutilSafeCall(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s] has %d Multi-Processors\n", deviceProps.name, deviceProps.multiProcessorCount);
float scale_factor = max((24.0f / (float)deviceProps.multiProcessorCount), 1.0f);
int MemorySize = (int)(MEM_SIZE/scale_factor) & 0xffffff00; // force multiple of 256 bytes
printf("SM scaling value = %4.2f\n", scale_factor);
printf("> Memory Size = %d\n", MemorySize);
cutilCheckError( cutCreateTimer(&hTimer) );
printf("Allocating memory...\n");
h_idataCPU = (unsigned char *)malloc(MemorySize);
h_odataGPU = (unsigned char *)malloc(MemorySize);
cutilSafeCall(cudaMalloc((void **)&d_idata, MemorySize));
cutilSafeCall(cudaMalloc((void **)&d_odata, MemorySize));
printf("Generating host input data array...\n");
for(i = 0; i < MemorySize; i++)
h_idataCPU[i] = (i & 0xFF) + 1;
printf("Uploading input data to GPU memory...\n");
cutilSafeCall(cudaMemcpy(d_idata, h_idataCPU, MemorySize, cudaMemcpyHostToDevice) );
printf("Testing misaligned types...\n");
printf("uint8...\n");
nTotalFailures += runTest<uint8>(1, MemorySize);
printf("uint16...\n");
nTotalFailures += runTest<uint16>(2, MemorySize);
printf("RGBA8_misaligned...\n");
nTotalFailures += runTest<RGBA8_misaligned>(4, MemorySize);
printf("LA32_misaligned...\n");
nTotalFailures += runTest<LA32_misaligned>(8, MemorySize);
printf("RGB32_misaligned...\n");
nTotalFailures += runTest<RGB32_misaligned>(12, MemorySize);
printf("RGBA32_misaligned...\n");
nTotalFailures += runTest<RGBA32_misaligned>(16, MemorySize);
printf("Testing aligned types...\n");
printf("RGBA8...\n");
nTotalFailures += runTest<RGBA8>(4, MemorySize);
printf("I32...\n");
nTotalFailures += runTest<I32>(4, MemorySize);
printf("LA32...\n");
nTotalFailures += runTest<LA32>(8, MemorySize);
printf("RGB32...\n");
nTotalFailures += runTest<RGB32>(12, MemorySize);
printf("RGBA32...\n");
nTotalFailures += runTest<RGBA32>(16, MemorySize);
printf("RGBA32_2...\n");
nTotalFailures += runTest<RGBA32_2>(32, MemorySize);
printf("\n[alignedTypes] -> Test Results: %d Failures\n", nTotalFailures);
printf((nTotalFailures==0) ? "PASSED\n" : "FAILED\n" );
printf("Shutting down...\n");
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
free(h_odataGPU);
free(h_idataCPU);
cutilCheckError( cutDeleteTimer(hTimer) );
cutilExit(argc, argv);
cudaThreadExit();
}
|
5ba5e891e261ddd23b3d071b132dac65897de2e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
// Exercise 9
// Written by: Jiho Yang (M.Sc student in Computational Science & Engineering)
// Matriculation number: 03675799
#include "helper.h"
#include <iostream>
#include <string>
#include <unistd.h>
using namespace std;
const float pi = 3.141592653589793238462f;
// uncomment to use the camera
//#define CAMERA
// Compute gradient
__global__ void compute_gradient(float *d_gradx, float *d_grady, float *d_imgIn, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get high indices
size_t x_high = x + 1 + (size_t)w*y + (size_t)h*w*z;
size_t y_high = x + (size_t)w*(y+1) + (size_t)h*w*z;
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
// Ensure no threads are out of problem domain
if (x < w && y < h){
// Compute gradient
if (x < w-1){
d_gradx[idx] = d_imgIn[x_high] - d_imgIn[idx];
} else
d_gradx[idx] = 0;
if (y < h-1){
d_grady[idx] = d_imgIn[y_high] - d_imgIn[idx];
} else
d_grady[idx] = 0;
}
}
// Compute L2 norm
__device__ void compute_norm(float *d_norm, float *d_vec1, float *d_vec2, int w, int h, int nc){
// Temporary variable for norm
float sqrd1 = 0;
float sqrd2 = 0;
float val1, val2;
// Get coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
// Get index
int idx = x + (size_t)w*y;
// Compute norm
if (x < w && y < h){
for (size_t c = 0; c < nc; c++){
// Get index
size_t idx_3d = idx + (size_t)w*h*c;
// Compute L2 norm
val1 = d_vec1[idx_3d];
val2 = d_vec2[idx_3d];
sqrd1 += val1*val1;
sqrd2 += val2*val2;
}
        d_norm[idx] = sqrtf(sqrd1 + sqrd2);
}
}
// Apply nonlinear diffusion
__device__ void get_diffusion(float *d_gradx, float *d_grady, float *d_norm, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y;
size_t idx_3d = x + (size_t)w*y + (size_t)w*h*z;
if (x < w && y < h){
// Diffusion factor
float g;
// Epsilon
float eps = 0.03;
// Constant diffusion
//g = 1.0f;
// Huber diffusion
//g = 1.0f/ max(eps, d_norm[idx]);
        // Exponential (Perona-Malik-type) diffusivity
g = (exp(-d_norm[idx]*d_norm[idx]/eps))/eps;
// Apply diffusion
d_gradx[idx_3d] *= g;
d_grady[idx_3d] *= g;
}
}
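// For reference, the diffusivity choices listed above, written as functions of
// s = ||grad u|| (the norm computed by compute_norm):
//   constant:     g(s) = 1
//   Huber:        g(s) = 1 / max(eps, s)
//   exponential:  g(s) = exp(-s^2 / eps) / eps   (the variant left enabled here)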
// Apply diffusion
__global__ void apply_diffusion(float *d_imgIn, float *d_gradx, float *d_grady, float *d_norm, int w, int h, int nc){
// Compute L2 norm
compute_norm(d_norm, d_gradx, d_grady, w, h, nc);
// Get diffusion
get_diffusion(d_gradx, d_grady, d_norm, w, h, nc);
}
// Update image
__global__ void update_image(float *d_imgIn, float *d_div, float tau, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
if (x < w && y < h){
// Update image
d_imgIn[idx] += tau * d_div[idx];
}
}
// Compute divergence
__global__ void compute_divergence(float *d_div, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get low indices
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
size_t x_low = x-1 + (size_t)w*y + (size_t)h*w*z;
size_t y_low = x + (size_t)w*(y-1) + (size_t)h*w*z;
// Temporary values
float v_x, v_y;
// Ensure no threads are out of problem domain
if (x < w && y < h){
// Compute divergence
if (x > 1){
v_x = d_gradx[idx] - d_gradx[x_low];
} else
v_x = 0;
if (y > 1){
v_y = d_grady[idx] - d_grady[y_low];
} else
v_y = 0;
// Sum gradients
d_div[idx] = v_x + v_y;
}
}
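// Taken together, one iteration of the loop in main() implements the explicit
// diffusion step u_new = u_old + tau * div( g(||grad u||) * grad u ):
// compute_gradient -> apply_diffusion (scales the gradient by g) ->
// compute_divergence -> update_image.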
// Compute eigenvalue of a 2 by 2 matrix
__device__ void compute_eigenvalue(float *d_eigen_value, float d_t1_val, float d_t2_val, float d_t3_val){
// Define matrix
float A[4] = {d_t1_val, d_t2_val, d_t2_val, d_t3_val};
// Define elements
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
// Trace and determinant
float T = a + d;
float D = a*d - b*c;
// Compute eigenvalue
d_eigen_value[0] = T/2 + sqrtf(T*T/4-D);
d_eigen_value[1] = T/2 - sqrtf(T*T/4-D);
// Sort eigenvalue array
if (d_eigen_value[0] > d_eigen_value[1]){
float swap = d_eigen_value[0];
d_eigen_value[0] = d_eigen_value[1];
d_eigen_value[1] = swap;
}
}
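// Closed form used above: for A = [[a, b], [c, d]], with trace T = a + d and
// determinant D = a*d - b*c, the eigenvalues are T/2 +/- sqrt(T*T/4 - D); the final
// swap returns them in ascending order.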
// Convolution on global memory
__global__ void convolution_global(float *d_imgIn, float *d_imgOut, float *d_kernel, int w, int h, int nc, int w_kernel, int h_kernel){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
//int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y;
// Initialise d_imgOut
// Set origin
int mid = (w_kernel-1)/2;
// Convolution - Note x_kernel is the global x coordinate of kernel in the problem domain
for (size_t c = 0; c < nc; c++){
size_t idx_3d = idx + (size_t)w*h*c;
d_imgOut[idx_3d] = 0.0f;
if (x < w && y < h){
for (size_t j = 0; j < h_kernel; j++){
for (size_t i = 0; i < w_kernel; i++){
// Boundary condition
int x_kernel_global = x - mid + i;
int y_kernel_global = y - mid + j;
// clamping
if (x_kernel_global < 0){
x_kernel_global = 0;
}
if (x_kernel_global > w-1){
x_kernel_global = w - 1;
}
if (y_kernel_global < 0){
y_kernel_global = 0;
}
if (y_kernel_global > h - 1){
y_kernel_global = h - 1;
}
// Get indices
int idx_kernel_local = i + w_kernel*j;
int idx_kernel_global = x_kernel_global + w*y_kernel_global + w*h*c;
// Multiply and sum
d_imgOut[idx_3d] += d_kernel[idx_kernel_local] * d_imgIn[idx_kernel_global];
}
}
}
}
}
// Set up kernel
void get_kernel(float *kernel, int w_kernel, int h_kernel, const float pi, float sigma){
//Set up parameters
int origin = w_kernel/2;
float total = 0.0f;
// Define 2D Gaussian kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int a = x_kernel - origin;
int b = y_kernel - origin;
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] = (1.0f / (2.0f*pi*sigma*sigma))*exp(-1*((a*a+b*b) / (2*sigma*sigma)));
total += kernel[idx];
}
}
// Normalise kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] /= total;
}
}
}
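// The weights above follow the 2D Gaussian
//   G(x, y) = exp(-(x^2 + y^2) / (2*sigma^2)) / (2*pi*sigma^2),
// evaluated on an integer grid centred at the kernel origin; the second pass rescales
// the discrete weights so they sum exactly to 1.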
// main
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Diffusion
float tau = 0.0025f;
int N = 600;
// Convolution kernel
float sigma = sqrtf(2*tau*N);
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
// Define kernel dimensions
int r = ceil(3*sigma);
int w_kernel = r * 2 + 1; //windowing
int h_kernel = w_kernel; //Square kernel
// Kernel information
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
cv:: Mat mgradx(h, w, mIn.type());
cv:: Mat mgrady(h, w, mIn.type());
cv:: Mat mOut_orig(h, w, mIn.type());
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// Get array memory
int nbytes = w * h * nc * sizeof(float);
int nbytes_kernel = w_kernel * h_kernel * sizeof(float);
// allocate raw input image array
float *imgIn = new float[(size_t)nbytes];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
float *imgOut_orig = new float[(size_t)w*h*mOut_orig.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// ###
// ###
// ### TODO: Main computation
// ###
// ###
// Kernel memory allocation
float *kernel = new float[nbytes_kernel];
// Create kernel
get_kernel(kernel, w_kernel, h_kernel, pi, sigma);
// Processor type
string processor;
float *gradx = new float[nbytes];
float *grady = new float[nbytes];
////////////////////////////////////////////////////////////////////// Block setting ///////////////////////////////////////////////////////////////////////
dim3 block = dim3(128, 1, 1);
    dim3 grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, (nc + block.z - 1) / block.z);
Timer timer; timer.start();
// Arrays
float *d_kernel;
float *d_imgIn;
float *d_imgOut;
float *d_imgIn_orig;
float *d_imgOut_orig;
float *d_gradx;
float *d_grady;
float *d_norm;
float *d_div;
float *d_eigen_value;
// CUDA malloc
hipMalloc(&d_kernel, nbytes_kernel); CUDA_CHECK;
hipMalloc(&d_imgIn, nbytes); CUDA_CHECK;
hipMalloc(&d_imgIn_orig, nbytes); CUDA_CHECK;
hipMalloc(&d_imgOut, nbytes); CUDA_CHECK;
hipMalloc(&d_imgOut_orig, nbytes); CUDA_CHECK;
hipMalloc(&d_gradx, nbytes); CUDA_CHECK;
hipMalloc(&d_grady, nbytes); CUDA_CHECK;
hipMalloc(&d_div, nbytes); CUDA_CHECK;
hipMalloc(&d_norm, w*h*sizeof(float)); CUDA_CHECK;
hipMalloc(&d_eigen_value, 2*sizeof(float)); CUDA_CHECK;
// CUDA copy
hipMemcpy(d_kernel, kernel, nbytes_kernel, hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_imgIn, imgIn, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
hipMemcpy(d_imgIn_orig, imgIn, nbytes, hipMemcpyHostToDevice); CUDA_CHECK;
// Update image
float time = 0;
for (size_t i = 0; i < N; i++){
hipLaunchKernelGGL(( compute_gradient) , dim3(grid), dim3(block) , 0, 0, d_gradx, d_grady, d_imgIn, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( apply_diffusion) , dim3(grid), dim3(block) , 0, 0, d_imgIn, d_gradx, d_grady, d_norm, w, h, nc); CUDA_CHECK;
hipLaunchKernelGGL(( compute_divergence) , dim3(grid), dim3(block) , 0, 0, d_div, d_gradx, d_grady, w, h, nc); CUDA_CHECK;
time = time + tau;
hipLaunchKernelGGL(( update_image) , dim3(grid), dim3(block) , 0, 0, d_imgIn, d_div, tau, w, h, nc); CUDA_CHECK;
}
// Convolution
hipLaunchKernelGGL(( convolution_global) , dim3(grid), dim3(block) , 0, 0, d_imgIn_orig, d_imgOut, d_kernel, w, h, nc, w_kernel, h_kernel); CUDA_CHECK;
// Copy the results to host
hipMemcpy(imgOut, d_imgIn, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(imgOut_orig, d_imgOut, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(gradx, d_gradx, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
hipMemcpy(grady, d_grady, nbytes, hipMemcpyDeviceToHost); CUDA_CHECK;
// Free memory
hipFree(d_imgIn); CUDA_CHECK;
hipFree(d_imgIn_orig); CUDA_CHECK;
hipFree(d_imgOut); CUDA_CHECK;
hipFree(d_imgOut_orig); CUDA_CHECK;
hipFree(d_kernel); CUDA_CHECK;
hipFree(d_div); CUDA_CHECK;
hipFree(d_gradx); CUDA_CHECK;
hipFree(d_grady); CUDA_CHECK;
hipFree(d_norm); CUDA_CHECK;
hipFree(d_eigen_value); CUDA_CHECK;
// Type of processor
processor = "GPU - global memory";
cout << processor << endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
timer.end(); float t = timer.get();
cout << "time: " << t*1000 << " ms" << endl;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut_orig, imgOut_orig);
showImage("Gaussian convolution", mOut_orig, 100+w+40, 300);
convert_layered_to_mat(mOut, imgOut);
showImage("Diffusion", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
convert_layered_to_mat(mgradx, gradx);
convert_layered_to_mat(mgrady, grady);
// showImage("grad_x", mgradx, 100+w+50, 150);
// showImage("grad_y", mgrady, 100+w+60, 150);
/*
showImage("m1", 10.f*mM1, 50, 200);
showImage("m2", 10.f*mM2, 50 + w, 200);
showImage("m3", 10.f*mM3, 50 + 2 * w, 200);
showImage("t1", 10.f*mT1, 50, 250);
showImage("t2", 10.f*mT2, 50 + w, 250);
showImage("t3", 10.f*mT3, 50 + 2 * w, 250);
*/
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
#ifdef CAMERA
delete[] imgIn;
delete[] imgOut;
#else
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
delete[] gradx;
delete[] grady;
#endif
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| 5ba5e891e261ddd23b3d071b132dac65897de2e6.cu | // ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
// Exercise 9
// Written by: Jiho Yang (M.Sc student in Computational Science & Engineering)
// Matriculation number: 03675799
#include "helper.h"
#include <iostream>
#include <string>
#include <unistd.h>
using namespace std;
const float pi = 3.141592653589793238462f;
// uncomment to use the camera
//#define CAMERA
// Compute gradient
__global__ void compute_gradient(float *d_gradx, float *d_grady, float *d_imgIn, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get high indices
size_t x_high = x + 1 + (size_t)w*y + (size_t)h*w*z;
size_t y_high = x + (size_t)w*(y+1) + (size_t)h*w*z;
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
// Ensure no threads are out of problem domain
if (x < w && y < h){
// Compute gradient
if (x < w-1){
d_gradx[idx] = d_imgIn[x_high] - d_imgIn[idx];
} else
d_gradx[idx] = 0;
if (y < h-1){
d_grady[idx] = d_imgIn[y_high] - d_imgIn[idx];
} else
d_grady[idx] = 0;
}
}
// Compute L2 norm
__device__ void compute_norm(float *d_norm, float *d_vec1, float *d_vec2, int w, int h, int nc){
// Temporary variable for norm
float sqrd1 = 0;
float sqrd2 = 0;
float val1, val2;
// Get coordinates
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
// Get index
int idx = x + (size_t)w*y;
// Compute norm
if (x < w && y < h){
for (size_t c = 0; c < nc; c++){
// Get index
size_t idx_3d = idx + (size_t)w*h*c;
// Compute L2 norm
val1 = d_vec1[idx_3d];
val2 = d_vec2[idx_3d];
sqrd1 += val1*val1;
sqrd2 += val2*val2;
}
        d_norm[idx] = sqrtf(sqrd1 + sqrd2);
}
}
// Apply nonlinear diffusion
__device__ void get_diffusion(float *d_gradx, float *d_grady, float *d_norm, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y;
size_t idx_3d = x + (size_t)w*y + (size_t)w*h*z;
if (x < w && y < h){
// Diffusion factor
float g;
// Epsilon
float eps = 0.03;
// Constant diffusion
//g = 1.0f;
// Huber diffusion
//g = 1.0f/ max(eps, d_norm[idx]);
        // Exponential (Perona-Malik-type) diffusivity
g = (exp(-d_norm[idx]*d_norm[idx]/eps))/eps;
// Apply diffusion
d_gradx[idx_3d] *= g;
d_grady[idx_3d] *= g;
}
}
// Apply diffusion
__global__ void apply_diffusion(float *d_imgIn, float *d_gradx, float *d_grady, float *d_norm, int w, int h, int nc){
// Compute L2 norm
compute_norm(d_norm, d_gradx, d_grady, w, h, nc);
// Get diffusion
get_diffusion(d_gradx, d_grady, d_norm, w, h, nc);
}
// Update image
__global__ void update_image(float *d_imgIn, float *d_div, float tau, int w, int h, int nc){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get index
size_t idx = x + (size_t)w*y + (size_t)w*h*z;
if (x < w && y < h){
// Update image
d_imgIn[idx] += tau * d_div[idx];
}
}
// Compute divergence
__global__ void compute_divergence(float *d_div, float *d_gradx, float *d_grady, int w, int h, int nc){
// Get x y z pixel coordinates in 3D kernel
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int z = threadIdx.z + blockIdx.z*blockDim.z;
// Get low indices
size_t idx = x + (size_t)w*y + (size_t)h*w*z;
size_t x_low = x-1 + (size_t)w*y + (size_t)h*w*z;
size_t y_low = x + (size_t)w*(y-1) + (size_t)h*w*z;
// Temporary values
float v_x, v_y;
// Ensure no threads are out of problem domain
if (x < w && y < h){
// Compute divergence
if (x > 1){
v_x = d_gradx[idx] - d_gradx[x_low];
} else
v_x = 0;
if (y > 1){
v_y = d_grady[idx] - d_grady[y_low];
} else
v_y = 0;
// Sum gradients
d_div[idx] = v_x + v_y;
}
}
// Compute eigenvalue of a 2 by 2 matrix
__device__ void compute_eigenvalue(float *d_eigen_value, float d_t1_val, float d_t2_val, float d_t3_val){
// Define matrix
float A[4] = {d_t1_val, d_t2_val, d_t2_val, d_t3_val};
// Define elements
float a = A[0];
float b = A[1];
float c = A[2];
float d = A[3];
// Trace and determinant
float T = a + d;
float D = a*d - b*c;
// Compute eigenvalue
d_eigen_value[0] = T/2 + sqrtf(T*T/4-D);
d_eigen_value[1] = T/2 - sqrtf(T*T/4-D);
// Sort eigenvalue array
if (d_eigen_value[0] > d_eigen_value[1]){
float swap = d_eigen_value[0];
d_eigen_value[0] = d_eigen_value[1];
d_eigen_value[1] = swap;
}
}
// Convolution on global memory
__global__ void convolution_global(float *d_imgIn, float *d_imgOut, float *d_kernel, int w, int h, int nc, int w_kernel, int h_kernel){
// Get coordinates
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
//int z = threadIdx.z + blockDim.z*blockIdx.z;
// Get indices
size_t idx = x + (size_t)w*y;
// Initialise d_imgOut
// Set origin
int mid = (w_kernel-1)/2;
// Convolution - Note x_kernel is the global x coordinate of kernel in the problem domain
for (size_t c = 0; c < nc; c++){
size_t idx_3d = idx + (size_t)w*h*c;
d_imgOut[idx_3d] = 0.0f;
if (x < w && y < h){
for (size_t j = 0; j < h_kernel; j++){
for (size_t i = 0; i < w_kernel; i++){
// Boundary condition
int x_kernel_global = x - mid + i;
int y_kernel_global = y - mid + j;
// clamping
if (x_kernel_global < 0){
x_kernel_global = 0;
}
if (x_kernel_global > w-1){
x_kernel_global = w - 1;
}
if (y_kernel_global < 0){
y_kernel_global = 0;
}
if (y_kernel_global > h - 1){
y_kernel_global = h - 1;
}
// Get indices
int idx_kernel_local = i + w_kernel*j;
int idx_kernel_global = x_kernel_global + w*y_kernel_global + w*h*c;
// Multiply and sum
d_imgOut[idx_3d] += d_kernel[idx_kernel_local] * d_imgIn[idx_kernel_global];
}
}
}
}
}
// Set up kernel
void get_kernel(float *kernel, int w_kernel, int h_kernel, const float pi, float sigma){
//Set up parameters
int origin = w_kernel/2;
float total = 0.0f;
// Define 2D Gaussian kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int a = x_kernel - origin;
int b = y_kernel - origin;
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] = (1.0f / (2.0f*pi*sigma*sigma))*exp(-1*((a*a+b*b) / (2*sigma*sigma)));
total += kernel[idx];
}
}
// Normalise kernel
for (size_t y_kernel = 0; y_kernel < h_kernel; y_kernel++){
for (size_t x_kernel = 0; x_kernel < w_kernel; x_kernel++){
int idx = x_kernel + w_kernel*y_kernel;
kernel[idx] /= total;
}
}
}
// main
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specified
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// Diffusion
float tau = 0.0025f;
int N = 600;
// Convolution kernel
float sigma = sqrtf(2*tau*N);
getParam("sigma", sigma, argc, argv);
cout << "sigma: " << sigma << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
// Define kernel dimensions
int r = ceil(3*sigma);
int w_kernel = r * 2 + 1; //windowing
int h_kernel = w_kernel; //Square kernel
// Kernel information
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
cv:: Mat mgradx(h, w, mIn.type());
cv:: Mat mgrady(h, w, mIn.type());
cv:: Mat mOut_orig(h, w, mIn.type());
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// Get array memory
int nbytes = w * h * nc * sizeof(float);
int nbytes_kernel = w_kernel * h_kernel * sizeof(float);
// allocate raw input image array
float *imgIn = new float[(size_t)nbytes];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
float *imgOut_orig = new float[(size_t)w*h*mOut_orig.channels()];
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// ###
// ###
// ### TODO: Main computation
// ###
// ###
// Kernel memory allocation
float *kernel = new float[nbytes_kernel];
// Create kernel
get_kernel(kernel, w_kernel, h_kernel, pi, sigma);
// Processor type
string processor;
float *gradx = new float[nbytes];
float *grady = new float[nbytes];
////////////////////////////////////////////////////////////////////// Block setting ///////////////////////////////////////////////////////////////////////
dim3 block = dim3(128, 1, 1);
    dim3 grid = dim3((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, (nc + block.z - 1) / block.z);
Timer timer; timer.start();
// Arrays
float *d_kernel;
float *d_imgIn;
float *d_imgOut;
float *d_imgIn_orig;
float *d_imgOut_orig;
float *d_gradx;
float *d_grady;
float *d_norm;
float *d_div;
float *d_eigen_value;
// CUDA malloc
cudaMalloc(&d_kernel, nbytes_kernel); CUDA_CHECK;
cudaMalloc(&d_imgIn, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgIn_orig, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgOut, nbytes); CUDA_CHECK;
cudaMalloc(&d_imgOut_orig, nbytes); CUDA_CHECK;
cudaMalloc(&d_gradx, nbytes); CUDA_CHECK;
cudaMalloc(&d_grady, nbytes); CUDA_CHECK;
cudaMalloc(&d_div, nbytes); CUDA_CHECK;
cudaMalloc(&d_norm, w*h*sizeof(float)); CUDA_CHECK;
cudaMalloc(&d_eigen_value, 2*sizeof(float)); CUDA_CHECK;
// CUDA copy
cudaMemcpy(d_kernel, kernel, nbytes_kernel, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_imgIn, imgIn, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
cudaMemcpy(d_imgIn_orig, imgIn, nbytes, cudaMemcpyHostToDevice); CUDA_CHECK;
// Update image
float time = 0;
for (size_t i = 0; i < N; i++){
compute_gradient <<< grid, block >>> (d_gradx, d_grady, d_imgIn, w, h, nc); CUDA_CHECK;
apply_diffusion <<< grid, block >>> (d_imgIn, d_gradx, d_grady, d_norm, w, h, nc); CUDA_CHECK;
compute_divergence <<< grid, block >>> (d_div, d_gradx, d_grady, w, h, nc); CUDA_CHECK;
time = time + tau;
update_image <<< grid, block >>> (d_imgIn, d_div, tau, w, h, nc); CUDA_CHECK;
}
// Convolution
convolution_global <<< grid, block >>> (d_imgIn_orig, d_imgOut, d_kernel, w, h, nc, w_kernel, h_kernel); CUDA_CHECK;
// Copy the results to host
cudaMemcpy(imgOut, d_imgIn, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(imgOut_orig, d_imgOut, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(gradx, d_gradx, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
cudaMemcpy(grady, d_grady, nbytes, cudaMemcpyDeviceToHost); CUDA_CHECK;
// Free memory
cudaFree(d_imgIn); CUDA_CHECK;
cudaFree(d_imgIn_orig); CUDA_CHECK;
cudaFree(d_imgOut); CUDA_CHECK;
cudaFree(d_imgOut_orig); CUDA_CHECK;
cudaFree(d_kernel); CUDA_CHECK;
cudaFree(d_div); CUDA_CHECK;
cudaFree(d_gradx); CUDA_CHECK;
cudaFree(d_grady); CUDA_CHECK;
cudaFree(d_norm); CUDA_CHECK;
cudaFree(d_eigen_value); CUDA_CHECK;
// Type of processor
processor = "GPU - global memory";
cout << processor << endl;
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
timer.end(); float t = timer.get();
cout << "time: " << t*1000 << " ms" << endl;
// show input image
showImage("Input", mIn, 100, 100); // show at position (x_from_left=100,y_from_above=100)
// show output image: first convert to interleaved opencv format from the layered raw array
convert_layered_to_mat(mOut_orig, imgOut_orig);
showImage("Gaussian convolution", mOut_orig, 100+w+40, 300);
convert_layered_to_mat(mOut, imgOut);
showImage("Diffusion", mOut, 100+w+40, 100);
// ### Display your own output images here as needed
convert_layered_to_mat(mgradx, gradx);
convert_layered_to_mat(mgrady, grady);
// showImage("grad_x", mgradx, 100+w+50, 150);
// showImage("grad_y", mgrady, 100+w+60, 150);
/*
showImage("m1", 10.f*mM1, 50, 200);
showImage("m2", 10.f*mM2, 50 + w, 200);
showImage("m3", 10.f*mM3, 50 + 2 * w, 200);
showImage("t1", 10.f*mT1, 50, 250);
showImage("t2", 10.f*mT2, 50 + w, 250);
showImage("t3", 10.f*mT3, 50 + 2 * w, 250);
*/
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
#ifdef CAMERA
delete[] imgIn;
delete[] imgOut;
#else
delete[] imgIn;
delete[] imgOut;
delete[] kernel;
delete[] gradx;
delete[] grady;
#endif
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
d6775b0b6a927698f5ca9ce84b97252292104980.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "LocalConnect.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
double* _curDelta,
double**_w,
double* _nextDelta,
int dim,
int area,
int localKernelSize);
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
double** _WgradTmp,
double** Wgrad,
double** w,
int kernelSize,
int batch,
double lambda,
int wgradTmpArea,
int wgradArea,
int wArea);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
/*double** _w,*/
int dim,
int area,
int batch,
double lambda);
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
double** arrayS,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation(
double* _convDelta,
double**_w,
double* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize);
/*
*function: get convolution layer and pooling output
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(min(outputDim * outputDim, 256));
*/
__global__ void g_LocalConnect_feedforward_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* blocks : dim3(batch, cuKernelScan[cl] * localKernelSize, Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad(
double* _inputs,
double* _curDelta,
double** _wgrad,
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
double lambda);
/*
* blocks : dim3(batch, cuKernelScan[cl], Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad_1(double** sArray,
double* convDelta,
double* WgradTmp,
int imgSize,
int convOutputSize,
int kernelAmount2,
int kernelSize,
int sArrayArea,
int convDeltaArea,
int wgrapTmpArea,
int localKernelSize);
/*
*block = dim3(localKernelSize, amount);
*thread= dim3(batch);
*
*/
__global__ void g_LocalConnect_Bgrad(double* delta,
double** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize);
void LocalConnect::calCost()
{
cost->gpuClear();
hipLaunchKernelGGL(( g_getCost_3), dim3(dim3(w.size())), dim3(dim3(32)), sizeof(double) * 32, 0, cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
hipDeviceSynchronize();
getLastCudaError("LocalConnect:getCost");
}
void LocalConnect::feedforward()
{
if((kernelSize == 3 || kernelSize == 5) && inputDim >= 4 && inputDim <= 8){
dim3 block = dim3(batch, outputAmount);
const int threads = 8;
dim3 thread= dim3(threads, outputDim * outputDim);
if(outputDim == 4){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<16, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 5){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<25, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 6){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<36, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 7){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<49, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 8){
hipLaunchKernelGGL(( g_LocalConnect_feedforward_s_2<64, threads>), dim3(block), dim3(thread), 0, 0, inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_s_2");
}
else if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
hipLaunchKernelGGL(( g_LocalConnect_feedforward_kernelSize1_2), dim3(block), dim3(thread), 0, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
inputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_kernelSize1_2");
}
else {
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, min(outputDim * outputDim, 64));
hipLaunchKernelGGL(( g_LocalConnect_feedforward_2), dim3(block), dim3(thread),
sizeof(double) * outputDim * outputDim, 0,
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
kernelSize,
outputDim,
inputs->getArea(),
outputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_2");
}
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_nonLinearity), dim3(block), dim3(thread), 0, 0,
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect::g_nonLinearity");
}
}
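// Note on the specialized path above: g_LocalConnect_feedforward_s_2<OUTPUTDIM2, THREADS>
// sizes its shared-memory buffers at compile time (OUTPUTDIM2 = outputDim * outputDim,
// THREADS = 8), which is why the specialization is only dispatched for outputDim 4
// through 8 (template arguments 16, 25, 36, 49, 64) with kernelSize 3 or 5; other
// configurations fall back to the generic kernels.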
void LocalConnect::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
hipLaunchKernelGGL(( g_dnonLinearity), dim3(block), dim3(thread), 0, 0, curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect::g_dnonLinearity");
}
if(inputs){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
preDelta->gpuClear();
if(kernelSize == 1){
hipLaunchKernelGGL(( g_LocalConnect_backpropagation_kernelSize1), dim3(block), dim3(thread), 0, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation_kernelSize1");
}else{
hipLaunchKernelGGL(( g_LocalConnect_backpropagation), dim3(block), dim3(thread), 0, 0,
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
kernelSize,
curDelta->getArea(),
preDelta->getArea(),
localKernelSize);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation");
}
}
}
void LocalConnect::getGrad()
{
if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
hipLaunchKernelGGL(( g_LocalConnect_wgrad_kernelSize1), dim3(block), dim3(thread), sizeof(double) * batch, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
inputs->getArea(),
batch,
lambda);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad_kernelSize1");
block = dim3(outputAmount, kernelSize * kernelSize);
thread = dim3(batch);
}
else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(9, min(outputDim * outputDim, 64));
hipLaunchKernelGGL(( g_LocalConnect_wgrad), dim3(block), dim3(thread), sizeof(double) * inputDim * inputDim, 0,
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
kernelSize,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea(),
batch,
lambda);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad");
}
dim3 block = dim3(outputAmount * localKernelSize, kernelSize * kernelSize);
dim3 thread = dim3(batch);
hipLaunchKernelGGL(( g_LocalConnect_wgrad_Add), dim3(block), dim3(thread), sizeof(double) * batch, 0,
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
kernelSize,
batch,
lambda,
wgradTmp[0]->getArea(),
wgrad[0]->getArea(),
w[0]->getArea());
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad_Add");
block = dim3(localKernelSize, outputAmount);
thread= dim3(batch);
hipLaunchKernelGGL(( g_LocalConnect_Bgrad), dim3(block),dim3(thread),sizeof(double) * batch, 0,
curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(hipDeviceSynchronize());
getLastCudaError("LocalConnect::getGrad::g_LocalConnect_Bgrad");
}
void LocalConnect::updateWeight()
{
dim3 thread = min(256, w[0]->getLen());
dim3 block = momentum_w.size();
hipLaunchKernelGGL(( g_vecAdd), dim3(block), dim3(thread), 0, 0, momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate());
}
LocalConnect::LocalConnect(std::string name)
{
m_name = name;
ConfigLocal* config = static_cast<ConfigLocal*>(Config::instance()->getLayerByName(m_name));
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
inputAmount = preLayer->outputAmount;
outputAmount = inputAmount;
kernelSize = config->m_kernelSize;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
localKernelSize = outputDim * outputDim;
outputs = new cuMatrix<double> (batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<double>(batch, outputDim * outputDim, outputAmount);
preDelta = preLayer->getCurDelta();
for(int i = 0; i < outputAmount * localKernelSize; i++){
w.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
b.push_back(new cuMatrix<double>(1, 1, 1));
wgrad.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
bgrad.push_back(new cuMatrix<double>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<double>(batch, kernelSize * kernelSize, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount * localKernelSize; i++){
momentum_w.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
momentum_b.push_back(new cuMatrix<double>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
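// Note: localKernelSize == outputDim * outputDim, so the constructor above allocates an
// independent kernelSize x kernelSize weight matrix and bias for every output position of
// every feature map. This per-position parameterisation (no weight sharing) is what makes
// the layer locally connected rather than convolutional.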
void LocalConnect::save(FILE* file)
{
for(int a = 0; a < w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%lf ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%lf ", b[a]->get(i, j, c));
}
}
}
}
}
void LocalConnect::clearMomentum()
{
for(int i = 0; i < momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void LocalConnect::initRandom()
{
srand(clock());
double initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < w.size(); i++){
double epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
                double r1 = 0.01 + 5.0 * rand() / RAND_MAX;
                double r2 = 0.01 + 5.0 * rand() / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
kernelSize, kernelSize, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0 * rand() / RAND_MAX - 1.0);
//printf("%lf ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void LocalConnect::initFromCheckpoint(FILE* file)
{
double val = 0;
for(int a = 0; a < w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fscanf(file, "%lf", &val);
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fscanf(file, "%lf", &val);
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
double** arrayS,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ double image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int OutputSize2 = outputDim * outputDim;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = arrayS[sp] + k * inputSize2;
double* curOutput = _output + outputArea * k + sp * OutputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < OutputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < OutputSize2)
{
int x = tyid / outputDim;
int y = tyid % outputDim;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
double b = arrayB[k * localKernelSize + tyid][0];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
curOutput[tyid] = val + b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ double image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = inputs + k * inputArea + sp * inputSize2;
double* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
curOutput[id] = 0;
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
atomicAdd(curOutput + tyid, val);
}
}
__syncthreads();
for(int i = 0; i < outputSize2; i += blockDim.y * blockDim.x)
{
int id = i + threadIdx.y * blockDim.x + threadIdx.x;
if(id < outputSize2)
{
double b = arrayB[k * localKernelSize + id][0];
curOutput[id] += b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize)
{
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = dim * dim;
int inputSize2 = dim * dim;
double* curInput = inputs + k * area + sp * inputSize2;
double* curOutput = _output + k * area + sp * outputSize2;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.x)
{
int tyid = ty + threadIdx.x;
if(tyid < outputSize2)
{
int skip = k * localKernelSize + tyid;
double val = 0.0;
double w = arrayW[skip][0];
double b = arrayB[skip][0];
val = curInput[tyid] * w + b;
curOutput[tyid] = val ;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
* e.g. g_LocalConnect_feedforward_s_2<64, 8> for outputDim == 8
*/
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
__shared__ double image[OUTPUTDIM2];
int sp = blockIdx.x;
int k = blockIdx.y;
__shared__ double convSum[OUTPUTDIM2][THREADS];
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = inputs + k * inputArea + sp * inputSize2;
double* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
double* _convSum = convSum[threadIdx.y];
double b = arrayB[k * localKernelSize + tyid][0];
_convSum[threadIdx.x] = 0;
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
_convSum[threadIdx.x] = val;
__syncthreads();
#pragma unroll
for(int len = THREADS; len != 1; len = (len + 1) >> 1){
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) _convSum[threadIdx.x] += _convSum[threadIdx.x + skip];
__syncthreads();
}
if(threadIdx.x == 0)
curOutput[tyid] = _convSum[0] + b;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
double* _curDelta,
double**_w,
double* _nextDelta,
int dim,
int area,
int localKernelSize)
{
int s = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + s * dim2;
double* curDelta = _curDelta + skip;
double* nxtDelta = _nextDelta + skip;
for (int tidx = 0; tidx < dim2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < dim2) {
double val = 0.0;
double w = _w[k * localKernelSize + idx][0];
val = curDelta[idx] * w;
nxtDelta[idx] = val;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation (
double* _convDelta,
double**_w,
double* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize)
{
int curSize = _convOutputSize;
int wSize = _kernelSize;
int nxtSize = _poolOutputSize;
int s = blockIdx.x;
int k = blockIdx.y;
int curSize2 = curSize * curSize;
int nxtSize2 = nxtSize * nxtSize;
double* curDelta = _convDelta + k * _convDeltaArea + s * curSize2;
double* nxtDelta = _poolDelta + k * _poolDeltaArea + s * nxtSize2;
int half = wSize >> 1;
for (int tidx = 0; tidx < nxtSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < nxtSize2) {
int i = idx / nxtSize;
int j = idx % nxtSize;
double val = 0.0;
for (int x = 0; x < wSize; x++) {
for (int y = 0; y < wSize; y++) {
int cx = i + (half - x);
int cy = j + (half - y);
int wx = x;
int wy = y;
if(cx >= 0 && cx < curSize && cy >= 0 && cy < curSize){
double* w = _w[k * localKernelSize + cx * curSize + cy];
val += curDelta[cx * curSize + cy] * w[wx * wSize + wy];
}
}
}
nxtDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
int dim,
int area,
int batch,
double lambda)
{
int b = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + b * dim2;
double* input = _inputs + skip;
double* curDelta = _curDelta + skip;
for(int y = 0; y < dim2; y += blockDim.x){
int yid = y + threadIdx.x;
if(yid < dim2){
skip = k * dim2 + yid;
double val = input[yid] * curDelta[yid];
//_wgradTmp[skip][0] = val / batch + lambda * _w[skip][0];
_wgradTmp[skip][b] = val;
}
}
}
/*
*dim3 block = dim3(batch, outputAmount);
*dim3 thread= min(9, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_wgrad(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
/*double** _w,*/
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
double lambda)
{
int sp = blockIdx.x;
int k = blockIdx.y;
extern __shared__ double image[];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int kernelSize2 = kernelSize * kernelSize;
double* input = _inputs + k * inputArea + sp * inputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = input[id];
}
}
__syncthreads();
double* curDelta = _curDelta + k * curDeltaAea + sp * curDeltaSize2;
int half = (kernelSize >> 1);
for(int y = 0; y < curDeltaSize2; y += blockDim.y){
int yid = y + threadIdx.y;
if(yid < curDeltaSize2){
int ox = yid / curDeltaDim;
int oy = yid % curDeltaDim;
double* wgrad = _wgradTmp[k * curDeltaSize2 + yid] + sp * kernelSize2;
double delta = curDelta[yid];
for(int x = 0; x < kernelSize2; x+= blockDim.x){
int xid = x + threadIdx.x;
if(xid < kernelSize2){
int i = xid / kernelSize;
int j = xid % kernelSize;
int rox = ox + i - half;
int roy = oy + j - half;
if(rox >= 0 && rox < inputDim && roy >=0 && roy < inputDim){
double val = image[rox * inputDim + roy] * delta;
wgrad[xid] = val;
}else{
wgrad[xid] = 0;
}
}
}
}
}
}
/*
*block = dim3(localKernelSize, amount)
*thread= dim3(batch)
*/
__global__ void g_LocalConnect_Bgrad(double* _delta,
double** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize)
{
extern __shared__ double _sum[];
int local = blockIdx.x;
int k = blockIdx.y;
int sp = threadIdx.x;
int deltaSize2 = deltaSize * deltaSize;
double delta = _delta[k * deltaArea + sp * deltaSize2 + local];
_sum[sp] = delta;
__syncthreads();
int len = batch;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k * localKernelSize + local][0] = _sum[0] / batch;
}
}
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
double** _WgradTmp,
double** Wgrad,
double** w,
int kernelSize,
int batch,
double lambda,
int wgradTmpArea,
int wgradArea,
int wArea)
{
extern __shared__ double _sum[];
int ok = blockIdx.x;
int kid = blockIdx.y;
int tid = threadIdx.x;
_sum[threadIdx.x] = 0;
__syncthreads();
int tlen = batch;
double* wgradTmp = _WgradTmp[ok];
int kernelSize2 = kernelSize * kernelSize;
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + kid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(tid == 0)
{
Wgrad[ok][kid] = _sum[0] / batch + w[ok][kid] * lambda;
}
}
| d6775b0b6a927698f5ca9ce84b97252292104980.cu | #include "LocalConnect.h"
#include "../common/cuBase.h"
#include "../common/Config.h"
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation_kernelSize1(
double* _curDelta,
double**_w,
double* _nextDelta,
int dim,
int area,
int localKernelSize);
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
__global__ void g_LocalConnect_wgrad_Add(
double** _WgradTmp,
double** Wgrad,
double** w,
int kernelSize,
int batch,
double lambda,
int wgradTmpArea,
int wgradArea,
int wArea);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
__global__ void g_LocalConnect_wgrad_kernelSize1(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
/*double** _w,*/
int dim,
int area,
int batch,
double lambda);
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
double** arrayS,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize);
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
__global__ void g_LocalConnect_backpropagation(
double* _convDelta,
double**_w,
double* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize);
/*
*function: get convolution layer and pooling output
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize);
/*
* blocks : dim3(batch, outputAmount)
* threads : min(9, min(outputDim * outputDim, 64))
*/
__global__ void g_LocalConnect_wgrad(
double* _inputs,
double* _curDelta,
double** _wgrad,
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
double lambda);
/*
* blocks : dim3(batch, cuKernelScan[cl], Config::instance()->getChannels()),
* threads : dim3(threadidx)
*/
__global__ void g_LocalConnect_wgrad_1(double** sArray,
double* convDelta,
double* WgradTmp,
int imgSize,
int convOutputSize,
int kernelAmount2,
int kernelSize,
int sArrayArea,
int convDeltaArea,
int wgradTmpArea,
int localKernelSize);
/*
*block = dim3(localKernelSize, amount);
*thread= dim3(batch);
*
*/
__global__ void g_LocalConnect_Bgrad(double* delta,
double** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize);
void LocalConnect::calCost()
{
cost->gpuClear();
g_getCost_3<<<dim3(w.size()), dim3(32), sizeof(double) * 32>>>(cost->getDev(),
w.m_devPoint,
lambda,
w[0]->getLen());
cudaDeviceSynchronize();
getLastCudaError("LocalConnect:getCost");
}
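/*
 * feedforward() dispatches between three kernels that all compute the same
 * local (non-weight-shared) convolution and differ only in performance:
 * - g_LocalConnect_feedforward_s_2<OUTPUTDIM2, 8>: shared-memory tile plus a
 *   per-output-pixel reduction, used for 3x3/5x5 kernels on 4..8 pixel maps;
 * - g_LocalConnect_feedforward_kernelSize1_2: per-pixel scale and bias for
 *   1x1 kernels;
 * - g_LocalConnect_feedforward_2: generic fallback that accumulates partial
 *   sums with atomicAdd.
 * The optional nonlinearity is applied afterwards by g_nonLinearity.
 */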
void LocalConnect::feedforward()
{
if((kernelSize == 3 || kernelSize == 5) && inputDim >= 4 && inputDim <= 8){
dim3 block = dim3(batch, outputAmount);
const int threads = 8;
dim3 thread= dim3(threads, outputDim * outputDim);
if(outputDim == 4){
g_LocalConnect_feedforward_s_2<16, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 5){
g_LocalConnect_feedforward_s_2<25, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 6){
g_LocalConnect_feedforward_s_2<36, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 7){
g_LocalConnect_feedforward_s_2<49, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}else if(outputDim == 8){
g_LocalConnect_feedforward_s_2<64, threads><<<block, thread>>>(inputs->getDev(), w.m_devPoint, b.m_devPoint, outputs->getDev(), inputDim,
kernelSize, outputDim, inputs->getArea(), outputs->getArea(), batch, outputAmount, localKernelSize);
}
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_s_2");
}
else if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
g_LocalConnect_feedforward_kernelSize1_2<<<block, thread>>>(
inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
inputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_kernelSize1_2");
}
else {
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(8, min(outputDim * outputDim, 64));
g_LocalConnect_feedforward_2<<<block, thread,
sizeof(double) * outputDim * outputDim>>>
(inputs->getDev(),
w.m_devPoint,
b.m_devPoint,
outputs->getDev(),
inputDim,
kernelSize,
outputDim,
inputs->getArea(),
outputs->getArea(),
batch,
outputAmount,
localKernelSize);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect:g_LocalConnect_feedforward_2");
}
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_nonLinearity<<<block, thread>>>(
outputs->getDev(),
outputs->getLen(),
NON_LINEARITY);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect::g_nonLinearity");
}
}
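/*
 * backpropagation() first multiplies curDelta by the derivative of the
 * nonlinearity (g_dnonLinearity), then, if this layer has an input layer,
 * clears preDelta and back-propagates curDelta through the per-position
 * weights (a plain per-pixel multiply when kernelSize == 1).
 */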
void LocalConnect::backpropagation()
{
if(NON_LINEARITY >= 0){
dim3 thread = dim3(min(256, outputs->getLen()));
dim3 block = dim3(min(256, (outputs->getLen() + thread.x - 1) / thread.x));
g_dnonLinearity<<<block, thread>>>(curDelta->getDev(),
outputs->getDev(), curDelta->getLen(), NON_LINEARITY);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect::g_dnonLinearity");
}
if(inputs){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
preDelta->gpuClear();
if(kernelSize == 1){
g_LocalConnect_backpropagation_kernelSize1<<<block, thread>>>(
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation_kernelSize1");
}else{
g_LocalConnect_backpropagation<<<block, thread>>>(
curDelta->getDev(),
w.m_devPoint,
preDelta->getDev(),
outputDim,
inputDim,
inputAmount,
outputAmount,
kernelSize,
curDelta->getArea(),
preDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect::g_LocalConnect_backpropagation");
}
}
}
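/*
 * getGrad() computes per-sample weight gradients into wgradTmp (one kernel for
 * the 1x1 case, one generic kernel), then g_LocalConnect_wgrad_Add averages
 * them over the batch and adds the weight-decay term lambda * w, while
 * g_LocalConnect_Bgrad averages the deltas over the batch to obtain the bias
 * gradients.
 */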
void LocalConnect::getGrad()
{
if(kernelSize == 1){
dim3 block = dim3(batch, outputAmount);
dim3 thread= dim3(min(outputDim * outputDim, 512));
g_LocalConnect_wgrad_kernelSize1<<<block, thread, sizeof(double) * batch>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
inputs->getArea(),
batch,
lambda);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad_kernelSize1");
block = dim3(outputAmount, kernelSize * kernelSize);
thread = dim3(batch);
}
else{
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(9, min(outputDim * outputDim, 64));
g_LocalConnect_wgrad<<<block, thread, sizeof(double) * inputDim * inputDim>>>(
inputs->getDev(),
curDelta->getDev(),
wgradTmp.m_devPoint,
inputDim,
outputDim,
kernelSize,
inputAmount,
outputAmount,
inputs->getArea(),
curDelta->getArea(),
batch,
lambda);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad");
}
dim3 block = dim3(outputAmount * localKernelSize, kernelSize * kernelSize);
dim3 thread = dim3(batch);
g_LocalConnect_wgrad_Add<<<block, thread, sizeof(double) * batch>>>(
wgradTmp.m_devPoint,
wgrad.m_devPoint,
w.m_devPoint,
kernelSize,
batch,
lambda,
wgradTmp[0]->getArea(),
wgrad[0]->getArea(),
w[0]->getArea());
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("g_LocalConnect_wgrad_Add");
block = dim3(localKernelSize, outputAmount);
thread= dim3(batch);
g_LocalConnect_Bgrad<<<block,thread,sizeof(double) * batch>>>
(curDelta->getDev(),
bgrad.m_devPoint,
outputDim,
outputAmount,
batch,
curDelta->getArea(),
localKernelSize);
checkCudaErrors(cudaDeviceSynchronize());
getLastCudaError("LocalConnect::getGrad::g_LocalConnect_Bgrad");
}
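/*
 * updateWeight() hands all weight/bias matrices, their gradients and their
 * momentum buffers to the shared g_vecAdd kernel, which applies the
 * momentum-based update using the learning rate and momentum from Config.
 */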
void LocalConnect::updateWeight()
{
dim3 thread = min(256, w[0]->getLen());
dim3 block = momentum_w.size();
g_vecAdd<<<block, thread>>>(momentum_w.m_devPoint, wgrad.m_devPoint, w.m_devPoint,
momentum_b.m_devPoint, bgrad.m_devPoint, b.m_devPoint,
w[0]->getLen(), b[0]->getLen(),
Config::instance()->getMomentum(),
Config::instance()->getLrate());
}
LocalConnect::LocalConnect(std::string name)
{
m_name = name;
ConfigLocal* config = static_cast<ConfigLocal*>(Config::instance()->getLayerByName(m_name));
ConvLayerBase * preLayer = (ConvLayerBase*)Layers::instance()->get(config->m_input);
inputs = preLayer->getOutputs();
inputAmount = preLayer->outputAmount;
outputAmount = inputAmount;
kernelSize = config->m_kernelSize;
inputDim = preLayer->outputDim;
outputDim = inputDim;
batch = Config::instance()->getBatchSize();
lambda = config->m_weightDecay;
NON_LINEARITY = config->m_nonLinearity;
localKernelSize = outputDim * outputDim;
outputs = new cuMatrix<double> (batch, outputDim * outputDim, outputAmount);
curDelta = new cuMatrix<double>(batch, outputDim * outputDim, outputAmount);
preDelta = preLayer->getCurDelta();
for(int i = 0; i < outputAmount * localKernelSize; i++){
w.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
b.push_back(new cuMatrix<double>(1, 1, 1));
wgrad.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
bgrad.push_back(new cuMatrix<double>(1, 1, 1));
wgradTmp.push_back(new cuMatrix<double>(batch, kernelSize * kernelSize, 1));
}
w.toGpu();
b.toGpu();
wgrad.toGpu();
bgrad.toGpu();
wgradTmp.toGpu();
for(int i = 0; i < outputAmount * localKernelSize; i++){
momentum_w.push_back(new cuMatrix<double>(kernelSize, kernelSize, 1));
momentum_b.push_back(new cuMatrix<double>(1, 1, 1));
}
momentum_w.toGpu();
momentum_b.toGpu();
this->initRandom();
Layers::instance()->set(m_name, this);
}
void LocalConnect::save(FILE* file)
{
for(int a = 0; a < w.size(); a++){
w[a]->toCpu();
b[a]->toCpu();
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fprintf(file, "%lf ", w[a]->get(i, j, c));
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fprintf(file, "%lf ", b[a]->get(i, j, c));
}
}
}
}
}
void LocalConnect::clearMomentum()
{
for(int i = 0; i < momentum_b.size(); i++){
momentum_b[i]->gpuClear();
}
for(int i = 0; i < momentum_w.size(); i++){
momentum_w[i]->gpuClear();
}
}
void LocalConnect::initRandom()
{
srand(clock());
double initW = Config::instance()->getLayerByName(m_name)->m_initW;
if(Config::instance()->getLayerByName(m_name)->isGaussian()){
for(int i = 0; i < w.size(); i++){
double epsilon = initW;
for(int c = 0; c < w[i]->channels; c++)
{
double r1 = 0.01 + 5.0 * rand() / RAND_MAX;
double r2 = 0.01 + 5.0 * rand() / RAND_MAX;
createGaussian(w[i]->getHost() + c * w[i]->getArea(), r1,r2,
kernelSize, kernelSize, w[i]->channels,
epsilon);
}
w[i]->toGpu();
}
}
else{
for(int i = 0; i < w.size(); i++){
for(int j = 0; j < w[i]->getLen(); j++){
w[i]->getHost()[j] = initW * (2.0 * rand() / RAND_MAX - 1.0);
//printf("%lf ", w[i]->hostData[j]);
}//printf("\n");
w[i]->toGpu();
}
}
}
void LocalConnect::initFromCheckpoint(FILE* file)
{
double val = 0;
for(int a = 0; a < w.size(); a++){
for(int c = 0; c < w[a]->channels; c++){
for(int i = 0; i < w[a]->rows; i++){
for(int j = 0; j < w[a]->cols; j++){
fscanf(file, "%lf", &val);
w[a]->set(i, j, c, val);
}
}
}
for(int c = 0; c < b[a]->channels; c++){
for(int i = 0; i < b[a]->rows; i++){
for(int j = 0; j < b[a]->cols; j++){
fscanf(file, "%lf", &val);
b[a]->set(i, j, c, val);
}
}
}
w[a]->toGpu();
b[a]->toGpu();
}
}
/*
*dim3 block = dim3(batch, amount);
*dim3 thread= dim3(16, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_1(
double** arrayS,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputDim,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ double image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int OutputSize2 = outputDim * outputDim;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = arrayS[sp] + k * inputSize2;
double* curOutput = _output + outputArea * k + sp * OutputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < OutputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < OutputSize2)
{
int x = tyid / outputDim;
int y = tyid % outputDim;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
double b = arrayB[k * localKernelSize + tyid][0];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
curOutput[tyid] = val + b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
*/
__global__ void g_LocalConnect_feedforward_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
extern __shared__ double image[];
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = inputs + k * inputArea + sp * inputSize2;
double* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
curOutput[id] = 0;
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
atomicAdd(curOutput + tyid, val);
}
}
__syncthreads();
for(int i = 0; i < outputSize2; i += blockDim.y * blockDim.x)
{
int id = i + threadIdx.y * blockDim.x + threadIdx.x;
if(id < outputSize2)
{
double b = arrayB[k * localKernelSize + id][0];
curOutput[id] += b;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
* const kernelsize = 1
*/
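/*
 * With a 1x1 kernel the local connection reduces to a per-pixel affine map:
 * out[p] = in[p] * w_p + b_p, where every position p of map k owns its own
 * scalar weight and bias (index k * localKernelSize + p).
 */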
__global__ void g_LocalConnect_feedforward_kernelSize1_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int dim,
int area,
int batch,
int k1Amount,
int localKernelSize)
{
int sp = blockIdx.x;
int k = blockIdx.y;
int outputSize2 = dim * dim;
int inputSize2 = dim * dim;
double* curInput = inputs + k * area + sp * inputSize2;
double* curOutput = _output + k * area + sp * outputSize2;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.x)
{
int tyid = ty + threadIdx.x;
if(tyid < outputSize2)
{
int skip = k * localKernelSize + tyid;
double val = 0.0;
double w = arrayW[skip][0];
double b = arrayB[skip][0];
val = curInput[tyid] * w + b;
curOutput[tyid] = val ;
}
}
}
/*
* function: get convolution layer and pooling output
* dim3 block = dim3(batch, amount);
* dim3 thread= dim3(8, min(outputDim * outputDim, 64));
* e.g. g_LocalConnect_feedforward_s_2<64, 8> for outputDim == 8
*/
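/*
 * Template parameters: OUTPUTDIM2 = outputDim * outputDim (also used to size
 * the shared input tile, which relies on inputDim == outputDim, i.e. 'same'
 * padding); THREADS = number of x-threads that cooperate on one output pixel
 * and are summed by the shared-memory reduction below. feedforward()
 * instantiates it as <16..64, 8>.
 */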
template <int OUTPUTDIM2, int THREADS>
__global__ void g_LocalConnect_feedforward_s_2(
double* inputs,
double** arrayW,
double** arrayB,
double* _output,
int inputSize,
int kernelSize,
int outputSize,
int inputArea,
int outputArea,
int batch,
int k1Amount,
int localKernelSize)
{
__shared__ double image[OUTPUTDIM2];
int sp = blockIdx.x;
int k = blockIdx.y;
__shared__ double convSum[OUTPUTDIM2][THREADS];
int outputSize2 = outputSize * outputSize;
int inputSize2 = inputSize * inputSize;
int kernelSize2 = kernelSize * kernelSize;
double* curInput = inputs + k * inputArea + sp * inputSize2;
double* curOutput = _output + k * outputArea + sp * outputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = curInput[id];
}
}
__syncthreads();
int padding = kernelSize >> 1;
/*convolution*/
for(int ty = 0; ty < outputSize2; ty += blockDim.y)
{
int tyid = ty + threadIdx.y;
if(tyid < outputSize2)
{
int x = tyid / outputSize;
int y = tyid % outputSize;
double val = 0.0;
double* w = arrayW[k * localKernelSize + tyid];
double* _convSum = convSum[threadIdx.y];
double b = arrayB[k * localKernelSize + tyid][0];
_convSum[threadIdx.x] = 0;
for(int tx = 0; tx < kernelSize2; tx += blockDim.x){
int txid = tx + threadIdx.x;
if(txid < kernelSize2){
int i = txid / kernelSize;
int j = txid % kernelSize;
int xx = x + i - padding;
int yy = y + j - padding;
if(xx >= 0 && xx < inputSize && yy >= 0 && yy < inputSize)
val += image[xx * inputSize + yy] * w[i * kernelSize + j];
}
}
_convSum[threadIdx.x] = val;
__syncthreads();
#pragma unroll
for(int len = THREADS; len != 1; len = (len + 1) >> 1){
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) _convSum[threadIdx.x] += _convSum[threadIdx.x + skip];
__syncthreads();
}
if(threadIdx.x == 0)
curOutput[tyid] = _convSum[0] + b;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
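/* 1x1 backward pass: prevDelta[p] = curDelta[p] * w_p, one scalar weight per output position. */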
__global__ void g_LocalConnect_backpropagation_kernelSize1(
double* _curDelta,
double**_w,
double* _nextDelta,
int dim,
int area,
int localKernelSize)
{
int s = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + s * dim2;
double* curDelta = _curDelta + skip;
double* nxtDelta = _nextDelta + skip;
for (int tidx = 0; tidx < dim2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < dim2) {
double val = 0.0;
double w = _w[k * localKernelSize + idx][0];
val = curDelta[idx] * w;
nxtDelta[idx] = val;
}
}
}
/*
dim3 block = dim3(batch, outputAmount);
dim3 thread= min(outputDim * outputDim, 512);
*/
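/*
 * Each thread computes one pixel (i, j) of the previous layer's delta by
 * summing, over every output position (cx, cy) whose window covers (i, j),
 * curDelta[cx, cy] * w_{cx,cy}[x, y] with cx = i + half - x, cy = j + half - y.
 * This is the adjoint of the forward pass; curSize == nxtSize for this layer.
 */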
__global__ void g_LocalConnect_backpropagation (
double* _convDelta,
double**_w,
double* _poolDelta,
int _convOutputSize,
int _poolOutputSize,
int _kernelAmount1,
int _kernelAmount2,
int _kernelSize,
int _convDeltaArea,
int _poolDeltaArea,
int localKernelSize)
{
int curSize = _convOutputSize;
int wSize = _kernelSize;
int nxtSize = _poolOutputSize;
int s = blockIdx.x;
int k = blockIdx.y;
int curSize2 = curSize * curSize;
int nxtSize2 = nxtSize * nxtSize;
double* curDelta = _convDelta + k * _convDeltaArea + s * curSize2;
double* nxtDelta = _poolDelta + k * _poolDeltaArea + s * nxtSize2;
int half = wSize >> 1;
for (int tidx = 0; tidx < nxtSize2; tidx += blockDim.x) {
int idx = tidx + threadIdx.x;
if (idx < nxtSize2) {
int i = idx / nxtSize;
int j = idx % nxtSize;
double val = 0.0;
for (int x = 0; x < wSize; x++) {
for (int y = 0; y < wSize; y++) {
int cx = i + (half - x);
int cy = j + (half - y);
int wx = x;
int wy = y;
if(cx >= 0 && cx < curSize && cy >= 0 && cy < curSize){
double* w = _w[k * localKernelSize + cx * curSize + cy];
val += curDelta[cx * curSize + cy] * w[wx * wSize + wy];
}
}
}
nxtDelta[idx] = val;
}
}
}
/*
* dim3 block = dim3(batch, outputAmount);
* dim3 thread= dim3(min(outputDim * outputDim, 512));
*/
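/*
 * Per-sample gradient for 1x1 kernels: d/dw_p = input[p] * delta[p], stored
 * per sample in wgradTmp; batch averaging and the lambda * w term are applied
 * later by g_LocalConnect_wgrad_Add.
 */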
__global__ void g_LocalConnect_wgrad_kernelSize1(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
int dim,
int area,
int batch,
double lambda)
{
int b = blockIdx.x;
int k = blockIdx.y;
int dim2 = dim * dim;
int skip = k * area + b * dim2;
double* input = _inputs + skip;
double* curDelta = _curDelta + skip;
for(int y = 0; y < dim2; y += blockDim.x){
int yid = y + threadIdx.x;
if(yid < dim2){
skip = k * dim2 + yid;
double val = input[yid] * curDelta[yid];
//_wgradTmp[skip][0] = val / batch + lambda * _w[skip][0];
_wgradTmp[skip][b] = val;
}
}
}
/*
*dim3 block = dim3(batch, outputAmount);
*dim3 thread= min(9, min(outputDim * outputDim, 64));
*/
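/*
 * One block per (sample sp, output map k). Each y-thread owns one output
 * position p = (ox, oy) and writes that position's per-sample gradient slice
 * wgradTmp[k * out2 + p] + sp * kernelSize2:
 * grad[i * kernelSize + j] = input[ox + i - half, oy + j - half] * delta[p],
 * or 0 where the window falls outside the image.
 */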
__global__ void g_LocalConnect_wgrad(
double* _inputs,
double* _curDelta,
double** _wgradTmp,
/*double** _w,*/
int inputDim,
int curDeltaDim,
int kernelSize,
int inputAmount,
int outputAmount,
int inputArea,
int curDeltaAea,
int batch,
double lambda)
{
int sp = blockIdx.x;
int k = blockIdx.y;
extern __shared__ double image[];
int inputSize2 = inputDim * inputDim;
int curDeltaSize2 = curDeltaDim * curDeltaDim;
int kernelSize2 = kernelSize * kernelSize;
double* input = _inputs + k * inputArea + sp * inputSize2;
/*load the image to shared memory*/
for(int i = 0; i < inputSize2; i += blockDim.x * blockDim.y){
int id = i + threadIdx.x + threadIdx.y * blockDim.x;
if(id < inputSize2){
image[id] = input[id];
}
}
__syncthreads();
double* curDelta = _curDelta + k * curDeltaAea + sp * curDeltaSize2;
int half = (kernelSize >> 1);
for(int y = 0; y < curDeltaSize2; y += blockDim.y){
int yid = y + threadIdx.y;
if(yid < curDeltaSize2){
int ox = yid / curDeltaDim;
int oy = yid % curDeltaDim;
double* wgrad = _wgradTmp[k * curDeltaSize2 + yid] + sp * kernelSize2;
double delta = curDelta[yid];
for(int x = 0; x < kernelSize2; x+= blockDim.x){
int xid = x + threadIdx.x;
if(xid < kernelSize2){
int i = xid / kernelSize;
int j = xid % kernelSize;
int rox = ox + i - half;
int roy = oy + j - half;
if(rox >= 0 && rox < inputDim && roy >=0 && roy < inputDim){
double val = image[rox * inputDim + roy] * delta;
wgrad[xid] = val;
}else{
wgrad[xid] = 0;
}
}
}
}
}
}
/*
*block = dim3(localKernelSize, amount)
*thread= dim3(batch)
*/
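/*
 * One block per (output position 'local', map k): each thread loads the delta
 * of one sample at that position, and a shared-memory tree reduction produces
 * the batch mean, which becomes the bias gradient.
 */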
__global__ void g_LocalConnect_Bgrad(double* _delta,
double** bgrad,
int deltaSize,
int kernelAmount2,
int batch,
int deltaArea,
int localKernelSize)
{
extern __shared__ double _sum[];
int local = blockIdx.x;
int k = blockIdx.y;
int sp = threadIdx.x;
int deltaSize2 = deltaSize * deltaSize;
double delta = _delta[k * deltaArea + sp * deltaSize2 + local];
_sum[sp] = delta;
__syncthreads();
int len = batch;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
{
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(threadIdx.x == 0)
{
bgrad[k * localKernelSize + local][0] = _sum[0] / batch;
}
}
/*
* block = dim3(outputAmount, kernelSize * kernelSize);
* thread= dim3(batch);
*/
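/*
 * One block per (weight matrix ok, weight element kid): sums the per-sample
 * gradients over the batch in shared memory, divides by batch, and adds the
 * weight-decay term w[ok][kid] * lambda.
 */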
__global__ void g_LocalConnect_wgrad_Add(
double** _WgradTmp,
double** Wgrad,
double** w,
int kernelSize,
int batch,
double lambda,
int wgradTmpArea,
int wgradArea,
int wArea)
{
extern __shared__ double _sum[];
int ok = blockIdx.x;
int kid = blockIdx.y;
int tid = threadIdx.x;
_sum[threadIdx.x] = 0;
__syncthreads();
int tlen = batch;
double* wgradTmp = _WgradTmp[ok];
int kernelSize2 = kernelSize * kernelSize;
for(int i = 0; i < tlen; i += blockDim.x)
{
int b = i + threadIdx.x;
if(b < tlen)
{
_sum[threadIdx.x] += wgradTmp[b * kernelSize2 + kid];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1)
{
__syncthreads();
int skip = (len + 1) >> 1;
if(tid < (len >> 1))
{
_sum[tid] += _sum[tid + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
if(tid == 0)
{
Wgrad[ok][kid] = _sum[0] / batch + w[ok][kid] * lambda;
}
}
|
c7801994b24343c69deefb057f535c1dc2dc2098.hip | // !!! This is a file automatically generated by hipify!!!
//
// Cardiff University | Computer Science
// Module: CM3203 One Semester Project (40 Credits)
// Title: Parallelisation of Matrix Exponentials in C++/CUDA for Quantum Control
// Date: 2016
//
// Author: Peter Davison
// Supervisor: Dr. Frank C Langbein
// Moderator: Dr. Irena Spasic
//
// Include header file
#include "HIPTimer.cuh"
void CUDATimer::start() {
clear();
hipEventCreate(&t1);
hipEventCreate(&t2);
hipEventRecord(t1, 0);
}
void CUDATimer::stop() {
hipEventRecord(t2, 0);
hipEventSynchronize(t2);
hipEventElapsedTime(&time, t1, t2);
hipEventDestroy(t1);
hipEventDestroy(t2);
}
void CUDATimer::clear() {
time = 0;
}
float CUDATimer::getTime() {
return time;
}
std::ostream& operator<<(std::ostream& oStream, CUDATimer& t) {
oStream << std::setprecision(10) << std::fixed << t.getTime()/1000 << "s" << std::endl;
return oStream;
} | c7801994b24343c69deefb057f535c1dc2dc2098.cu | //
// Cardiff University | Computer Science
// Module: CM3203 One Semester Project (40 Credits)
// Title: Parallelisation of Matrix Exponentials in C++/CUDA for Quantum Control
// Date: 2016
//
// Author: Peter Davison
// Supervisor: Dr. Frank C Langbein
// Moderator: Dr. Irena Spasic
//
// Include header file
#include "CUDATimer.cuh"
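// Event-based timing: start() records t1, stop() records t2, synchronizes on
// it and stores the elapsed time in milliseconds; getTime() returns
// milliseconds, while operator<< prints seconds (time / 1000).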
void CUDATimer::start() {
clear();
cudaEventCreate(&t1);
cudaEventCreate(&t2);
cudaEventRecord(t1, 0);
}
void CUDATimer::stop() {
cudaEventRecord(t2, 0);
cudaEventSynchronize(t2);
cudaEventElapsedTime(&time, t1, t2);
cudaEventDestroy(t1);
cudaEventDestroy(t2);
}
void CUDATimer::clear() {
time = 0;
}
float CUDATimer::getTime() {
return time;
}
std::ostream& operator<<(std::ostream& oStream, CUDATimer& t) {
oStream << std::setprecision(10) << std::fixed << t.getTime()/1000 << "s" << std::endl;
return oStream;
} |