hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---
2a1ee00965694328c04cc78a403304caf4d17246.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/temporal_max_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void temporal_max_forward(const int nthreads, const Dtype* bottom,
const int num, const int count, int* max_idx, Dtype* top) {
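// For each element position, scan axis 0 of `bottom` (num slices of `count`
// elements each), keep the running maximum in `top`, and record the winning
// slice index in `max_idx`.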
CUDA_KERNEL_LOOP(index, nthreads) {
max_idx[index] = 0;
top[index] = bottom[index];
for (int n = 1; n < num; ++n) {
int bottom_idx = n * count + index;
if (bottom[bottom_idx] > top[index]) {
top[index] = bottom[n * count + index];
max_idx[index] = n;
}
}
}
}
template <typename Dtype>
void TemporalMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count(1);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( temporal_max_forward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom[0]->gpu_data(), bottom[0]->shape(0), nthreads,
max_idx_.mutable_gpu_data(), top[0]->mutable_gpu_data());
}
template <typename Dtype>
__global__ void temporal_max_backward(
const int nthreads, Dtype* bottom, const int num, const int count,
const int* max_idx, const Dtype* top) {
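// Route each top gradient back to the temporal slice that produced the
// forward maximum, as recorded in `max_idx`.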
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_index = max_idx[index] * count + index;
bottom[bottom_index] = top[index];
}
}
template <typename Dtype>
void TemporalMaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int nthreads = bottom[0]->count(1);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( temporal_max_backward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nthreads, bottom[0]->mutable_gpu_diff(), bottom[0]->shape(0), nthreads,
max_idx_.gpu_data(), top[0]->gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(TemporalMaxLayer);
} // namespace caffe
| 2a1ee00965694328c04cc78a403304caf4d17246.cu | #include <vector>
#include "caffe/layers/temporal_max_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void temporal_max_forward(const int nthreads, const Dtype* bottom,
const int num, const int count, int* max_idx, Dtype* top) {
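// For each element position, scan axis 0 of `bottom` (num slices of `count`
// elements each), keep the running maximum in `top`, and record the winning
// slice index in `max_idx`.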
CUDA_KERNEL_LOOP(index, nthreads) {
max_idx[index] = 0;
top[index] = bottom[index];
for (int n = 1; n < num; ++n) {
int bottom_idx = n * count + index;
if (bottom[bottom_idx] > top[index]) {
top[index] = bottom[n * count + index];
max_idx[index] = n;
}
}
}
}
template <typename Dtype>
void TemporalMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const int nthreads = bottom[0]->count(1);
// NOLINT_NEXT_LINE(whitespace/operators)
temporal_max_forward<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom[0]->gpu_data(), bottom[0]->shape(0), nthreads,
max_idx_.mutable_gpu_data(), top[0]->mutable_gpu_data());
}
template <typename Dtype>
__global__ void temporal_max_backward(
const int nthreads, Dtype* bottom, const int num, const int count,
const int* max_idx, const Dtype* top) {
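// Route each top gradient back to the temporal slice that produced the
// forward maximum, as recorded in `max_idx`.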
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_index = max_idx[index] * count + index;
bottom[bottom_index] = top[index];
}
}
template <typename Dtype>
void TemporalMaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int nthreads = bottom[0]->count(1);
// NOLINT_NEXT_LINE(whitespace/operators)
temporal_max_backward<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
nthreads, bottom[0]->mutable_gpu_diff(), bottom[0]->shape(0), nthreads,
max_idx_.gpu_data(), top[0]->gpu_diff());
}
INSTANTIATE_LAYER_GPU_FUNCS(TemporalMaxLayer);
} // namespace caffe
|
b3791797033156bc5a609852baf7e02d03389aac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
__global__
void saxpy(int n, float *x, float *y, float *c)
{
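// Note: despite the name, this kernel computes an element-wise sum
// c[i] = x[i] + y[i], not the usual SAXPY a*x[i] + y[i].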
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
c[i] = x[i] + y[i];
}
}
__host__
int main(void)
{
int N = 300;
float *x, *y, *c, *d_x, *d_y, *d_c;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
c = (float*)malloc(N*sizeof(float));
hipMalloc( (void**) &d_x, N*sizeof(float));
hipMalloc( (void**) &d_y, N*sizeof(float));
hipMalloc( (void**) &d_c, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy), dim3(1), dim3(N), 0, 0, N, d_x, d_y, d_c);
hipMemcpy(c, d_c, N*sizeof(float), hipMemcpyDeviceToHost);
// float maxError = 0.0f;
for (int i = 0; i < N; i++){
printf("%f - ", c[i]);
}
// maxError = max(maxError, abs(y[i]-4.0f));
// printf("Max error: %f\n", maxError);
}
| b3791797033156bc5a609852baf7e02d03389aac.cu | #include <stdio.h>
#include <math.h>
__global__
void saxpy(int n, float *x, float *y, float *c)
{
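// Note: despite the name, this kernel computes an element-wise sum
// c[i] = x[i] + y[i], not the usual SAXPY a*x[i] + y[i].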
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n){
c[i] = x[i] + y[i];
}
}
__host__
int main(void)
{
int N = 300;
float *x, *y, *c, *d_x, *d_y, *d_c;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
c = (float*)malloc(N*sizeof(float));
cudaMalloc( (void**) &d_x, N*sizeof(float));
cudaMalloc( (void**) &d_y, N*sizeof(float));
cudaMalloc( (void**) &d_c, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<1, N>>>(N, d_x, d_y, d_c);
cudaMemcpy(c, d_c, N*sizeof(float), cudaMemcpyDeviceToHost);
// float maxError = 0.0f;
for (int i = 0; i < N; i++){
printf("%f - ", c[i]);
}
// maxError = max(maxError, abs(y[i]-4.0f));
// printf("Max error: %f\n", maxError);
}
|
dfd71f4118b8ac12ceec7b044039aceadc6a0259.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_floor (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
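// Element-wise floor over one triangle of a column-major sd x sd block:
// entries are written only where bottom * gid_0 >= bottom * gid_1
// (strictly greater when unit == 132, which leaves the diagonal untouched).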
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(floor)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} | dfd71f4118b8ac12ceec7b044039aceadc6a0259.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void uplo_floor (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
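// Element-wise floor over one triangle of a column-major sd x sd block:
// entries are written only where bottom * gid_0 >= bottom * gid_1
// (strictly greater when unit == 132, which leaves the diagonal untouched).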
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < sd);
const bool check = valid &&
((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1);
if (check) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(floor)(a[offset_a + gid_0 + gid_1 * ld_a]);
}
} |
e623761008c02b12e75689b3ac3432e3993ce5a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/GridSampler.cu
#include <ATen/ATen.h>
#include "grid_sampler_cuda.cuh"
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
namespace mmdetection {
using namespace at::cuda::detail;
using mmdetection::detail::GridSamplerInterpolation;
using mmdetection::detail::GridSamplerPadding;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_forward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sH = output.strides[2];
int out_sW = output.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
*out_ptr_NCHW = static_cast<scalar_t>(0);
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_forward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sD = output.strides[2];
int out_sH = output.strides[3];
int out_sW = output.strides[4];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
*out_ptr_NCDHW = static_cast<scalar_t>(0);
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_backward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sH = grad_output.strides[2];
int gOut_sW = grad_output.strides[3];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sH = grad_input.strides[2];
int gInp_sW = grad_input.strides[3];
int gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut);
safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut);
safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut);
safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut);
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_backward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sD = grad_output.strides[2];
int gOut_sH = grad_output.strides[3];
int gOut_sW = grad_output.strides[4];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sD = grad_input.strides[2];
int gInp_sH = grad_input.strides[3];
int gInp_sW = grad_input.strides[4];
int gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut);
safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut);
safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut);
safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut);
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
using namespace at;
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_2d_forward_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto output = at::empty({N, input.size(1), H, W}, input.options());
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_forward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_2d_forward_kernel_cuda<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_3d_forward_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto output = at::empty({N, input.size(1), D, H, W}, input.options());
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_forward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_3d_forward_kernel_cuda<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode,
int64_t padding_mode, bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto grad_input = at::zeros_like(input);
auto grad_grid = at::empty_like(grid);
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel_cuda<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto grad_input = at::zeros_like(input);
auto grad_grid = at::empty_like(grid);
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel_cuda<scalar_t>)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
} // namespace mmdetection
| e623761008c02b12e75689b3ac3432e3993ce5a0.cu | // Modified from https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/GridSampler.cu
#include <ATen/ATen.h>
#include "grid_sampler_cuda.cuh"
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/macros/Macros.h>
namespace mmdetection {
using namespace at::cuda::detail;
using mmdetection::detail::GridSamplerInterpolation;
using mmdetection::detail::GridSamplerPadding;
namespace {
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_forward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sH = output.strides[2];
int out_sW = output.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
// calculate bilinear weighted pixel value and set output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
*out_ptr_NCHW = static_cast<scalar_t>(0);
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
*out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) {
if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) {
*out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_forward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> output,
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int out_sN = output.strides[0];
int out_sC = output.strides[1];
int out_sD = output.strides[2];
int out_sH = output.strides[3];
int out_sW = output.strides[4];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners);
iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners);
iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
// (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne
// + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse
// + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne
// + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse
*out_ptr_NCDHW = static_cast<scalar_t>(0);
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse;
}
}
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
// assign nearest neighbor pixel value to output pixel
auto inp_ptr_NC = input.data + n * inp_sN;
auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) {
if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) {
*out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW];
} else {
*out_ptr_NCDHW = static_cast<scalar_t>(0);
}
}
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_2d_backward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_H = input.sizes[2];
int inp_W = input.sizes[3];
int out_H = grid.sizes[1];
int out_W = grid.sizes[2];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sH = input.strides[2];
int inp_sW = input.strides[3];
int grid_sN = grid.strides[0];
int grid_sH = grid.strides[1];
int grid_sW = grid.strides[2];
int grid_sCoor = grid.strides[3];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sH = grad_output.strides[2];
int gOut_sW = grad_output.strides[3];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sH = grad_input.strides[2];
int gInp_sW = grad_input.strides[3];
int gGrid_sW = grad_grid.strides[2];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int n = index / (out_H * out_W);
const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW;
// get the corresponding input x, y co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
// multipliers for gradients on ix and iy
scalar_t gix_mult, giy_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get NE, NW, SE, SW pixel values from (x, y)
int ix_nw = static_cast<int>(::floor(ix));
int iy_nw = static_cast<int>(::floor(iy));
int ix_ne = ix_nw + 1;
int iy_ne = iy_nw;
int ix_sw = ix_nw;
int iy_sw = iy_nw + 1;
int ix_se = ix_nw + 1;
int iy_se = iy_nw + 1;
// get surfaces to each neighbor:
scalar_t nw = (ix_se - ix) * (iy_se - iy);
scalar_t ne = (ix - ix_sw) * (iy_sw - iy);
scalar_t sw = (ix_ne - ix) * (iy - iy_ne);
scalar_t se = (ix - ix_nw) * (iy - iy_nw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
scalar_t gOut = *gOut_ptr_NCHW;
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut);
safe_add_2d(gInp_ptr_NC, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut);
safe_add_2d(gInp_ptr_NC, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut);
safe_add_2d(gInp_ptr_NC, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut);
// calculate grad_grid
if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) {
scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW];
gix -= nw_val * (iy_se - iy) * gOut;
giy -= nw_val * (ix_se - ix) * gOut;
}
if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) {
scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW];
gix += ne_val * (iy_sw - iy) * gOut;
giy -= ne_val * (ix - ix_sw) * gOut;
}
if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) {
scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW];
gix -= sw_val * (iy - iy_ne) * gOut;
giy += sw_val * (ix_ne - ix) * gOut;
}
if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) {
scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW];
gix += se_val * (iy - iy_nw) * gOut;
giy += se_val * (ix - ix_nw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = gix_mult * gix;
gGrid_ptr_NHW[1] = giy_mult * giy;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
// assign nearest neighbor pixel value to output pixel
scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gInp_ptr_NC += gInp_sC, gOut_ptr_NCHW += gOut_sC) {
// calculate and set grad_input
safe_add_2d(gInp_ptr_NC, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW
// 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1]
scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NHW[1] = static_cast<scalar_t>(0);
}
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void grid_sampler_3d_backward_kernel_cuda(
const int nthreads,
TensorInfo<scalar_t, int> grad_output,
TensorInfo<scalar_t, int> input,
TensorInfo<scalar_t, int> grid,
TensorInfo<scalar_t, int> grad_input, // initialized to zeros
TensorInfo<scalar_t, int> grad_grid, // initialized to empty
const GridSamplerInterpolation interpolation_mode,
const GridSamplerPadding padding_mode,
bool align_corners) {
int C = input.sizes[1];
int inp_D = input.sizes[2];
int inp_H = input.sizes[3];
int inp_W = input.sizes[4];
int out_D = grid.sizes[1];
int out_H = grid.sizes[2];
int out_W = grid.sizes[3];
int inp_sN = input.strides[0];
int inp_sC = input.strides[1];
int inp_sD = input.strides[2];
int inp_sH = input.strides[3];
int inp_sW = input.strides[4];
int grid_sN = grid.strides[0];
int grid_sD = grid.strides[1];
int grid_sH = grid.strides[2];
int grid_sW = grid.strides[3];
int grid_sCoor = grid.strides[4];
int gOut_sN = grad_output.strides[0];
int gOut_sC = grad_output.strides[1];
int gOut_sD = grad_output.strides[2];
int gOut_sH = grad_output.strides[3];
int gOut_sW = grad_output.strides[4];
int gInp_sN = grad_input.strides[0];
int gInp_sC = grad_input.strides[1];
int gInp_sD = grad_input.strides[2];
int gInp_sH = grad_input.strides[3];
int gInp_sW = grad_input.strides[4];
int gGrid_sW = grad_grid.strides[3];
CUDA_KERNEL_LOOP(index, nthreads) {
const int w = index % out_W;
const int h = (index / out_W) % out_H;
const int d = (index / (out_H * out_W)) % out_D;
const int n = index / (out_D * out_H * out_W);
const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
// get the corresponding input x, y, z co-ordinates from grid
scalar_t ix = grid.data[grid_offset];
scalar_t iy = grid.data[grid_offset + grid_sCoor];
scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor];
// multipliers for gradients on ix, iy, and iz
scalar_t gix_mult, giy_mult, giz_mult;
ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult);
iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult);
iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult);
if (interpolation_mode == GridSamplerInterpolation::Bilinear) {
// get corner pixel values from (x, y, z)
// for 4d, we use north-east-south-west
// for 5d, we add top-bottom
int ix_tnw = static_cast<int>(::floor(ix));
int iy_tnw = static_cast<int>(::floor(iy));
int iz_tnw = static_cast<int>(::floor(iz));
int ix_tne = ix_tnw + 1;
int iy_tne = iy_tnw;
int iz_tne = iz_tnw;
int ix_tsw = ix_tnw;
int iy_tsw = iy_tnw + 1;
int iz_tsw = iz_tnw;
int ix_tse = ix_tnw + 1;
int iy_tse = iy_tnw + 1;
int iz_tse = iz_tnw;
int ix_bnw = ix_tnw;
int iy_bnw = iy_tnw;
int iz_bnw = iz_tnw + 1;
int ix_bne = ix_tnw + 1;
int iy_bne = iy_tnw;
int iz_bne = iz_tnw + 1;
int ix_bsw = ix_tnw;
int iy_bsw = iy_tnw + 1;
int iz_bsw = iz_tnw + 1;
int ix_bse = ix_tnw + 1;
int iy_bse = iy_tnw + 1;
int iz_bse = iz_tnw + 1;
// get surfaces to each neighbor:
scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz);
scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz);
scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz);
scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz);
scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse);
scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw);
scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne);
scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw);
scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0);
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
scalar_t *inp_ptr_NC = input.data + n * inp_sN;
// calculate bilinear weighted pixel value and set output pixel
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC, inp_ptr_NC += inp_sC) {
scalar_t gOut = *gOut_ptr_NCDHW;
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut);
safe_add_3d(gInp_ptr_NC, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut);
safe_add_3d(gInp_ptr_NC, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut);
safe_add_3d(gInp_ptr_NC, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut);
safe_add_3d(gInp_ptr_NC, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut);
// calculate grad_grid
if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) {
scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW];
gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut;
giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut;
giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut;
}
if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) {
scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW];
gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut;
giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut;
giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut;
}
if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) {
scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW];
gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut;
giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut;
giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut;
}
if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) {
scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW];
gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut;
giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut;
giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut;
}
if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) {
scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW];
gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut;
giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut;
giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut;
}
if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) {
scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW];
gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut;
giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut;
giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut;
}
if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) {
scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW];
gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut;
giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut;
giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut;
}
if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) {
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW];
gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut;
giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut;
giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut;
}
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
gGrid_ptr_NDHW[0] = gix_mult * gix;
gGrid_ptr_NDHW[1] = giy_mult * giy;
gGrid_ptr_NDHW[2] = giz_mult * giz;
} else if (interpolation_mode == GridSamplerInterpolation::Nearest) {
int ix_nearest = static_cast<int>(::round(ix));
int iy_nearest = static_cast<int>(::round(iy));
int iz_nearest = static_cast<int>(::round(iz));
      // scatter the output gradient to the nearest-neighbor input pixel
scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
scalar_t *gInp_ptr_NC = grad_input.data + n * gInp_sN;
for (int c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, gInp_ptr_NC += gInp_sC) {
// calculate and set grad_input
safe_add_3d(gInp_ptr_NC, iz_nearest, iy_nearest, ix_nearest,
gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW);
}
// assuming grad_grid is contiguous
// thus we can
// 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW
// 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2]
scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW;
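      // nearest-neighbor sampling is piecewise constant in the grid coordinates,
      // so the gradient w.r.t. the grid is identically zero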
gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0);
gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0);
}
}
}
} // namespace
using namespace at;
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_2d_forward_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto output = at::empty({N, input.size(1), H, W}, input.options());
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_forward_cuda", [&] {
grid_sampler_2d_forward_kernel_cuda<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
Tensor grid_sampler_3d_forward_cuda(const Tensor& input, const Tensor& grid,
int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
auto output = at::empty({N, input.size(1), D, H, W}, input.options());
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_forward_cuda", [&] {
grid_sampler_3d_forward_kernel_cuda<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(output),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return output;
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_2d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode,
int64_t padding_mode, bool align_corners) {
auto N = input.size(0);
auto H = grid.size(1);
auto W = grid.size(2);
auto grad_input = at::zeros_like(input);
auto grad_grid = at::empty_like(grid);
int count = static_cast<int>(N * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] {
grid_sampler_2d_backward_kernel_cuda<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
// No shape checking needed here. See # NOTE [ grid_sampler Native Functions ].
std::tuple<Tensor, Tensor>
grid_sampler_3d_backward_cuda(const Tensor& grad_output, const Tensor& input,
const Tensor& grid, int64_t interpolation_mode, int64_t padding_mode,
bool align_corners) {
auto N = input.size(0);
auto D = grid.size(1);
auto H = grid.size(2);
auto W = grid.size(3);
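  // grad_input must start zeroed because the backward kernel accumulates into it
  // via safe_add_3d; grad_grid is overwritten unconditionally, so empty_like suffices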
auto grad_input = at::zeros_like(input);
auto grad_grid = at::empty_like(grid);
int count = static_cast<int>(N * D * H * W);
if (count > 0) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] {
grid_sampler_3d_backward_kernel_cuda<scalar_t>
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
count,
getTensorInfo<scalar_t, int>(grad_output),
getTensorInfo<scalar_t, int>(input),
getTensorInfo<scalar_t, int>(grid),
getTensorInfo<scalar_t, int>(grad_input),
getTensorInfo<scalar_t, int>(grad_grid),
static_cast<GridSamplerInterpolation>(interpolation_mode),
static_cast<GridSamplerPadding>(padding_mode),
align_corners);
});
}
return std::make_tuple(grad_input, grad_grid);
}
} // namespace mmdetection
|
c8708385696a8ec484dc7fdcec4a31b9cec79e51.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| c8708385696a8ec484dc7fdcec4a31b9cec79e51.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
cd257463f22c51fdbfe658187fd003c773fc1d51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
//------------------------------------------------------------------------
TRACE_FUNC
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
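        // stackPtr walks the stack in raw bytes: each push/pop below moves it by
        // 4 bytes, i.e. one int entry of traversalStack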
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT);
}
//------------------------------------------------------------------------
| cd257463f22c51fdbfe658187fd003c773fc1d51.cu | /*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
//------------------------------------------------------------------------
TRACE_FUNC
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize.
{
// Pick ray index.
rayidx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y));
if (rayidx >= numRays)
return;
// Fetch ray.
float4 o = rays[rayidx * 2 + 0];
float4 d = rays[rayidx * 2 + 1];
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
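        // stackPtr walks the stack in raw bytes: each push/pop below moves it by
        // 4 bytes, i.e. one int entry of traversalStack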
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
}
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
if (hitIndex != -1)
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
STORE_RESULT(rayidx, hitIndex, hitT);
}
//------------------------------------------------------------------------
|
a5bee16b0fe991c6860998ade904795ba3351136.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
/*template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if (prefetch_current_) {
prefetch_free_.push(prefetch_current_);
}
prefetch_current_ = prefetch_full_.pop("Waiting for data");
// Reshape to loaded data.
top[0]->ReshapeLike(prefetch_current_->data_);
top[0]->set_gpu_data(prefetch_current_->data_.mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(prefetch_current_->label_);
top[1]->set_gpu_data(prefetch_current_->label_.mutable_gpu_data());
}
}*/ // Zahid
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in the meantime.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
| a5bee16b0fe991c6860998ade904795ba3351136.cu | #include <vector>
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
/*template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if (prefetch_current_) {
prefetch_free_.push(prefetch_current_);
}
prefetch_current_ = prefetch_full_.pop("Waiting for data");
// Reshape to loaded data.
top[0]->ReshapeLike(prefetch_current_->data_);
top[0]->set_gpu_data(prefetch_current_->data_.mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(prefetch_current_->label_);
top[1]->set_gpu_data(prefetch_current_->label_.mutable_gpu_data());
}
}*/ // Zahid
template <typename Dtype>
void BasePrefetchingDataLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Batch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(),
top[0]->mutable_gpu_data());
if (this->output_labels_) {
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(),
top[1]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
  // copied in the meantime.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
prefetch_free_.push(batch);
}
INSTANTIATE_LAYER_GPU_FORWARD(BasePrefetchingDataLayer);
} // namespace caffe
|
f70d13e319724ed9cbcf5a1d971c6aff87fd1aac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <hip/hip_fp16.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <NvInfer.h>
#pragma GCC diagnostic pop
namespace chainer_trt {
namespace plugin {
template <typename T>
__global__ void where_kernel(const T* __restrict__ a,
const T* __restrict__ b,
const T* __restrict__ c, T* __restrict__ dst,
const unsigned int n_in) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(n_in <= idx)
return;
const int batch = blockIdx.y;
dst[batch * n_in + idx] = (int)a[batch * n_in + idx]
? b[batch * n_in + idx]
: c[batch * n_in + idx];
}
template <typename T>
void apply_where(const T* a, const T* b, const T* c, T* dst, int n_in,
int batch_size, hipStream_t stream) {
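            // launch one thread per inner element, with the grid's y-dimension walking
            // the batch; each thread selects b or c for element (batch, idx) based on a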
const int block_size = 1024;
const int grid_size = (int)::ceil(1.0 * n_in / block_size);
dim3 grid(grid_size, batch_size);
hipLaunchKernelGGL(( where_kernel<T>), dim3(grid), dim3(block_size), 0, stream, a, b, c, dst, n_in);
}
template void apply_where(const float*, const float*, const float*, float*,
int, int, hipStream_t);
template void apply_where(const __half*, const __half*, const __half*,
__half*, int, int, hipStream_t);
}
}
| f70d13e319724ed9cbcf5a1d971c6aff87fd1aac.cu | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#include <NvInfer.h>
#pragma GCC diagnostic pop
namespace chainer_trt {
namespace plugin {
template <typename T>
__global__ void where_kernel(const T* __restrict__ a,
const T* __restrict__ b,
const T* __restrict__ c, T* __restrict__ dst,
const unsigned int n_in) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(n_in <= idx)
return;
const int batch = blockIdx.y;
dst[batch * n_in + idx] = (int)a[batch * n_in + idx]
? b[batch * n_in + idx]
: c[batch * n_in + idx];
}
template <typename T>
void apply_where(const T* a, const T* b, const T* c, T* dst, int n_in,
int batch_size, cudaStream_t stream) {
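            // launch one thread per inner element, with the grid's y-dimension walking
            // the batch; each thread selects b or c for element (batch, idx) based on a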
const int block_size = 1024;
const int grid_size = (int)std::ceil(1.0 * n_in / block_size);
dim3 grid(grid_size, batch_size);
where_kernel<T><<<grid, block_size, 0, stream>>>(a, b, c, dst, n_in);
}
template void apply_where(const float*, const float*, const float*, float*,
int, int, cudaStream_t);
template void apply_where(const __half*, const __half*, const __half*,
__half*, int, int, cudaStream_t);
}
}
|
84fec32e6a02c2b88ca5189bcfc58d36b41665b2.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "vector_traits.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <std::size_t N>
__global__ void fp32_to_fp16(Span<__half> output, View<float> input) {
using output_vector_type = get_vector_type_t<__half, N>;
using input_vector_type = get_vector_type_t<float, N>;
auto output_vPtr = output_vector_type::get_pointer(output.data());
auto input_vPtr = input_vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / output_vector_type::size())) {
input_vector_type in_vec;
v_load(in_vec, input_vPtr[i]);
output_vector_type out_vec;
for (int j = 0; j < output_vector_type::size(); j++)
out_vec.data[j] = __float2half(in_vec.data[j]);
v_store(output_vPtr[i], out_vec);
}
}
template <std::size_t N>
__global__ void fp16_to_fp32(Span<float> output, View<__half> input) {
using output_vector_type = get_vector_type_t<float, N>;
using input_vector_type = get_vector_type_t<__half, N>;
auto output_vPtr = output_vector_type::get_pointer(output.data());
auto input_vPtr = input_vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / output_vector_type::size())) {
input_vector_type in_vec;
v_load(in_vec, input_vPtr[i]);
output_vector_type out_vec;
for (int j = 0; j < output_vector_type::size(); j++)
out_vec.data[j] = __half2float(in_vec.data[j]);
v_store(output_vPtr[i], out_vec);
}
}
}
template <std::size_t N> static
void launch_vectorized_fp32_to_fp16(const Stream& stream, Span<__half> output, View<float> input) {
CV_Assert(is_fully_aligned<__half>(output, N));
CV_Assert(is_fully_aligned<float>(input, N));
auto kernel = raw::fp32_to_fp16<N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input);
}
void fp32_to_fp16(const Stream& stream, Span<__half> output, View<float> input) {
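        // dispatch on the widest vector width (4, then 2, then scalar) that the
        // alignment of both buffers permits, so loads and stores can be vectorized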
if (is_fully_aligned<__half>(output, 4) && is_fully_aligned<float>(input, 4)) {
launch_vectorized_fp32_to_fp16<4>(stream, output, input);
} else if (is_fully_aligned<__half>(output, 2) && is_fully_aligned<float>(input, 2)) {
launch_vectorized_fp32_to_fp16<2>(stream, output, input);
} else {
launch_vectorized_fp32_to_fp16<1>(stream, output, input);
}
}
template <std::size_t N> static
void launch_vectorized_fp16_to_fp32(const Stream& stream, Span<float> output, View<__half> input) {
CV_Assert(is_fully_aligned<float>(output, N));
CV_Assert(is_fully_aligned<__half>(input, N));
auto kernel = raw::fp16_to_fp32<N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input);
}
void fp16_to_fp32(const Stream& stream, Span<float> output, View<__half> input) {
if (is_fully_aligned<float>(output, 4) && is_fully_aligned<__half>(input, 4)) {
launch_vectorized_fp16_to_fp32<4>(stream, output, input);
} else if (is_fully_aligned<float>(output, 2) && is_fully_aligned<__half>(input, 2)) {
launch_vectorized_fp16_to_fp32<2>(stream, output, input);
} else {
launch_vectorized_fp16_to_fp32<1>(stream, output, input);
}
}
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
| 84fec32e6a02c2b88ca5189bcfc58d36b41665b2.cu | // This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "vector_traits.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/span.hpp"
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <std::size_t N>
__global__ void fp32_to_fp16(Span<__half> output, View<float> input) {
using output_vector_type = get_vector_type_t<__half, N>;
using input_vector_type = get_vector_type_t<float, N>;
auto output_vPtr = output_vector_type::get_pointer(output.data());
auto input_vPtr = input_vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / output_vector_type::size())) {
input_vector_type in_vec;
v_load(in_vec, input_vPtr[i]);
output_vector_type out_vec;
for (int j = 0; j < output_vector_type::size(); j++)
out_vec.data[j] = __float2half(in_vec.data[j]);
v_store(output_vPtr[i], out_vec);
}
}
template <std::size_t N>
__global__ void fp16_to_fp32(Span<float> output, View<__half> input) {
using output_vector_type = get_vector_type_t<float, N>;
using input_vector_type = get_vector_type_t<__half, N>;
auto output_vPtr = output_vector_type::get_pointer(output.data());
auto input_vPtr = input_vector_type::get_pointer(input.data());
for (auto i : grid_stride_range(output.size() / output_vector_type::size())) {
input_vector_type in_vec;
v_load(in_vec, input_vPtr[i]);
output_vector_type out_vec;
for (int j = 0; j < output_vector_type::size(); j++)
out_vec.data[j] = __half2float(in_vec.data[j]);
v_store(output_vPtr[i], out_vec);
}
}
}
template <std::size_t N> static
void launch_vectorized_fp32_to_fp16(const Stream& stream, Span<__half> output, View<float> input) {
CV_Assert(is_fully_aligned<__half>(output, N));
CV_Assert(is_fully_aligned<float>(input, N));
auto kernel = raw::fp32_to_fp16<N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input);
}
void fp32_to_fp16(const Stream& stream, Span<__half> output, View<float> input) {
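        // dispatch on the widest vector width (4, then 2, then scalar) that the
        // alignment of both buffers permits, so loads and stores can be vectorized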
if (is_fully_aligned<__half>(output, 4) && is_fully_aligned<float>(input, 4)) {
launch_vectorized_fp32_to_fp16<4>(stream, output, input);
} else if (is_fully_aligned<__half>(output, 2) && is_fully_aligned<float>(input, 2)) {
launch_vectorized_fp32_to_fp16<2>(stream, output, input);
} else {
launch_vectorized_fp32_to_fp16<1>(stream, output, input);
}
}
template <std::size_t N> static
void launch_vectorized_fp16_to_fp32(const Stream& stream, Span<float> output, View<__half> input) {
CV_Assert(is_fully_aligned<float>(output, N));
CV_Assert(is_fully_aligned<__half>(input, N));
auto kernel = raw::fp16_to_fp32<N>;
auto policy = make_policy(kernel, output.size() / N, 0, stream);
launch_kernel(kernel, policy, output, input);
}
void fp16_to_fp32(const Stream& stream, Span<float> output, View<__half> input) {
if (is_fully_aligned<float>(output, 4) && is_fully_aligned<__half>(input, 4)) {
launch_vectorized_fp16_to_fp32<4>(stream, output, input);
} else if (is_fully_aligned<float>(output, 2) && is_fully_aligned<__half>(input, 2)) {
launch_vectorized_fp16_to_fp32<2>(stream, output, input);
} else {
launch_vectorized_fp16_to_fp32<1>(stream, output, input);
}
}
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
c437dcfaa574fad0071bf6d34b16b0ebe6c7f51e.hip | // !!! This is a file automatically generated by hipify!!!
/*
*This puppy is gonna go up on github since I'll be using
*it in my next project so before I forget...
*
*(c) Zachary Job
*All rights reserved, I am not liable for damages.
*Re-distributable and use with my permission contact me
*at [email protected]
*
*Presentable without modification including comments
*for educational purposes
*
*waterfall_tokenize.cu
*4/15/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/time.h>
#include "Definitions/defs.h"
#include "Definitions/cpuCall.h"
#include "Definitions/gpuCall.h"
#include "settings.h"
/*
* Executes a tokenize system using the exclusion parser
*
* Designed to coalesce and avoid all control statements.
* This allows for lots of math tweaks to have a 0 divergence
* kernel.
*
*
* U S I N G A N A I V E F I L E B U F F E R... will be fixed
*/
int main(int argc, char **argv)
{
//V A R I A B L E S//////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
char
*data, *cfg,
// THIS IS A SUPER NAIVE FIX, CHUNKS ARE NEEDED
*fileBuffer;
int
histBytes, feedBytes,
optCmp, pages,
*histogram, *feedbuffers[(HST_THRD << 1)],
hostThreads,
*memMapS, *dirtyBits, *memOffset,
*memMapE, *trimArr,
i;
long
*arg;
struct tags_t
*configTags;
pthread_t
threads[HST_THRD];
struct timeval
start, end;
FILE
*output;
//C O N F I G U R A T I O N A N D I N S T A T A N T I A T I O N//////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
if(argc < 5)
{
fprintf(stderr, "usage: ./program_name\nREQUIRED ARGS\n"
"FILE 1: Search configuration [search.cfg]\n"
"FILE 2: The tagged data to process [someData.type]\n"
"3: Task compute configuration, pre-compute,\n"
"or run with key pre-computation\n"
"filter [0:2]\n"
"EX) im_the_settings.cfg im_the_big_data.type 0\n"
"4: Container tags to be processed in a document EG 7000\n");
return -1;
}
// Get the arguments
optCmp = atoi(argv[3]);
pages = atoi(argv[4]);
data = strdup(argv[2]);
cfg = strdup(argv[1]);
if(optCmp < 0 || optCmp > 2)
{
fprintf(stderr,"Invalid compute option, run ./program_name for usage\n");
return 2;
}
if(pages < 1)
{
fprintf(stderr,"Insufficient page count, run ./program_name for usage\n");
return 3;
}
// General definitions
hostThreads = HST_THRD;
histBytes = sizeof(int) * HIST_SZ;
feedBytes = sizeof(int) * FEED_BY;
	// Page-locked histogram for access by CPU and GPU (pinned host memory)
hipHostMalloc((void**)&histogram, histBytes);
// if GPU memory allocation failed, report an error message
if(!histogram)
fprintf(stderr, "CudaMalloc host reports failure\n"), exit(-7);
memset(histogram, 0, histBytes);
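	// allocate a pair of page-locked feed buffers per host thread (indices 2*t and
	// 2*t + 1) for staging batches to the GPU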
for(i = 0; i < (HST_THRD << 1); i+=2)
{
hipHostMalloc((void**)&feedbuffers[i], feedBytes);
hipHostMalloc((void**)&feedbuffers[i + 1], feedBytes);
if(!feedbuffers[i] || !feedbuffers[i + 1])
fprintf(stderr, "CudaMalloc host reports failure\n"), exit(-7);
}
// init the parser variables
cfgInit(cfg, &memMapS, &memMapE, &dirtyBits,
&memOffset, optCmp, &configTags, &trimArr);
// Enter if not precompute
if(optCmp != 1)
{
//H O S T T H R E A D I N G////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Open the file to be parsed
initThreadData(memMapS, memMapE, dirtyBits, memOffset,
trimArr, configTags, &fileBuffer, data);
fprintf(stderr, "Start GPU Compute\n");
gettimeofday(&start, NULL);
// Launch threads to co-process and batch information to the GPU
while(hostThreads > 0)
{
arg = (long *)malloc(sizeof(long *) * 5);
if(!arg)
fprintf(stderr, "Malloc reports failure\n"), exit(-7);
hostThreads--;
arg[0] = (long)(HST_THRD - hostThreads);
arg[1] = (long)histogram;
arg[2] = (long)pages;
arg[3] = (long)feedbuffers[hostThreads << 1];
arg[4] = (long)feedbuffers[(hostThreads << 1) + 1];
if(pthread_create(&threads[hostThreads],NULL,fBThreadMgr,(void *)arg) != 0)
fprintf(stderr, "Host thread could not start\n"), exit(1);
}
// Reset
hostThreads = HST_THRD;
		// Post join the threads for a clean death, a soldier's death
		while(hostThreads > 0)
{
if(pthread_join(threads[--hostThreads],NULL) != 0)
fprintf(stderr, "Host thread could not join\n"), exit(1);
}
gettimeofday(&end, NULL);
		// Everyone likes microseconds
fprintf(stderr, "TIME(us)::%ld\nEnd GPU Compute\n\n",
(end.tv_sec * 1000000 + end.tv_usec) -
(start.tv_sec * 1000000 + start.tv_usec));
// Open the file to write the histogram results to
if((output = fopen(OUTPUT_HIST,"w")) == NULL)
fprintf(stderr, "Dependency failure, %s could not open\n", OUTPUT_HIST), exit(24);
// Write results
for(i = 0; i < HIST_SZ; i++)
if(histogram[i] != 0) fprintf(output, "%d,%d\n", i, histogram[i]);
//C L E A N U P//////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
free(memMapS);
free(memMapE);
free(memOffset);
free(dirtyBits);
free(configTags);
free(trimArr);
free(data);
free(cfg);
free(fileBuffer);
// free feedbuffers for all the streams
for(i = 0; i < (HST_THRD << 1); i+=2)
{
hipHostFree(feedbuffers[i]);
hipHostFree(feedbuffers[i + 1]);
}
}
hipHostFree(histogram);
return 0;
} | c437dcfaa574fad0071bf6d34b16b0ebe6c7f51e.cu | /*
*This puppy is gonna go up on github since I'll be using
*it in my next project so before I forget...
*
*(c) Zachary Job
*All rights reserved, I am not liable for damages.
*Re-distributable and use with my permission contact me
*at [email protected]
*
*Presentable without modification including comments
*for educational purposes
*
*waterfall_tokenize.cu
*4/15/2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/time.h>
#include "Definitions/defs.h"
#include "Definitions/cpuCall.h"
#include "Definitions/gpuCall.h"
#include "settings.h"
/*
* Executes a tokenize system using the exclusion parser
*
* Designed to coalesce and avoid all control statements.
* This allows for lots of math tweaks to have a 0 divergence
* kernel.
*
*
* U S I N G A N A I V E F I L E B U F F E R... will be fixed
*/
int main(int argc, char **argv)
{
//V A R I A B L E S//////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
char
*data, *cfg,
// THIS IS A SUPER NAIVE FIX, CHUNKS ARE NEEDED
*fileBuffer;
int
histBytes, feedBytes,
optCmp, pages,
*histogram, *feedbuffers[(HST_THRD << 1)],
hostThreads,
*memMapS, *dirtyBits, *memOffset,
*memMapE, *trimArr,
i;
long
*arg;
struct tags_t
*configTags;
pthread_t
threads[HST_THRD];
struct timeval
start, end;
FILE
*output;
//C O N F I G U R A T I O N A N D I N S T A T A N T I A T I O N//////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
if(argc < 5)
{
fprintf(stderr, "usage: ./program_name\nREQUIRED ARGS\n"
"FILE 1: Search configuration [search.cfg]\n"
"FILE 2: The tagged data to process [someData.type]\n"
"3: Task compute configuration, pre-compute,\n"
"or run with key pre-computation\n"
"filter [0:2]\n"
"EX) im_the_settings.cfg im_the_big_data.type 0\n"
"4: Container tags to be processed in a document EG 7000\n");
return -1;
}
// Get the arguments
optCmp = atoi(argv[3]);
pages = atoi(argv[4]);
data = strdup(argv[2]);
cfg = strdup(argv[1]);
if(optCmp < 0 || optCmp > 2)
{
fprintf(stderr,"Invalid compute option, run ./program_name for usage\n");
return 2;
}
if(pages < 1)
{
fprintf(stderr,"Insufficient page count, run ./program_name for usage\n");
return 3;
}
// General definitions
hostThreads = HST_THRD;
histBytes = sizeof(int) * HIST_SZ;
feedBytes = sizeof(int) * FEED_BY;
	// Page-locked histogram for access by CPU and GPU (pinned host memory)
cudaMallocHost((void**)&histogram, histBytes);
// if GPU memory allocation failed, report an error message
if(!histogram)
fprintf(stderr, "CudaMalloc host reports failure\n"), exit(-7);
memset(histogram, 0, histBytes);
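	// allocate a pair of page-locked feed buffers per host thread (indices 2*t and
	// 2*t + 1) for staging batches to the GPU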
for(i = 0; i < (HST_THRD << 1); i+=2)
{
cudaMallocHost((void**)&feedbuffers[i], feedBytes);
cudaMallocHost((void**)&feedbuffers[i + 1], feedBytes);
if(!feedbuffers[i] || !feedbuffers[i + 1])
fprintf(stderr, "CudaMalloc host reports failure\n"), exit(-7);
}
// init the parser variables
cfgInit(cfg, &memMapS, &memMapE, &dirtyBits,
&memOffset, optCmp, &configTags, &trimArr);
// Enter if not precompute
if(optCmp != 1)
{
//H O S T T H R E A D I N G////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
// Open the file to be parsed
initThreadData(memMapS, memMapE, dirtyBits, memOffset,
trimArr, configTags, &fileBuffer, data);
fprintf(stderr, "Start GPU Compute\n");
gettimeofday(&start, NULL);
// Launch threads to co-process and batch information to the GPU
while(hostThreads > 0)
{
arg = (long *)malloc(sizeof(long *) * 5);
if(!arg)
fprintf(stderr, "Malloc reports failure\n"), exit(-7);
hostThreads--;
arg[0] = (long)(HST_THRD - hostThreads);
arg[1] = (long)histogram;
arg[2] = (long)pages;
arg[3] = (long)feedbuffers[hostThreads << 1];
arg[4] = (long)feedbuffers[(hostThreads << 1) + 1];
if(pthread_create(&threads[hostThreads],NULL,fBThreadMgr,(void *)arg) != 0)
fprintf(stderr, "Host thread could not start\n"), exit(1);
}
// Reset
hostThreads = HST_THRD;
		// Post join the threads for a clean death, a soldier's death
		while(hostThreads > 0)
{
if(pthread_join(threads[--hostThreads],NULL) != 0)
fprintf(stderr, "Host thread could not join\n"), exit(1);
}
gettimeofday(&end, NULL);
		// Everyone likes microseconds
fprintf(stderr, "TIME(us)::%ld\nEnd GPU Compute\n\n",
(end.tv_sec * 1000000 + end.tv_usec) -
(start.tv_sec * 1000000 + start.tv_usec));
// Open the file to write the histogram results to
if((output = fopen(OUTPUT_HIST,"w")) == NULL)
fprintf(stderr, "Dependency failure, %s could not open\n", OUTPUT_HIST), exit(24);
// Write results
for(i = 0; i < HIST_SZ; i++)
if(histogram[i] != 0) fprintf(output, "%d,%d\n", i, histogram[i]);
//C L E A N U P//////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////
free(memMapS);
free(memMapE);
free(memOffset);
free(dirtyBits);
free(configTags);
free(trimArr);
free(data);
free(cfg);
free(fileBuffer);
// free feedbuffers for all the streams
for(i = 0; i < (HST_THRD << 1); i+=2)
{
cudaFreeHost(feedbuffers[i]);
cudaFreeHost(feedbuffers[i + 1]);
}
}
cudaFreeHost(histogram);
return 0;
} |
1db9957f98ec008e42c878d5499d6a5c23aac070.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CUDA kernel to pause for at least num_cycle cycles
__global__ void sleep(int64_t *completed_cycles, int64_t requested_cycles)
{
completed_cycles[0] = 0;
int64_t start = clock64();
while(completed_cycles[0] < requested_cycles) {
completed_cycles[0] = clock64() - start;
}
}
extern "C" void allocate_mem(int64_t **device_value)
{
gpuErrchk( hipMalloc((void**)device_value, sizeof(int64_t)) );
}
extern "C" void copy_mem(int64_t *host_value, int64_t *device_value)
{
gpuErrchk( hipMemcpy(host_value, device_value, sizeof(int64_t), hipMemcpyDeviceToHost) );
}
// Returns number of cycles required for requested seconds
extern "C" int64_t get_cycles(float seconds)
{
// Get device frequency in KHz
int64_t Hz;
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
Hz = int64_t(prop.clockRate) * 1000;
// Calculate number of cycles to wait
int64_t num_cycles;
num_cycles = (int64_t)(seconds * Hz);
return num_cycles;
}
// Launches a kernel that sleeps for num_cycles
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
// Our kernel will launch a single thread to sleep the kernel
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel in default stream
hipLaunchKernelGGL(( sleep), dim3(gridSize), dim3(blockSize) , 0, 0, completed_cycles, requested_cycles);
}
// Wait for all work to complete
extern "C" void wait_for_gpu()
{
hipDeviceSynchronize();
}
| 1db9957f98ec008e42c878d5499d6a5c23aac070.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CUDA kernel to pause for at least num_cycle cycles
__global__ void sleep(int64_t *completed_cycles, int64_t requested_cycles)
{
completed_cycles[0] = 0;
int64_t start = clock64();
while(completed_cycles[0] < requested_cycles) {
completed_cycles[0] = clock64() - start;
}
}
extern "C" void allocate_mem(int64_t **device_value)
{
gpuErrchk( cudaMalloc((void**)device_value, sizeof(int64_t)) );
}
extern "C" void copy_mem(int64_t *host_value, int64_t *device_value)
{
gpuErrchk( cudaMemcpy(host_value, device_value, sizeof(int64_t), cudaMemcpyDeviceToHost) );
}
// Returns number of cycles required for requested seconds
extern "C" int64_t get_cycles(float seconds)
{
// Get device frequency in KHz
int64_t Hz;
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
Hz = int64_t(prop.clockRate) * 1000;
// Calculate number of cycles to wait
int64_t num_cycles;
num_cycles = (int64_t)(seconds * Hz);
return num_cycles;
}
// Launches a kernel that sleeps for num_cycles
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
// Our kernel will launch a single thread to sleep the kernel
int blockSize, gridSize;
blockSize = 1;
gridSize = 1;
// Execute the kernel in default stream
sleep<<< gridSize, blockSize >>>(completed_cycles, requested_cycles);
}
// Wait for all work to complete
extern "C" void wait_for_gpu()
{
cudaDeviceSynchronize();
}
|
10636f4bec28f75ebe801e351f45766d2edc4459.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, ByteDance CORPORATION. All rights reserved.
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <string>
#include "model/decoder.h"
#include "model/encoder.h"
#include "model_config.pb.h"
#include "proto/transformer_weight.h"
#include "server/custom.h"
#include "server/model_config.h"
#include "server/model_config_cuda.h"
#include "tools/util.h"
/**
@file
Transformer server based on tensorrt inference server.
*/
#define LOG_ERROR std::cerr
#define LOG_INFO std::cout
#ifdef FP16_MODE
const lightseq::cuda::OperationType OPTYPE =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType OPTYPE =
lightseq::cuda::OperationType::FP32;
#endif
namespace nvidia {
namespace inferenceserver {
namespace custom {
namespace transformer {
// Integer error codes. TRTIS requires that success must be 0. All
// other codes are interpreted by TRTIS as failures.
enum ErrorCodes {
kSuccess,
kUnknown,
kInvalidModelConfig,
kGpuNotSupported,
kInputOutputShape,
kInputName,
kOutputName,
kInputOutputDataType,
kInputContents,
kInputSize,
kOutputBuffer,
kCudaDevice,
kCudaMalloc,
kCudaMemcpy,
kCudaExecute,
kCudaStream,
kCublas,
kCpuExecute,
kWeightLoad,
kModelSize
};
// Context object. All state must be kept in this object.
class Context {
public:
Context(const std::string& instance_name, const ModelConfig& config,
const int gpu_device);
~Context();
// Initialize the context. Validate that the model configuration,
// etc. is something that we can handle.
int Init();
// Perform custom execution on the payloads.
int Execute(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn);
private:
typedef lightseq::cuda::OperationTypeTraits<OPTYPE> _optraits;
int FreeCudaBuffers();
int AllocateCudaBuffers(void** pdata, size_t byte_size);
int GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context,
const char* name, const size_t expected_byte_size,
void* input);
int ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn);
// The name of this instance of the backend.
const std::string instance_name_;
// The model configuration.
const ModelConfig model_config_;
// The GPU device ID to execute on or CUSTOM_NO_GPU_DEVICE if should
// execute on CPU.
const int gpu_device_;
// The data-type of the input and output tensors. Must be either
// INT32 or FP32.
DataType datatype_;
int datatype_bytesize_;
// CUDA memory buffers for input and output tensors.
void* d_input_;
void* d_padding_mask_;
void* d_encoder_output_;
void* d_buf_;
void* d_output_;
// The contexts executing on a GPU, the CUDA stream to use for the
// execution.
hipStream_t stream_;
hipblasHandle_t hd_;
lightseq::cuda::TransformerWeight<OPTYPE> tw_;
std::shared_ptr<lightseq::cuda::Decoder<OPTYPE>> decoder_;
std::shared_ptr<lightseq::cuda::Encoder<OPTYPE>> encoder_;
};
Context::Context(const std::string& instance_name,
const ModelConfig& model_config, const int gpu_device)
: instance_name_(instance_name),
model_config_(model_config),
gpu_device_(gpu_device),
datatype_(DataType::TYPE_INVALID),
d_input_(nullptr),
d_padding_mask_(nullptr),
d_encoder_output_(nullptr),
d_buf_(nullptr),
d_output_(nullptr),
stream_(nullptr),
hd_(nullptr) {}
Context::~Context() {
FreeCudaBuffers();
if (hd_ != nullptr) {
hipblasStatus_t cuerr = hipblasDestroy(hd_);
if (cuerr != HIPBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to destroy cublas handle.";
}
hd_ = nullptr;
}
if (stream_ != nullptr) {
hipError_t cuerr = hipStreamDestroy(stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to destroy cuda stream: "
<< hipGetErrorString(cuerr);
}
stream_ = nullptr;
}
}
int Context::FreeCudaBuffers() {
if (d_input_ != nullptr) {
hipError_t cuerr = hipFree(d_input_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr);
}
d_input_ = nullptr;
}
if (d_padding_mask_ != nullptr) {
hipError_t cuerr = hipFree(d_padding_mask_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr);
}
d_padding_mask_ = nullptr;
}
if (d_encoder_output_ != nullptr) {
hipError_t cuerr = hipFree(d_encoder_output_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr);
}
d_encoder_output_ = nullptr;
}
if (d_buf_ != nullptr) {
hipError_t cuerr = hipFree(d_buf_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr);
}
d_buf_ = nullptr;
}
if (d_output_ != nullptr) {
hipError_t cuerr = hipFree(d_output_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr);
}
d_output_ = nullptr;
}
return kSuccess;
}
int Context::AllocateCudaBuffers(void** pdata, size_t byte_size) {
// Allocate GPU memory buffers large enough for each input and
// output. For performance we allocate once during initialization
// instead of doing it each time we execute.
if (*pdata != nullptr) {
LOG_ERROR << "given pointer own gpu memory before allocate" << std::endl;
return kCudaMalloc;
}
hipError_t cuerr = hipMalloc(pdata, byte_size);
if (cuerr != hipSuccess) {
LOG_ERROR << "unable to allocate memory in function AllocateCudaBuffers"
<< hipGetErrorString(cuerr);
return kCudaMalloc;
}
cuerr = hipStreamSynchronize(stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Stream synchronize failed after hipMalloc"
<< hipGetErrorString(cuerr) << std::endl;
return kCudaMalloc;
}
return kSuccess;
}
int Context::Init() {
// Very important to set the CUDA device before performing any
// CUDA API calls. The device is maintained per-CPU-thread, and
// the same CPU thread will always be used with this instance of
// the backend, so only need to set the device once.
LOG_INFO << "Trtis instance init start" << std::endl;
hipError_t cuerr = hipSetDevice(gpu_device_);
if (cuerr != hipSuccess) {
LOG_ERROR << "Failed to set CUDA device to " << gpu_device_ << ": "
<< hipGetErrorString(cuerr);
return kCudaDevice;
}
const int cuda_stream_priority =
GetCudaStreamPriority(model_config_.optimization().priority());
cuerr = hipStreamCreateWithPriority(&stream_, hipStreamDefault,
cuda_stream_priority);
if (cuerr != hipSuccess) {
LOG_ERROR << "Unable to create stream" << hipGetErrorString(cuerr);
return kCudaStream;
}
hipblasStatus_t cublaserr = hipblasCreate(&hd_);
if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to creat cublas handle";
return kCublas;
}
cublaserr = hipblasSetStream(hd_, stream_);
if (cublaserr != HIPBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to set stream for cublas handle";
return kCublas;
}
if (model_config_.input_size() != 1) {
return kInputOutputShape;
}
datatype_ = model_config_.input(0).data_type();
if (datatype_ != DataType::TYPE_INT32) {
return kInputOutputDataType;
}
datatype_bytesize_ = GetDataTypeByteSize(datatype_);
if (model_config_.input(0).name() != "src_ids:0") {
return kInputName;
}
if (model_config_.output_size() != 1) {
return kInputOutputShape;
}
if (model_config_.output(0).data_type() != datatype_) {
return kInputOutputDataType;
}
if (model_config_.output(0).name() != "trg_ids:0") {
return kOutputName;
}
char* mz = getenv("MODEL_ZOO");
if (mz == NULL) {
LOG_ERROR << "plz set environment variable MODEL_ZOO !" << std::endl;
return kWeightLoad;
}
std::string model_path = mz;
model_path += "/" + model_config_.name();
std::string res =
"load model weight from " + model_path + "/transformer.pb\n";
LOG_INFO << res;
res = tw_.initializing(model_path + "/transformer.pb");
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kWeightLoad;
}
int max_batch_size = model_config_.max_batch_size();
int err;
err = AllocateCudaBuffers(
&d_input_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
err = AllocateCudaBuffers(
&d_padding_mask_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
// FIXME
err = AllocateCudaBuffers(
&d_encoder_output_,
max_batch_size * tw_._max_step * tw_._hidden_size * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
err = AllocateCudaBuffers(
&d_output_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
encoder_ = std::make_shared<lightseq::cuda::Encoder<OPTYPE>>(
max_batch_size, reinterpret_cast<int*>(d_input_),
reinterpret_cast<int*>(d_padding_mask_),
reinterpret_cast<_optraits::DataType*>(d_encoder_output_), tw_, stream_,
hd_);
res = encoder_->check();
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kModelSize;
}
decoder_ = std::make_shared<lightseq::cuda::Decoder<OPTYPE>>(
max_batch_size, reinterpret_cast<int*>(d_padding_mask_),
reinterpret_cast<_optraits::DataType*>(d_encoder_output_),
reinterpret_cast<int*>(d_output_), tw_, stream_, hd_,
false, reinterpret_cast<int*>(d_input_));
res = decoder_->check();
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kModelSize;
}
long buf_bytesize = max(encoder_->compute_buffer_bytesize(),
decoder_->compute_buffer_bytesize());
err = AllocateCudaBuffers(&d_buf_, buf_bytesize);
if (err != kSuccess) {
return err;
}
// encoder and decoder share the same scratch buffer to save GPU memory usage
encoder_->init_buffer(d_buf_);
decoder_->init_buffer(d_buf_);
// Wait for all init finish.
cuerr = hipStreamSynchronize(stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "failed to init GPU for transformer: "
<< hipGetErrorString(cuerr) << std::endl;
return kCudaExecute;
}
LOG_INFO << "transformer, release-version[" << __DATE__ << " " << __TIME__
<< "], Trtis instance init succeed!" << std::endl;
return kSuccess;
}
int Context::GetInputTensorGPU(CustomGetNextInputFn_t input_fn,
void* input_context, const char* name,
const size_t expected_byte_size, void* input) {
// The values for an input tensor are not necessarily in one
// contiguous chunk, so we copy the chunks into 'input', which
// points to CUDA memory.
uint64_t total_content_byte_size = 0;
while (true) {
const void* content;
uint64_t content_byte_size = expected_byte_size;
if (!input_fn(input_context, name, &content, &content_byte_size)) {
return kInputContents;
}
// If 'content' returns nullptr we have all the input.
if (content == nullptr) {
break;
}
// If the total amount of content received exceeds what we expect
// then something is wrong.
if ((total_content_byte_size + content_byte_size) > expected_byte_size) {
return kInputSize;
}
hipError_t cuerr = hipMemcpyAsync(
reinterpret_cast<char*>(input) + total_content_byte_size, content,
content_byte_size, hipMemcpyHostToDevice, stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "failed to copy input values to GPU for transformer: "
<< hipGetErrorString(cuerr) << std::endl;
LOG_ERROR << "try to copy " << total_content_byte_size + content_byte_size
<< " bytes from input" << std::endl;
return kCudaMemcpy;
}
total_content_byte_size += content_byte_size;
}
// Make sure we end up with exactly the amount of input we expect.
if (total_content_byte_size != expected_byte_size) {
return kInputSize;
}
return kSuccess;
}
int Context::ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
// Each payload represents a related set of inputs and required
// outputs. Each payload may have a different batch size. The total
// batch-size of all payloads will not exceed the max-batch-size
// specified in the model configuration.
if (payload_cnt == 0) {
return kSuccess;
}
std::vector<int64_t> shape(
payloads[0].input_shape_dims[0],
payloads[0].input_shape_dims[0] + payloads[0].input_shape_dim_cnts[0]);
int err;
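// For each payload: copy the source token ids into d_input_, run the encoder and
// then the auto-regressive decoder, and copy the generated target ids from
// d_output_ back into the output buffer provided by the inference server.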
for (uint32_t pidx = 0; pidx < payload_cnt; ++pidx) {
CustomPayload& payload = payloads[pidx];
// For this payload the expected size of the input and output
// tensors is determined by the batch-size of this payload.
const uint64_t batch_seq_len = payload.input_shape_dims[0][0];
if (batch_seq_len > tw_._max_step) {
LOG_ERROR << "too long seq_len: " << batch_seq_len
<< ", skip this request" << std::endl;
return kInputSize;
}
const uint64_t batchn_element_count = payload.batch_size * batch_seq_len;
const uint64_t batchn_byte_size = batchn_element_count * datatype_bytesize_;
// Copy the input tensors into the appropriate CUDA memory buffer.
err = GetInputTensorGPU(input_fn, payload.input_context, "src_ids:0",
batchn_byte_size, d_input_);
if (err != kSuccess) {
payload.error_code = err;
continue;
}
encoder_->run_one_infer(payload.batch_size, batch_seq_len);
decoder_->run_one_infer(payload.batch_size, batch_seq_len);
// The output shape is [payload-batch-size, shape] if the model
// configuration supports batching, or just [shape] if the
// model configuration does not support batching.
std::vector<int64_t> output_shape = {payload.batch_size,
decoder_->_cur_step + 1};
int64_t output_bytesize =
output_shape[0] * output_shape[1] * datatype_bytesize_;
const char* output_name = "trg_ids:0";
void* obuffer;
if (!output_fn(payload.output_context, output_name, output_shape.size(),
&output_shape[0], output_bytesize, &obuffer)) {
payload.error_code = kOutputBuffer;
break;
}
// If no error but the 'obuffer' is returned as nullptr, then
// skip writing this output.
if (obuffer == nullptr) {
continue;
}
hipError_t cuerr = hipGetLastError();
if (cuerr != hipSuccess) {
LOG_ERROR << "failed to launch kernel: " << hipGetErrorString(cuerr)
<< std::endl;
payload.error_code = kCudaExecute;
break;
}
cuerr = hipMemcpyAsync(obuffer, d_output_, output_bytesize,
hipMemcpyDeviceToHost, stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "failed to copy output values from GPU for transformer: "
<< hipGetErrorString(cuerr) << std::endl;
payload.error_code = kCudaMemcpy;
break;
}
}
// Wait for all compute and memcpy to complete.
hipError_t cuerr = hipStreamSynchronize(stream_);
if (cuerr != hipSuccess) {
LOG_ERROR << "failed to synchronize GPU for transformer: "
<< hipGetErrorString(cuerr) << std::endl;
return kCudaExecute;
}
return kSuccess;
}
int Context::Execute(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
if (gpu_device_ == CUSTOM_NO_GPU_DEVICE) {
return kCpuExecute;
} else {
return ExecuteGPU(payload_cnt, payloads, input_fn, output_fn);
}
}
/////////////
extern "C" {
int CustomInitialize(const CustomInitializeData* data, void** custom_context) {
// Convert the serialized model config to a ModelConfig object.
ModelConfig model_config;
if (!model_config.ParseFromString(std::string(
data->serialized_model_config, data->serialized_model_config_size))) {
return kInvalidModelConfig;
}
// Create the context and validate that the model configuration is
// something that we can handle.
Context* context = new Context(std::string(data->instance_name), model_config,
data->gpu_device_id);
int err = context->Init();
if (err != kSuccess) {
return err;
}
*custom_context = static_cast<void*>(context);
return kSuccess;
}
int CustomFinalize(void* custom_context) {
if (custom_context != nullptr) {
Context* context = static_cast<Context*>(custom_context);
delete context;
}
return kSuccess;
}
const char* CustomErrorString(void* custom_context, int errcode) {
switch (errcode) {
case kSuccess:
return "success";
case kInvalidModelConfig:
return "invalid model configuration";
case kGpuNotSupported:
return "execution on GPU not supported";
case kInputOutputShape:
return "model must have two inputs and two outputs with the same shape";
case kInputName:
return "model inputs must be named 'src_ids:0' and 'INPUT1'";
case kOutputName:
return "model outputs must be named 'trg_ids:0' and 'OUTPUT1'";
case kInputOutputDataType:
return "model inputs and outputs must have TYPE_INT32 or TYPE_FP32 "
"data-type";
case kInputContents:
return "unable to get input tensor values";
case kInputSize:
return "unexpected size for input tensor";
case kOutputBuffer:
return "unable to get buffer for output tensor values";
case kCudaDevice:
return "hipSetDevice failed";
case kCudaMalloc:
return "hipMalloc failed";
case kCudaMemcpy:
return "hipMemcpy failed";
case kCudaExecute:
return "cuda execution failed";
case kCudaStream:
return "failed to create CUDA stream";
case kCublas:
return "failed to create Cublas handle";
case kCpuExecute:
return "cpu execution failed";
case kWeightLoad:
return "load transformer weight in .pb failed";
case kModelSize:
return "inappropriate transformer model size";
default:
break;
}
return "unknown error";
}
int CustomExecute(void* custom_context, const uint32_t payload_cnt,
CustomPayload* payloads, CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
if (custom_context == nullptr) {
return kUnknown;
}
Context* context = static_cast<Context*>(custom_context);
return context->Execute(payload_cnt, payloads, input_fn, output_fn);
}
} // extern "C"
} // namespace transformer
} // namespace custom
} // namespace inferenceserver
} // namespace nvidia
| 10636f4bec28f75ebe801e351f45766d2edc4459.cu | // Copyright (c) 2019, ByteDance CORPORATION. All rights reserved.
#include <cuda.h>
#include <unistd.h>
#include <string>
#include "model/decoder.h"
#include "model/encoder.h"
#include "model_config.pb.h"
#include "proto/transformer_weight.h"
#include "server/custom.h"
#include "server/model_config.h"
#include "server/model_config_cuda.h"
#include "tools/util.h"
/**
@file
Transformer server based on tensorrt inference server.
*/
#define LOG_ERROR std::cerr
#define LOG_INFO std::cout
#ifdef FP16_MODE
const lightseq::cuda::OperationType OPTYPE =
lightseq::cuda::OperationType::FP16;
#else
const lightseq::cuda::OperationType OPTYPE =
lightseq::cuda::OperationType::FP32;
#endif
namespace nvidia {
namespace inferenceserver {
namespace custom {
namespace transformer {
// Integer error codes. TRTIS requires that success must be 0. All
// other codes are interpreted by TRTIS as failures.
enum ErrorCodes {
kSuccess,
kUnknown,
kInvalidModelConfig,
kGpuNotSupported,
kInputOutputShape,
kInputName,
kOutputName,
kInputOutputDataType,
kInputContents,
kInputSize,
kOutputBuffer,
kCudaDevice,
kCudaMalloc,
kCudaMemcpy,
kCudaExecute,
kCudaStream,
kCublas,
kCpuExecute,
kWeightLoad,
kModelSize
};
// Context object. All state must be kept in this object.
class Context {
public:
Context(const std::string& instance_name, const ModelConfig& config,
const int gpu_device);
~Context();
// Initialize the context. Validate that the model configuration,
// etc. is something that we can handle.
int Init();
// Perform custom execution on the payloads.
int Execute(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn);
private:
typedef lightseq::cuda::OperationTypeTraits<OPTYPE> _optraits;
int FreeCudaBuffers();
int AllocateCudaBuffers(void** pdata, size_t byte_size);
int GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context,
const char* name, const size_t expected_byte_size,
void* input);
int ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn);
// The name of this instance of the backend.
const std::string instance_name_;
// The model configuration.
const ModelConfig model_config_;
// The GPU device ID to execute on or CUSTOM_NO_GPU_DEVICE if should
// execute on CPU.
const int gpu_device_;
// The data-type of the input and output tensors. Must be either
// INT32 or FP32.
DataType datatype_;
int datatype_bytesize_;
// CUDA memory buffers for input and output tensors.
void* d_input_;
void* d_padding_mask_;
void* d_encoder_output_;
void* d_buf_;
void* d_output_;
// The contexts executing on a GPU, the CUDA stream to use for the
// execution.
cudaStream_t stream_;
cublasHandle_t hd_;
lightseq::cuda::TransformerWeight<OPTYPE> tw_;
std::shared_ptr<lightseq::cuda::Decoder<OPTYPE>> decoder_;
std::shared_ptr<lightseq::cuda::Encoder<OPTYPE>> encoder_;
};
Context::Context(const std::string& instance_name,
const ModelConfig& model_config, const int gpu_device)
: instance_name_(instance_name),
model_config_(model_config),
gpu_device_(gpu_device),
datatype_(DataType::TYPE_INVALID),
d_input_(nullptr),
d_padding_mask_(nullptr),
d_encoder_output_(nullptr),
d_buf_(nullptr),
d_output_(nullptr),
stream_(nullptr),
hd_(nullptr) {}
Context::~Context() {
FreeCudaBuffers();
if (hd_ != nullptr) {
cublasStatus_t cuerr = cublasDestroy(hd_);
if (cuerr != CUBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to destroy cublas handle.";
}
hd_ = nullptr;
}
if (stream_ != nullptr) {
cudaError_t cuerr = cudaStreamDestroy(stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to destroy cuda stream: "
<< cudaGetErrorString(cuerr);
}
stream_ = nullptr;
}
}
int Context::FreeCudaBuffers() {
if (d_input_ != nullptr) {
cudaError_t cuerr = cudaFree(d_input_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr);
}
d_input_ = nullptr;
}
if (d_padding_mask_ != nullptr) {
cudaError_t cuerr = cudaFree(d_padding_mask_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr);
}
d_padding_mask_ = nullptr;
}
if (d_encoder_output_ != nullptr) {
cudaError_t cuerr = cudaFree(d_encoder_output_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr);
}
d_encoder_output_ = nullptr;
}
if (d_buf_ != nullptr) {
cudaError_t cuerr = cudaFree(d_buf_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr);
}
d_buf_ = nullptr;
}
if (d_output_ != nullptr) {
cudaError_t cuerr = cudaFree(d_output_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr);
}
d_output_ = nullptr;
}
return kSuccess;
}
int Context::AllocateCudaBuffers(void** pdata, size_t byte_size) {
// Allocate GPU memory buffers large enough for each input and
// output. For performance we allocate once during initialization
// instead of doing it each time we execute.
if (*pdata != nullptr) {
LOG_ERROR << "given pointer own gpu memory before allocate" << std::endl;
return kCudaMalloc;
}
cudaError_t cuerr = cudaMalloc(pdata, byte_size);
if (cuerr != cudaSuccess) {
LOG_ERROR << "unable to allocate memory in function AllocateCudaBuffers"
<< cudaGetErrorString(cuerr);
return kCudaMalloc;
}
cuerr = cudaStreamSynchronize(stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Stream synchronize failed after cudaMalloc"
<< cudaGetErrorString(cuerr) << std::endl;
return kCudaMalloc;
}
return kSuccess;
}
int Context::Init() {
// Very important to set the CUDA device before performing any
// CUDA API calls. The device is maintained per-CPU-thread, and
// the same CPU thread will always be used with this instance of
// the backend, so only need to set the device once.
LOG_INFO << "Trtis instance init start" << std::endl;
cudaError_t cuerr = cudaSetDevice(gpu_device_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Failed to set CUDA device to " << gpu_device_ << ": "
<< cudaGetErrorString(cuerr);
return kCudaDevice;
}
const int cuda_stream_priority =
GetCudaStreamPriority(model_config_.optimization().priority());
cuerr = cudaStreamCreateWithPriority(&stream_, cudaStreamDefault,
cuda_stream_priority);
if (cuerr != cudaSuccess) {
LOG_ERROR << "Unable to create stream" << cudaGetErrorString(cuerr);
return kCudaStream;
}
cublasStatus_t cublaserr = cublasCreate(&hd_);
if (cublaserr != CUBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to creat cublas handle";
return kCublas;
}
cublaserr = cublasSetStream(hd_, stream_);
if (cublaserr != CUBLAS_STATUS_SUCCESS) {
LOG_ERROR << "Failed to set stream for cublas handle";
return kCublas;
}
if (model_config_.input_size() != 1) {
return kInputOutputShape;
}
datatype_ = model_config_.input(0).data_type();
if (datatype_ != DataType::TYPE_INT32) {
return kInputOutputDataType;
}
datatype_bytesize_ = GetDataTypeByteSize(datatype_);
if (model_config_.input(0).name() != "src_ids:0") {
return kInputName;
}
if (model_config_.output_size() != 1) {
return kInputOutputShape;
}
if (model_config_.output(0).data_type() != datatype_) {
return kInputOutputDataType;
}
if (model_config_.output(0).name() != "trg_ids:0") {
return kOutputName;
}
char* mz = getenv("MODEL_ZOO");
if (mz == NULL) {
LOG_ERROR << "plz set environment variable MODEL_ZOO !" << std::endl;
return kWeightLoad;
}
std::string model_path = mz;
model_path += "/" + model_config_.name();
std::string res =
"load model weight from " + model_path + "/transformer.pb\n";
LOG_INFO << res;
res = tw_.initializing(model_path + "/transformer.pb");
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kWeightLoad;
}
int max_batch_size = model_config_.max_batch_size();
int err;
err = AllocateCudaBuffers(
&d_input_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
err = AllocateCudaBuffers(
&d_padding_mask_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
// FIXME
err = AllocateCudaBuffers(
&d_encoder_output_,
max_batch_size * tw_._max_step * tw_._hidden_size * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
err = AllocateCudaBuffers(
&d_output_, max_batch_size * tw_._max_step * datatype_bytesize_);
if (err != kSuccess) {
return err;
}
encoder_ = std::make_shared<lightseq::cuda::Encoder<OPTYPE>>(
max_batch_size, reinterpret_cast<int*>(d_input_),
reinterpret_cast<int*>(d_padding_mask_),
reinterpret_cast<_optraits::DataType*>(d_encoder_output_), tw_, stream_,
hd_);
res = encoder_->check();
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kModelSize;
}
decoder_ = std::make_shared<lightseq::cuda::Decoder<OPTYPE>>(
max_batch_size, reinterpret_cast<int*>(d_padding_mask_),
reinterpret_cast<_optraits::DataType*>(d_encoder_output_),
reinterpret_cast<int*>(d_output_), tw_, stream_, hd_,
false, reinterpret_cast<int*>(d_input_));
res = decoder_->check();
if (!res.empty()) {
LOG_ERROR << res << std::endl;
return kModelSize;
}
long buf_bytesize = max(encoder_->compute_buffer_bytesize(),
decoder_->compute_buffer_bytesize());
err = AllocateCudaBuffers(&d_buf_, buf_bytesize);
if (err != kSuccess) {
return err;
}
// encoder and decoder share the same scratch buffer to save GPU memory usage
encoder_->init_buffer(d_buf_);
decoder_->init_buffer(d_buf_);
// Wait for all init finish.
cuerr = cudaStreamSynchronize(stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "failed to init GPU for transformer: "
<< cudaGetErrorString(cuerr) << std::endl;
return kCudaExecute;
}
LOG_INFO << "transformer, release-version[" << __DATE__ << " " << __TIME__
<< "], Trtis instance init succeed!" << std::endl;
return kSuccess;
}
int Context::GetInputTensorGPU(CustomGetNextInputFn_t input_fn,
void* input_context, const char* name,
const size_t expected_byte_size, void* input) {
// The values for an input tensor are not necessarily in one
// contiguous chunk, so we copy the chunks into 'input', which
// points to CUDA memory.
uint64_t total_content_byte_size = 0;
while (true) {
const void* content;
uint64_t content_byte_size = expected_byte_size;
if (!input_fn(input_context, name, &content, &content_byte_size)) {
return kInputContents;
}
// If 'content' returns nullptr we have all the input.
if (content == nullptr) {
break;
}
// If the total amount of content received exceeds what we expect
// then something is wrong.
if ((total_content_byte_size + content_byte_size) > expected_byte_size) {
return kInputSize;
}
cudaError_t cuerr = cudaMemcpyAsync(
reinterpret_cast<char*>(input) + total_content_byte_size, content,
content_byte_size, cudaMemcpyHostToDevice, stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "failed to copy input values to GPU for transformer: "
<< cudaGetErrorString(cuerr) << std::endl;
LOG_ERROR << "try to copy " << total_content_byte_size + content_byte_size
<< " bytes from input" << std::endl;
return kCudaMemcpy;
}
total_content_byte_size += content_byte_size;
}
// Make sure we end up with exactly the amount of input we expect.
if (total_content_byte_size != expected_byte_size) {
return kInputSize;
}
return kSuccess;
}
int Context::ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
// Each payload represents a related set of inputs and required
// outputs. Each payload may have a different batch size. The total
// batch-size of all payloads will not exceed the max-batch-size
// specified in the model configuration.
if (payload_cnt == 0) {
return kSuccess;
}
std::vector<int64_t> shape(
payloads[0].input_shape_dims[0],
payloads[0].input_shape_dims[0] + payloads[0].input_shape_dim_cnts[0]);
int err;
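// For each payload: copy the source token ids into d_input_, run the encoder and
// then the auto-regressive decoder, and copy the generated target ids from
// d_output_ back into the output buffer provided by the inference server.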
for (uint32_t pidx = 0; pidx < payload_cnt; ++pidx) {
CustomPayload& payload = payloads[pidx];
// For this payload the expected size of the input and output
// tensors is determined by the batch-size of this payload.
const uint64_t batch_seq_len = payload.input_shape_dims[0][0];
if (batch_seq_len > tw_._max_step) {
LOG_ERROR << "too long seq_len: " << batch_seq_len
<< ", skip this request" << std::endl;
return kInputSize;
}
const uint64_t batchn_element_count = payload.batch_size * batch_seq_len;
const uint64_t batchn_byte_size = batchn_element_count * datatype_bytesize_;
// Copy the input tensors into the appropriate CUDA memory buffer.
err = GetInputTensorGPU(input_fn, payload.input_context, "src_ids:0",
batchn_byte_size, d_input_);
if (err != kSuccess) {
payload.error_code = err;
continue;
}
encoder_->run_one_infer(payload.batch_size, batch_seq_len);
decoder_->run_one_infer(payload.batch_size, batch_seq_len);
// The output shape is [payload-batch-size, shape] if the model
// configuration supports batching, or just [shape] if the
// model configuration does not support batching.
std::vector<int64_t> output_shape = {payload.batch_size,
decoder_->_cur_step + 1};
int64_t output_bytesize =
output_shape[0] * output_shape[1] * datatype_bytesize_;
const char* output_name = "trg_ids:0";
void* obuffer;
if (!output_fn(payload.output_context, output_name, output_shape.size(),
&output_shape[0], output_bytesize, &obuffer)) {
payload.error_code = kOutputBuffer;
break;
}
// If no error but the 'obuffer' is returned as nullptr, then
// skip writing this output.
if (obuffer == nullptr) {
continue;
}
cudaError_t cuerr = cudaGetLastError();
if (cuerr != cudaSuccess) {
LOG_ERROR << "failed to launch kernel: " << cudaGetErrorString(cuerr)
<< std::endl;
payload.error_code = kCudaExecute;
break;
}
cuerr = cudaMemcpyAsync(obuffer, d_output_, output_bytesize,
cudaMemcpyDeviceToHost, stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "failed to copy output values from GPU for transformer: "
<< cudaGetErrorString(cuerr) << std::endl;
payload.error_code = kCudaMemcpy;
break;
}
}
// Wait for all compute and memcpy to complete.
cudaError_t cuerr = cudaStreamSynchronize(stream_);
if (cuerr != cudaSuccess) {
LOG_ERROR << "failed to synchronize GPU for transformer: "
<< cudaGetErrorString(cuerr) << std::endl;
return kCudaExecute;
}
return kSuccess;
}
int Context::Execute(const uint32_t payload_cnt, CustomPayload* payloads,
CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
if (gpu_device_ == CUSTOM_NO_GPU_DEVICE) {
return kCpuExecute;
} else {
return ExecuteGPU(payload_cnt, payloads, input_fn, output_fn);
}
}
/////////////
extern "C" {
int CustomInitialize(const CustomInitializeData* data, void** custom_context) {
// Convert the serialized model config to a ModelConfig object.
ModelConfig model_config;
if (!model_config.ParseFromString(std::string(
data->serialized_model_config, data->serialized_model_config_size))) {
return kInvalidModelConfig;
}
// Create the context and validate that the model configuration is
// something that we can handle.
Context* context = new Context(std::string(data->instance_name), model_config,
data->gpu_device_id);
int err = context->Init();
if (err != kSuccess) {
return err;
}
*custom_context = static_cast<void*>(context);
return kSuccess;
}
int CustomFinalize(void* custom_context) {
if (custom_context != nullptr) {
Context* context = static_cast<Context*>(custom_context);
delete context;
}
return kSuccess;
}
const char* CustomErrorString(void* custom_context, int errcode) {
switch (errcode) {
case kSuccess:
return "success";
case kInvalidModelConfig:
return "invalid model configuration";
case kGpuNotSupported:
return "execution on GPU not supported";
case kInputOutputShape:
return "model must have two inputs and two outputs with the same shape";
case kInputName:
return "model inputs must be named 'src_ids:0' and 'INPUT1'";
case kOutputName:
return "model outputs must be named 'trg_ids:0' and 'OUTPUT1'";
case kInputOutputDataType:
return "model inputs and outputs must have TYPE_INT32 or TYPE_FP32 "
"data-type";
case kInputContents:
return "unable to get input tensor values";
case kInputSize:
return "unexpected size for input tensor";
case kOutputBuffer:
return "unable to get buffer for output tensor values";
case kCudaDevice:
return "cudaSetDevice failed";
case kCudaMalloc:
return "cudaMalloc failed";
case kCudaMemcpy:
return "cudaMemcpy failed";
case kCudaExecute:
return "cuda execution failed";
case kCudaStream:
return "failed to create CUDA stream";
case kCublas:
return "failed to create Cublas handle";
case kCpuExecute:
return "cpu execution failed";
case kWeightLoad:
return "load transformer weight in .pb failed";
case kModelSize:
return "inappropriate transformer model size";
default:
break;
}
return "unknown error";
}
int CustomExecute(void* custom_context, const uint32_t payload_cnt,
CustomPayload* payloads, CustomGetNextInputFn_t input_fn,
CustomGetOutputFn_t output_fn) {
if (custom_context == nullptr) {
return kUnknown;
}
Context* context = static_cast<Context*>(custom_context);
return context->Execute(payload_cnt, payloads, input_fn, output_fn);
}
} // extern "C"
} // namespace transformer
} // namespace custom
} // namespace inferenceserver
} // namespace nvidia
|
ba07ae6bc536b56d2c0372881d757ad35d8a3702.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
using namespace std;
#define MATRIXSIZE 2000
#define TILESIZE 32
// Multiply Matrices in GPU
__global__ void multMatrixGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
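// Each thread computes one element of C: the dot product of row 'iy' of MatA
// with column 'ix' of MatB, read directly from global memory (no tiling).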
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; // col
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; // row
unsigned int idx = iy * nx + ix;
float sum = 0;
if (ix < nx && iy < ny) {
for (int i = 0; i < nx; i++) {
sum += MatA[iy * nx + i] * MatB[i * ny + ix];
}
MatC[idx] = sum;
}
}
// Multiply Matrices in GPU with Tiles
__global__ void multMatrixGPUTiles(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + TILESIZE * blockIdx.x;
unsigned int iy = threadIdx.y + TILESIZE * blockIdx.y;
float sum = 0.0;
__shared__ float TempA[TILESIZE][TILESIZE];
__shared__ float TempB[TILESIZE][TILESIZE];
TempA[threadIdx.y][threadIdx.x] = 0.0;
TempB[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
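// Each iteration stages one TILESIZE x TILESIZE tile of MatA and MatB into shared
// memory and accumulates the partial dot product. Tiles are visited in descending
// order, so the (possibly partial) edge tile is handled while TempA/TempB still hold
// their zero initialisation and out-of-range entries contribute zero to the sum.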
for (int i = (TILESIZE + nx - 1) / TILESIZE; i >= 0; i--)
{
if ((i * TILESIZE + threadIdx.x < nx) && (iy < ny)) {
TempA[threadIdx.y][threadIdx.x] = MatA[iy * ny + i * TILESIZE + threadIdx.x];
}
if ((i * TILESIZE + threadIdx.y < ny) && (ix < nx)) {
TempB[threadIdx.y][threadIdx.x] = MatB[(i * TILESIZE + threadIdx.y) * ny + ix];
}
__syncthreads();
for (int j = 0; j < TILESIZE; j++) {
sum += TempA[threadIdx.y][j] * TempB[j][threadIdx.x];
}
__syncthreads();
}
if (ix < nx && iy < ny)
{
MatC[iy * ny + ix] = sum;
}
}
// Multiply Matrices in CPU
void multMatrixCPU(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
float sum = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < nx; j++) {
for (int k = 0; k < nx; k++) {
ic[i * nx + j] += ia[i * nx + k] * ib[j + k * nx];
}
}
}
return;
}
// Multiply Matrix in CPU Parallel
void multMatrixCPUParallel(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
float sum = 0;
int i, j, k;
int nProcessors = omp_get_max_threads();
std::cout << "CPU processors available: " << nProcessors << std::endl;
omp_set_num_threads(6);
#pragma omp parallel for private(sum,i,j,k) shared(ia, ib, ic)
for (i = 0; i < nx; i++) {
for (j = 0; j < nx; j++) {
sum = 0;
for (k = 0; k < nx; k++) {
sum += ia[i * nx + k] * ib[k * nx + j];
}
ic[i * nx + j] = sum;
}
}
return;
}
void initialData(float * ip, const int size)
{
int i;
for (i = 0; i < size; i++)
{
ip[i] = (float)rand() / (RAND_MAX / 10.0f);
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int nxy)
{
double epsilon = 0.5;
bool match = 1;
for (int i = 0; i < nxy; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(hipSetDevice(dev), "Error setting device");
// set up data size of matrix
int nx = MATRIXSIZE;
int ny = MATRIXSIZE;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
printf("Tile size: %d\n\n", TILESIZE);
// malloc host memory
float *h_A , *h_B , *hostRef, *gpuRef, *gpuRefTiles;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
gpuRefTiles = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A , nxy);
initialData(h_B , nxy);
memset(hostRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
memset(gpuRef, 0, nBytes);
// ------------------- CPU -------------------
auto start_cpu = chrono::high_resolution_clock::now();
multMatrixCPUParallel(h_A , h_B , hostRef, nx, ny);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multMatrixCPUParallel elapsed %f ms\n\n", duration_ms.count());
// ------------------- GPU Setup -------------------
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
// transfer data from host to device
SAFE_CALL(hipMemcpy(d_MatA, h_A , nBytes, hipMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(hipMemcpy(d_MatB, h_B , nBytes, hipMemcpyHostToDevice), "Error copying d_MatB");
// invoke kernel at host side
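// Each thread block covers a TILESIZE x TILESIZE patch of the output matrix;
// the grid is rounded up so every output element is assigned to a thread.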
dim3 block(TILESIZE, TILESIZE);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
// ------------------- GPU Normal -------------------
start_cpu = chrono::high_resolution_clock::now();
multMatrixGPU << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multMatrixGPU <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
checkResult(hostRef, gpuRef, nxy);
// ------------------- GPU TILES -------------------
start_cpu = chrono::high_resolution_clock::now();
multMatrixGPUTiles << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multMatrixGPUTiles <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(hipGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(hipMemcpy(gpuRefTiles, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC");
checkResult(hostRef, gpuRefTiles, nxy);
// free device global memory
SAFE_CALL(hipFree(d_MatA), "Error freeing memory");
SAFE_CALL(hipFree(d_MatB), "Error freeing memory");
SAFE_CALL(hipFree(d_MatC), "Error freeing memory");
// free host memory
free(h_A );
free(h_B );
free(hostRef);
free(gpuRef);
free(gpuRefTiles);
// reset device
SAFE_CALL(hipDeviceReset(), "Error resetting");
return (0);
}
| ba07ae6bc536b56d2c0372881d757ad35d8a3702.cu | #include "common.h"
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
using namespace std;
#define MATRIXSIZE 2000
#define TILESIZE 32
// Multiply Matrices in GPU
__global__ void multMatrixGPU(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
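// Each thread computes one element of C: the dot product of row 'iy' of MatA
// with column 'ix' of MatB, read directly from global memory (no tiling).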
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; // col
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; // row
unsigned int idx = iy * nx + ix;
float sum = 0;
if (ix < nx && iy < ny) {
for (int i = 0; i < nx; i++) {
sum += MatA[iy * nx + i] * MatB[i * ny + ix];
}
MatC[idx] = sum;
}
}
// Multiply Matrices in GPU with Tiles
__global__ void multMatrixGPUTiles(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
unsigned int ix = threadIdx.x + TILESIZE * blockIdx.x;
unsigned int iy = threadIdx.y + TILESIZE * blockIdx.y;
float sum = 0.0;
__shared__ float TempA[TILESIZE][TILESIZE];
__shared__ float TempB[TILESIZE][TILESIZE];
TempA[threadIdx.y][threadIdx.x] = 0.0;
TempB[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
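// Each iteration stages one TILESIZE x TILESIZE tile of MatA and MatB into shared
// memory and accumulates the partial dot product. Tiles are visited in descending
// order, so the (possibly partial) edge tile is handled while TempA/TempB still hold
// their zero initialisation and out-of-range entries contribute zero to the sum.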
for (int i = (TILESIZE + nx - 1) / TILESIZE; i >= 0; i--)
{
if ((i * TILESIZE + threadIdx.x < nx) && (iy < ny)) {
TempA[threadIdx.y][threadIdx.x] = MatA[iy * ny + i * TILESIZE + threadIdx.x];
}
if ((i * TILESIZE + threadIdx.y < ny) && (ix < nx)) {
TempB[threadIdx.y][threadIdx.x] = MatB[(i * TILESIZE + threadIdx.y) * ny + ix];
}
__syncthreads();
for (int j = 0; j < TILESIZE; j++) {
sum += TempA[threadIdx.y][j] * TempB[j][threadIdx.x];
}
__syncthreads();
}
if (ix < nx && iy < ny)
{
MatC[iy * ny + ix] = sum;
}
}
// Multiply Matrices in CPU
void multMatrixCPU(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
float sum = 0;
for (int i = 0; i < nx; i++) {
for (int j = 0; j < nx; j++) {
for (int k = 0; k < nx; k++) {
ic[i * nx + j] += ia[i * nx + k] * ib[j + k * nx];
}
}
}
return;
}
// Multiply Matrix in CPU Parallel
void multMatrixCPUParallel(float *A, float *B, float *C, const int nx, const int ny)
{
float *ia = A;
float *ib = B;
float *ic = C;
float sum = 0;
int i, j, k;
int nProcessors = omp_get_max_threads();
std::cout << "CPU processors available: " << nProcessors << std::endl;
omp_set_num_threads(6);
#pragma omp parallel for private(sum,i,j,k) shared(ia, ib, ic)
for (i = 0; i < nx; i++) {
for (j = 0; j < nx; j++) {
sum = 0;
for (k = 0; k < nx; k++) {
sum += ia[i * nx + k] * ib[k * nx + j];
}
ic[i * nx + j] = sum;
}
}
return;
}
void initialData(float * ip, const int size)
{
int i;
for (i = 0; i < size; i++)
{
ip[i] = (float)rand() / (RAND_MAX / 10.0f);
}
return;
}
void checkResult(float *hostRef, float *gpuRef, const int nxy)
{
double epsilon = 0.5;
bool match = 1;
for (int i = 0; i < nxy; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
break;
}
}
if (match)
printf("Arrays match.\n\n");
else
printf("Arrays do not match.\n\n");
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
printf("Using Device %d: %s\n", dev, deviceProp.name);
SAFE_CALL(cudaSetDevice(dev), "Error setting device");
// set up data size of matrix
int nx = MATRIXSIZE;
int ny = MATRIXSIZE;
int nxy = nx * ny;
int nBytes = nxy * sizeof(float);
printf("Matrix size: nx %d ny %d\n", nx, ny);
printf("Tile size: %d\n\n", TILESIZE);
// malloc host memory
float *h_A , *h_B , *hostRef, *gpuRef, *gpuRefTiles;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
gpuRefTiles = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A , nxy);
initialData(h_B , nxy);
memset(hostRef, 0, nBytes);
memset(gpuRefTiles, 0, nBytes);
memset(gpuRef, 0, nBytes);
// ------------------- CPU -------------------
auto start_cpu = chrono::high_resolution_clock::now();
multMatrixCPUParallel(h_A , h_B , hostRef, nx, ny);
auto end_cpu = chrono::high_resolution_clock::now();
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("multMatrixCPUParallel elapsed %f ms\n\n", duration_ms.count());
// ------------------- GPU Setup -------------------
// malloc device global memory
float *d_MatA, *d_MatB, *d_MatC;
SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
// transfer data from host to device
SAFE_CALL(cudaMemcpy(d_MatA, h_A , nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
SAFE_CALL(cudaMemcpy(d_MatB, h_B , nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");
// invoke kernel at host side
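// Each thread block covers a TILESIZE x TILESIZE patch of the output matrix;
// the grid is rounded up so every output element is assigned to a thread.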
dim3 block(TILESIZE, TILESIZE);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
// ------------------- GPU Normal -------------------
start_cpu = chrono::high_resolution_clock::now();
multMatrixGPU << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multMatrixGPU <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
checkResult(hostRef, gpuRef, nxy);
// ------------------- GPU TILES -------------------
start_cpu = chrono::high_resolution_clock::now();
multMatrixGPUTiles << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny);
SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
end_cpu = chrono::high_resolution_clock::now();
duration_ms = end_cpu - start_cpu;
printf("multMatrixGPUTiles <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count());
// SAFE_CALL kernel error
SAFE_CALL(cudaGetLastError(), "Error with last error");
// copy kernel result back to host side
SAFE_CALL(cudaMemcpy(gpuRefTiles, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
checkResult(hostRef, gpuRefTiles, nxy);
// free device global memory
SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");
// free host memory
free(h_A );
free(h_B );
free(hostRef);
free(gpuRef);
free(gpuRefTiles);
// reset device
SAFE_CALL(cudaDeviceReset(), "Error resetting");
return (0);
}
|
5343af790b1b856d371afb2f2a3ded72bebb25fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <ctime>
//const unsigned int W_conv1_size_const = 144;
//__device__ __constant__ float W_conv1_const[W_conv1_size_const];
//__constant__ float W_conv1_const[W_conv1_size_const];
const unsigned int constants_size = (144 + 16 + 6400 + 16);
__constant__ float constants[constants_size];
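// Constant-memory pool for convolution weights and biases (144 + 16 + 6400 + 16
// floats, presumably two weight/bias pairs); the kernels below index into it via
// their weights_offset / biases_offset arguments.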
//__global__ void convolutions_relu_constants_weights(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels,
// float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
// int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// int biases_offset)
//{
//
// const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
// const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
// const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//
//
//
// unsigned int output_1d_index = features_output_step * index_output_channel +
// features_output_size_x * index_output_y + index_output_x;
//
// //unsigned int weights_step_1 = weights_size_y * weights_size_x;
// //unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
//
//
// float output_value = 0.0;
//
// for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
// {
// unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
//
// for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
// for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
// unsigned int index_input_x = index_output_x + weights_index_x;
// unsigned int index_input_y = index_output_y + weights_index_y;
// unsigned int input_1d_index = input_offset + features_input_step * index_input_channel +
// features_input_size_x * index_input_y + index_input_x;
// unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
// output_value += features_input[input_1d_index] * constants[weights_offset + weights_1d_index];
//
// }
// }
//
// }
//
// output_value += constants[biases_offset + index_output_channel];
//
//
// output_value = fmaxf(output_value, 0.0); // relu
//
// features_output[output_1d_index] = output_value;
//
//}
__global__ void convolutions_relu_constants_weights_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
int biases_offset,
int n_threds_in_block
)
{
//extern __shared__ float features_input_cache[];
__shared__ float features_input_cache[2704]; //, 2688 - not ok, 2689 - ok, 13*13*16 = 2704
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
// fill features_input_cache:
//unsigned int features_output_step = features_output_size_y * features_output_size_x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
unsigned int output_1d_index = features_output_step * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
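// Cooperatively copy the whole input feature map into shared memory: depending on
// whether the number of input elements equals, is smaller than, or exceeds the
// number of threads in the block, each thread copies exactly one element, at most
// one element, or a contiguous slice of elements.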
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
// convolutions:
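// Each thread computes one output pixel of one output channel: accumulate the
// weighted sum over all input channels and the filter window, reading inputs from
// shared memory and weights/biases from constant memory, then apply ReLU.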
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * constants[weights_offset + weights_1d_index];
//output_value += features_input[input_offset + input_1d_index] * constants[weights_offset + weights_1d_index];
//output_value += features_input_cache[2703] * constants[weights_offset + weights_1d_index];
}
}
}
output_value += constants[biases_offset + index_output_channel];
output_value = fmaxf(output_value, 0.0); // relu
features_output[output_1d_index] = output_value;
}
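// Same convolution + ReLU as above, but weights and biases are read from global
// memory instead of constant memory; the (small) input feature map is cached in a
// 144-float shared-memory buffer.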
__global__ void convolutions_relu_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
float* biases,
int n_threds_in_block)
{
__shared__ float features_input_cache[144];
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
//unsigned int features_output_size_y_x = features_output_size_y * features_output_size_x;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * weights[weights_1d_index];
}
}
}
output_value += biases[index_output_channel];
output_value = fmaxf(output_value, 0.0); // relu
features_output[output_1d_index] = output_value;
}
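// Same structure as convolutions_relu_shared_memory, but with a 256-float input
// cache and no ReLU activation on the output.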
__global__ void convolutions_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
float* biases,
int n_threds_in_block)
{
__shared__ float features_input_cache[256];
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
//unsigned int features_output_size_y_x = features_output_size_y * features_output_size_x;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * weights[weights_1d_index];
}
}
}
output_value += biases[index_output_channel];
features_output[output_1d_index] = output_value;
}
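// 2x2 max pooling with stride 2: each thread reduces one 2x2 window of its input channel to a single output value.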
__global__ void max_pooling_2x2(float* features_input, int features_input_size_x, int features_input_size_y_x, int features_input_n_channels,
float* features_output, int features_output_size_x, int features_output_size_y_x)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int index_input_x = 2 * index_output_x;
unsigned int index_input_y = 2 * index_output_y;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
unsigned int features_input_step = features_input_size_y_x * index_output_channel;
unsigned int input_1d_index_0_0 = features_input_step +
features_input_size_x * index_input_y + index_input_x;
unsigned int input_1d_index_0_1 = input_1d_index_0_0 + 1;
unsigned int input_1d_index_1_0 = input_1d_index_0_0 + features_input_size_x;
unsigned int input_1d_index_1_1 = input_1d_index_0_0 + 1 + features_input_size_x;
float max_0 = fmaxf(features_input[input_1d_index_0_0], features_input[input_1d_index_0_1]);
float max_1 = fmaxf(features_input[input_1d_index_1_0], features_input[input_1d_index_1_1]);
features_output[output_1d_index] = fmaxf(max_0, max_1);
}
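// 3x3 max pooling with stride 3: each thread reduces one 3x3 window of its input channel to a single output value.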
__global__ void max_pooling_3x3(float* features_input, int features_input_size_x, int features_input_size_y_x, int features_input_n_channels,
float* features_output, int features_output_size_x, int features_output_size_y_x)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int index_input_x = 3 * index_output_x;
unsigned int index_input_y = 3 * index_output_y;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
unsigned int features_input_step = features_input_size_y_x * index_output_channel;
unsigned int input_1d_index_0_0 = features_input_step +
features_input_size_x * index_input_y + index_input_x;
unsigned int input_1d_index_0_1 = input_1d_index_0_0 + 1;
unsigned int input_1d_index_0_2 = input_1d_index_0_0 + 2;
unsigned int input_1d_index_1_0 = input_1d_index_0_0 + features_input_size_x;
unsigned int input_1d_index_1_1 = input_1d_index_1_0 + 1;
unsigned int input_1d_index_1_2 = input_1d_index_1_0 + 2;
unsigned int input_1d_index_2_0 = input_1d_index_1_0 + features_input_size_x;
unsigned int input_1d_index_2_1 = input_1d_index_2_0 + 1;
unsigned int input_1d_index_2_2 = input_1d_index_2_0 + 2;
float max_0 = fmaxf(features_input[input_1d_index_0_0], features_input[input_1d_index_0_1]);
float max_1 = fmaxf(features_input[input_1d_index_0_2], features_input[input_1d_index_1_0]);
float max_2 = fmaxf(features_input[input_1d_index_1_1], features_input[input_1d_index_1_2]);
float max_3 = fmaxf(features_input[input_1d_index_2_0], features_input[input_1d_index_2_1]);
float max_4 = fmaxf(max_0, max_1);
float max_5 = fmaxf(max_2, max_3);
float max_6 = fmaxf(max_4, max_5);
features_output[output_1d_index] = fmaxf(max_6, features_input[input_1d_index_2_2]);
}
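// Concatenates string_1 and string_2 into a freshly malloc'd buffer; the caller must free(*string_result).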
void c_stings_concatinate(char* string_1, char* string_2, char** string_result)
{
*string_result = (char*)malloc(strlen(string_1) + strlen(string_2) + 1);
strcpy(*string_result, string_1);
strcat(*string_result, string_2);
}
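// Reads `size` floats from the binary file dir+file into a temporary host buffer and uploads them
// to a newly allocated device buffer returned through array_gpu (released later with hipFree).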
void load_data_to_array(char* dir, char* file, float** array_gpu, int size)
{
// https://stackoverflow.com/questions/22826380/cuda-allocation-and-return-array-from-gpu-to-cpu
char* path;
c_stings_concatinate(dir, file, &path);
float* array_cpu;
array_cpu = (float*)malloc(sizeof(float) * size);
FILE* file_id;
file_id = fopen(path, "rb");
int n_floats_readed = fread(array_cpu, sizeof(float), size, file_id);
fclose(file_id);
if (n_floats_readed != size)
{
printf("n_floats_readed != size n_floats_readed = %d size = %d\n", n_floats_readed, size);
}
if (hipMalloc((void**)array_gpu, sizeof(float) * size) != hipSuccess)
{
std::cout << "Error allocating GPU\n";
}
hipMemcpy(*array_gpu, array_cpu, sizeof(float) * size, hipMemcpyHostToDevice);
//float* array_cpu_check;
//array_cpu_check = (float*)malloc(sizeof(float) * size);
//hipMemcpy(array_cpu_check, array_gpu, sizeof(float) * size, hipMemcpyDeviceToHost);
//for (int counter = size-1; counter < size; counter++)
//{
// printf("array_cpu_check[counter] = %.6f array_cpu[counter] = %.6f\n", array_cpu_check[counter], array_cpu[counter]);
//}
//free(array_cpu_check);
free(array_cpu);
free(path);
}
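// Appends `size` floats read from dir+file into constants_cpu at *offset and advances *offset,
// so successive calls pack several weight/bias arrays into one flat host buffer.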
void append_data_to_cpu_array(char* dir, char* file, float* constants_cpu, int size, int* offset)
{
char* path;
c_stings_concatinate(dir, file, &path);
float* pointer_shifted;
FILE* file_id;
file_id = fopen(path, "rb");
pointer_shifted = constants_cpu + *offset;
int n_floats_readed = fread(pointer_shifted, sizeof(float), size, file_id);
fclose(file_id);
*offset += size;
free(path);
}
/*
__global__ void tmp_check(float* array_cpu_4, float* accuracy_ptr)
{
accuracy_ptr[0] = 0.0;
for (int weights_1d_index = 0; weights_1d_index < W_conv1_size_const; weights_1d_index++)
{
if (W_conv1_const[weights_1d_index] == array_cpu_4[weights_1d_index])
//if (W_conv1_const[weights_1d_index] == 0.0)
{
accuracy_ptr[0] += 1.0;
}
//accuracy_ptr[0] += W_conv1_const[weights_1d_index];
}
accuracy_ptr[0] /= W_conv1_size_const;
}
*/
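// Debug kernel: counts in n_correct_ptr[0] how many elements of __constant__ memory equal the
// given array. It only accumulates, so the counter is expected to be zeroed before launch.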
__global__ void check_constants(float* array_cpu, int size, float* n_correct_ptr)
{
for (int index = 0; index < size; index++)
{
if (constants[index] == array_cpu[index])
{
n_correct_ptr[0] += 1.0;
}
}
}
int main(void)
{
char* weights_dir = "F:/freelance/cpp_learning/cuda_learning/weigths_1d/";
// c 3 x 3 p 2 x 2 c 5 x 5 p 3 x 3 c 3 x 3 c 1 x 1
// 28 x 28 x 1 -> 26 x 26 x 16 -> 13 x 13 x 16 -> 9 x 9 x 16 -> 3 x 3 x 16 -> 1 x 1 x 256 -> 1 x 1 x 10
// n mult : 97344 518400 36864 2560
// 784 36864 2704 1296 144 256 10
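// Spatial sizes follow from "valid" convolutions and the pooling strides:
// conv 3x3: 28 - 3 + 1 = 26, pool 2x2: 26 / 2 = 13,
// conv 5x5: 13 - 5 + 1 = 9, pool 3x3: 9 / 3 = 3,
// conv 3x3: 3 - 3 + 1 = 1, conv 1x1: 1 -> 1.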
int input_size_x = 28;
int input_size_y = 28;
int input_n_channels = 1;
int n_output = 10;
int input_step = input_size_x * input_size_y;
int input_size = input_size_x * input_size_y * input_n_channels;
int featuremaps_1_size_x = 26;
int featuremaps_1_size_y = 26;
int featuremaps_1_size_y_x = featuremaps_1_size_x * featuremaps_1_size_y;
int featuremaps_1_n_channels = 16;
int featuremaps_1_size = featuremaps_1_size_x * featuremaps_1_size_y * featuremaps_1_n_channels;
int featuremaps_1_thread_size_x = 26;
int featuremaps_1_thread_size_y = 26;
int featuremaps_1_thread_size_z = 1;
int featuremaps_1_thread_size = featuremaps_1_thread_size_x * featuremaps_1_thread_size_y * featuremaps_1_thread_size_z;
int featuremaps_1_greed_size_x = featuremaps_1_size_x / featuremaps_1_thread_size_x;
int featuremaps_1_greed_size_y = featuremaps_1_size_y / featuremaps_1_thread_size_y;
int featuremaps_1_greed_size_z = featuremaps_1_n_channels / featuremaps_1_thread_size_z;
int featuremaps_1_pooling_size_x = 13;
int featuremaps_1_pooling_size_y = 13;
int featuremaps_1_pooling_n_channels = featuremaps_1_n_channels;
int featuremaps_1_pooling_size_y_x = featuremaps_1_pooling_size_x * featuremaps_1_pooling_size_y;
int featuremaps_1_pooling_size = featuremaps_1_pooling_size_x * featuremaps_1_pooling_size_y * featuremaps_1_n_channels;
int featuremaps_1_pooling_thread_size_x = 13;
int featuremaps_1_pooling_thread_size_y = 13;
int featuremaps_1_pooling_thread_size_z = 4;
int featuremaps_1_pooling_greed_size_x = featuremaps_1_pooling_size_x / featuremaps_1_pooling_thread_size_x;
int featuremaps_1_pooling_greed_size_y = featuremaps_1_pooling_size_y / featuremaps_1_pooling_thread_size_y;
int featuremaps_1_pooling_greed_size_z = featuremaps_1_n_channels / featuremaps_1_pooling_thread_size_z;
int featuremaps_2_size_x = 9;
int featuremaps_2_size_y = 9;
int featuremaps_2_size_y_x = featuremaps_2_size_x * featuremaps_2_size_y;
int featuremaps_2_n_channels = 16;
int featuremaps_2_size = featuremaps_2_size_x * featuremaps_2_size_y * featuremaps_2_n_channels;
int featuremaps_2_thread_size_x = 9;
int featuremaps_2_thread_size_y = 9;
int featuremaps_2_thread_size_z = 8;
int featuremaps_2_thread_size = featuremaps_2_thread_size_x * featuremaps_2_thread_size_y * featuremaps_2_thread_size_z;
int featuremaps_2_greed_size_x = featuremaps_2_size_x / featuremaps_2_thread_size_x;
int featuremaps_2_greed_size_y = featuremaps_2_size_y / featuremaps_2_thread_size_y;
int featuremaps_2_greed_size_z = featuremaps_2_n_channels / featuremaps_2_thread_size_z;
int featuremaps_2_pooling_size_x = 3;
int featuremaps_2_pooling_size_y = 3;
int featuremaps_2_pooling_n_channels = featuremaps_2_n_channels;
int featuremaps_2_pooling_size_y_x = featuremaps_2_pooling_size_x * featuremaps_2_pooling_size_y;
int featuremaps_2_pooling_size = featuremaps_2_pooling_size_x * featuremaps_2_pooling_size_y * featuremaps_2_n_channels;
int featuremaps_2_pooling_thread_size_x = 3;
int featuremaps_2_pooling_thread_size_y = 3;
int featuremaps_2_pooling_thread_size_z = 4;
int featuremaps_2_pooling_greed_size_x = featuremaps_2_pooling_size_x / featuremaps_2_pooling_thread_size_x;
int featuremaps_2_pooling_greed_size_y = featuremaps_2_pooling_size_y / featuremaps_2_pooling_thread_size_y;
int featuremaps_2_pooling_greed_size_z = featuremaps_2_n_channels / featuremaps_2_pooling_thread_size_z;
int featuremaps_3_size_x = 1;
int featuremaps_3_size_y = 1;
int featuremaps_3_n_channels = 256;
int featuremaps_3_size = featuremaps_3_size_x * featuremaps_3_size_y * featuremaps_3_n_channels;
int featuremaps_3_size_y_x = featuremaps_3_size_x * featuremaps_3_size_y;
int featuremaps_3_thread_size_x = 1;
int featuremaps_3_thread_size_y = 1;
int featuremaps_3_thread_size_z = 64;
int featuremaps_3_thread_size = featuremaps_3_thread_size_x * featuremaps_3_thread_size_y * featuremaps_3_thread_size_z;
int featuremaps_3_greed_size_x = featuremaps_3_size_x / featuremaps_3_thread_size_x;
int featuremaps_3_greed_size_y = featuremaps_3_size_y / featuremaps_3_thread_size_y;
int featuremaps_3_greed_size_z = featuremaps_3_n_channels / featuremaps_3_thread_size_z;
int featuremaps_4_size_x = 1;
int featuremaps_4_size_y = 1;
int featuremaps_4_n_channels = n_output;
int featuremaps_4_size = featuremaps_4_size_x * featuremaps_4_size_y * featuremaps_4_n_channels;
int featuremaps_4_size_y_x = featuremaps_4_size_x * featuremaps_4_size_y;
int featuremaps_4_thread_size_x = 1;
int featuremaps_4_thread_size_y = 1;
int featuremaps_4_thread_size_z = 10;
int featuremaps_4_thread_size = featuremaps_4_thread_size_x * featuremaps_4_thread_size_y * featuremaps_4_thread_size_z;
int featuremaps_4_greed_size_x = featuremaps_4_size_x / featuremaps_4_thread_size_x;
int featuremaps_4_greed_size_y = featuremaps_4_size_y / featuremaps_4_thread_size_y;
int featuremaps_4_greed_size_z = featuremaps_4_n_channels / featuremaps_4_thread_size_z;
int W_conv1_size_x = 3;
int W_conv1_size_y = 3;
int W_conv1_size = W_conv1_size_x * W_conv1_size_y * input_n_channels * featuremaps_1_n_channels;
int W_conv1_step_1 = W_conv1_size_x * W_conv1_size_y;
int W_conv1_step_2 = W_conv1_size_x * W_conv1_size_y * input_n_channels;
int b_conv1_size = featuremaps_1_n_channels;
int W_conv2_size_x = 5;
int W_conv2_size_y = 5;
int W_conv2_size = W_conv2_size_x * W_conv2_size_y * featuremaps_1_n_channels * featuremaps_2_n_channels;
int W_conv2_step_1 = W_conv2_size_x * W_conv2_size_y;
int W_conv2_step_2 = W_conv2_size_x * W_conv2_size_y * featuremaps_1_n_channels;
int b_conv2_size = featuremaps_2_n_channels;
int W_conv3_size_x = 3;
int W_conv3_size_y = 3;
int W_conv3_size = W_conv3_size_x * W_conv3_size_y * featuremaps_2_n_channels * featuremaps_3_n_channels;
int W_conv3_step_1 = W_conv3_size_x * W_conv3_size_y;
int W_conv3_step_2 = W_conv3_size_x * W_conv3_size_y * featuremaps_2_n_channels;
int b_conv3_size = featuremaps_3_n_channels;
int W_conv4_size_x = 1;
int W_conv4_size_y = 1;
int W_conv4_size = W_conv4_size_x * W_conv4_size_y * featuremaps_3_n_channels * featuremaps_4_n_channels;
int W_conv4_step_1 = W_conv4_size_x * W_conv4_size_y;
int W_conv4_step_2 = W_conv4_size_x * W_conv4_size_y * featuremaps_3_n_channels;
int b_conv4_size = 10;
int x_val_size = 7840000;
int n_samples = 10000;
//constants
float* constants_cpu;
constants_cpu = (float*)malloc(sizeof(float) * constants_size);
int offset = 0;
int offset_W_conv1 = offset;
append_data_to_cpu_array(weights_dir, "W_conv1.bin", constants_cpu, W_conv1_size, &offset);
int offset_b_conv1 = offset;
append_data_to_cpu_array(weights_dir, "b_conv1.bin", constants_cpu, b_conv1_size, &offset);
int offset_W_conv2 = offset;
append_data_to_cpu_array(weights_dir, "W_conv2.bin", constants_cpu, W_conv2_size, &offset);
int offset_b_conv2 = offset;
append_data_to_cpu_array(weights_dir, "b_conv2.bin", constants_cpu, b_conv2_size, &offset);
//for (int index = 0; index < constants_size; index++)
//{
// printf("%.6f\n", constants_cpu[index]);
//
//}
checkCudaErrors(hipMemcpyToSymbol(constants, constants_cpu, sizeof(float)* constants_size));
float* n_correct_ptr;
if (hipMalloc((void**)&n_correct_ptr, sizeof(float) * 1) != hipSuccess)
{
std::cout << "Error allocating GPU n_correct_ptr\n";
}
//check_constants<<<1, 1>>>(constants_cpu, constants_size, n_correct_ptr);
//hipDeviceSynchronize();
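// Note: with the check_constants launch above commented out, nothing ever writes n_correct_ptr
// on the device, so the value printed below is just whatever the fresh allocation contained.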
float* n_correct_ptr_cpu;
n_correct_ptr_cpu = (float*)malloc(sizeof(float) * 1);
hipMemcpy(n_correct_ptr_cpu, n_correct_ptr, sizeof(float) * 1, hipMemcpyDeviceToHost);
printf("\n");
printf("check constants:\n");
printf("n_correct_ptr_cpu[0] = %.6f\n", n_correct_ptr_cpu[0]);
printf("constants_size =%d\n", constants_size);
printf("\n");
float* constants_cpu_2;
constants_cpu_2 = (float*)malloc(sizeof(float) * constants_size);
checkCudaErrors(hipMemcpyFromSymbol(constants_cpu_2, constants, sizeof(float) * constants_size));
int is_equal = 0;
int last_correct_index = 0;
for (int index = 0; index < constants_size; index++)
{
if (constants_cpu_2[index] == constants_cpu[index])
{
is_equal = 1;
last_correct_index = index;
}
else
{
is_equal = 0;
}
printf("%.6f %.6f %d\n", constants_cpu_2[index], constants_cpu[index], is_equal);
}
printf("last_correct_index = %d\n", last_correct_index);
//last_correct_index = 6792
hipFree(n_correct_ptr);
free(n_correct_ptr_cpu);
float* W_conv3;
float* b_conv3;
float* W_conv4;
float* b_conv4;
float* x_val;
//W_conv1_1d.size = 144
//b_conv1_1d.size = 16
//W_conv2_1d.size = 6400
//b_conv2_1d.size = 16
//W_conv3_1d.size = 36864
//b_conv3_1d.size = 256
//W_conv4_1d.size = 2560
//b_conv4_1d.size = 10
//x_val_1d.size = 7840000
//y_val.size = 10000
//(144 + 16 + 6400 + 16 + 256 + 2560 + 10)*4 = 37608
load_data_to_array(weights_dir, "W_conv3.bin", &W_conv3, W_conv3_size);
load_data_to_array(weights_dir, "b_conv3.bin", &b_conv3, b_conv3_size);
load_data_to_array(weights_dir, "W_conv4.bin", &W_conv4, W_conv4_size);
load_data_to_array(weights_dir, "b_conv4.bin", &b_conv4, b_conv4_size);
load_data_to_array(weights_dir, "x_val.bin", &x_val, x_val_size);
char* y_val_cpu;
y_val_cpu = (char*)malloc(sizeof(char) * n_samples);
FILE* file_id;
char* path;
c_stings_concatinate(weights_dir, "y_val.bin", &path);
file_id = fopen(path, "rb");
fread(y_val_cpu, sizeof(char), n_samples, file_id); // element size must match the char buffer; sizeof(float) would request 4x the allocated bytes
fclose(file_id);
free(path);
float* featuremaps_1;
if (hipMalloc((void**)&featuremaps_1, sizeof(float) * featuremaps_1_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_1\n";
}
float* featuremaps_1_pooling;
if (hipMalloc((void**)&featuremaps_1_pooling, sizeof(float) * featuremaps_1_pooling_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_1_pooling\n";
}
float* featuremaps_2;
if (hipMalloc((void**)&featuremaps_2, sizeof(float) * featuremaps_2_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_2\n";
}
float* featuremaps_2_pooling;
if (hipMalloc((void**)&featuremaps_2_pooling, sizeof(float) * featuremaps_2_pooling_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_2_pooling\n";
}
float* featuremaps_3;
if (hipMalloc((void**)&featuremaps_3, sizeof(float) * featuremaps_3_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_3\n";
}
float* featuremaps_4;
if (hipMalloc((void**)&featuremaps_4, sizeof(float) * featuremaps_4_size) != hipSuccess)
{
std::cout << "Error allocating GPU featuremaps_4\n";
}
dim3 grid_featuremaps_1(featuremaps_1_greed_size_x, featuremaps_1_greed_size_y, featuremaps_1_greed_size_z);
dim3 threadBlock_featuremaps_1(featuremaps_1_thread_size_x, featuremaps_1_thread_size_y, featuremaps_1_thread_size_z);
dim3 grid_featuremaps_1_pooling(featuremaps_1_pooling_greed_size_x, featuremaps_1_pooling_greed_size_y, featuremaps_1_pooling_greed_size_z);
dim3 threadBlock_featuremaps_1_pooling(featuremaps_1_pooling_thread_size_x, featuremaps_1_pooling_thread_size_y, featuremaps_1_pooling_thread_size_z);
dim3 grid_featuremaps_2(featuremaps_2_greed_size_x, featuremaps_2_greed_size_y, featuremaps_2_greed_size_z);
dim3 threadBlock_featuremaps_2(featuremaps_2_thread_size_x, featuremaps_2_thread_size_y, featuremaps_2_thread_size_z);
dim3 grid_featuremaps_2_pooling(featuremaps_2_pooling_greed_size_x, featuremaps_2_pooling_greed_size_y, featuremaps_2_pooling_greed_size_z);
dim3 threadBlock_featuremaps_2_pooling(featuremaps_2_pooling_thread_size_x, featuremaps_2_pooling_thread_size_y, featuremaps_2_pooling_thread_size_z);
dim3 grid_featuremaps_3(featuremaps_3_greed_size_x, featuremaps_3_greed_size_y, featuremaps_3_greed_size_z);
dim3 threadBlock_featuremaps_3(featuremaps_3_thread_size_x, featuremaps_3_thread_size_y, featuremaps_3_thread_size_z);
dim3 grid_featuremaps_4(featuremaps_4_greed_size_x, featuremaps_4_greed_size_y, featuremaps_4_greed_size_z);
dim3 threadBlock_featuremaps_4(featuremaps_4_thread_size_x, featuremaps_4_thread_size_y, featuremaps_4_thread_size_z);
//dim3 grid_featuremaps_1(2, 2, 4);
//dim3 threadBlock_featuremaps_1(13, 13, 4);
//printf("featuremaps_1_size = %d\n", featuremaps_1_size);
//printf("sizeof(featuremaps_1) = %d\n", sizeof(featuremaps_1));
//printf("sizeof(W_conv1) = %d\n", sizeof(W_conv1));
//printf("sizeof(b_conv1) = %d\n", sizeof(b_conv1));
//size_t b_conv1_sized = 0;
//hipError_t er1 = hipGetSymbolSize(&b_conv1_sized, b_conv1);
//printf("b_conv1_sized = %d\n", b_conv1_sized);
//size_t featuremaps_1_sized = 0;
//hipError_t er2 = hipGetSymbolSize(&featuremaps_1_sized, featuremaps_1);
//printf("featuremaps_1_sized = %d\n", featuremaps_1_sized);
//dim3 grid_featuremaps_1(1, 1, 1);
//dim3 threadBlock_featuremaps_1(1, 1, 1);
//convolutions_relu<<<grid_featuremaps_1, threadBlock_featuremaps_1>>>(0, x_val, input_size_x, input_size_y, input_n_channels,
// featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y, featuremaps_1_n_channels,
// W_conv1, W_conv1_size_x, W_conv2_size_x,
// b_conv1);
//hipDeviceSynchronize();
float* featuremaps_4_tmp_cpu;
featuremaps_4_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_4_size);
float featuremaps_4_max = 0.0;
int featuremaps_4_max_ind = -1;
int n_correct_answers = 0;
clock_t begin = clock();
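// Per-sample inference pipeline: conv1 + ReLU (constant weights) -> 2x2 max pool -> conv2 + ReLU
// (constant weights) -> 3x3 max pool -> conv3 + ReLU (global weights) -> conv4 logits, then the
// argmax is taken on the host and compared with the label.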
for (int sample_count = 0; sample_count < n_samples; sample_count++)
{
//__global__ void convolutions_relu_constants_weights_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
// float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
// int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// int biases_offset,
// int n_threds_in_block
//)
int input_offset = sample_count * input_step;
hipLaunchKernelGGL(( convolutions_relu_constants_weights_shared_memory), dim3(grid_featuremaps_1), dim3(threadBlock_featuremaps_1), 0, 0, input_offset, x_val, input_size_x, input_step, input_n_channels, input_size,
featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y_x, featuremaps_1_n_channels,
offset_W_conv1, W_conv1_size_x, W_conv1_size_y, W_conv1_step_1, W_conv1_step_2,
offset_b_conv1,
featuremaps_1_thread_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( max_pooling_2x2), dim3(grid_featuremaps_1_pooling), dim3(threadBlock_featuremaps_1_pooling), 0, 0, featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y_x, featuremaps_1_n_channels,
featuremaps_1_pooling, featuremaps_1_pooling_size_x, featuremaps_1_pooling_size_y_x);
hipDeviceSynchronize();
hipLaunchKernelGGL(( convolutions_relu_constants_weights_shared_memory), dim3(grid_featuremaps_2), dim3(threadBlock_featuremaps_2), 0, 0, 0, featuremaps_1_pooling, featuremaps_1_pooling_size_x, featuremaps_1_pooling_size_y_x, featuremaps_1_pooling_n_channels, featuremaps_1_pooling_size,
featuremaps_2, featuremaps_2_size_x, featuremaps_2_size_y_x, featuremaps_2_n_channels,
offset_W_conv2, W_conv2_size_x, W_conv2_size_y, W_conv2_step_1, W_conv2_step_2,
offset_b_conv2,
featuremaps_2_thread_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( max_pooling_3x3), dim3(grid_featuremaps_2_pooling), dim3(threadBlock_featuremaps_2_pooling), 0, 0, featuremaps_2, featuremaps_2_size_x, featuremaps_2_size_y_x, featuremaps_2_n_channels,
featuremaps_2_pooling, featuremaps_2_pooling_size_x, featuremaps_2_pooling_size_y_x);
hipDeviceSynchronize();
//__global__ void convolutions_relu_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
// float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
// float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// float* biases,
// int n_threds_in_block)
hipLaunchKernelGGL(( convolutions_relu_shared_memory), dim3(grid_featuremaps_3), dim3(threadBlock_featuremaps_3), 0, 0, 0, featuremaps_2_pooling, featuremaps_2_pooling_size_x, featuremaps_2_pooling_size_y_x, featuremaps_2_pooling_n_channels, featuremaps_2_pooling_size,
featuremaps_3, featuremaps_3_size_x, featuremaps_3_size_y_x, featuremaps_3_n_channels,
W_conv3, W_conv3_size_x, W_conv3_size_y, W_conv3_step_1, W_conv3_step_2,
b_conv3,
featuremaps_3_thread_size);
hipDeviceSynchronize();
hipLaunchKernelGGL(( convolutions_shared_memory), dim3(grid_featuremaps_4), dim3(threadBlock_featuremaps_4), 0, 0, 0, featuremaps_3, featuremaps_3_size_x, featuremaps_3_size_y_x, featuremaps_3_n_channels, featuremaps_3_size,
featuremaps_4, featuremaps_4_size_x, featuremaps_4_size_y_x, featuremaps_4_n_channels,
W_conv4, W_conv4_size_x, W_conv4_size_y, W_conv4_step_1, W_conv4_step_2,
b_conv4,
featuremaps_4_thread_size);
hipDeviceSynchronize();
hipMemcpy(featuremaps_4_tmp_cpu, featuremaps_4, sizeof(float)* featuremaps_4_size, hipMemcpyDeviceToHost);
featuremaps_4_max = featuremaps_4_tmp_cpu[0];
featuremaps_4_max_ind = 0;
for (int output_index = 1; output_index < n_output; output_index++)
{
//printf("output_index = %d\n", output_index);
if (featuremaps_4_tmp_cpu[output_index] > featuremaps_4_max)
{
featuremaps_4_max = featuremaps_4_tmp_cpu[output_index];
featuremaps_4_max_ind = output_index;
//printf("featuremaps_4_max = %.6fd\n", featuremaps_4_max);
//printf("featuremaps_4_max_ind = %d\n", featuremaps_4_max_ind);
}
}
//printf("featuremaps_4_max_ind =%d\n", featuremaps_4_max_ind);
//printf("y_val_cpu[sample_count] =%d\n", y_val_cpu[sample_count]);
if (featuremaps_4_max_ind == y_val_cpu[sample_count])
{
n_correct_answers++;
}
}
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
double time_mean = elapsed_secs / n_samples;
float accuracy = ((float)n_correct_answers) / n_samples;
printf("accuracy = %.8f\n", accuracy);
printf("elapsed_secs = %.8f\n", elapsed_secs);
printf("time_mean = %.8f\n", time_mean);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "last ERROR after check almost at end: %s\n", hipGetErrorString(error));
}
//float* featuremaps_1_tmp_cpu;
//featuremaps_1_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_1_size);
//hipMemcpy(featuremaps_1_tmp_cpu, featuremaps_1, sizeof(float) * featuremaps_1_size, hipMemcpyDeviceToHost);
//printf("featuremaps_1_tmp_cpu[0] = %.6f\n", featuremaps_1_tmp_cpu[0]);
//printf("featuremaps_1_tmp_cpu[1] = %.6f\n", featuremaps_1_tmp_cpu[1]);
//printf("featuremaps_1_tmp_cpu[2] = %.6f\n", featuremaps_1_tmp_cpu[2]);
//printf("featuremaps_1_tmp_cpu[20] = %.6f\n", featuremaps_1_tmp_cpu[20]);
//printf("featuremaps_1_tmp_cpu[200] = %.6f\n", featuremaps_1_tmp_cpu[200]);
//printf("featuremaps_1_tmp_cpu[1000] = %.6f\n", featuremaps_1_tmp_cpu[1000]);
//printf("featuremaps_1_tmp_cpu[2000] = %.6f\n", featuremaps_1_tmp_cpu[2000]);
//printf("featuremaps_1_tmp_cpu[3000] = %.6f\n", featuremaps_1_tmp_cpu[3000]);
//free(featuremaps_1_tmp_cpu);
//float* featuremaps_1_pooling_tmp_cpu;
//featuremaps_1_pooling_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_1_pooling_size);
//hipMemcpy(featuremaps_1_pooling_tmp_cpu, featuremaps_1_pooling, sizeof(float) * featuremaps_1_pooling_size, hipMemcpyDeviceToHost);
//printf("featuremaps_1_pooling_tmp_cpu[0] = %.6f\n", featuremaps_1_pooling_tmp_cpu[0]);
//printf("featuremaps_1_pooling_tmp_cpu[1] = %.6f\n", featuremaps_1_pooling_tmp_cpu[1]);
//printf("featuremaps_1_pooling_tmp_cpu[2] = %.6f\n", featuremaps_1_pooling_tmp_cpu[2]);
//printf("featuremaps_1_pooling_tmp_cpu[20] = %.6f\n", featuremaps_1_pooling_tmp_cpu[20]);
//printf("featuremaps_1_pooling_tmp_cpu[200] = %.6f\n", featuremaps_1_pooling_tmp_cpu[200]);
//printf("featuremaps_1_pooling_tmp_cpu[1000] = %.6f\n", featuremaps_1_pooling_tmp_cpu[1000]);
//printf("featuremaps_1_pooling_tmp_cpu[2000] = %.6f\n", featuremaps_1_pooling_tmp_cpu[2000]);
//free(featuremaps_1_pooling_tmp_cpu);
//float* featuremaps_2_tmp_cpu;
//featuremaps_2_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_2_size);
//hipMemcpy(featuremaps_2_tmp_cpu, featuremaps_2, sizeof(float) * featuremaps_2_size, hipMemcpyDeviceToHost);
//printf("featuremaps_2_tmp_cpu[0] = %.6f\n", featuremaps_2_tmp_cpu[0]);
//printf("featuremaps_2_tmp_cpu[1] = %.6f\n", featuremaps_2_tmp_cpu[1]);
//printf("featuremaps_2_tmp_cpu[2] = %.6f\n", featuremaps_2_tmp_cpu[2]);
//printf("featuremaps_2_tmp_cpu[20] = %.6f\n", featuremaps_2_tmp_cpu[20]);
//printf("featuremaps_2_tmp_cpu[200] = %.6f\n", featuremaps_2_tmp_cpu[200]);
//printf("featuremaps_2_tmp_cpu[1000] = %.6f\n", featuremaps_2_tmp_cpu[1000]);
//free(featuremaps_2_tmp_cpu);
//printf("featuremaps_4_tmp_cpu[0] = %.6f\n", featuremaps_4_tmp_cpu[0]);
//printf("featuremaps_4_tmp_cpu[1] = %.6f\n", featuremaps_4_tmp_cpu[1]);
//printf("featuremaps_4_tmp_cpu[2] = %.6f\n", featuremaps_4_tmp_cpu[2]);
//printf("featuremaps_4_tmp_cpu[3] = %.6f\n", featuremaps_4_tmp_cpu[3]);
//printf("featuremaps_4_tmp_cpu[4] = %.6f\n", featuremaps_4_tmp_cpu[4]);
//printf("featuremaps_4_tmp_cpu[5] = %.6f\n", featuremaps_4_tmp_cpu[5]);
//printf("featuremaps_4_tmp_cpu[6] = %.6f\n", featuremaps_4_tmp_cpu[6]);
//printf("featuremaps_4_tmp_cpu[7] = %.6f\n", featuremaps_4_tmp_cpu[7]);
//printf("featuremaps_4_tmp_cpu[8] = %.6f\n", featuremaps_4_tmp_cpu[8]);
//printf("featuremaps_4_tmp_cpu[9] = %.6f\n", featuremaps_4_tmp_cpu[9]);
free(featuremaps_4_tmp_cpu);
free(y_val_cpu);
hipFree(x_val);
hipFree(featuremaps_1);
hipFree(featuremaps_1_pooling);
hipFree(featuremaps_2);
hipFree(featuremaps_2_pooling);
hipFree(featuremaps_3);
hipFree(featuremaps_4);
hipFree(W_conv3);
hipFree(b_conv3);
hipFree(W_conv4);
hipFree(b_conv4);
return 0;
} | 5343af790b1b856d371afb2f2a3ded72bebb25fd.cu |
#include <iostream>
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <helper_cuda.h>
#include <stdlib.h>
#include <string.h>
#include <ctime>
//const unsigned int W_conv1_size_const = 144;
//__device__ __constant__ float W_conv1_const[W_conv1_size_const];
//__constant__ float W_conv1_const[W_conv1_size_const];
const unsigned int constants_size = (144 + 16 + 6400 + 16);
__constant__ float constants[constants_size];
//__global__ void convolutions_relu_constants_weights(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels,
// float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
// int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// int biases_offset)
//{
//
// const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
// const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
// const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//
//
//
// unsigned int output_1d_index = features_output_step * index_output_channel +
// features_output_size_x * index_output_y + index_output_x;
//
// //unsigned int weights_step_1 = weights_size_y * weights_size_x;
// //unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
//
//
// float output_value = 0.0;
//
// for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
// {
// unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
//
// for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
// for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
// unsigned int index_input_x = index_output_x + weights_index_x;
// unsigned int index_input_y = index_output_y + weights_index_y;
// unsigned int input_1d_index = input_offset + features_input_step * index_input_channel +
// features_input_size_x * index_input_y + index_input_x;
// unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
// output_value += features_input[input_1d_index] * constants[weights_offset + weights_1d_index];
//
// }
// }
//
// }
//
// output_value += constants[biases_offset + index_output_channel];
//
//
// output_value = fmaxf(output_value, 0.0); // relu
//
// features_output[output_1d_index] = output_value;
//
//}
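// Convolution + ReLU that reads its weights and biases from __constant__ memory (at the given
// offsets) after the thread block has cooperatively staged the input feature map into shared
// memory. The 2704-float cache fits the largest input this kernel sees (13*13*16).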
__global__ void convolutions_relu_constants_weights_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
int biases_offset,
int n_threds_in_block
)
{
//extern __shared__ float features_input_cache[];
__shared__ float features_input_cache[2704]; //, 2688 - not ok, 2689 - ok, 13*13*16 = 2704
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
// fill features_input_cache:
//unsigned int features_output_step = features_output_size_y * features_output_size_x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
unsigned int output_1d_index = features_output_step * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
// convolutions:
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * constants[weights_offset + weights_1d_index];
//output_value += features_input[input_offset + input_1d_index] * constants[weights_offset + weights_1d_index];
//output_value += features_input_cache[2703] * constants[weights_offset + weights_1d_index];
}
}
}
output_value += constants[biases_offset + index_output_channel];
output_value = fmaxf(output_value, 0.0); // relu
features_output[output_1d_index] = output_value;
}
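// Convolution + ReLU with weights and biases in global memory; the input feature map (at most
// 3*3*16 = 144 floats here) is cached in shared memory before the per-thread accumulation.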
__global__ void convolutions_relu_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
float* biases,
int n_threds_in_block)
{
__shared__ float features_input_cache[144];
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
//unsigned int features_output_size_y_x = features_output_size_y * features_output_size_x;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * weights[weights_1d_index];
}
}
}
output_value += biases[index_output_channel];
output_value = fmaxf(output_value, 0.0); // relu
features_output[output_1d_index] = output_value;
}
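// Same pattern as convolutions_relu_shared_memory, but the weights and biases are read from
// global memory and no ReLU is applied: this kernel produces the final 1x1 logits layer.
// The whole input feature map (at most 256 floats here) is staged into shared memory first.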
__global__ void convolutions_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
float* biases,
int n_threds_in_block)
{
__shared__ float features_input_cache[256];
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
//unsigned int n_threds_in_block = blockDim.x * blockDim.y * blockDim.z;
unsigned int threds_1d_index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x;
//unsigned int features_input_step = features_input_size_y * features_input_size_x;
//unsigned int features_input_size = features_input_step * features_input_n_channels;
if (features_input_size == n_threds_in_block)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
else if (features_input_size < n_threds_in_block)
{
if (threds_1d_index < features_input_size)
{
features_input_cache[threds_1d_index] = features_input[input_offset + threds_1d_index];
}
}
else
{
// case for features_input_size > n_threds_in_block
unsigned int index_1 = (threds_1d_index * features_input_size) / n_threds_in_block;
unsigned int index_2 = ((threds_1d_index + 1) * features_input_size) / n_threds_in_block;
for (unsigned int index = index_1; index < index_2; index++)
{
features_input_cache[index] = features_input[input_offset + index];
}
}
__syncthreads();
//unsigned int features_output_size_y_x = features_output_size_y * features_output_size_x;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
//unsigned int weights_step_1 = weights_size_y * weights_size_x;
//unsigned int weights_step_2 = weights_step_1 * features_input_n_channels * index_output_channel;
float output_value = 0.0;
for (int index_input_channel = 0; index_input_channel < features_input_n_channels; index_input_channel++)
{
unsigned int weights_1d_index_offset = weights_step_2 + weights_step_1 * index_input_channel;
for (int weights_index_y = 0; weights_index_y < weights_size_y; weights_index_y++) {
for (int weights_index_x = 0; weights_index_x < weights_size_x; weights_index_x++) {
unsigned int index_input_x = index_output_x + weights_index_x;
unsigned int index_input_y = index_output_y + weights_index_y;
unsigned int input_1d_index = features_input_step * index_input_channel +
features_input_size_x * index_input_y + index_input_x;
unsigned int weights_1d_index = weights_1d_index_offset + weights_size_x * weights_index_y + weights_index_x;
output_value += features_input_cache[input_1d_index] * weights[weights_1d_index];
}
}
}
output_value += biases[index_output_channel];
features_output[output_1d_index] = output_value;
}
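// 2x2 max pooling with stride 2: each thread reduces one 2x2 window of its input channel to a single output value.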
__global__ void max_pooling_2x2(float* features_input, int features_input_size_x, int features_input_size_y_x, int features_input_n_channels,
float* features_output, int features_output_size_x, int features_output_size_y_x)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int index_input_x = 2 * index_output_x;
unsigned int index_input_y = 2 * index_output_y;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
unsigned int features_input_step = features_input_size_y_x * index_output_channel;
unsigned int input_1d_index_0_0 = features_input_step +
features_input_size_x * index_input_y + index_input_x;
unsigned int input_1d_index_0_1 = input_1d_index_0_0 + 1;
unsigned int input_1d_index_1_0 = input_1d_index_0_0 + features_input_size_x;
unsigned int input_1d_index_1_1 = input_1d_index_0_0 + 1 + features_input_size_x;
float max_0 = fmaxf(features_input[input_1d_index_0_0], features_input[input_1d_index_0_1]);
float max_1 = fmaxf(features_input[input_1d_index_1_0], features_input[input_1d_index_1_1]);
features_output[output_1d_index] = fmaxf(max_0, max_1);
}
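// 3x3 max pooling with stride 3: each thread reduces one 3x3 window of its input channel to a single output value.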
__global__ void max_pooling_3x3(float* features_input, int features_input_size_x, int features_input_size_y_x, int features_input_n_channels,
float* features_output, int features_output_size_x, int features_output_size_y_x)
{
const unsigned int index_output_x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int index_output_y = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int index_output_channel = blockIdx.z * blockDim.z + threadIdx.z;
unsigned int index_input_x = 3 * index_output_x;
unsigned int index_input_y = 3 * index_output_y;
unsigned int output_1d_index = features_output_size_y_x * index_output_channel +
features_output_size_x * index_output_y + index_output_x;
unsigned int features_input_step = features_input_size_y_x * index_output_channel;
unsigned int input_1d_index_0_0 = features_input_step +
features_input_size_x * index_input_y + index_input_x;
unsigned int input_1d_index_0_1 = input_1d_index_0_0 + 1;
unsigned int input_1d_index_0_2 = input_1d_index_0_0 + 2;
unsigned int input_1d_index_1_0 = input_1d_index_0_0 + features_input_size_x;
unsigned int input_1d_index_1_1 = input_1d_index_1_0 + 1;
unsigned int input_1d_index_1_2 = input_1d_index_1_0 + 2;
unsigned int input_1d_index_2_0 = input_1d_index_1_0 + features_input_size_x;
unsigned int input_1d_index_2_1 = input_1d_index_2_0 + 1;
unsigned int input_1d_index_2_2 = input_1d_index_2_0 + 2;
float max_0 = fmaxf(features_input[input_1d_index_0_0], features_input[input_1d_index_0_1]);
float max_1 = fmaxf(features_input[input_1d_index_0_2], features_input[input_1d_index_1_0]);
float max_2 = fmaxf(features_input[input_1d_index_1_1], features_input[input_1d_index_1_2]);
float max_3 = fmaxf(features_input[input_1d_index_2_0], features_input[input_1d_index_2_1]);
float max_4 = fmaxf(max_0, max_1);
float max_5 = fmaxf(max_2, max_3);
float max_6 = fmaxf(max_4, max_5);
features_output[output_1d_index] = fmaxf(max_6, features_input[input_1d_index_2_2]);
}
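// Concatenates string_1 and string_2 into a freshly malloc'd buffer; the caller must free(*string_result).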
void c_stings_concatinate(char* string_1, char* string_2, char** string_result)
{
*string_result = (char*)malloc(strlen(string_1) + strlen(string_2) + 1);
strcpy(*string_result, string_1);
strcat(*string_result, string_2);
}
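// Reads `size` floats from the binary file dir+file into a temporary host buffer and uploads them
// to a newly allocated device buffer returned through array_gpu (released later with cudaFree).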
void load_data_to_array(char* dir, char* file, float** array_gpu, int size)
{
// https://stackoverflow.com/questions/22826380/cuda-allocation-and-return-array-from-gpu-to-cpu
char* path;
c_stings_concatinate(dir, file, &path);
float* array_cpu;
array_cpu = (float*)malloc(sizeof(float) * size);
FILE* file_id;
file_id = fopen(path, "rb");
int n_floats_readed = fread(array_cpu, sizeof(float), size, file_id);
fclose(file_id);
if (n_floats_readed != size)
{
printf("n_floats_readed != size n_floats_readed = %d size = %d\n", n_floats_readed, size);
}
if (cudaMalloc((void**)array_gpu, sizeof(float) * size) != cudaSuccess)
{
std::cout << "Error allocating GPU\n";
}
cudaMemcpy(*array_gpu, array_cpu, sizeof(float) * size, cudaMemcpyHostToDevice);
//float* array_cpu_check;
//array_cpu_check = (float*)malloc(sizeof(float) * size);
//cudaMemcpy(array_cpu_check, array_gpu, sizeof(float) * size, cudaMemcpyDeviceToHost);
//for (int counter = size-1; counter < size; counter++)
//{
// printf("array_cpu_check[counter] = %.6f array_cpu[counter] = %.6f\n", array_cpu_check[counter], array_cpu[counter]);
//}
//free(array_cpu_check);
free(array_cpu);
free(path);
}
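// Appends `size` floats read from dir+file into constants_cpu at *offset and advances *offset,
// so successive calls pack several weight/bias arrays into one flat host buffer.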
void append_data_to_cpu_array(char* dir, char* file, float* constants_cpu, int size, int* offset)
{
char* path;
c_stings_concatinate(dir, file, &path);
float* pointer_shifted;
FILE* file_id;
file_id = fopen(path, "rb");
pointer_shifted = constants_cpu + *offset;
int n_floats_readed = fread(pointer_shifted, sizeof(float), size, file_id);
fclose(file_id);
*offset += size;
free(path);
}
/*
__global__ void tmp_check(float* array_cpu_4, float* accuracy_ptr)
{
accuracy_ptr[0] = 0.0;
for (int weights_1d_index = 0; weights_1d_index < W_conv1_size_const; weights_1d_index++)
{
if (W_conv1_const[weights_1d_index] == array_cpu_4[weights_1d_index])
//if (W_conv1_const[weights_1d_index] == 0.0)
{
accuracy_ptr[0] += 1.0;
}
//accuracy_ptr[0] += W_conv1_const[weights_1d_index];
}
accuracy_ptr[0] /= W_conv1_size_const;
}
*/
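// Debug kernel: counts in n_correct_ptr[0] how many elements of __constant__ memory equal the
// given array. It only accumulates, so the counter is expected to be zeroed before launch.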
__global__ void check_constants(float* array_cpu, int size, float* n_correct_ptr)
{
for (int index = 0; index < size; index++)
{
if (constants[index] == array_cpu[index])
{
n_correct_ptr[0] += 1.0;
}
}
}
int main(void)
{
char* weights_dir = "F:/freelance/cpp_learning/cuda_learning/weigths_1d/";
// c 3 x 3 p 2 x 2 c 5 x 5 p 3 x 3 c 3 x 3 c 1 x 1
// 28 x 28 x 1 -> 26 x 26 x 16 -> 13 x 13 x 16 -> 9 x 9 x 16 -> 3 x 3 x 16 -> 1 x 1 x 256 -> 1 x 1 x 10
// n mult : 97344 518400 36864 2560
// 784 36864 2704 1296 144 256 10
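// Spatial sizes follow from "valid" convolutions and the pooling strides:
// conv 3x3: 28 - 3 + 1 = 26, pool 2x2: 26 / 2 = 13,
// conv 5x5: 13 - 5 + 1 = 9, pool 3x3: 9 / 3 = 3,
// conv 3x3: 3 - 3 + 1 = 1, conv 1x1: 1 -> 1.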
int input_size_x = 28;
int input_size_y = 28;
int input_n_channels = 1;
int n_output = 10;
int input_step = input_size_x * input_size_y;
int input_size = input_size_x * input_size_y * input_n_channels;
int featuremaps_1_size_x = 26;
int featuremaps_1_size_y = 26;
int featuremaps_1_size_y_x = featuremaps_1_size_x * featuremaps_1_size_y;
int featuremaps_1_n_channels = 16;
int featuremaps_1_size = featuremaps_1_size_x * featuremaps_1_size_y * featuremaps_1_n_channels;
int featuremaps_1_thread_size_x = 26;
int featuremaps_1_thread_size_y = 26;
int featuremaps_1_thread_size_z = 1;
int featuremaps_1_thread_size = featuremaps_1_thread_size_x * featuremaps_1_thread_size_y * featuremaps_1_thread_size_z;
int featuremaps_1_greed_size_x = featuremaps_1_size_x / featuremaps_1_thread_size_x;
int featuremaps_1_greed_size_y = featuremaps_1_size_y / featuremaps_1_thread_size_y;
int featuremaps_1_greed_size_z = featuremaps_1_n_channels / featuremaps_1_thread_size_z;
int featuremaps_1_pooling_size_x = 13;
int featuremaps_1_pooling_size_y = 13;
int featuremaps_1_pooling_n_channels = featuremaps_1_n_channels;
int featuremaps_1_pooling_size_y_x = featuremaps_1_pooling_size_x * featuremaps_1_pooling_size_y;
int featuremaps_1_pooling_size = featuremaps_1_pooling_size_x * featuremaps_1_pooling_size_y * featuremaps_1_n_channels;
int featuremaps_1_pooling_thread_size_x = 13;
int featuremaps_1_pooling_thread_size_y = 13;
int featuremaps_1_pooling_thread_size_z = 4;
int featuremaps_1_pooling_greed_size_x = featuremaps_1_pooling_size_x / featuremaps_1_pooling_thread_size_x;
int featuremaps_1_pooling_greed_size_y = featuremaps_1_pooling_size_y / featuremaps_1_pooling_thread_size_y;
int featuremaps_1_pooling_greed_size_z = featuremaps_1_n_channels / featuremaps_1_pooling_thread_size_z;
int featuremaps_2_size_x = 9;
int featuremaps_2_size_y = 9;
int featuremaps_2_size_y_x = featuremaps_2_size_x * featuremaps_2_size_y;
int featuremaps_2_n_channels = 16;
int featuremaps_2_size = featuremaps_2_size_x * featuremaps_2_size_y * featuremaps_2_n_channels;
int featuremaps_2_thread_size_x = 9;
int featuremaps_2_thread_size_y = 9;
int featuremaps_2_thread_size_z = 8;
int featuremaps_2_thread_size = featuremaps_2_thread_size_x * featuremaps_2_thread_size_y * featuremaps_2_thread_size_z;
int featuremaps_2_greed_size_x = featuremaps_2_size_x / featuremaps_2_thread_size_x;
int featuremaps_2_greed_size_y = featuremaps_2_size_y / featuremaps_2_thread_size_y;
int featuremaps_2_greed_size_z = featuremaps_2_n_channels / featuremaps_2_thread_size_z;
int featuremaps_2_pooling_size_x = 3;
int featuremaps_2_pooling_size_y = 3;
int featuremaps_2_pooling_n_channels = featuremaps_2_n_channels;
int featuremaps_2_pooling_size_y_x = featuremaps_2_pooling_size_x * featuremaps_2_pooling_size_y;
int featuremaps_2_pooling_size = featuremaps_2_pooling_size_x * featuremaps_2_pooling_size_y * featuremaps_2_n_channels;
int featuremaps_2_pooling_thread_size_x = 3;
int featuremaps_2_pooling_thread_size_y = 3;
int featuremaps_2_pooling_thread_size_z = 4;
int featuremaps_2_pooling_greed_size_x = featuremaps_2_pooling_size_x / featuremaps_2_pooling_thread_size_x;
int featuremaps_2_pooling_greed_size_y = featuremaps_2_pooling_size_y / featuremaps_2_pooling_thread_size_y;
int featuremaps_2_pooling_greed_size_z = featuremaps_2_n_channels / featuremaps_2_pooling_thread_size_z;
int featuremaps_3_size_x = 1;
int featuremaps_3_size_y = 1;
int featuremaps_3_n_channels = 256;
int featuremaps_3_size = featuremaps_3_size_x * featuremaps_3_size_y * featuremaps_3_n_channels;
int featuremaps_3_size_y_x = featuremaps_3_size_x * featuremaps_3_size_y;
int featuremaps_3_thread_size_x = 1;
int featuremaps_3_thread_size_y = 1;
int featuremaps_3_thread_size_z = 64;
int featuremaps_3_thread_size = featuremaps_3_thread_size_x * featuremaps_3_thread_size_y * featuremaps_3_thread_size_z;
int featuremaps_3_greed_size_x = featuremaps_3_size_x / featuremaps_3_thread_size_x;
int featuremaps_3_greed_size_y = featuremaps_3_size_y / featuremaps_3_thread_size_y;
int featuremaps_3_greed_size_z = featuremaps_3_n_channels / featuremaps_3_thread_size_z;
int featuremaps_4_size_x = 1;
int featuremaps_4_size_y = 1;
int featuremaps_4_n_channels = n_output;
int featuremaps_4_size = featuremaps_4_size_x * featuremaps_4_size_y * featuremaps_4_n_channels;
int featuremaps_4_size_y_x = featuremaps_4_size_x * featuremaps_4_size_y;
int featuremaps_4_thread_size_x = 1;
int featuremaps_4_thread_size_y = 1;
int featuremaps_4_thread_size_z = 10;
int featuremaps_4_thread_size = featuremaps_4_thread_size_x * featuremaps_4_thread_size_y * featuremaps_4_thread_size_z;
int featuremaps_4_greed_size_x = featuremaps_4_size_x / featuremaps_4_thread_size_x;
int featuremaps_4_greed_size_y = featuremaps_4_size_y / featuremaps_4_thread_size_y;
int featuremaps_4_greed_size_z = featuremaps_4_n_channels / featuremaps_4_thread_size_z;
int W_conv1_size_x = 3;
int W_conv1_size_y = 3;
int W_conv1_size = W_conv1_size_x * W_conv1_size_y * input_n_channels * featuremaps_1_n_channels;
int W_conv1_step_1 = W_conv1_size_x * W_conv1_size_y;
int W_conv1_step_2 = W_conv1_size_x * W_conv1_size_y * input_n_channels;
int b_conv1_size = featuremaps_1_n_channels;
int W_conv2_size_x = 5;
int W_conv2_size_y = 5;
int W_conv2_size = W_conv2_size_x * W_conv2_size_y * featuremaps_1_n_channels * featuremaps_2_n_channels;
int W_conv2_step_1 = W_conv2_size_x * W_conv2_size_y;
int W_conv2_step_2 = W_conv2_size_x * W_conv2_size_y * featuremaps_1_n_channels;
int b_conv2_size = featuremaps_2_n_channels;
int W_conv3_size_x = 3;
int W_conv3_size_y = 3;
int W_conv3_size = W_conv3_size_x * W_conv3_size_y * featuremaps_2_n_channels * featuremaps_3_n_channels;
int W_conv3_step_1 = W_conv3_size_x * W_conv3_size_y;
int W_conv3_step_2 = W_conv3_size_x * W_conv3_size_y * featuremaps_2_n_channels;
int b_conv3_size = featuremaps_3_n_channels;
int W_conv4_size_x = 1;
int W_conv4_size_y = 1;
int W_conv4_size = W_conv4_size_x * W_conv4_size_y * featuremaps_3_n_channels * featuremaps_4_n_channels;
int W_conv4_step_1 = W_conv4_size_x * W_conv4_size_y;
int W_conv4_step_2 = W_conv4_size_x * W_conv4_size_y * featuremaps_3_n_channels;
int b_conv4_size = 10;
int x_val_size = 7840000;
int n_samples = 10000;
//constants
float* constants_cpu;
constants_cpu = (float*)malloc(sizeof(float) * constants_size);
int offset = 0;
int offset_W_conv1 = offset;
append_data_to_cpu_array(weights_dir, "W_conv1.bin", constants_cpu, W_conv1_size, &offset);
int offset_b_conv1 = offset;
append_data_to_cpu_array(weights_dir, "b_conv1.bin", constants_cpu, b_conv1_size, &offset);
int offset_W_conv2 = offset;
append_data_to_cpu_array(weights_dir, "W_conv2.bin", constants_cpu, W_conv2_size, &offset);
int offset_b_conv2 = offset;
append_data_to_cpu_array(weights_dir, "b_conv2.bin", constants_cpu, b_conv2_size, &offset);
//for (int index = 0; index < constants_size; index++)
//{
// printf("%.6f\n", constants_cpu[index]);
//
//}
checkCudaErrors(cudaMemcpyToSymbol(constants, constants_cpu, sizeof(float)* constants_size));
float* n_correct_ptr;
if (cudaMalloc((void**)&n_correct_ptr, sizeof(float) * 1) != cudaSuccess)
{
std::cout << "Error allocating GPU n_correct_ptr\n";
}
//check_constants<<<1, 1>>>(constants_cpu, constants_size, n_correct_ptr);
//cudaDeviceSynchronize();
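// Note: with the check_constants launch above commented out, nothing ever writes n_correct_ptr
// on the device, so the value printed below is just whatever the fresh allocation contained.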
float* n_correct_ptr_cpu;
n_correct_ptr_cpu = (float*)malloc(sizeof(float) * 1);
cudaMemcpy(n_correct_ptr_cpu, n_correct_ptr, sizeof(float) * 1, cudaMemcpyDeviceToHost);
printf("\n");
printf("check constants:\n");
printf("n_correct_ptr_cpu[0] = %.6f\n", n_correct_ptr_cpu[0]);
printf("constants_size =%d\n", constants_size);
printf("\n");
float* constants_cpu_2;
constants_cpu_2 = (float*)malloc(sizeof(float) * constants_size);
checkCudaErrors(cudaMemcpyFromSymbol(constants_cpu_2, constants, sizeof(float) * constants_size));
int is_equal = 0;
int last_correct_index = 0;
for (int index = 0; index < constants_size; index++)
{
if (constants_cpu_2[index] == constants_cpu[index])
{
is_equal = 1;
last_correct_index = index;
}
else
{
is_equal = 0;
}
printf("%.6f %.6f %d\n", constants_cpu_2[index], constants_cpu[index], is_equal);
}
printf("last_correct_index = %d\n", last_correct_index);
//last_correct_index = 6792
cudaFree(n_correct_ptr);
free(n_correct_ptr_cpu);
float* W_conv3;
float* b_conv3;
float* W_conv4;
float* b_conv4;
float* x_val;
//W_conv1_1d.size = 144
//b_conv1_1d.size = 16
//W_conv2_1d.size = 6400
//b_conv2_1d.size = 16
//W_conv3_1d.size = 36864
//b_conv3_1d.size = 256
//W_conv4_1d.size = 2560
//b_conv4_1d.size = 10
//x_val_1d.size = 7840000
//y_val.size = 10000
//(144 + 16 + 6400 + 16 + 256 + 2560 + 10)*4 = 37608
load_data_to_array(weights_dir, "W_conv3.bin", &W_conv3, W_conv3_size);
load_data_to_array(weights_dir, "b_conv3.bin", &b_conv3, b_conv3_size);
load_data_to_array(weights_dir, "W_conv4.bin", &W_conv4, W_conv4_size);
load_data_to_array(weights_dir, "b_conv4.bin", &b_conv4, b_conv4_size);
load_data_to_array(weights_dir, "x_val.bin", &x_val, x_val_size);
char* y_val_cpu;
y_val_cpu = (char*)malloc(sizeof(char) * n_samples);
FILE* file_id;
char* path;
c_stings_concatinate(weights_dir, "y_val.bin", &path);
file_id = fopen(path, "rb");
fread(y_val_cpu, sizeof(char), n_samples, file_id); // element size must match the char buffer; sizeof(float) would request 4x the allocated bytes
fclose(file_id);
free(path);
float* featuremaps_1;
if (cudaMalloc((void**)&featuremaps_1, sizeof(float) * featuremaps_1_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_1\n";
}
float* featuremaps_1_pooling;
if (cudaMalloc((void**)&featuremaps_1_pooling, sizeof(float) * featuremaps_1_pooling_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_1_pooling\n";
}
float* featuremaps_2;
if (cudaMalloc((void**)&featuremaps_2, sizeof(float) * featuremaps_2_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_2\n";
}
float* featuremaps_2_pooling;
if (cudaMalloc((void**)&featuremaps_2_pooling, sizeof(float) * featuremaps_2_pooling_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_2_pooling\n";
}
float* featuremaps_3;
if (cudaMalloc((void**)&featuremaps_3, sizeof(float) * featuremaps_3_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_3\n";
}
float* featuremaps_4;
if (cudaMalloc((void**)&featuremaps_4, sizeof(float) * featuremaps_4_size) != cudaSuccess)
{
std::cout << "Error allocating GPU featuremaps_4\n";
}
dim3 grid_featuremaps_1(featuremaps_1_greed_size_x, featuremaps_1_greed_size_y, featuremaps_1_greed_size_z);
dim3 threadBlock_featuremaps_1(featuremaps_1_thread_size_x, featuremaps_1_thread_size_y, featuremaps_1_thread_size_z);
dim3 grid_featuremaps_1_pooling(featuremaps_1_pooling_greed_size_x, featuremaps_1_pooling_greed_size_y, featuremaps_1_pooling_greed_size_z);
dim3 threadBlock_featuremaps_1_pooling(featuremaps_1_pooling_thread_size_x, featuremaps_1_pooling_thread_size_y, featuremaps_1_pooling_thread_size_z);
dim3 grid_featuremaps_2(featuremaps_2_greed_size_x, featuremaps_2_greed_size_y, featuremaps_2_greed_size_z);
dim3 threadBlock_featuremaps_2(featuremaps_2_thread_size_x, featuremaps_2_thread_size_y, featuremaps_2_thread_size_z);
dim3 grid_featuremaps_2_pooling(featuremaps_2_pooling_greed_size_x, featuremaps_2_pooling_greed_size_y, featuremaps_2_pooling_greed_size_z);
dim3 threadBlock_featuremaps_2_pooling(featuremaps_2_pooling_thread_size_x, featuremaps_2_pooling_thread_size_y, featuremaps_2_pooling_thread_size_z);
dim3 grid_featuremaps_3(featuremaps_3_greed_size_x, featuremaps_3_greed_size_y, featuremaps_3_greed_size_z);
dim3 threadBlock_featuremaps_3(featuremaps_3_thread_size_x, featuremaps_3_thread_size_y, featuremaps_3_thread_size_z);
dim3 grid_featuremaps_4(featuremaps_4_greed_size_x, featuremaps_4_greed_size_y, featuremaps_4_greed_size_z);
dim3 threadBlock_featuremaps_4(featuremaps_4_thread_size_x, featuremaps_4_thread_size_y, featuremaps_4_thread_size_z);
//dim3 grid_featuremaps_1(2, 2, 4);
//dim3 threadBlock_featuremaps_1(13, 13, 4);
//printf("featuremaps_1_size = %d\n", featuremaps_1_size);
//printf("sizeof(featuremaps_1) = %d\n", sizeof(featuremaps_1));
//printf("sizeof(W_conv1) = %d\n", sizeof(W_conv1));
//printf("sizeof(b_conv1) = %d\n", sizeof(b_conv1));
//size_t b_conv1_sized = 0;
//cudaError_t er1 = cudaGetSymbolSize(&b_conv1_sized, b_conv1);
//printf("b_conv1_sized = %d\n", b_conv1_sized);
//size_t featuremaps_1_sized = 0;
//cudaError_t er2 = cudaGetSymbolSize(&featuremaps_1_sized, featuremaps_1);
//printf("featuremaps_1_sized = %d\n", featuremaps_1_sized);
//dim3 grid_featuremaps_1(1, 1, 1);
//dim3 threadBlock_featuremaps_1(1, 1, 1);
//convolutions_relu<<<grid_featuremaps_1, threadBlock_featuremaps_1>>>(0, x_val, input_size_x, input_size_y, input_n_channels,
// featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y, featuremaps_1_n_channels,
// W_conv1, W_conv1_size_x, W_conv2_size_x,
// b_conv1);
//cudaDeviceSynchronize();
float* featuremaps_4_tmp_cpu;
featuremaps_4_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_4_size);
float featuremaps_4_max = 0.0;
int featuremaps_4_max_ind = -1;
int n_correct_answers = 0;
clock_t begin = clock();
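    // Inference loop: for each validation sample run conv1 -> 2x2 max pool ->
    // conv2 -> 3x3 max pool -> conv3 -> conv4 on the GPU, copy the class scores
    // back to the host, and count top-1 matches against the stored labels.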
for (int sample_count = 0; sample_count < n_samples; sample_count++)
{
//__global__ void convolutions_relu_constants_weights_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
// float* features_output, int features_output_size_x, int features_output_step, int features_output_n_channels,
// int weights_offset, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// int biases_offset,
// int n_threds_in_block
//)
int input_offset = sample_count * input_step;
convolutions_relu_constants_weights_shared_memory<<<grid_featuremaps_1, threadBlock_featuremaps_1>>>(input_offset, x_val, input_size_x, input_step, input_n_channels, input_size,
featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y_x, featuremaps_1_n_channels,
offset_W_conv1, W_conv1_size_x, W_conv1_size_y, W_conv1_step_1, W_conv1_step_2,
offset_b_conv1,
featuremaps_1_thread_size);
cudaDeviceSynchronize();
max_pooling_2x2<<<grid_featuremaps_1_pooling, threadBlock_featuremaps_1_pooling>>> (featuremaps_1, featuremaps_1_size_x, featuremaps_1_size_y_x, featuremaps_1_n_channels,
featuremaps_1_pooling, featuremaps_1_pooling_size_x, featuremaps_1_pooling_size_y_x);
cudaDeviceSynchronize();
convolutions_relu_constants_weights_shared_memory<<<grid_featuremaps_2, threadBlock_featuremaps_2>>>(0, featuremaps_1_pooling, featuremaps_1_pooling_size_x, featuremaps_1_pooling_size_y_x, featuremaps_1_pooling_n_channels, featuremaps_1_pooling_size,
featuremaps_2, featuremaps_2_size_x, featuremaps_2_size_y_x, featuremaps_2_n_channels,
offset_W_conv2, W_conv2_size_x, W_conv2_size_y, W_conv2_step_1, W_conv2_step_2,
offset_b_conv2,
featuremaps_2_thread_size);
cudaDeviceSynchronize();
max_pooling_3x3<<<grid_featuremaps_2_pooling, threadBlock_featuremaps_2_pooling>>>(featuremaps_2, featuremaps_2_size_x, featuremaps_2_size_y_x, featuremaps_2_n_channels,
featuremaps_2_pooling, featuremaps_2_pooling_size_x, featuremaps_2_pooling_size_y_x);
cudaDeviceSynchronize();
//__global__ void convolutions_relu_shared_memory(int input_offset, float* features_input, int features_input_size_x, int features_input_step, int features_input_n_channels, int features_input_size,
// float* features_output, int features_output_size_x, int features_output_size_y_x, int features_output_n_channels,
// float* weights, int weights_size_x, int weights_size_y, int weights_step_1, int weights_step_2,
// float* biases,
// int n_threds_in_block)
convolutions_relu_shared_memory<<<grid_featuremaps_3, threadBlock_featuremaps_3>>> (0, featuremaps_2_pooling, featuremaps_2_pooling_size_x, featuremaps_2_pooling_size_y_x, featuremaps_2_pooling_n_channels, featuremaps_2_pooling_size,
featuremaps_3, featuremaps_3_size_x, featuremaps_3_size_y_x, featuremaps_3_n_channels,
W_conv3, W_conv3_size_x, W_conv3_size_y, W_conv3_step_1, W_conv3_step_2,
b_conv3,
featuremaps_3_thread_size);
cudaDeviceSynchronize();
convolutions_shared_memory<<<grid_featuremaps_4, threadBlock_featuremaps_4>>>(0, featuremaps_3, featuremaps_3_size_x, featuremaps_3_size_y_x, featuremaps_3_n_channels, featuremaps_3_size,
featuremaps_4, featuremaps_4_size_x, featuremaps_4_size_y_x, featuremaps_4_n_channels,
W_conv4, W_conv4_size_x, W_conv4_size_y, W_conv4_step_1, W_conv4_step_2,
b_conv4,
featuremaps_4_thread_size);
cudaDeviceSynchronize();
cudaMemcpy(featuremaps_4_tmp_cpu, featuremaps_4, sizeof(float)* featuremaps_4_size, cudaMemcpyDeviceToHost);
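        // Argmax over the n_output class scores gives the predicted label.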
featuremaps_4_max = featuremaps_4_tmp_cpu[0];
featuremaps_4_max_ind = 0;
for (int output_index = 1; output_index < n_output; output_index++)
{
//printf("output_index = %d\n", output_index);
if (featuremaps_4_tmp_cpu[output_index] > featuremaps_4_max)
{
featuremaps_4_max = featuremaps_4_tmp_cpu[output_index];
featuremaps_4_max_ind = output_index;
//printf("featuremaps_4_max = %.6fd\n", featuremaps_4_max);
//printf("featuremaps_4_max_ind = %d\n", featuremaps_4_max_ind);
}
}
//printf("featuremaps_4_max_ind =%d\n", featuremaps_4_max_ind);
//printf("y_val_cpu[sample_count] =%d\n", y_val_cpu[sample_count]);
if (featuremaps_4_max_ind == y_val_cpu[sample_count])
{
n_correct_answers++;
}
}
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
double time_mean = elapsed_secs / n_samples;
float accuracy = ((float)n_correct_answers) / n_samples;
printf("accuracy = %.8f\n", accuracy);
printf("elapsed_secs = %.8f\n", elapsed_secs);
printf("time_mean = %.8f\n", time_mean);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "last ERROR after check almost at end: %s\n", cudaGetErrorString(error));
}
//float* featuremaps_1_tmp_cpu;
//featuremaps_1_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_1_size);
//cudaMemcpy(featuremaps_1_tmp_cpu, featuremaps_1, sizeof(float) * featuremaps_1_size, cudaMemcpyDeviceToHost);
//printf("featuremaps_1_tmp_cpu[0] = %.6f\n", featuremaps_1_tmp_cpu[0]);
//printf("featuremaps_1_tmp_cpu[1] = %.6f\n", featuremaps_1_tmp_cpu[1]);
//printf("featuremaps_1_tmp_cpu[2] = %.6f\n", featuremaps_1_tmp_cpu[2]);
//printf("featuremaps_1_tmp_cpu[20] = %.6f\n", featuremaps_1_tmp_cpu[20]);
//printf("featuremaps_1_tmp_cpu[200] = %.6f\n", featuremaps_1_tmp_cpu[200]);
//printf("featuremaps_1_tmp_cpu[1000] = %.6f\n", featuremaps_1_tmp_cpu[1000]);
//printf("featuremaps_1_tmp_cpu[2000] = %.6f\n", featuremaps_1_tmp_cpu[2000]);
//printf("featuremaps_1_tmp_cpu[3000] = %.6f\n", featuremaps_1_tmp_cpu[3000]);
//free(featuremaps_1_tmp_cpu);
//float* featuremaps_1_pooling_tmp_cpu;
//featuremaps_1_pooling_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_1_pooling_size);
//cudaMemcpy(featuremaps_1_pooling_tmp_cpu, featuremaps_1_pooling, sizeof(float) * featuremaps_1_pooling_size, cudaMemcpyDeviceToHost);
//printf("featuremaps_1_pooling_tmp_cpu[0] = %.6f\n", featuremaps_1_pooling_tmp_cpu[0]);
//printf("featuremaps_1_pooling_tmp_cpu[1] = %.6f\n", featuremaps_1_pooling_tmp_cpu[1]);
//printf("featuremaps_1_pooling_tmp_cpu[2] = %.6f\n", featuremaps_1_pooling_tmp_cpu[2]);
//printf("featuremaps_1_pooling_tmp_cpu[20] = %.6f\n", featuremaps_1_pooling_tmp_cpu[20]);
//printf("featuremaps_1_pooling_tmp_cpu[200] = %.6f\n", featuremaps_1_pooling_tmp_cpu[200]);
//printf("featuremaps_1_pooling_tmp_cpu[1000] = %.6f\n", featuremaps_1_pooling_tmp_cpu[1000]);
//printf("featuremaps_1_pooling_tmp_cpu[2000] = %.6f\n", featuremaps_1_pooling_tmp_cpu[2000]);
//free(featuremaps_1_pooling_tmp_cpu);
//float* featuremaps_2_tmp_cpu;
//featuremaps_2_tmp_cpu = (float*)malloc(sizeof(float) * featuremaps_2_size);
//cudaMemcpy(featuremaps_2_tmp_cpu, featuremaps_2, sizeof(float) * featuremaps_2_size, cudaMemcpyDeviceToHost);
//printf("featuremaps_2_tmp_cpu[0] = %.6f\n", featuremaps_2_tmp_cpu[0]);
//printf("featuremaps_2_tmp_cpu[1] = %.6f\n", featuremaps_2_tmp_cpu[1]);
//printf("featuremaps_2_tmp_cpu[2] = %.6f\n", featuremaps_2_tmp_cpu[2]);
//printf("featuremaps_2_tmp_cpu[20] = %.6f\n", featuremaps_2_tmp_cpu[20]);
//printf("featuremaps_2_tmp_cpu[200] = %.6f\n", featuremaps_2_tmp_cpu[200]);
//printf("featuremaps_2_tmp_cpu[1000] = %.6f\n", featuremaps_2_tmp_cpu[1000]);
//free(featuremaps_2_tmp_cpu);
//printf("featuremaps_4_tmp_cpu[0] = %.6f\n", featuremaps_4_tmp_cpu[0]);
//printf("featuremaps_4_tmp_cpu[1] = %.6f\n", featuremaps_4_tmp_cpu[1]);
//printf("featuremaps_4_tmp_cpu[2] = %.6f\n", featuremaps_4_tmp_cpu[2]);
//printf("featuremaps_4_tmp_cpu[3] = %.6f\n", featuremaps_4_tmp_cpu[3]);
//printf("featuremaps_4_tmp_cpu[4] = %.6f\n", featuremaps_4_tmp_cpu[4]);
//printf("featuremaps_4_tmp_cpu[5] = %.6f\n", featuremaps_4_tmp_cpu[5]);
//printf("featuremaps_4_tmp_cpu[6] = %.6f\n", featuremaps_4_tmp_cpu[6]);
//printf("featuremaps_4_tmp_cpu[7] = %.6f\n", featuremaps_4_tmp_cpu[7]);
//printf("featuremaps_4_tmp_cpu[8] = %.6f\n", featuremaps_4_tmp_cpu[8]);
//printf("featuremaps_4_tmp_cpu[9] = %.6f\n", featuremaps_4_tmp_cpu[9]);
free(featuremaps_4_tmp_cpu);
free(y_val_cpu);
cudaFree(x_val);
cudaFree(featuremaps_1);
cudaFree(featuremaps_1_pooling);
cudaFree(featuremaps_2);
cudaFree(featuremaps_2_pooling);
cudaFree(featuremaps_3);
cudaFree(featuremaps_4);
cudaFree(W_conv3);
cudaFree(b_conv3);
cudaFree(W_conv4);
cudaFree(b_conv4);
return 0;
} |
1a8eefcf9e6cb55d74359c0a2d1b7940436c0b56.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define TPB 64
__global__ void D3Q19_RegBC_LBGK_ts(const double * fIn, double * fOut,
const int * SNL,
const int * VW_nl, const double VW_uz,
const int * PE_nl, const double rho_out,
const double omega,
const int Nx, const int Ny, const int Nz)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int nnodes=Nx*Ny*Nz;
if(tid<nnodes){
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double cu;
double w;
//load the data into the registers
f0=fIn[tid]; f1=fIn[nnodes+tid];
f2=fIn[2*nnodes+tid]; f3=fIn[3*nnodes+tid];
f4=fIn[4*nnodes+tid]; f5=fIn[5*nnodes+tid];
f6=fIn[6*nnodes+tid]; f7=fIn[7*nnodes+tid];
f8=fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12=fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid]; f15=fIn[15*nnodes+tid];
f16=fIn[16*nnodes+tid]; f17=fIn[17*nnodes+tid];
f18=fIn[18*nnodes+tid];
//compute density and velocity
double rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
double ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho;
double uy=f3-f4+f7+f8-f9-f10+f15-f16+f17-f18; uy/=rho;
double uz=f5-f6+f11+f12-f13-f14+f15+f16-f17-f18; uz/=rho;
//take appropriate action if on PE_nl or VW_nl
if(VW_nl[tid]==1){
ux=0.;uy=0.; uz=VW_uz;
//set rho based on uz
rho = (1./(1.-uz))*(2.0*(f6+f13+f14+f17+f18)+(f0+f1+f2+f3+f4+f7+f8+f9+f10));
}
if(PE_nl[tid]==1){
ux=0.; uy=0.; rho=rho_out;
uz = -1.+((2.*(f5+f11+f12+f15+f16)+(f0+f1+f2+f3+f4+f7+f8+f9+f10)))/rho;
}
if(SNL[tid]==1){
ux=0.; uy=0.; uz=0.;
}
//everyone compute equilibrium
double fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14,fe15,fe16,fe17,fe18;
//speed 0, ex=ey=ez=0, w=1/3
fe0=rho*(1./3.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1, ex=1, ey=ez=0, w=1/18
cu = 3.*(1.*ux);
fe1=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 2, ex=-1, ey=ez=0
cu=3.*(-1.*ux);
fe2=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 (0,1,0)
cu=3.*(uy);
fe3=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 (0,-1,0)
cu = 3.*(-uy);
fe4=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 (0,0,1)
cu = 3.*(uz);
fe5=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 (0,0,-1)
cu = 3.*(-uz);
fe6=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 (1,1,0) w= 1/36
cu = 3.*(ux+uy);
fe7=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 (-1,1,0)
cu = 3.*(-ux+uy);
fe8=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 (1,-1,0)
cu=3.*(ux-uy);
fe9=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 (-1,-1,0)
cu = 3.*(-ux-uy);
fe10=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 (1,0,1)
cu = 3.*(ux+uz);
fe11=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 (-1,0,1)
cu = 3.*(-ux+uz);
fe12=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 (1,0,-1)
cu = 3.*(ux-uz);
fe13=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 (-1,0,-1)
cu=3.*(-ux-uz);
fe14=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 15 (0,1,1)
cu=3.*(uy+uz);
fe15=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 16 (0,-1,1)
cu=3.*(-uy+uz);
fe16=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 17 (0,1,-1)
cu=3.*(uy-uz);
fe17=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 18 (0,-1,-1)
cu=3.*(-uy-uz);
fe18=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
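      // Regularized boundary treatment: bounce back the non-equilibrium part of
      // the unknown populations, then rebuild all populations from the resulting
      // non-equilibrium moments before relaxation.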
if((VW_nl[tid]==1) || (PE_nl[tid]==1)){
//float ft0;
double ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14,ft15,ft16,ft17,ft18;
if(VW_nl[tid]==1){
//bounce-back of non-equilibrium for unknown velocities on west boundary
f5=fe5+(f6-fe6);
f11=fe11+(f14-fe14);
f12=fe12+(f13-fe13);
f15=fe15+(f18-fe18);
f16=fe16+(f17-fe17);
}else{
//bounce-back of non-equilibrium on east boundary
f6=fe6+(f5-fe5);
f13=fe13+(f12-fe12);
f14=fe14+(f11-fe11);
f17=fe17+(f16-fe16);
f18=fe18+(f15-fe15);
}
//ft0=f0-fe0;
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
ft15=f15-fe15;
ft16=f16-fe16;
ft17=f17-fe17;
ft18=f18-fe18;
// //apply the tensors...
f0= - ft1/3. - ft2/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f1=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f2=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f3=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f4=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f5=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f6=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f7=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f8=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f9=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f10=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f11=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f12=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f13=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f14=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f15=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
f16=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f17=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f18=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
//update fIn for all velocities based on this result.
cu= 9./2.; w = 1./3.;
f0=fe0+f0*cu*w;
w=1./18.;
f1=fe1+f1*cu*w;
f2=fe2+f2*cu*w;
f3=fe3+f3*cu*w;
f4=fe4+f4*cu*w;
f5=fe5+f5*cu*w;
f6=fe6+f6*cu*w;
w=1./36.;
f7=fe7+f7*cu*w;
f8=fe8+f8*cu*w;
f9=fe9+f9*cu*w;
f10=fe10+f10*cu*w;
f11=fe11+f11*cu*w;
f12=fe12+f12*cu*w;
f13=fe13+f13*cu*w;
f14=fe14+f14*cu*w;
f15=fe15+f15*cu*w;
f16=fe16+f16*cu*w;
f17=fe17+f17*cu*w;
f18=fe18+f18*cu*w;
}
if(SNL[tid]==0){
//everyone relaxes towards equilibrium
f0=f0-omega*(f0-fe0);
f1=f1-omega*(f1-fe1);
f2=f2-omega*(f2-fe2);
f3=f3-omega*(f3-fe3);
f4=f4-omega*(f4-fe4);
f5=f5-omega*(f5-fe5);
f6=f6-omega*(f6-fe6);
f7=f7-omega*(f7-fe7);
f8=f8-omega*(f8-fe8);
f9=f9-omega*(f9-fe9);
f10=f10-omega*(f10-fe10);
f11=f11-omega*(f11-fe11);
f12=f12-omega*(f12-fe12);
f13=f13-omega*(f13-fe13);
f14=f14-omega*(f14-fe14);
f15=f15-omega*(f15-fe15);
f16=f16-omega*(f16-fe16);
f17=f17-omega*(f17-fe17);
f18=f18-omega*(f18-fe18);
}else{
//bounce back
f0=f0-omega*(f0-fe0);
//1 -- 2
cu=f1;f1=f2;f2=cu;
// 3 -- 4
cu=f3;f3=f4;f4=cu;
//5--6
cu=f5;f5=f6;f6=cu;
//7--10
cu=f7;f7=f10;f10=cu;
//8--9
cu=f8;f8=f9;f9=cu;
//11-14
cu=f11;f11=f14;f14=cu;
//12-13
cu=f12;f12=f13;f13=cu;
//15-18
cu=f15;f15=f18;f18=cu;
//16-17
cu=f16;f16=f17;f17=cu;
}
//now, everybody streams...
int Z = tid/(Nx*Ny);
int Y = (tid - Z*Nx*Ny)/Nx;
int X = tid - Z*Nx*Ny - Y*Nx;
int X_t,Y_t,Z_t,tid_t;
//speed 0 (0,0,0)
fOut[tid]=f0;
//stream(fOut,f0,0,X,Y,Z,0,0,0,Nx,Ny,Nz);
//speed 1 (1,0,0)
X_t=X+1;Y_t=Y; Z_t=Z;
if(X_t==Nx) X_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[nnodes+tid_t]=f1;
//speed 2 (-1,0,0)
X_t=X-1; Y_t=Y; Z_t=Z;
if(X_t<0)X_t=Nx-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[2*nnodes+tid_t]=f2;
//speed 3 (0,1,0)
X_t=X; Y_t=Y+1; Z_t=Z;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
//tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[3*nnodes+tid_t]=f3;
//speed 4 ( 0,-1,0)
X_t=X; Y_t=Y-1; Z_t=Z;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[4*nnodes+tid_t]=f4;
//speed 5 ( 0,0,1)
X_t=X;Y_t=Y;Z_t=Z+1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[5*nnodes+tid_t]=f5;
//speed 6 (0,0,-1)
X_t=X; Y_t=Y;Z_t=Z-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[6*nnodes+tid_t]=f6;
//speed 7 (1,1,0)
X_t=X+1;Y_t=Y+1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[7*nnodes+tid_t]=f7;
//speed 8 (-1,1,0)
X_t=X-1;Y_t=Y+1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[8*nnodes+tid_t]=f8;
//speed 9 (1,-1,0)
X_t=X+1;Y_t=Y-1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[9*nnodes+tid_t]=f9;
//speed 10 (-1,-1,0)
X_t=X-1;Y_t=Y-1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[10*nnodes+tid_t]=f10;
//speed 11 (1,0,1)
X_t=X+1;Y_t=Y;Z_t=Z+1;
if(X_t==Nx)X_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[11*nnodes+tid_t]=f11;
//speed 12 (-1,0,1)
X_t=X-1;Y_t=Y;Z_t=Z+1;
if(X_t<0)X_t=Nx-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[12*nnodes+tid_t]=f12;
//speed 13 (1,0,-1)
X_t=X+1;Y_t=Y;Z_t=Z-1;
if(X_t==Nx)X_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[13*nnodes+tid_t]=f13;
//speed 14 (-1,0,-1)
X_t=X-1;Y_t=Y;Z_t=Z-1;
if(X_t<0)X_t=Nx-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[14*nnodes+tid_t]=f14;
//speed 15 (0,1,1)
X_t=X;Y_t=Y+1;Z_t=Z+1;
if(Y_t==Ny)Y_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[15*nnodes+tid_t]=f15;
//speed 16 (0,-1,1)
X_t=X;Y_t=Y-1;Z_t=Z+1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[16*nnodes+tid_t]=f16;
//speed 17 (0,1,-1)
X_t=X;Y_t=Y+1;Z_t=Z-1;
if(Y_t==Ny)Y_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[17*nnodes+tid_t]=f17;
//speed 18 ( 0,-1,-1)
X_t=X;Y_t=Y-1;Z_t=Z-1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[18*nnodes+tid_t]=f18;
}
}
void D3Q19_RegBC_LBGK(const double * fIn, double * fOut, const int * SNL, const int * VW_nl,
const double VW_uz, const int * PE_nl, const double rho_out, const double omega,
const int Nx, const int Ny, const int Nz)
{
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((Nx*Ny*Nz+TPB-1)/TPB,1,1);
hipLaunchKernelGGL(( D3Q19_RegBC_LBGK_ts), dim3(GRIDS),dim3(BLOCKS), 0, 0, fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,
omega,Nx,Ny,Nz);
}
| 1a8eefcf9e6cb55d74359c0a2d1b7940436c0b56.cu | #include <cuda.h>
#include <cuda_runtime.h>
#define TPB 64
__global__ void D3Q19_RegBC_LBGK_ts(const double * fIn, double * fOut,
const int * SNL,
const int * VW_nl, const double VW_uz,
const int * PE_nl, const double rho_out,
const double omega,
const int Nx, const int Ny, const int Nz)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int nnodes=Nx*Ny*Nz;
if(tid<nnodes){
double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
double cu;
double w;
//load the data into the registers
f0=fIn[tid]; f1=fIn[nnodes+tid];
f2=fIn[2*nnodes+tid]; f3=fIn[3*nnodes+tid];
f4=fIn[4*nnodes+tid]; f5=fIn[5*nnodes+tid];
f6=fIn[6*nnodes+tid]; f7=fIn[7*nnodes+tid];
f8=fIn[8*nnodes+tid]; f9=fIn[9*nnodes+tid];
f10=fIn[10*nnodes+tid]; f11=fIn[11*nnodes+tid];
f12=fIn[12*nnodes+tid]; f13=fIn[13*nnodes+tid];
f14=fIn[14*nnodes+tid]; f15=fIn[15*nnodes+tid];
f16=fIn[16*nnodes+tid]; f17=fIn[17*nnodes+tid];
f18=fIn[18*nnodes+tid];
//compute density and velocity
double rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
double ux=f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; ux/=rho;
double uy=f3-f4+f7+f8-f9-f10+f15-f16+f17-f18; uy/=rho;
double uz=f5-f6+f11+f12-f13-f14+f15+f16-f17-f18; uz/=rho;
//take appropriate action if on PE_nl or VW_nl
if(VW_nl[tid]==1){
ux=0.;uy=0.; uz=VW_uz;
//set rho based on uz
rho = (1./(1.-uz))*(2.0*(f6+f13+f14+f17+f18)+(f0+f1+f2+f3+f4+f7+f8+f9+f10));
}
if(PE_nl[tid]==1){
ux=0.; uy=0.; rho=rho_out;
uz = -1.+((2.*(f5+f11+f12+f15+f16)+(f0+f1+f2+f3+f4+f7+f8+f9+f10)))/rho;
}
if(SNL[tid]==1){
ux=0.; uy=0.; uz=0.;
}
//everyone compute equilibrium
double fe0,fe1,fe2,fe3,fe4,fe5,fe6,fe7,fe8,fe9,fe10,fe11,fe12,fe13,fe14,fe15,fe16,fe17,fe18;
//speed 0, ex=ey=ez=0, w=1/3
fe0=rho*(1./3.)*(1.-1.5*(ux*ux+uy*uy+uz*uz));
//speed 1, ex=1, ey=ez=0, w=1/18
cu = 3.*(1.*ux);
fe1=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 2, ex=-1, ey=ez=0
cu=3.*(-1.*ux);
fe2=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 3 (0,1,0)
cu=3.*(uy);
fe3=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 4 (0,-1,0)
cu = 3.*(-uy);
fe4=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 5 (0,0,1)
cu = 3.*(uz);
fe5=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 6 (0,0,-1)
cu = 3.*(-uz);
fe6=rho*(1./18.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 7 (1,1,0) w= 1/36
cu = 3.*(ux+uy);
fe7=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 8 (-1,1,0)
cu = 3.*(-ux+uy);
fe8=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 9 (1,-1,0)
cu=3.*(ux-uy);
fe9=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 10 (-1,-1,0)
cu = 3.*(-ux-uy);
fe10=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 11 (1,0,1)
cu = 3.*(ux+uz);
fe11=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 12 (-1,0,1)
cu = 3.*(-ux+uz);
fe12=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 13 (1,0,-1)
cu = 3.*(ux-uz);
fe13=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 14 (-1,0,-1)
cu=3.*(-ux-uz);
fe14=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 15 (0,1,1)
cu=3.*(uy+uz);
fe15=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 16 (0,-1,1)
cu=3.*(-uy+uz);
fe16=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 17 (0,1,-1)
cu=3.*(uy-uz);
fe17=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
//speed 18 (0,-1,-1)
cu=3.*(-uy-uz);
fe18=rho*(1./36.)*(1.+cu+0.5*(cu*cu)-1.5*(ux*ux+uy*uy+uz*uz));
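      // Regularized boundary treatment: bounce back the non-equilibrium part of
      // the unknown populations, then rebuild all populations from the resulting
      // non-equilibrium moments before relaxation.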
if((VW_nl[tid]==1) || (PE_nl[tid]==1)){
//float ft0;
double ft1,ft2,ft3,ft4,ft5,ft6,ft7,ft8,ft9,ft10,ft11,ft12,ft13,ft14,ft15,ft16,ft17,ft18;
if(VW_nl[tid]==1){
//bounce-back of non-equilibrium for unknown velocities on west boundary
f5=fe5+(f6-fe6);
f11=fe11+(f14-fe14);
f12=fe12+(f13-fe13);
f15=fe15+(f18-fe18);
f16=fe16+(f17-fe17);
}else{
//bounce-back of non-equilibrium on east boundary
f6=fe6+(f5-fe5);
f13=fe13+(f12-fe12);
f14=fe14+(f11-fe11);
f17=fe17+(f16-fe16);
f18=fe18+(f15-fe15);
}
//ft0=f0-fe0;
ft1=f1-fe1;
ft2=f2-fe2;
ft3=f3-fe3;
ft4=f4-fe4;
ft5=f5-fe5;
ft6=f6-fe6;
ft7=f7-fe7;
ft8=f8-fe8;
ft9=f9-fe9;
ft10=f10-fe10;
ft11=f11-fe11;
ft12=f12-fe12;
ft13=f13-fe13;
ft14=f14-fe14;
ft15=f15-fe15;
ft16=f16-fe16;
ft17=f17-fe17;
ft18=f18-fe18;
// //apply the tensors...
f0= - ft1/3. - ft2/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f1=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f2=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. - (2.*ft18)/3.;
f3=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f4=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f5=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f6=(2.*ft5)/3. - ft2/3. - ft3/3. - ft4/3. - ft1/3. + (2.*ft6)/3. - (2.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f7=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f8=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f9=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. - (2.*ft7)/3. + (10.*ft8)/3. + (10.*ft9)/3. - (2.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f10=(2.*ft1)/3. + (2.*ft2)/3. + (2.*ft3)/3. + (2.*ft4)/3. - ft5/3. - ft6/3. + (10.*ft7)/3. - (2.*ft8)/3. - (2.*ft9)/3. + (10.*ft10)/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f11=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f12=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f13=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. - (2.*ft11)/3. + (10.*ft12)/3. + (10.*ft13)/3. - (2.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f14=(2.*ft1)/3. + (2.*ft2)/3. - ft3/3. - ft4/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + (10.*ft11)/3. - (2.*ft12)/3. - (2.*ft13)/3. + (10.*ft14)/3. + ft15/3. + ft16/3. + ft17/3. + ft18/3.;
f15=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
f16=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f17=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. - (2.*ft15)/3. + (10.*ft16)/3. + (10.*ft17)/3. - (2.*ft18)/3.;
f18=(2.*ft3)/3. - ft2/3. - ft1/3. + (2.*ft4)/3. + (2.*ft5)/3. + (2.*ft6)/3. + ft7/3. + ft8/3. + ft9/3. + ft10/3. + ft11/3. + ft12/3. + ft13/3. + ft14/3. + (10.*ft15)/3. - (2.*ft16)/3. - (2.*ft17)/3. + (10.*ft18)/3.;
//update fIn for all velocities based on this result.
cu= 9./2.; w = 1./3.;
f0=fe0+f0*cu*w;
w=1./18.;
f1=fe1+f1*cu*w;
f2=fe2+f2*cu*w;
f3=fe3+f3*cu*w;
f4=fe4+f4*cu*w;
f5=fe5+f5*cu*w;
f6=fe6+f6*cu*w;
w=1./36.;
f7=fe7+f7*cu*w;
f8=fe8+f8*cu*w;
f9=fe9+f9*cu*w;
f10=fe10+f10*cu*w;
f11=fe11+f11*cu*w;
f12=fe12+f12*cu*w;
f13=fe13+f13*cu*w;
f14=fe14+f14*cu*w;
f15=fe15+f15*cu*w;
f16=fe16+f16*cu*w;
f17=fe17+f17*cu*w;
f18=fe18+f18*cu*w;
}
if(SNL[tid]==0){
//everyone relaxes towards equilibrium
f0=f0-omega*(f0-fe0);
f1=f1-omega*(f1-fe1);
f2=f2-omega*(f2-fe2);
f3=f3-omega*(f3-fe3);
f4=f4-omega*(f4-fe4);
f5=f5-omega*(f5-fe5);
f6=f6-omega*(f6-fe6);
f7=f7-omega*(f7-fe7);
f8=f8-omega*(f8-fe8);
f9=f9-omega*(f9-fe9);
f10=f10-omega*(f10-fe10);
f11=f11-omega*(f11-fe11);
f12=f12-omega*(f12-fe12);
f13=f13-omega*(f13-fe13);
f14=f14-omega*(f14-fe14);
f15=f15-omega*(f15-fe15);
f16=f16-omega*(f16-fe16);
f17=f17-omega*(f17-fe17);
f18=f18-omega*(f18-fe18);
}else{
//bounce back
f0=f0-omega*(f0-fe0);
//1 -- 2
cu=f1;f1=f2;f2=cu;
// 3 -- 4
cu=f3;f3=f4;f4=cu;
//5--6
cu=f5;f5=f6;f6=cu;
//7--10
cu=f7;f7=f10;f10=cu;
//8--9
cu=f8;f8=f9;f9=cu;
//11-14
cu=f11;f11=f14;f14=cu;
//12-13
cu=f12;f12=f13;f13=cu;
//15-18
cu=f15;f15=f18;f18=cu;
//16-17
cu=f16;f16=f17;f17=cu;
}
//now, everybody streams...
int Z = tid/(Nx*Ny);
int Y = (tid - Z*Nx*Ny)/Nx;
int X = tid - Z*Nx*Ny - Y*Nx;
int X_t,Y_t,Z_t,tid_t;
//speed 0 (0,0,0)
fOut[tid]=f0;
//stream(fOut,f0,0,X,Y,Z,0,0,0,Nx,Ny,Nz);
//speed 1 (1,0,0)
X_t=X+1;Y_t=Y; Z_t=Z;
if(X_t==Nx) X_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[nnodes+tid_t]=f1;
//speed 2 (-1,0,0)
X_t=X-1; Y_t=Y; Z_t=Z;
if(X_t<0)X_t=Nx-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[2*nnodes+tid_t]=f2;
//speed 3 (0,1,0)
X_t=X; Y_t=Y+1; Z_t=Z;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
//tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[3*nnodes+tid_t]=f3;
//speed 4 ( 0,-1,0)
X_t=X; Y_t=Y-1; Z_t=Z;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[4*nnodes+tid_t]=f4;
//speed 5 ( 0,0,1)
X_t=X;Y_t=Y;Z_t=Z+1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[5*nnodes+tid_t]=f5;
//speed 6 (0,0,-1)
X_t=X; Y_t=Y;Z_t=Z-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[6*nnodes+tid_t]=f6;
//speed 7 (1,1,0)
X_t=X+1;Y_t=Y+1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[7*nnodes+tid_t]=f7;
//speed 8 (-1,1,0)
X_t=X-1;Y_t=Y+1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t==Ny)Y_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[8*nnodes+tid_t]=f8;
//speed 9 (1,-1,0)
X_t=X+1;Y_t=Y-1;Z_t=Z;
if(X_t==Nx)X_t=0;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[9*nnodes+tid_t]=f9;
//speed 10 (-1,-1,0)
X_t=X-1;Y_t=Y-1;Z_t=Z;
if(X_t<0)X_t=Nx-1;
if(Y_t<0)Y_t=Ny-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[10*nnodes+tid_t]=f10;
//speed 11 (1,0,1)
X_t=X+1;Y_t=Y;Z_t=Z+1;
if(X_t==Nx)X_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[11*nnodes+tid_t]=f11;
//speed 12 (-1,0,1)
X_t=X-1;Y_t=Y;Z_t=Z+1;
if(X_t<0)X_t=Nx-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[12*nnodes+tid_t]=f12;
//speed 13 (1,0,-1)
X_t=X+1;Y_t=Y;Z_t=Z-1;
if(X_t==Nx)X_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[13*nnodes+tid_t]=f13;
//speed 14 (-1,0,-1)
X_t=X-1;Y_t=Y;Z_t=Z-1;
if(X_t<0)X_t=Nx-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[14*nnodes+tid_t]=f14;
//speed 15 (0,1,1)
X_t=X;Y_t=Y+1;Z_t=Z+1;
if(Y_t==Ny)Y_t=0;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[15*nnodes+tid_t]=f15;
//speed 16 (0,-1,1)
X_t=X;Y_t=Y-1;Z_t=Z+1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t==Nz)Z_t=0;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[16*nnodes+tid_t]=f16;
//speed 17 (0,1,-1)
X_t=X;Y_t=Y+1;Z_t=Z-1;
if(Y_t==Ny)Y_t=0;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[17*nnodes+tid_t]=f17;
//speed 18 ( 0,-1,-1)
X_t=X;Y_t=Y-1;Z_t=Z-1;
if(Y_t<0)Y_t=Ny-1;
if(Z_t<0)Z_t=Nz-1;
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[18*nnodes+tid_t]=f18;
}
}
void D3Q19_RegBC_LBGK(const double * fIn, double * fOut, const int * SNL, const int * VW_nl,
const double VW_uz, const int * PE_nl, const double rho_out, const double omega,
const int Nx, const int Ny, const int Nz)
{
dim3 BLOCKS(TPB,1,1);
dim3 GRIDS((Nx*Ny*Nz+TPB-1)/TPB,1,1);
D3Q19_RegBC_LBGK_ts<<<GRIDS,BLOCKS>>>(fIn,fOut,SNL,VW_nl,VW_uz,PE_nl,rho_out,
omega,Nx,Ny,Nz);
}
|
b535f09ff0cad5311d2dbb49ab87787dbf1763cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "col2im.h"
#include "hip/hip_runtime.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void col2im_gpu_kernel(const int n, const float* data_col,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_im) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
// equivalent implementation
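        // Precompute a base offset into data_col plus per-step strides so the
        // loops below can walk the column buffer without recomputing the full
        // multi-dimensional index on every iteration.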
int offset =
(c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] += val;
}
}
void col2im_gpu(float *data_col,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_im){
    // We are going to launch channels * height * width threads, one per element
    // of the input image; each thread accumulates the column-buffer entries
    // that map back to its element.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( col2im_gpu_kernel), dim3((num_kernels+BLOCK-1)/BLOCK),
dim3(BLOCK),0,get_cuda_stream(),
num_kernels, data_col, height, width, ksize, pad,
stride, height_col,
width_col, data_im);
}
| b535f09ff0cad5311d2dbb49ab87787dbf1763cf.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "col2im.h"
#include "cuda.h"
}
// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void col2im_gpu_kernel(const int n, const float* data_col,
const int height, const int width, const int ksize,
const int pad,
const int stride,
const int height_col, const int width_col,
float *data_im) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
for(; index < n; index += blockDim.x*gridDim.x){
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
// equivalent implementation
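        // Precompute a base offset into data_col plus per-step strides so the
        // loops below can walk the column buffer without recomputing the full
        // multi-dimensional index on every iteration.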
int offset =
(c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] += val;
}
}
void col2im_gpu(float *data_col,
int channels, int height, int width,
int ksize, int stride, int pad, float *data_im){
    // We are going to launch channels * height * width threads, one per element
    // of the input image; each thread accumulates the column-buffer entries
    // that map back to its element.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
col2im_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
BLOCK,0,get_cuda_stream()>>>(
num_kernels, data_col, height, width, ksize, pad,
stride, height_col,
width_col, data_im);
}
|
d0585f19703b308900f58c90bb7b2bf69bd62d80.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file corecel/cont/Range.test.cu
//---------------------------------------------------------------------------//
#include "Range.test.hh"
#include <thrust/device_vector.h>
#include "corecel/device_runtime_api.h"
#include "corecel/Assert.hh"
#include "corecel/cont/Range.hh"
#include "corecel/sys/Device.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
namespace celeritas
{
namespace test
{
__global__ void
rangedev_test_kernel(int a, int* x, int* y, int* z, unsigned int n)
{
// grid stride loop
for (auto i : range(blockIdx.x * blockDim.x + threadIdx.x, n)
.step(gridDim.x * blockDim.x))
{
z[i] = a * x[i] + y[i];
}
}
RangeTestOutput rangedev_test(RangeTestInput input)
{
CELER_EXPECT(input.x.size() == input.y.size());
// Local device vectors for working data
thrust::device_vector<int> x_dev(input.x.begin(), input.x.end());
thrust::device_vector<int> y_dev(input.y.begin(), input.y.end());
thrust::device_vector<int> z_dev(input.x.size(), 0);
// Test kernel
CELER_LAUNCH_KERNEL(rangedev_test,
input.threads_per_block,
input.num_threads,
0,
input.a,
thrust::raw_pointer_cast(x_dev.data()),
thrust::raw_pointer_cast(y_dev.data()),
thrust::raw_pointer_cast(z_dev.data()),
z_dev.size());
CELER_DEVICE_CHECK_ERROR();
// Copy result back to CPU
RangeTestOutput result;
result.z.assign(z_dev.size(), 0);
thrust::copy(z_dev.begin(), z_dev.end(), result.z.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace test
} // namespace celeritas
| d0585f19703b308900f58c90bb7b2bf69bd62d80.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020-2023 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file corecel/cont/Range.test.cu
//---------------------------------------------------------------------------//
#include "Range.test.hh"
#include <thrust/device_vector.h>
#include "corecel/device_runtime_api.h"
#include "corecel/Assert.hh"
#include "corecel/cont/Range.hh"
#include "corecel/sys/Device.hh"
#include "corecel/sys/KernelParamCalculator.device.hh"
namespace celeritas
{
namespace test
{
__global__ void
rangedev_test_kernel(int a, int* x, int* y, int* z, unsigned int n)
{
// grid stride loop
for (auto i : range(blockIdx.x * blockDim.x + threadIdx.x, n)
.step(gridDim.x * blockDim.x))
{
z[i] = a * x[i] + y[i];
}
}
RangeTestOutput rangedev_test(RangeTestInput input)
{
CELER_EXPECT(input.x.size() == input.y.size());
// Local device vectors for working data
thrust::device_vector<int> x_dev(input.x.begin(), input.x.end());
thrust::device_vector<int> y_dev(input.y.begin(), input.y.end());
thrust::device_vector<int> z_dev(input.x.size(), 0);
// Test kernel
CELER_LAUNCH_KERNEL(rangedev_test,
input.threads_per_block,
input.num_threads,
0,
input.a,
thrust::raw_pointer_cast(x_dev.data()),
thrust::raw_pointer_cast(y_dev.data()),
thrust::raw_pointer_cast(z_dev.data()),
z_dev.size());
CELER_DEVICE_CHECK_ERROR();
// Copy result back to CPU
RangeTestOutput result;
result.z.assign(z_dev.size(), 0);
thrust::copy(z_dev.begin(), z_dev.end(), result.z.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace test
} // namespace celeritas
|
ad5c10fc5b2fb9b1c9ba212bba293532e65bda1f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
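// Helpers for ILP-wide vectorized memory access: is_aligned() checks that a
// pointer sits on an ILP*sizeof(T) boundary, and load_store() moves ILP
// contiguous elements as one aligned chunk.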
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template<typename x_t, typename y_t, typename out_t>
struct AxpbyFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
float a,
float b,
int arg_to_check)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
y_t* y = (y_t*)tl.addresses[1][tensor_loc];
y += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[2][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
bool finite = true;
x_t r_x[ILP];
y_t r_y[ILP];
out_t r_out[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x) && is_aligned(y) && is_aligned(out))
{
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_x, x, 0 , i_start);
load_store(r_y, y, 0 , i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_out[ii] = a*static_cast<float>(r_x[ii]) + b*static_cast<float>(r_y[ii]);
if(arg_to_check == -1)
finite = finite && (isfinite(r_x[ii]) && isfinite(r_y[ii]));
if(arg_to_check == 0)
finite = finite && isfinite(r_x[ii]);
if(arg_to_check == 1)
finite = finite && isfinite(r_y[ii]);
}
// store
load_store(out, r_out, i_start , 0);
}
}
else
{
// Non-divergent exit condition for __syncthreads, not necessary here
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_x[ii] = 0;
r_y[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_x[ii] = x[i];
r_y[ii] = y[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_out[ii] = a*static_cast<float>(r_x[ii]) + b*static_cast<float>(r_y[ii]);
if(arg_to_check == -1)
finite = finite && (isfinite(r_x[ii]) && isfinite(r_y[ii]));
if(arg_to_check == 0)
finite = finite && isfinite(r_x[ii]);
if(arg_to_check == 1)
finite = finite && isfinite(r_y[ii]);
}
// see note in multi_tensor_scale_kernel.cu
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
out[i] = r_out[ii];
}
}
}
if(!finite)
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
};
void multi_tensor_axpby_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float a,
float b,
int arg_to_check)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[2][0].scalar_type(), 2, "multi_tensor_axpby_cuda",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AxpbyFunctor<scalar_t_0, scalar_t_1, scalar_t_2>(),
a,
b,
arg_to_check); )))
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
}
| ad5c10fc5b2fb9b1c9ba212bba293532e65bda1f.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
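// Helpers for ILP-wide vectorized memory access: is_aligned() checks that a
// pointer sits on an ILP*sizeof(T) boundary, and load_store() moves ILP
// contiguous elements as one aligned chunk.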
template<typename T>
__device__ __forceinline__ bool is_aligned(T* p){
return ((uint64_t)p) % (ILP*sizeof(T)) == 0;
}
template<typename T>
__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){
typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT;
((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
}
template<typename x_t, typename y_t, typename out_t>
struct AxpbyFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
float a,
float b,
int arg_to_check)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
y_t* y = (y_t*)tl.addresses[1][tensor_loc];
y += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[2][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
bool finite = true;
x_t r_x[ILP];
y_t r_y[ILP];
out_t r_out[ILP];
// to make things simple, we put aligned case in a different code path
if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x) && is_aligned(y) && is_aligned(out))
{
for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x)
{
// load
load_store(r_x, x, 0 , i_start);
load_store(r_y, y, 0 , i_start);
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_out[ii] = a*static_cast<float>(r_x[ii]) + b*static_cast<float>(r_y[ii]);
if(arg_to_check == -1)
finite = finite && (isfinite(r_x[ii]) && isfinite(r_y[ii]));
if(arg_to_check == 0)
finite = finite && isfinite(r_x[ii]);
if(arg_to_check == 1)
finite = finite && isfinite(r_y[ii]);
}
// store
load_store(out, r_out, i_start , 0);
}
}
else
{
// Non-divergent exit condition for __syncthreads, not necessary here
for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_x[ii] = 0;
r_y[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
r_x[ii] = x[i];
r_y[ii] = y[i];
}
}
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
r_out[ii] = a*static_cast<float>(r_x[ii]) + b*static_cast<float>(r_y[ii]);
if(arg_to_check == -1)
finite = finite && (isfinite(r_x[ii]) && isfinite(r_y[ii]));
if(arg_to_check == 0)
finite = finite && isfinite(r_x[ii]);
if(arg_to_check == 1)
finite = finite && isfinite(r_y[ii]);
}
// see note in multi_tensor_scale_kernel.cu
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
out[i] = r_out[ii];
}
}
}
if(!finite)
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
};
void multi_tensor_axpby_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float a,
float b,
int arg_to_check)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[2][0].scalar_type(), 2, "multi_tensor_axpby_cuda",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AxpbyFunctor<scalar_t_0, scalar_t_1, scalar_t_2>(),
a,
b,
arg_to_check); )))
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
}
|
792c04c07308fc1ad9edbe97fe9a3a21e8218f46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
 * Device kernel to initialize vector elements. Each thread strides
 * through the vector and sets every element it visits to the value
 * `num`.
*/
__global__ void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i+=stride)
a[i] = num;
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
result[i] = a[i] + b[i];
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
int deviceId;
hipDeviceProp_t props;
hipGetDevice(&deviceId);
hipGetDeviceProperties(&props, deviceId);
int multiProcessorCount = props.multiProcessorCount;
hipMemPrefetchAsync(a, size, deviceId);
hipMemPrefetchAsync(b, size, deviceId);
hipMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock = 1024;
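  // Round the required block count (N / threadsPerBlock) up to a multiple of
  // the streaming multiprocessor count.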
size_t numberOfBlocks = ((N>>10)/multiProcessorCount+1)*multiProcessorCount;
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, 0, c, N);
hipDeviceSynchronize();
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
hipError_t addVectorsErr;
hipError_t asyncErr;
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess)
printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
  // Prefetching the result back to the CPU at this point gave the best measured improvement.
hipMemPrefetchAsync(c, size, hipCpuDeviceId);
if(asyncErr != hipSuccess)
printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 792c04c07308fc1ad9edbe97fe9a3a21e8218f46.cu | #include <stdio.h>
/*
 * Device kernel to initialize vector elements. Each thread strides
 * through the vector and sets every element it visits to the value
 * `num`.
*/
__global__ void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i+=stride)
a[i] = num;
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
result[i] = a[i] + b[i];
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
int deviceId;
cudaDeviceProp props;
cudaGetDevice(&deviceId);
cudaGetDeviceProperties(&props, deviceId);
int multiProcessorCount = props.multiProcessorCount;
cudaMemPrefetchAsync(a, size, deviceId);
cudaMemPrefetchAsync(b, size, deviceId);
cudaMemPrefetchAsync(c, size, deviceId);
size_t threadsPerBlock = 1024;
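  // Round the required block count (N / threadsPerBlock) up to a multiple of
  // the streaming multiprocessor count.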
size_t numberOfBlocks = ((N>>10)/multiProcessorCount+1)*multiProcessorCount;
initWith<<<numberOfBlocks, threadsPerBlock>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock>>>(0, c, N);
cudaDeviceSynchronize();
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
// Prefetching the result back to the CPU here (after the device work finishes) gave the best measured improvement.
cudaMemPrefetchAsync(c, size, cudaCpuDeviceId);
if(asyncErr != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
f08e70110322390e3fe7ee0b367c00c5e3e51af9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* ------------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2023, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* ------------------------------------------------------------------
* The following is a simple example problem based off of
* cvRoberts_klu.c. We simulate a scenario where a set of independent
* ODEs are grouped together to form a larger system. For simplicity,
* each set of ODEs is the same problem. The problem is from chemical
* kinetics, and consists of the following three rate equations:
* dy1/dt = -.04*y1 + 1.e4*y2*y3
* dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*(y2)^2
* dy3/dt = 3.e7*(y2)^2
* on the interval from t = 0.0 to t = 4.e10, with initial
* conditions: y1 = 1.0, y2 = y3 = 0. The problem is stiff.
* This program solves the problem with the BDF method, Newton
* iteration, a user-supplied Jacobian routine, and since the grouping
* of the independent systems results in a block diagonal linear
* system, with the cuSOLVER sparse batched QR linear solver. It uses
* a scalar relative tolerance and a vector absolute tolerance. Output
* is printed in decades from t = .4 to t = 4.e10. Run statistics
* (optional outputs) are printed at the end.
*
* The program takes one optional argument, the number of groups
* of independent ODE systems:
*
* ./cvRoberts_block_cusolversp_batchqr [number of groups]
*
* This problem is comparable to the cvRoberts_block_klu.c example.
* ------------------------------------------------------------------*/
#include <stdio.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_cuda.h> /* access to cuda N_Vector */
#include <sunmatrix/sunmatrix_cusparse.h> /* access to cusparse SUNMatrix */
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h> /* access to cuSolverSp batch QR SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, int */
/* Problem Constants */
#define GROUPSIZE 3 /* number of equations per group */
#define Y1 RCONST(1.0) /* initial y components */
#define Y2 RCONST(0.0)
#define Y3 RCONST(0.0)
#define RTOL RCONST(1.0e-4) /* scalar relative tolerance */
#define ATOL1 RCONST(1.0e-8) /* vector absolute tolerance components */
#define ATOL2 RCONST(1.0e-14)
#define ATOL3 RCONST(1.0e-6)
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.4) /* first output time */
#define TMULT RCONST(10.0) /* output time factor */
#define NOUT 12 /* number of output times */
#define ZERO RCONST(0.0)
/* Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
__global__
static void f_kernel(realtype t, realtype* y, realtype* ydot,
int neq, int ngroups);
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
__global__
static void j_kernel(int ngroups, int nnzper, realtype* ydata, realtype *Jdata);
/* Private function to initialize the Jacobian sparsity pattern */
static int JacInit(SUNMatrix J);
/* Private function to output results */
static void PrintOutput(realtype t, realtype y1, realtype y2, realtype y3);
/* Private function to print final statistics */
static void PrintFinalStats(void *cvode_mem, SUNLinearSolver LS);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/* user data structure */
typedef struct {
int ngroups;
int neq;
} UserData;
/*
*-------------------------------
* Main Program
*-------------------------------
*/
int main(int argc, char *argv[])
{
SUNContext sunctx;
realtype reltol, t, tout;
realtype *ydata, *abstol_data;
N_Vector y, abstol;
SUNMatrix A;
SUNLinearSolver LS;
void *cvode_mem;
int retval, iout;
int neq, ngroups, groupj;
UserData udata;
hipsparseHandle_t cusp_handle;
cusolverSpHandle_t cusol_handle;
y = abstol = NULL;
A = NULL;
LS = NULL;
cvode_mem = NULL;
/* Parse command line arguments */
if (argc > 1) {
ngroups = atoi(argv[1]);
} else {
ngroups = 100;
}
neq = ngroups * GROUPSIZE;
udata.ngroups = ngroups;
udata.neq = neq;
/* Initialize cuSOLVER and cuSPARSE handles */
hipsparseCreate(&cusp_handle);
cusolverSpCreate(&cusol_handle);
/* Create the SUNDIALS context */
retval = SUNContext_Create(NULL, &sunctx);
if(check_retval(&retval, "SUNContext_Create", 1)) return(1);
/* Create CUDA vector of length neq for I.C. and abstol */
y = N_VNew_Cuda(neq, sunctx);
if (check_retval((void *)y, "N_VNew_Cuda", 0)) return(1);
abstol = N_VNew_Cuda(neq, sunctx);
if (check_retval((void *)abstol, "N_VNew_Cuda", 0)) return(1);
ydata = N_VGetHostArrayPointer_Cuda(y);
abstol_data = N_VGetHostArrayPointer_Cuda(abstol);
/* Initialize y */
for (groupj = 0; groupj < neq; groupj += GROUPSIZE) {
ydata[groupj] = Y1;
ydata[groupj+1] = Y2;
ydata[groupj+2] = Y3;
}
N_VCopyToDevice_Cuda(y);
/* Set the scalar relative tolerance */
reltol = RTOL;
/* Set the vector absolute tolerance */
for (groupj = 0; groupj < neq; groupj += GROUPSIZE) {
abstol_data[groupj] = ATOL1;
abstol_data[groupj+1] = ATOL2;
abstol_data[groupj+2] = ATOL3;
}
N_VCopyToDevice_Cuda(abstol);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula */
cvode_mem = CVodeCreate(CV_BDF, sunctx);
if (check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in y'=f(t,y), the initial time T0, and
* the initial dependent variable vector y. */
retval = CVodeInit(cvode_mem, f, T0, y);
if (check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSetUserData to attach the user data structure */
retval = CVodeSetUserData(cvode_mem, &udata);
if (check_retval(&retval, "CVodeSetUserData", 1)) return(1);
/* Call CVodeSVtolerances to specify the scalar relative tolerance
* and vector absolute tolerances */
retval = CVodeSVtolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSVtolerances", 1)) return(1);
/* Create sparse SUNMatrix for use in linear solves */
A = SUNMatrix_cuSparse_NewBlockCSR(ngroups, GROUPSIZE, GROUPSIZE, GROUPSIZE*GROUPSIZE, cusp_handle, sunctx);
if(check_retval((void *)A, "SUNMatrix_cuSparse_NewBlockCSR", 0)) return(1);
/* Set the sparsity pattern to be fixed so that the row pointers
* and column indices are not zeroed out by SUNMatZero */
retval = SUNMatrix_cuSparse_SetFixedPattern(A, 1);
/* Initialize the Jacobian with its fixed sparsity pattern */
JacInit(A);
/* Create the SUNLinearSolver object for use by CVode */
LS = SUNLinSol_cuSolverSp_batchQR(y, A, cusol_handle, sunctx);
if(check_retval((void *)LS, "SUNLinSol_cuSolverSp_batchQR", 0)) return(1);
/* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */
retval = CVodeSetLinearSolver(cvode_mem, LS, A);
if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);
/* Set the user-supplied Jacobian routine Jac */
retval = CVodeSetJacFn(cvode_mem, Jac);
if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1);
/* In loop, call CVode, print results, and test for error.
Break out of loop when NOUT preset output times have been reached. */
printf(" \nGroup of independent 3-species kinetics problems\n\n");
printf("number of groups = %d\n\n", ngroups);
iout = 0; tout = T1;
while(1) {
retval = CVode(cvode_mem, tout, y, &t, CV_NORMAL);
N_VCopyFromDevice_Cuda(y);
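/* Print the solution of every 10th group to keep the output short. */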
for (groupj = 0; groupj < ngroups; groupj += 10) {
printf("group %d: ", groupj);
PrintOutput(t, ydata[GROUPSIZE*groupj],
ydata[1+GROUPSIZE*groupj],
ydata[2+GROUPSIZE*groupj]);
}
if (check_retval(&retval, "CVode", 1)) break;
if (retval == CV_SUCCESS) {
iout++;
tout *= TMULT;
}
if (iout == NOUT) break;
}
/* Print some final statistics */
PrintFinalStats(cvode_mem, LS);
/* Free y and abstol vectors */
N_VDestroy(y);
N_VDestroy(abstol);
/* Free integrator memory */
CVodeFree(&cvode_mem);
/* Free the linear solver memory */
SUNLinSolFree(LS);
/* Free the matrix memory */
SUNMatDestroy(A);
SUNContext_Free(&sunctx);
/* Destroy the cuSOLVER and cuSPARSE handles */
hipsparseDestroy(cusp_handle);
cusolverSpDestroy(cusol_handle);
return(0);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* Right hand side function. This just launches the CUDA kernel
to do the actual computation. At the very least, doing this
saves moving the vector data in y and ydot to/from the device
every evaluation of f. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData *udata;
realtype *ydata, *ydotdata;
udata = (UserData*) user_data;
ydata = N_VGetDeviceArrayPointer_Cuda(y);
ydotdata = N_VGetDeviceArrayPointer_Cuda(ydot);
unsigned block_size = 32;
unsigned grid_size = (udata->neq + block_size - 1) / block_size;
hipLaunchKernelGGL(( f_kernel), dim3(grid_size), dim3(block_size), 0, 0, t, ydata, ydotdata, udata->neq, udata->ngroups);
hipDeviceSynchronize();
hipError_t cuerr = hipGetLastError();
if (cuerr != hipSuccess) {
fprintf(stderr,
">>> ERROR in f: hipGetLastError returned %s\n",
hipGetErrorName(cuerr));
return(-1);
}
return(0);
}
/* Right hand side function evaluation kernel. */
__global__
static void f_kernel(realtype t, realtype* ydata, realtype* ydotdata,
int neq, int ngroups)
{
realtype y1, y2, y3, yd1, yd3;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int groupj = i*GROUPSIZE;
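/* groupj is the index of the first equation in the group handled by this thread. */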
if (i < neq) {
y1 = ydata[groupj]; y2 = ydata[groupj+1]; y3 = ydata[groupj+2];
yd1 = ydotdata[groupj] = RCONST(-0.04)*y1 + RCONST(1.0e4)*y2*y3;
yd3 = ydotdata[groupj+2] = RCONST(3.0e7)*y2*y2;
ydotdata[groupj+1] = -yd1 - yd3;
}
}
/*
* Jacobian initialization routine. This sets the sparsity pattern of
* the blocks of the Jacobian J(t,y) = df/dy. This is performed on the CPU,
* and only occurs at the beginning of the simulation.
*/
static int JacInit(SUNMatrix J)
{
int rowptrs[4], colvals[9];
/* Zero out the Jacobian */
SUNMatZero(J);
/* there are 3 entries per row */
rowptrs[0] = 0;
rowptrs[1] = 3;
rowptrs[2] = 6;
rowptrs[3] = 9;
/* first row of block */
colvals[0] = 0;
colvals[1] = 1;
colvals[2] = 2;
/* second row of block */
colvals[3] = 0;
colvals[4] = 1;
colvals[5] = 2;
/* third row of block */
colvals[6] = 0;
colvals[7] = 1;
colvals[8] = 2;
/* copy rowptrs, colvals to the device */
SUNMatrix_cuSparse_CopyToDevice(J, NULL, rowptrs, colvals);
hipDeviceSynchronize();
return(0);
}
/*
* Jacobian routine. Compute J(t,y) = df/dy.
* This is done on the GPU.
*/
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
UserData *udata = (UserData*) user_data;
int nnzper;
realtype *Jdata, *ydata;
unsigned block_size, grid_size;
nnzper = GROUPSIZE * GROUPSIZE;
Jdata = SUNMatrix_cuSparse_Data(J);
ydata = N_VGetDeviceArrayPointer_Cuda(y);
block_size = 32;
grid_size = (udata->neq + block_size - 1) / block_size;
hipLaunchKernelGGL(( j_kernel), dim3(grid_size), dim3(block_size), 0, 0, udata->ngroups, nnzper, ydata, Jdata);
hipDeviceSynchronize();
hipError_t cuerr = hipGetLastError();
if (cuerr != hipSuccess) {
fprintf(stderr,
">>> ERROR in Jac: hipGetLastError returned %s\n",
hipGetErrorName(cuerr));
return(-1);
}
return(0);
}
/* Jacobian evaluation GPU kernel */
__global__
static void j_kernel(int ngroups, int nnzper, realtype* ydata, realtype *Jdata)
{
int groupj;
realtype y2, y3;
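/* Grid-stride loop over groups: each thread fills the dense 3x3 (nnzper) Jacobian block of its groups. */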
for (groupj = blockIdx.x*blockDim.x + threadIdx.x;
groupj < ngroups;
groupj += blockDim.x * gridDim.x)
{
/* get y values */
y2 = ydata[GROUPSIZE*groupj + 1];
y3 = ydata[GROUPSIZE*groupj + 2];
/* first row of block */
Jdata[nnzper*groupj] = RCONST(-0.04);
Jdata[nnzper*groupj + 1] = RCONST(1.0e4)*y3;
Jdata[nnzper*groupj + 2] = RCONST(1.0e4)*y2;
/* second row of block */
Jdata[nnzper*groupj + 3] = RCONST(0.04);
Jdata[nnzper*groupj + 4] = (RCONST(-1.0e4)*y3) - (RCONST(6.0e7)*y2);
Jdata[nnzper*groupj + 5] = RCONST(-1.0e4)*y2;
/* third row of block */
Jdata[nnzper*groupj + 6] = ZERO;
Jdata[nnzper*groupj + 7] = RCONST(6.0e7)*y2;
Jdata[nnzper*groupj + 8] = ZERO;
}
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
static void PrintOutput(realtype t, realtype y1, realtype y2, realtype y3)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %0.4Le y =%14.6Le %14.6Le %14.6Le\n", t, y1, y2, y3);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %0.4e y =%14.6e %14.6e %14.6e\n", t, y1, y2, y3);
#else
printf("At t = %0.4e y =%14.6e %14.6e %14.6e\n", t, y1, y2, y3);
#endif
return;
}
/*
* Get and print some final statistics
*/
static void PrintFinalStats(void *cvode_mem, SUNLinearSolver LS)
{
long int nst, nfe, nsetups, nje, nni, ncfn, netf, nge;
int retval;
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
retval = CVodeGetNumJacEvals(cvode_mem, &nje);
check_retval(&retval, "CVodeGetNumJacEvals", 1);
retval = CVodeGetNumGEvals(cvode_mem, &nge);
check_retval(&retval, "CVodeGetNumGEvals", 1);
printf("\nFinal Statistics:\n");
printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nje = %ld\n",
nst, nfe, nsetups, nje);
printf("nni = %-6ld ncfn = %-6ld netf = %-6ld nge = %ld\n",
nni, ncfn, netf, nge);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
}
| f08e70110322390e3fe7ee0b367c00c5e3e51af9.cu | /* ------------------------------------------------------------------
* Programmer(s): Cody J. Balos @ LLNL
* ------------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2023, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* ------------------------------------------------------------------
* The following is a simple example problem based off of
* cvRoberts_klu.c. We simulate a scenario where a set of independent
* ODEs are grouped together to form a larger system. For simplicity,
* each set of ODEs is the same problem. The problem is from chemical
* kinetics, and consists of the following three rate equations:
* dy1/dt = -.04*y1 + 1.e4*y2*y3
* dy2/dt = .04*y1 - 1.e4*y2*y3 - 3.e7*(y2)^2
* dy3/dt = 3.e7*(y2)^2
* on the interval from t = 0.0 to t = 4.e10, with initial
* conditions: y1 = 1.0, y2 = y3 = 0. The problem is stiff.
* This program solves the problem with the BDF method, Newton
* iteration, a user-supplied Jacobian routine, and since the grouping
* of the independent systems results in a block diagonal linear
* system, with the cuSOLVER sparse batched QR linear solver. It uses
* a scalar relative tolerance and a vector absolute tolerance. Output
* is printed in decades from t = .4 to t = 4.e10. Run statistics
* (optional outputs) are printed at the end.
*
* The program takes one optional argument, the number of groups
* of independent ODE systems:
*
* ./cvRoberts_block_cusolversp_batchqr [number of groups]
*
* This problem is comparable to the cvRoberts_block_klu.c example.
* ------------------------------------------------------------------*/
#include <stdio.h>
#include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */
#include <nvector/nvector_cuda.h> /* access to cuda N_Vector */
#include <sunmatrix/sunmatrix_cusparse.h> /* access to cusparse SUNMatrix */
#include <sunlinsol/sunlinsol_cusolversp_batchqr.h> /* access to cuSolverSp batch QR SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, int */
/* Problem Constants */
#define GROUPSIZE 3 /* number of equations per group */
#define Y1 RCONST(1.0) /* initial y components */
#define Y2 RCONST(0.0)
#define Y3 RCONST(0.0)
#define RTOL RCONST(1.0e-4) /* scalar relative tolerance */
#define ATOL1 RCONST(1.0e-8) /* vector absolute tolerance components */
#define ATOL2 RCONST(1.0e-14)
#define ATOL3 RCONST(1.0e-6)
#define T0 RCONST(0.0) /* initial time */
#define T1 RCONST(0.4) /* first output time */
#define TMULT RCONST(10.0) /* output time factor */
#define NOUT 12 /* number of output times */
#define ZERO RCONST(0.0)
/* Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
__global__
static void f_kernel(realtype t, realtype* y, realtype* ydot,
int neq, int ngroups);
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
__global__
static void j_kernel(int ngroups, int nnzper, realtype* ydata, realtype *Jdata);
/* Private function to initialize the Jacobian sparsity pattern */
static int JacInit(SUNMatrix J);
/* Private function to output results */
static void PrintOutput(realtype t, realtype y1, realtype y2, realtype y3);
/* Private function to print final statistics */
static void PrintFinalStats(void *cvode_mem, SUNLinearSolver LS);
/* Private function to check function return values */
static int check_retval(void *returnvalue, const char *funcname, int opt);
/* user data structure */
typedef struct {
int ngroups;
int neq;
} UserData;
/*
*-------------------------------
* Main Program
*-------------------------------
*/
int main(int argc, char *argv[])
{
SUNContext sunctx;
realtype reltol, t, tout;
realtype *ydata, *abstol_data;
N_Vector y, abstol;
SUNMatrix A;
SUNLinearSolver LS;
void *cvode_mem;
int retval, iout;
int neq, ngroups, groupj;
UserData udata;
cusparseHandle_t cusp_handle;
cusolverSpHandle_t cusol_handle;
y = abstol = NULL;
A = NULL;
LS = NULL;
cvode_mem = NULL;
/* Parse command line arguments */
if (argc > 1) {
ngroups = atoi(argv[1]);
} else {
ngroups = 100;
}
neq = ngroups * GROUPSIZE;
udata.ngroups = ngroups;
udata.neq = neq;
/* Initialize cuSOLVER and cuSPARSE handles */
cusparseCreate(&cusp_handle);
cusolverSpCreate(&cusol_handle);
/* Create the SUNDIALS context */
retval = SUNContext_Create(NULL, &sunctx);
if(check_retval(&retval, "SUNContext_Create", 1)) return(1);
/* Create CUDA vector of length neq for I.C. and abstol */
y = N_VNew_Cuda(neq, sunctx);
if (check_retval((void *)y, "N_VNew_Cuda", 0)) return(1);
abstol = N_VNew_Cuda(neq, sunctx);
if (check_retval((void *)abstol, "N_VNew_Cuda", 0)) return(1);
ydata = N_VGetHostArrayPointer_Cuda(y);
abstol_data = N_VGetHostArrayPointer_Cuda(abstol);
/* Initialize y */
for (groupj = 0; groupj < neq; groupj += GROUPSIZE) {
ydata[groupj] = Y1;
ydata[groupj+1] = Y2;
ydata[groupj+2] = Y3;
}
N_VCopyToDevice_Cuda(y);
/* Set the scalar relative tolerance */
reltol = RTOL;
/* Set the vector absolute tolerance */
for (groupj = 0; groupj < neq; groupj += GROUPSIZE) {
abstol_data[groupj] = ATOL1;
abstol_data[groupj+1] = ATOL2;
abstol_data[groupj+2] = ATOL3;
}
N_VCopyToDevice_Cuda(abstol);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula */
cvode_mem = CVodeCreate(CV_BDF, sunctx);
if (check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in y'=f(t,y), the initial time T0, and
* the initial dependent variable vector y. */
retval = CVodeInit(cvode_mem, f, T0, y);
if (check_retval(&retval, "CVodeInit", 1)) return(1);
/* Call CVodeSetUserData to attach the user data structure */
retval = CVodeSetUserData(cvode_mem, &udata);
if (check_retval(&retval, "CVodeSetUserData", 1)) return(1);
/* Call CVodeSVtolerances to specify the scalar relative tolerance
* and vector absolute tolerances */
retval = CVodeSVtolerances(cvode_mem, reltol, abstol);
if (check_retval(&retval, "CVodeSVtolerances", 1)) return(1);
/* Create sparse SUNMatrix for use in linear solves */
A = SUNMatrix_cuSparse_NewBlockCSR(ngroups, GROUPSIZE, GROUPSIZE, GROUPSIZE*GROUPSIZE, cusp_handle, sunctx);
if(check_retval((void *)A, "SUNMatrix_cuSparse_NewBlockCSR", 0)) return(1);
/* Set the sparsity pattern to be fixed so that the row pointers
* and column indices are not zeroed out by SUNMatZero */
retval = SUNMatrix_cuSparse_SetFixedPattern(A, 1);
/* Initialize the Jacobian with its fixed sparsity pattern */
JacInit(A);
/* Create the SUNLinearSolver object for use by CVode */
LS = SUNLinSol_cuSolverSp_batchQR(y, A, cusol_handle, sunctx);
if(check_retval((void *)LS, "SUNLinSol_cuSolverSp_batchQR", 0)) return(1);
/* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */
retval = CVodeSetLinearSolver(cvode_mem, LS, A);
if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1);
/* Set the user-supplied Jacobian routine Jac */
retval = CVodeSetJacFn(cvode_mem, Jac);
if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1);
/* In loop, call CVode, print results, and test for error.
Break out of loop when NOUT preset output times have been reached. */
printf(" \nGroup of independent 3-species kinetics problems\n\n");
printf("number of groups = %d\n\n", ngroups);
iout = 0; tout = T1;
while(1) {
retval = CVode(cvode_mem, tout, y, &t, CV_NORMAL);
N_VCopyFromDevice_Cuda(y);
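/* Print the solution of every 10th group to keep the output short. */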
for (groupj = 0; groupj < ngroups; groupj += 10) {
printf("group %d: ", groupj);
PrintOutput(t, ydata[GROUPSIZE*groupj],
ydata[1+GROUPSIZE*groupj],
ydata[2+GROUPSIZE*groupj]);
}
if (check_retval(&retval, "CVode", 1)) break;
if (retval == CV_SUCCESS) {
iout++;
tout *= TMULT;
}
if (iout == NOUT) break;
}
/* Print some final statistics */
PrintFinalStats(cvode_mem, LS);
/* Free y and abstol vectors */
N_VDestroy(y);
N_VDestroy(abstol);
/* Free integrator memory */
CVodeFree(&cvode_mem);
/* Free the linear solver memory */
SUNLinSolFree(LS);
/* Free the matrix memory */
SUNMatDestroy(A);
SUNContext_Free(&sunctx);
/* Destroy the cuSOLVER and cuSPARSE handles */
cusparseDestroy(cusp_handle);
cusolverSpDestroy(cusol_handle);
return(0);
}
/*
*-------------------------------
* Functions called by the solver
*-------------------------------
*/
/* Right hand side function. This just launches the CUDA kernel
to do the actual computation. At the very least, doing this
saves moving the vector data in y and ydot to/from the device
every evaluation of f. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData *udata;
realtype *ydata, *ydotdata;
udata = (UserData*) user_data;
ydata = N_VGetDeviceArrayPointer_Cuda(y);
ydotdata = N_VGetDeviceArrayPointer_Cuda(ydot);
unsigned block_size = 32;
unsigned grid_size = (udata->neq + block_size - 1) / block_size;
f_kernel<<<grid_size, block_size>>>(t, ydata, ydotdata, udata->neq, udata->ngroups);
cudaDeviceSynchronize();
cudaError_t cuerr = cudaGetLastError();
if (cuerr != cudaSuccess) {
fprintf(stderr,
">>> ERROR in f: cudaGetLastError returned %s\n",
cudaGetErrorName(cuerr));
return(-1);
}
return(0);
}
/* Right hand side function evaluation kernel. */
__global__
static void f_kernel(realtype t, realtype* ydata, realtype* ydotdata,
int neq, int ngroups)
{
realtype y1, y2, y3, yd1, yd3;
int i = blockIdx.x*blockDim.x + threadIdx.x;
int groupj = i*GROUPSIZE;
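/* groupj is the index of the first equation in the group handled by this thread. */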
if (i < neq) {
y1 = ydata[groupj]; y2 = ydata[groupj+1]; y3 = ydata[groupj+2];
yd1 = ydotdata[groupj] = RCONST(-0.04)*y1 + RCONST(1.0e4)*y2*y3;
yd3 = ydotdata[groupj+2] = RCONST(3.0e7)*y2*y2;
ydotdata[groupj+1] = -yd1 - yd3;
}
}
/*
* Jacobian initialization routine. This sets the sparsity pattern of
* the blocks of the Jacobian J(t,y) = df/dy. This is performed on the CPU,
* and only occurs at the beginning of the simulation.
*/
static int JacInit(SUNMatrix J)
{
int rowptrs[4], colvals[9];
/* Zero out the Jacobian */
SUNMatZero(J);
/* there are 3 entries per row */
rowptrs[0] = 0;
rowptrs[1] = 3;
rowptrs[2] = 6;
rowptrs[3] = 9;
/* first row of block */
colvals[0] = 0;
colvals[1] = 1;
colvals[2] = 2;
/* second row of block */
colvals[3] = 0;
colvals[4] = 1;
colvals[5] = 2;
/* third row of block */
colvals[6] = 0;
colvals[7] = 1;
colvals[8] = 2;
/* copy rowptrs, colvals to the device */
SUNMatrix_cuSparse_CopyToDevice(J, NULL, rowptrs, colvals);
cudaDeviceSynchronize();
return(0);
}
/*
* Jacobian routine. Compute J(t,y) = df/dy.
* This is done on the GPU.
*/
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J,
void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
UserData *udata = (UserData*) user_data;
int nnzper;
realtype *Jdata, *ydata;
unsigned block_size, grid_size;
nnzper = GROUPSIZE * GROUPSIZE;
Jdata = SUNMatrix_cuSparse_Data(J);
ydata = N_VGetDeviceArrayPointer_Cuda(y);
block_size = 32;
grid_size = (udata->neq + block_size - 1) / block_size;
j_kernel<<<grid_size, block_size>>>(udata->ngroups, nnzper, ydata, Jdata);
cudaDeviceSynchronize();
cudaError_t cuerr = cudaGetLastError();
if (cuerr != cudaSuccess) {
fprintf(stderr,
">>> ERROR in Jac: cudaGetLastError returned %s\n",
cudaGetErrorName(cuerr));
return(-1);
}
return(0);
}
/* Jacobian evaluation GPU kernel */
__global__
static void j_kernel(int ngroups, int nnzper, realtype* ydata, realtype *Jdata)
{
int groupj;
realtype y2, y3;
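/* Grid-stride loop over groups: each thread fills the dense 3x3 (nnzper) Jacobian block of its groups. */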
for (groupj = blockIdx.x*blockDim.x + threadIdx.x;
groupj < ngroups;
groupj += blockDim.x * gridDim.x)
{
/* get y values */
y2 = ydata[GROUPSIZE*groupj + 1];
y3 = ydata[GROUPSIZE*groupj + 2];
/* first row of block */
Jdata[nnzper*groupj] = RCONST(-0.04);
Jdata[nnzper*groupj + 1] = RCONST(1.0e4)*y3;
Jdata[nnzper*groupj + 2] = RCONST(1.0e4)*y2;
/* second row of block */
Jdata[nnzper*groupj + 3] = RCONST(0.04);
Jdata[nnzper*groupj + 4] = (RCONST(-1.0e4)*y3) - (RCONST(6.0e7)*y2);
Jdata[nnzper*groupj + 5] = RCONST(-1.0e4)*y2;
/* third row of block */
Jdata[nnzper*groupj + 6] = ZERO;
Jdata[nnzper*groupj + 7] = RCONST(6.0e7)*y2;
Jdata[nnzper*groupj + 8] = ZERO;
}
}
/*
*-------------------------------
* Private helper functions
*-------------------------------
*/
static void PrintOutput(realtype t, realtype y1, realtype y2, realtype y3)
{
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("At t = %0.4Le y =%14.6Le %14.6Le %14.6Le\n", t, y1, y2, y3);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("At t = %0.4e y =%14.6e %14.6e %14.6e\n", t, y1, y2, y3);
#else
printf("At t = %0.4e y =%14.6e %14.6e %14.6e\n", t, y1, y2, y3);
#endif
return;
}
/*
* Get and print some final statistics
*/
static void PrintFinalStats(void *cvode_mem, SUNLinearSolver LS)
{
long int nst, nfe, nsetups, nje, nni, ncfn, netf, nge;
int retval;
retval = CVodeGetNumSteps(cvode_mem, &nst);
check_retval(&retval, "CVodeGetNumSteps", 1);
retval = CVodeGetNumRhsEvals(cvode_mem, &nfe);
check_retval(&retval, "CVodeGetNumRhsEvals", 1);
retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups);
check_retval(&retval, "CVodeGetNumLinSolvSetups", 1);
retval = CVodeGetNumErrTestFails(cvode_mem, &netf);
check_retval(&retval, "CVodeGetNumErrTestFails", 1);
retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni);
check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1);
retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn);
check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1);
retval = CVodeGetNumJacEvals(cvode_mem, &nje);
check_retval(&retval, "CVodeGetNumJacEvals", 1);
retval = CVodeGetNumGEvals(cvode_mem, &nge);
check_retval(&retval, "CVodeGetNumGEvals", 1);
printf("\nFinal Statistics:\n");
printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nje = %ld\n",
nst, nfe, nsetups, nje);
printf("nni = %-6ld ncfn = %-6ld netf = %-6ld nge = %ld\n",
nni, ncfn, netf, nge);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
static int check_retval(void *returnvalue, const char *funcname, int opt)
{
int *retval;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && returnvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
/* Check if retval < 0 */
else if (opt == 1) {
retval = (int *) returnvalue;
if (*retval < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
funcname, *retval);
return(1); }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && returnvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return(1); }
return(0);
}
|
2ce93bbe6066035b006f8c573f0d97a2f044e89d.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_in(idata, idata + n);
thrust::device_vector<int> dev_out(odata, odata + n);
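// Constructing the device_vectors copies the host arrays to the GPU, so those transfers fall outside the timed region below.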
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
timer().endGpuTimer();
thrust::copy(dev_out.begin(), dev_out.end(), odata);
}
}
}
| 2ce93bbe6066035b006f8c573f0d97a2f044e89d.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::device_vector<int> dev_in(idata, idata + n);
thrust::device_vector<int> dev_out(odata, odata + n);
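// Constructing the device_vectors copies the host arrays to the GPU, so those transfers fall outside the timed region below.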
timer().startGpuTimer();
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
timer().endGpuTimer();
thrust::copy(dev_out.begin(), dev_out.end(), odata);
}
}
}
|
26baa980cf6db80b740acd85553f3ff23234f07c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include "barracuda/kernels/activations.cuh"
#include "barracuda/tensor.cuh"
namespace kerns = bcuda::gpu_kernels;
TEST(ReLUKernel1d, ValueZero) {
auto tensor = bcuda::Tensor<double>::Zeros({46, 2876});
hipLaunchKernelGGL(( kerns::ReLUInplace), dim3(1), dim3(1), 0, 0, tensor.Size(), tensor.Data());
ASSERT_EQ(hipDeviceSynchronize(), hipError_t::hipSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 0.0f);
}
}
TEST(ReLUKernel1d, ValueOne) {
auto tensor = bcuda::Tensor<float>::Ones({48, 383898});
hipLaunchKernelGGL(( kerns::ReLUInplace), dim3(1), dim3(1), 0, 0, tensor.Size(), tensor.Data());
ASSERT_EQ(hipDeviceSynchronize(), hipError_t::hipSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 1.0f);
}
}
TEST(ReLUKernel1d, NegativeValue) {
auto tensor = bcuda::Tensor<double>({48, 383898});
hipLaunchKernelGGL(( kerns::LinearFill), dim3(1), dim3(1), 0, 0, tensor.Size(), tensor.Data(), -1.0);
ASSERT_EQ(hipDeviceSynchronize(), hipError_t::hipSuccess);
hipLaunchKernelGGL(( kerns::ReLUInplace), dim3(1), dim3(1), 0, 0, tensor.Size(), tensor.Data());
ASSERT_EQ(hipDeviceSynchronize(), hipError_t::hipSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 0.0f);
}
}
| 26baa980cf6db80b740acd85553f3ff23234f07c.cu |
#include <gtest/gtest.h>
#include "barracuda/kernels/activations.cuh"
#include "barracuda/tensor.cuh"
namespace kerns = bcuda::gpu_kernels;
TEST(ReLUKernel1d, ValueZero) {
auto tensor = bcuda::Tensor<double>::Zeros({46, 2876});
kerns::ReLUInplace<<<1, 1>>>(tensor.Size(), tensor.Data());
ASSERT_EQ(cudaDeviceSynchronize(), cudaError_t::cudaSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 0.0f);
}
}
TEST(ReLUKernel1d, ValueOne) {
auto tensor = bcuda::Tensor<float>::Ones({48, 383898});
kerns::ReLUInplace<<<1, 1>>>(tensor.Size(), tensor.Data());
ASSERT_EQ(cudaDeviceSynchronize(), cudaError_t::cudaSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 1.0f);
}
}
TEST(ReLUKernel1d, NegativeValue) {
auto tensor = bcuda::Tensor<double>({48, 383898});
kerns::LinearFill<<<1, 1>>>(tensor.Size(), tensor.Data(), -1.0);
ASSERT_EQ(cudaDeviceSynchronize(), cudaError_t::cudaSuccess);
kerns::ReLUInplace<<<1, 1>>>(tensor.Size(), tensor.Data());
ASSERT_EQ(cudaDeviceSynchronize(), cudaError_t::cudaSuccess);
for (auto i : tensor) {
ASSERT_FLOAT_EQ(i, 0.0f);
}
}
|
6168b6ab8fa7a9aa04625b80f6ce5ffbaeefeb86.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <regions.hpp>
#include <kernel/regions.hpp>
#include <err_cuda.hpp>
using af::dim4;
namespace cuda
{
template<typename T>
Array<T> regions(const Array<char> &in, af_connectivity connectivity)
{
ARG_ASSERT(2, (connectivity==AF_CONNECTIVITY_4 || connectivity==AF_CONNECTIVITY_8));
const dim4 dims = in.dims();
Array<T> out = createEmptyArray<T>(dims);
// Create bindless texture object for the equiv map.
hipTextureObject_t tex = 0;
// FIXME: Currently disabled, only supported on capability >= 3.0
//if (compute >= 3.0) {
// hipResourceDesc resDesc;
// memset(&resDesc, 0, sizeof(resDesc));
// resDesc.resType = hipResourceTypeLinear;
// resDesc.res.linear.devPtr = out->get();
// resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
// resDesc.res.linear.desc.x = 32; // bits per channel
// resDesc.res.linear.sizeInBytes = dims[0] * dims[1] * sizeof(float);
// hipTextureDesc texDesc;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.readMode = hipReadModeElementType;
// CUDA_CHECK(hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL));
//}
switch(connectivity) {
case AF_CONNECTIVITY_4:
::regions<T, false, 2>(out, in, tex);
break;
case AF_CONNECTIVITY_8:
::regions<T, true, 2>(out, in, tex);
break;
}
return out;
}
#define INSTANTIATE(T)\
template Array<T> regions<T>(const Array<char> &in, af_connectivity connectivity);
INSTANTIATE(float )
INSTANTIATE(double)
INSTANTIATE(int )
INSTANTIATE(uint )
INSTANTIATE(short )
INSTANTIATE(ushort)
}
| 6168b6ab8fa7a9aa04625b80f6ce5ffbaeefeb86.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/dim4.hpp>
#include <af/defines.h>
#include <ArrayInfo.hpp>
#include <Array.hpp>
#include <regions.hpp>
#include <kernel/regions.hpp>
#include <err_cuda.hpp>
using af::dim4;
namespace cuda
{
template<typename T>
Array<T> regions(const Array<char> &in, af_connectivity connectivity)
{
ARG_ASSERT(2, (connectivity==AF_CONNECTIVITY_4 || connectivity==AF_CONNECTIVITY_8));
const dim4 dims = in.dims();
Array<T> out = createEmptyArray<T>(dims);
// Create bindless texture object for the equiv map.
cudaTextureObject_t tex = 0;
// FIXME: Currently disabled, only supported on capability >= 3.0
//if (compute >= 3.0) {
// cudaResourceDesc resDesc;
// memset(&resDesc, 0, sizeof(resDesc));
// resDesc.resType = cudaResourceTypeLinear;
// resDesc.res.linear.devPtr = out->get();
// resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
// resDesc.res.linear.desc.x = 32; // bits per channel
// resDesc.res.linear.sizeInBytes = dims[0] * dims[1] * sizeof(float);
// cudaTextureDesc texDesc;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.readMode = cudaReadModeElementType;
// CUDA_CHECK(cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL));
//}
switch(connectivity) {
case AF_CONNECTIVITY_4:
::regions<T, false, 2>(out, in, tex);
break;
case AF_CONNECTIVITY_8:
::regions<T, true, 2>(out, in, tex);
break;
}
return out;
}
#define INSTANTIATE(T)\
template Array<T> regions<T>(const Array<char> &in, af_connectivity connectivity);
INSTANTIATE(float )
INSTANTIATE(double)
INSTANTIATE(int )
INSTANTIATE(uint )
INSTANTIATE(short )
INSTANTIATE(ushort)
}
|
1fd688d4141339bb43181800fbc129c223ad4561.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* jacobi1D.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
#define POLYBENCH_TIME 1
#include "../../../utilities/remapping.h"
#include "../../../utilities/remapping_mode.h"
#include "jacobi1D.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n))
{
int i;
for (i = 0; i < n; i++)
{
A[i] = ((DATA_TYPE) 4 * i + 10) / N;
B[i] = ((DATA_TYPE) 7 * i + 11) / N;
}
}
void runJacobi1DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n))
{
for (int t = 0; t < _PB_TSTEPS; t++)
{
for (int i = 2; i < _PB_N - 1; i++)
{
B[i] = 0.33333 * (A[i-1] + A[i] + A[i + 1]);
}
for (int j = 2; j < _PB_N - 1; j++)
{
A[j] = B[j];
}
}
}
__global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B)
{
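// remappingBlockIDx/remappingThreadIDx (from ../../../utilities/remapping.h) are assumed to permute the hardware block/thread indices according to BLOCKXMODE/THREADXMODE before the usual global-index computation.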
int i = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
if ((i > 1) && (i < (_PB_N-1)))
{
B[i] = 0.33333f * (A[i-1] + A[i] + A[i + 1]);
}
}
__global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
if ((j > 1) && (j < (_PB_N-1)))
{
A[j] = B[j];
}
}
void compareResults(int n, DATA_TYPE POLYBENCH_1D(a,N,n), DATA_TYPE POLYBENCH_1D(a_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(b,N,n), DATA_TYPE POLYBENCH_1D(b_outputFromGpu,N,n))
{
int i, fail;
fail = 0;
// Compare the CPU and GPU results for a and b
for (i=0; i < n; i++)
{
if (percentDiff(a[i], a_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
for (i=0; i < n; i++)
{
if (percentDiff(b[i], b_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void runJacobi1DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n), DATA_TYPE POLYBENCH_1D(A_outputFromGpu,N,n),
DATA_TYPE POLYBENCH_1D(B_outputFromGpu,N,n))
{
DATA_TYPE* Agpu;
DATA_TYPE* Bgpu;
hipMalloc(&Agpu, N * sizeof(DATA_TYPE));
hipMalloc(&Bgpu, N * sizeof(DATA_TYPE));
hipMemcpy(Agpu, A, N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(Bgpu, B, N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1);
/* Start timer. */
polybench_start_instruments;
for (int t = 0; t < _PB_TSTEPS ; t++)
{
hipLaunchKernelGGL(( runJacobiCUDA_kernel1) , dim3(grid), dim3(block) , 0, 0, n, Agpu, Bgpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( runJacobiCUDA_kernel2) , dim3(grid), dim3(block), 0, 0, n, Agpu, Bgpu);
hipDeviceSynchronize();
}
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
hipMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost);
hipMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost);
hipFree(Agpu);
hipFree(Bgpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_1D(A,N,n))
{
int i;
for (i = 0; i < n; i++)
{
fprintf(stderr, DATA_PRINTF_MODIFIER, A[i]);
if (i % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int tsteps = TSTEPS;
POLYBENCH_1D_ARRAY_DECL(a,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(b,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,n);
init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
runJacobi1DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
runJacobi1DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(a);
POLYBENCH_FREE_ARRAY(a_outputFromGpu);
POLYBENCH_FREE_ARRAY(b);
POLYBENCH_FREE_ARRAY(b_outputFromGpu);
return 0;
}
#include <polybench.c>
| 1fd688d4141339bb43181800fbc129c223ad4561.cu | /**
* jacobi1D.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
#define POLYBENCH_TIME 1
#include "../../../utilities/remapping.h"
#include "../../../utilities/remapping_mode.h"
#include "jacobi1D.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n))
{
int i;
for (i = 0; i < n; i++)
{
A[i] = ((DATA_TYPE) 4 * i + 10) / N;
B[i] = ((DATA_TYPE) 7 * i + 11) / N;
}
}
void runJacobi1DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n))
{
for (int t = 0; t < _PB_TSTEPS; t++)
{
for (int i = 2; i < _PB_N - 1; i++)
{
B[i] = 0.33333 * (A[i-1] + A[i] + A[i + 1]);
}
for (int j = 2; j < _PB_N - 1; j++)
{
A[j] = B[j];
}
}
}
__global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B)
{
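// remappingBlockIDx/remappingThreadIDx (from ../../../utilities/remapping.h) are assumed to permute the hardware block/thread indices according to BLOCKXMODE/THREADXMODE before the usual global-index computation.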
int i = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
if ((i > 1) && (i < (_PB_N-1)))
{
B[i] = 0.33333f * (A[i-1] + A[i] + A[i + 1]);
}
}
__global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int j = remappingBlockIDx(blockIdx.x, BLOCKXMODE) * blockDim.x + remappingThreadIDx(threadIdx.x, THREADXMODE);
if ((j > 1) && (j < (_PB_N-1)))
{
A[j] = B[j];
}
}
void compareResults(int n, DATA_TYPE POLYBENCH_1D(a,N,n), DATA_TYPE POLYBENCH_1D(a_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(b,N,n), DATA_TYPE POLYBENCH_1D(b_outputFromGpu,N,n))
{
int i, fail;
fail = 0;
// Compare the CPU and GPU results for a and b
for (i=0; i < n; i++)
{
if (percentDiff(a[i], a_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
for (i=0; i < n; i++)
{
if (percentDiff(b[i], b_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void runJacobi1DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n), DATA_TYPE POLYBENCH_1D(A_outputFromGpu,N,n),
DATA_TYPE POLYBENCH_1D(B_outputFromGpu,N,n))
{
DATA_TYPE* Agpu;
DATA_TYPE* Bgpu;
cudaMalloc(&Agpu, N * sizeof(DATA_TYPE));
cudaMalloc(&Bgpu, N * sizeof(DATA_TYPE));
cudaMemcpy(Agpu, A, N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(Bgpu, B, N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1);
/* Start timer. */
polybench_start_instruments;
for (int t = 0; t < _PB_TSTEPS ; t++)
{
runJacobiCUDA_kernel1 <<< grid, block >>> (n, Agpu, Bgpu);
cudaThreadSynchronize();
runJacobiCUDA_kernel2 <<< grid, block>>> (n, Agpu, Bgpu);
cudaThreadSynchronize();
}
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
cudaMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
cudaFree(Agpu);
cudaFree(Bgpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_1D(A,N,n))
{
int i;
for (i = 0; i < n; i++)
{
fprintf(stderr, DATA_PRINTF_MODIFIER, A[i]);
if (i % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int tsteps = TSTEPS;
POLYBENCH_1D_ARRAY_DECL(a,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(b,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,n);
POLYBENCH_1D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,n);
init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
runJacobi1DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
runJacobi1DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(a);
POLYBENCH_FREE_ARRAY(a_outputFromGpu);
POLYBENCH_FREE_ARRAY(b);
POLYBENCH_FREE_ARRAY(b_outputFromGpu);
return 0;
}
#include <polybench.c>
|
1170f2726615ddf713d33cbaedf7227712845b05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
////////////////////////////////////////////////////////////////////////////////
// MODIFICATION BEGIN
////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void im2col_gpu_mod_kernel(const int n, const Dtype* I,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* C,
const int set_offset, const int set_size) {
CUDA_KERNEL_LOOP(index, n) {
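// index enumerates (channel, position-within-set) pairs:
//   C_x         - first row of this channel's kernel_h*kernel_w rows in the column buffer
//   C_y / C_y_  - column inside the current set / absolute output spatial position
//   I_x, I_y    - top-left input coordinates for that output position; I_c - input channel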
const int C_x = (index / set_size) * kernel_h * kernel_w;
const int C_y = index % set_size;
const int C_y_ = set_offset + C_y;
const int I_x = C_y_/width_col * stride_h - pad_h;
const int I_y = C_y_%width_col * stride_w - pad_w;
const int I_c = index / set_size;
Dtype* C_ptr = C + C_x * set_size + C_y;
const Dtype* I_ptr = I + (I_c*height+I_x)*width + I_y;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int I_x_ = I_x + i * dilation_h;
int I_y_ = I_y + j * dilation_w;
*C_ptr =
(I_x_ >= 0 && I_y_ >= 0 && I_x_ < height && I_y_ < width) ?
I_ptr[i * dilation_h * width + j * dilation_w] : 0;
C_ptr+=set_size;
}
}
}
}
template <typename Dtype>
int im2col_gpu_mod(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col, const int set_idx, const int set_size ) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int offset = set_idx * set_size ;
int size=0;
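// The last set can be partial: clamp its size to the remaining output positions.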
if(height_col * width_col - offset < set_size){
size = height_col * width_col - offset;
}else{
size = set_size;
}
int num_kernels = channels * size;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_mod_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col, offset, size);
CUDA_POST_KERNEL_CHECK;
return size;
}
// Explicit instantiation
template int im2col_gpu_mod<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col, const int block_offset, const int block_size);
template int im2col_gpu_mod<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col, const int block_offset, const int block_size);
/*
template <typename Dtype>
__global__ void im2col_gpu_mod_kernel(const int n, const Dtype* I,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* C,
const int set_offset, const int set_size) {
CUDA_KERNEL_LOOP(index, n) {
const int i = index%(kernel_w*kernel_h)/kernel_w;
const int j = index%kernel_w;
const int idx = index/(kernel_w*kernel_h);
const int C_x = index / set_size;
const int C_y = idx % set_size;
const int C_y_ = set_offset + C_y;
const int I_x = C_y_/width_col * stride_h - pad_h;
const int I_y = C_y_%width_col * stride_w - pad_w;
const int I_c = idx / set_size;
int I_x_ = I_x + i * dilation_h;
int I_y_ = I_y + j * dilation_w;
C[C_x * set_size + C_y] =
(I_x_ >= 0 && I_y_ >= 0 && I_x_ < height && I_y_ < width) ?
I[(I_c*height+I_x_)*width + I_y_] : 0;
}
}
template <typename Dtype>
int im2col_gpu_mod(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col, const int set_idx, const int set_size ) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int offset = set_idx * set_size ;
int size=0;
if(height_col * width_col - offset < set_size){
size = height_col * width_col - offset;
}else{
size = set_size;
}
int num_kernels = channels * kernel_h * kernel_w * size;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_mod_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col, offset, size);
CUDA_POST_KERNEL_CHECK;
return size;
}
// Explicit instantiation
template int im2col_gpu_mod<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col, const int block_offset, const int block_size);
template int im2col_gpu_mod<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col, const int block_offset, const int block_size);
*/
////////////////////////////////////////////////////////////////////////////////
// MODIFICATION END
////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
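// Example (illustrative numbers): height = 224, kernel_h = 3, pad_h = 1,
// dilation_h = 1, stride_h = 1 gives height_col = (224 + 2 - 3) / 1 + 1 = 224,
// i.e. a same-size output; the width formula behaves identically.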
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 1>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 2>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 3>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 4>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 5>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 6>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 7>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 8>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 9>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( col2im_nd_gpu_kernel<Dtype, 10>), dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
| 1170f2726615ddf713d33cbaedf7227712845b05.cu | #include <algorithm>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
////////////////////////////////////////////////////////////////////////////////
// MODIFICATION BEGIN
////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void im2col_gpu_mod_kernel(const int n, const Dtype* I,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* C,
const int set_offset, const int set_size) {
CUDA_KERNEL_LOOP(index, n) {
const int C_x = (index / set_size) * kernel_h * kernel_w;
const int C_y = index % set_size;
const int C_y_ = set_offset + C_y;
const int I_x = C_y_/width_col * stride_h - pad_h;
const int I_y = C_y_%width_col * stride_w - pad_w;
const int I_c = index / set_size;
Dtype* C_ptr = C + C_x * set_size + C_y;
const Dtype* I_ptr = I + (I_c*height+I_x)*width + I_y;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int I_x_ = I_x + i * dilation_h;
int I_y_ = I_y + j * dilation_w;
*C_ptr =
(I_x_ >= 0 && I_y_ >= 0 && I_x_ < height && I_y_ < width) ?
I_ptr[i * dilation_h * width + j * dilation_w] : 0;
C_ptr+=set_size;
}
}
}
}
template <typename Dtype>
int im2col_gpu_mod(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col, const int set_idx, const int set_size ) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int offset = set_idx * set_size ;
int size=0;
if(height_col * width_col - offset < set_size){
size = height_col * width_col - offset;
}else{
size = set_size;
}
int num_kernels = channels * size;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_mod_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col, offset, size);
CUDA_POST_KERNEL_CHECK;
return size;
}
// Explicit instantiation
template int im2col_gpu_mod<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col, const int block_offset, const int block_size);
template int im2col_gpu_mod<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col, const int block_offset, const int block_size);
/*
template <typename Dtype>
__global__ void im2col_gpu_mod_kernel(const int n, const Dtype* I,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* C,
const int set_offset, const int set_size) {
CUDA_KERNEL_LOOP(index, n) {
const int i = index%(kernel_w*kernel_h)/kernel_w;
const int j = index%kernel_w;
const int idx = index/(kernel_w*kernel_h);
const int C_x = index / set_size;
const int C_y = idx % set_size;
const int C_y_ = set_offset + C_y;
const int I_x = C_y_/width_col * stride_h - pad_h;
const int I_y = C_y_%width_col * stride_w - pad_w;
const int I_c = idx / set_size;
int I_x_ = I_x + i * dilation_h;
int I_y_ = I_y + j * dilation_w;
C[C_x * set_size + C_y] =
(I_x_ >= 0 && I_y_ >= 0 && I_x_ < height && I_y_ < width) ?
I[(I_c*height+I_x_)*width + I_y_] : 0;
}
}
template <typename Dtype>
int im2col_gpu_mod(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col, const int set_idx, const int set_size ) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int offset = set_idx * set_size ;
int size=0;
if(height_col * width_col - offset < set_size){
size = height_col * width_col - offset;
}else{
size = set_size;
}
int num_kernels = channels * kernel_h * kernel_w * size;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_mod_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col, offset, size);
CUDA_POST_KERNEL_CHECK;
return size;
}
// Explicit instantiation
template int im2col_gpu_mod<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col, const int block_offset, const int block_size);
template int im2col_gpu_mod<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col, const int block_offset, const int block_size);
*/
////////////////////////////////////////////////////////////////////////////////
// MODIFICATION END
////////////////////////////////////////////////////////////////////////////////
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
Dtype* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const Dtype* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i * dilation_h;
int w_im = w_offset + j * dilation_w;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
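// Example (illustrative numbers): height = 5, kernel_h = 3, pad_h = 1,
// dilation_h = 1, stride_h = 2 gives height_col = (5 + 2 - 3) / 2 + 1 = 3;
// the width formula behaves identically.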
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col,
width_col, data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, double* data_col);
template <typename Dtype, int num_axes>
__global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
int d_temp[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int channel_in = index;
int channel_out = 1;
for (i = num_axes - 1; i >= 0; --i) {
d_temp[i] = channel_in % shared_col_shape[i + 1];
channel_in /= shared_col_shape[i + 1];
channel_out *= shared_kernel_shape[i];
}
channel_out *= channel_in;
int data_col_inc = 1;
for (i = 0; i < num_axes; ++i) {
channel_out *= shared_col_shape[i + 1];
channel_out += d_temp[i];
d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i];
channel_in *= shared_im_shape[i + 1];
channel_in += d_temp[i];
data_col_inc *= shared_col_shape[i + 1];
d_iter[i] = 0;
}
Dtype* data_col_ptr = data_col + channel_out;
const Dtype* data_im_ptr = data_im + channel_in;
bool incremented;
do {
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i];
in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1];
if (!in_range) { break; }
}
if (in_range) {
int data_im_offset = d_iter[0] * shared_dilation[0];
for (i = 1; i < num_axes; ++i) {
data_im_offset *= shared_im_shape[i + 1];
data_im_offset += d_iter[i] * shared_dilation[i];
}
*data_col_ptr = data_im_ptr[data_im_offset];
} else {
*data_col_ptr = 0;
}
data_col_ptr += data_col_inc;
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
const int d_max = shared_kernel_shape[i];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes,
const int num_kernels, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_col) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 2:
im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 3:
im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 4:
im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 5:
im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 6:
im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 7:
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 8:
im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 9:
im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
case 10:
im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_col);
break;
default:
LOG(FATAL) << "im2col_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_nd_gpu<float>(const float* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_col);
template void im2col_nd_gpu<double>(const double* data_im,
const int num_spatial_axes, const int col_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {
Dtype val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
Dtype* data_im) {
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
double* data_im);
template <typename Dtype, int num_axes>
__global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] =
(d_im[i] < kernel_extent) ? 0 :
(d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
Dtype val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename Dtype>
void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes,
const int im_size, const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, Dtype* data_im) {
// num_axes should be smaller than block size
DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS);
switch (num_spatial_axes) {
case 1:
col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 2:
col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 3:
col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 4:
col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 5:
col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 6:
col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 7:
col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 8:
col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 9:
col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
case 10:
col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>(
im_size, data_col, im_shape, col_shape,
kernel_shape, pad, stride, dilation, data_im);
break;
default:
LOG(FATAL) << "col2im_nd_gpu does not support computation with "
<< num_spatial_axes << " spatial axes";
}
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_nd_gpu<float>(const float* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, float* data_im);
template void col2im_nd_gpu<double>(const double* data_col,
const int num_spatial_axes, const int im_size,
const int* im_shape, const int* col_shape,
const int* kernel_shape, const int* pad, const int* stride,
const int* dilation, double* data_im);
} // namespace caffe
|
5259bdc882303528d819a4dd21d3c54ac515477b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "/usr/include/third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
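// NmDistanceKernel: for every point j of xyz (laid out b x n x 3), find its
// nearest neighbour among the m points of xyz2 (b x m x 3) and write the
// squared Euclidean distance to result[i*n+j] and the neighbour's index to
// result_i[i*n+j]. xyz2 is staged through shared memory in tiles of 512
// points ("batch"), and the inner distance loop is manually unrolled by 4.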
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,n,xyz,m,xyz2,result,result_i);
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, b,m,xyz2,n,xyz,result2,result2_i);
}
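// NmDistanceGradKernel: back-propagates the per-point squared distances.
// With j2 = idx1[i*n+j] the matched point in xyz2 and d = |p1 - p2|^2, the
// gradients are 2*(p1 - p2) w.r.t. p1 and -2*(p1 - p2) w.r.t. p2; they are
// accumulated with atomicAdd because several points of xyz1 can share the
// same nearest neighbour j2.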
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
hipMemset(grad_xyz1,0,b*n*3*4);
hipMemset(grad_xyz2,0,b*m*3*4);
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
#endif
| 5259bdc882303528d819a4dd21d3c54ac515477b.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "/usr/include/third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*3];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*3;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*3+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*3+0];
float y1=xyz[(i*n+j)*3+1];
float z1=xyz[(i*n+j)*3+2];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*3+3]-x1;
float y2=buf[k*3+4]-y1;
float z2=buf[k*3+5]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*3+6]-x1;
float y2=buf[k*3+7]-y1;
float z2=buf[k*3+8]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*3+9]-x1;
float y2=buf[k*3+10]-y1;
float z2=buf[k*3+11]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*3+0]-x1;
float y2=buf[k*3+1]-y1;
float z2=buf[k*3+2]-z1;
float d=x2*x2+y2*y2+z2*z2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
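// The launcher below runs the kernel in both directions (xyz -> xyz2 and
// xyz2 -> xyz), producing the two directional nearest-neighbour terms from
// which a Chamfer-style point-cloud distance is assembled.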
void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,n,xyz,m,xyz2,result,result_i);
NmDistanceKernel<<<dim3(32,16,1),512>>>(b,m,xyz2,n,xyz,result2,result2_i);
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2){
cudaMemset(grad_xyz1,0,b*n*3*4);
cudaMemset(grad_xyz2,0,b*m*3*4);
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
}
#endif
|
18006871303205b6fdc71f9248ec7cf1a4a0aee6.hip | // !!! This is a file automatically generated by hipify!!!
#include "Layer.h"
Layer::Layer(const std::string &_network, const std::string &_name, const std::string &_type, bool _ReLU, int _stride,
int _padding) : ReLU(_ReLU), stride(_stride), padding(_padding) {
this->network = _network;
this->name = _name;
this->type = _type;
this->init = false;
}
Layer::~Layer() {
if(init) {
hipHostFree(weights);
hipHostFree(bias);
hipHostFree(activations);
hipHostFree(output_activations);
}
}
float Layer::act_get(int i, int j, int k, int l) const {
uint32_t index = act_shape[1]*act_shape[2]*act_shape[3]*i + act_shape[2]*act_shape[3]*j + act_shape[3]*k + l;
return activations[index];
}
float Layer::wgt_get(int i, int j, int k, int l) const {
uint32_t index = wgt_shape[1]*wgt_shape[2]*wgt_shape[3]*i + wgt_shape[2]*wgt_shape[3]*j + wgt_shape[3]*k + l;
return weights[index];
}
uint64_t Layer::getMaxIndex(const std::string &array) const {
if(array == "weights") {
return wgt_shape[0]*wgt_shape[1]*wgt_shape[2]*wgt_shape[3];
} else if(array == "bias") {
return bias_shape[0];
} else if(array == "activations") {
#ifdef FORCE_ONE_IMAGE
return 1*act_shape[1]*act_shape[2]*act_shape[3];
#else
return act_shape[0]*act_shape[1]*act_shape[2]*act_shape[3];
#endif
} else if(array == "output_activations") {
#ifdef FORCE_ONE_IMAGE
if(out_act_shape.size() == 4) return 1*out_act_shape[1]*out_act_shape[2]*out_act_shape[3];
else return 1*out_act_shape[1];
#else
if(out_act_shape.size() == 4) return out_act_shape[0]*out_act_shape[1]*out_act_shape[2]*out_act_shape[3];
else return out_act_shape[0]*out_act_shape[1];
#endif
} else return 0;
}
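// zero_pad: grows every activation map from Nx x Ny to (Nx + 2*padding) x
// (Ny + 2*padding) by allocating a fresh pinned buffer, copying the original
// values into its interior (leaving a zero border), freeing the old buffer,
// and updating act_shape accordingly.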
void Layer::zero_pad() {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
int new_Nx = Nx + 2*padding;
int new_Ny = Ny + 2*padding;
uint64_t new_max_index = batch_size * act_channels * new_Nx * new_Ny;
float* tmp_activations;
hipHostMalloc((void **) &tmp_activations, new_max_index * sizeof(float));
if (tmp_activations == NULL) {
fprintf(stderr, "Error: Failed to allocate padded activations!\n");
exit(EXIT_FAILURE);
}
for(uint64_t i = 0; i < new_max_index; i++) {
tmp_activations[i] = 0;
}
for(int n = 0; n < batch_size; n++) {
for (int k = 0; k < act_channels; k++) {
for (int i = 0; i < Nx; i++) {
for(int j = 0; j < Ny; j++) {
uint32_t index_out = act_channels*new_Nx*new_Ny*n + new_Nx*new_Ny*k + new_Ny*(padding + i) +
(padding + j);
uint32_t index_in = act_channels*Nx*Ny*n + Nx*Ny*k + Ny*i + j;
tmp_activations[index_out] = activations[index_in];
}
}
}
}
hipHostFree(activations);
activations = tmp_activations;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back(act_channels);
act_shape.push_back(new_Nx);
act_shape.push_back(new_Ny);
}
void Layer::act_split_4D(int K, int X, int Y) {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
uint64_t new_max_index = batch_size * K * X * Y;
float* tmp_activations;
hipHostMalloc((void **) &tmp_activations, new_max_index * sizeof(float));
if (tmp_activations == NULL) {
fprintf(stderr, "Error: Failed to allocate padded activations!\n");
exit(EXIT_FAILURE);
}
for(int n = 0; n < batch_size; n++) {
for (int k = 0; k < act_channels; k++) {
for (int i = 0; i < Nx; i++) {
for(int j = 0; j < Ny; j++) {
int new_k = k / (X*Y);
int rem = k % (X*Y);
int new_i = rem / Y;
int new_j = rem % Y;
uint32_t index_out = K*X*Y*n + X*Y*new_k + Y*new_i + new_j;
uint32_t index_in = act_channels*Nx*Ny*n + Nx*Ny*k + Ny*i + j;
tmp_activations[index_out] = activations[index_in];
}
}
}
}
hipHostFree(activations);
activations = tmp_activations;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back((unsigned)K);
act_shape.push_back((unsigned)X);
act_shape.push_back((unsigned)Y);
}
void Layer::wgt_split_4D(int K, int X, int Y) {
int num_filters = wgt_shape[0];
int wgt_channels = wgt_shape[1];
int Kx = wgt_shape[2];
int Ky = wgt_shape[3];
uint64_t new_max_index = num_filters * K * X * Y;
float* tmp_weights;
hipHostMalloc((void **) &tmp_weights, new_max_index * sizeof(float));
if (tmp_weights == NULL) {
fprintf(stderr, "Error: Failed to allocate padded weights!\n");
exit(EXIT_FAILURE);
}
for(int n = 0; n < num_filters; n++) {
for (int k = 0; k < wgt_channels; k++) {
for (int i = 0; i < Kx; i++) {
for(int j = 0; j < Ky; j++) {
int new_k = k / (X*Y);
int rem = k % (X*Y);
int new_i = rem / Y;
int new_j = rem % Y;
uint32_t index_out = K*X*Y*n + X*Y*new_k + Y*new_i + new_j;
uint32_t index_in = wgt_channels*Kx*Ky*n + Kx*Ky*k + Ky*i + j;
tmp_weights[index_out] = weights[index_in];
}
}
}
}
hipHostFree(weights);
weights = tmp_weights;
wgt_shape.clear();
wgt_shape.push_back(num_filters);
wgt_shape.push_back((unsigned)K);
wgt_shape.push_back((unsigned)X);
wgt_shape.push_back((unsigned)Y);
}
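// reshape_to_2D: collapses act_channels x Nx x Ny into a single channel
// dimension with 1x1 spatial extent (presumably ahead of a fully-connected
// layer). Only the shape metadata is rewritten; the buffer itself is left
// untouched since the data is already contiguous in that order.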
void Layer::reshape_to_2D() {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
int new_act_channels = act_channels * Nx * Ny;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back(new_act_channels);
act_shape.push_back(1);
act_shape.push_back(1);
}
inline
hipError_t check_error(hipError_t err, std::string task) {
if (err != hipSuccess) {
fprintf(stderr, "Error: Failed to %s (error code: %s)!\n", task.c_str(), hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return err;
}
// Read network from numpy arrays
void Layer::read_layer() {
cnpy::NpyArray data_npy;
uint64_t max_index;
cnpy::npy_load("net_traces/" + network + "/wgt-" + name + ".npy" , data_npy, wgt_shape);
max_index = getMaxIndex("weights");
check_error(hipHostMalloc((void **) &weights, max_index * sizeof(float)),"allocate layer weights");
for(uint32_t i = 0; i < max_index; i++)
weights[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/bias-" + name + ".npy" , data_npy, bias_shape);
max_index = getMaxIndex("bias");
check_error(hipHostMalloc((void **) &bias, max_index * sizeof(float)),"allocate layer bias");
for(uint32_t i = 0; i < max_index; i++)
bias[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/act-" + name + "-0.npy" , data_npy, act_shape);
max_index = getMaxIndex("activations");
check_error(hipHostMalloc((void **) &activations, max_index * sizeof(float)),"allocate layer activations");
for(uint32_t i = 0; i < max_index; i++)
activations[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/act-" + name + "-0-out.npy" , data_npy, out_act_shape);
max_index = getMaxIndex("output_activations");
check_error(hipHostMalloc((void **) &output_activations, max_index * sizeof(float)),"allocate layer output activations");
for(uint32_t i = 0; i < max_index; i++)
output_activations[i] = data_npy.data<float>()[i];
this->init = true;
#ifdef VERBOSE
printf("Layer %s loaded into memory\n",name.c_str());
#endif
}
| 18006871303205b6fdc71f9248ec7cf1a4a0aee6.cu | #include "Layer.h"
Layer::Layer(const std::string &_network, const std::string &_name, const std::string &_type, bool _ReLU, int _stride,
int _padding) : ReLU(_ReLU), stride(_stride), padding(_padding) {
this->network = _network;
this->name = _name;
this->type = _type;
this->init = false;
}
Layer::~Layer() {
if(init) {
cudaFreeHost(weights);
cudaFreeHost(bias);
cudaFreeHost(activations);
cudaFreeHost(output_activations);
}
}
float Layer::act_get(int i, int j, int k, int l) const {
uint32_t index = act_shape[1]*act_shape[2]*act_shape[3]*i + act_shape[2]*act_shape[3]*j + act_shape[3]*k + l;
return activations[index];
}
float Layer::wgt_get(int i, int j, int k, int l) const {
uint32_t index = wgt_shape[1]*wgt_shape[2]*wgt_shape[3]*i + wgt_shape[2]*wgt_shape[3]*j + wgt_shape[3]*k + l;
return weights[index];
}
uint64_t Layer::getMaxIndex(const std::string &array) const {
if(array == "weights") {
return wgt_shape[0]*wgt_shape[1]*wgt_shape[2]*wgt_shape[3];
} else if(array == "bias") {
return bias_shape[0];
} else if(array == "activations") {
#ifdef FORCE_ONE_IMAGE
return 1*act_shape[1]*act_shape[2]*act_shape[3];
#else
return act_shape[0]*act_shape[1]*act_shape[2]*act_shape[3];
#endif
} else if(array == "output_activations") {
#ifdef FORCE_ONE_IMAGE
if(out_act_shape.size() == 4) return 1*out_act_shape[1]*out_act_shape[2]*out_act_shape[3];
else return 1*out_act_shape[1];
#else
if(out_act_shape.size() == 4) return out_act_shape[0]*out_act_shape[1]*out_act_shape[2]*out_act_shape[3];
else return out_act_shape[0]*out_act_shape[1];
#endif
} else return 0;
}
void Layer::zero_pad() {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
int new_Nx = Nx + 2*padding;
int new_Ny = Ny + 2*padding;
uint64_t new_max_index = batch_size * act_channels * new_Nx * new_Ny;
float* tmp_activations;
cudaMallocHost((void **) &tmp_activations, new_max_index * sizeof(float));
if (tmp_activations == NULL) {
fprintf(stderr, "Error: Failed to allocate padded activations!\n");
exit(EXIT_FAILURE);
}
for(uint64_t i = 0; i < new_max_index; i++) {
tmp_activations[i] = 0;
}
for(int n = 0; n < batch_size; n++) {
for (int k = 0; k < act_channels; k++) {
for (int i = 0; i < Nx; i++) {
for(int j = 0; j < Ny; j++) {
uint32_t index_out = act_channels*new_Nx*new_Ny*n + new_Nx*new_Ny*k + new_Ny*(padding + i) +
(padding + j);
uint32_t index_in = act_channels*Nx*Ny*n + Nx*Ny*k + Ny*i + j;
tmp_activations[index_out] = activations[index_in];
}
}
}
}
cudaFreeHost(activations);
activations = tmp_activations;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back(act_channels);
act_shape.push_back(new_Nx);
act_shape.push_back(new_Ny);
}
void Layer::act_split_4D(int K, int X, int Y) {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
uint64_t new_max_index = batch_size * K * X * Y;
float* tmp_activations;
cudaMallocHost((void **) &tmp_activations, new_max_index * sizeof(float));
if (tmp_activations == NULL) {
fprintf(stderr, "Error: Failed to allocate split activations!\n");
exit(EXIT_FAILURE);
}
for(int n = 0; n < batch_size; n++) {
for (int k = 0; k < act_channels; k++) {
for (int i = 0; i < Nx; i++) {
for(int j = 0; j < Ny; j++) {
int new_k = k / (X*Y);
int rem = k % (X*Y);
int new_i = rem / Y;
int new_j = rem % Y;
uint32_t index_out = K*X*Y*n + X*Y*new_k + Y*new_i + new_j;
uint32_t index_in = act_channels*Nx*Ny*n + Nx*Ny*k + Ny*i + j;
tmp_activations[index_out] = activations[index_in];
}
}
}
}
cudaFreeHost(activations);
activations = tmp_activations;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back((unsigned)K);
act_shape.push_back((unsigned)X);
act_shape.push_back((unsigned)Y);
}
void Layer::wgt_split_4D(int K, int X, int Y) {
int num_filters = wgt_shape[0];
int wgt_channels = wgt_shape[1];
int Kx = wgt_shape[2];
int Ky = wgt_shape[3];
uint64_t new_max_index = num_filters * K * X * Y;
float* tmp_weights;
cudaMallocHost((void **) &tmp_weights, new_max_index * sizeof(float));
if (tmp_weights == NULL) {
fprintf(stderr, "Error: Failed to allocate split weights!\n");
exit(EXIT_FAILURE);
}
for(int n = 0; n < num_filters; n++) {
for (int k = 0; k < wgt_channels; k++) {
for (int i = 0; i < Kx; i++) {
for(int j = 0; j < Ky; j++) {
int new_k = k / (X*Y);
int rem = k % (X*Y);
int new_i = rem / Y;
int new_j = rem % Y;
uint32_t index_out = K*X*Y*n + X*Y*new_k + Y*new_i + new_j;
uint32_t index_in = wgt_channels*Kx*Ky*n + Kx*Ky*k + Ky*i + j;
tmp_weights[index_out] = weights[index_in];
}
}
}
}
cudaFreeHost(weights);
weights = tmp_weights;
wgt_shape.clear();
wgt_shape.push_back(num_filters);
wgt_shape.push_back((unsigned)K);
wgt_shape.push_back((unsigned)X);
wgt_shape.push_back((unsigned)Y);
}
void Layer::reshape_to_2D() {
#ifdef FORCE_ONE_IMAGE
int batch_size = (unsigned)1;
#else
int batch_size = act_shape[0];
#endif
int act_channels = act_shape[1];
int Nx = act_shape[2];
int Ny = act_shape[3];
int new_act_channels = act_channels * Nx * Ny;
act_shape.clear();
act_shape.push_back(batch_size);
act_shape.push_back(new_act_channels);
act_shape.push_back(1);
act_shape.push_back(1);
}
inline
cudaError_t check_error(cudaError_t err, std::string task) {
if (err != cudaSuccess) {
fprintf(stderr, "Error: Failed to %s (error code: %s)!\n", task.c_str(), cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return err;
}
// Read network from numpy arrays
void Layer::read_layer() {
cnpy::NpyArray data_npy;
uint64_t max_index;
cnpy::npy_load("net_traces/" + network + "/wgt-" + name + ".npy" , data_npy, wgt_shape);
max_index = getMaxIndex("weights");
check_error(cudaMallocHost((void **) &weights, max_index * sizeof(float)),"allocate layer weights");
for(uint32_t i = 0; i < max_index; i++)
weights[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/bias-" + name + ".npy" , data_npy, bias_shape);
max_index = getMaxIndex("bias");
check_error(cudaMallocHost((void **) &bias, max_index * sizeof(float)),"allocate layer bias");
for(uint32_t i = 0; i < max_index; i++)
bias[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/act-" + name + "-0.npy" , data_npy, act_shape);
max_index = getMaxIndex("activations");
check_error(cudaMallocHost((void **) &activations, max_index * sizeof(float)),"allocate layer activations");
for(uint32_t i = 0; i < max_index; i++)
activations[i] = data_npy.data<float>()[i];
cnpy::npy_load("net_traces/" + network + "/act-" + name + "-0-out.npy" , data_npy, out_act_shape);
max_index = getMaxIndex("output_activations");
check_error(cudaMallocHost((void **) &output_activations, max_index * sizeof(float)),"allocate layer output activations");
for(uint32_t i = 0; i < max_index; i++)
output_activations[i] = data_npy.data<float>()[i];
this->init = true;
#ifdef VERBOSE
printf("Layer %s loaded into memory\n",name.c_str());
#endif
}
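// --- Editor note (illustrative sketch, not part of the original source): a minimal driver showing
// how the interface above might be exercised. The network/layer names, the type string, and the
// stride/padding values are assumptions for illustration only; read_layer() expects matching
// numpy traces under net_traces/<network>/.
void layer_usage_sketch() {
    Layer conv1("alexnet", "conv1", "Convolution", /*ReLU=*/true, /*stride=*/4, /*padding=*/1);
    conv1.read_layer();                   // load wgt-/bias-/act- traces into pinned host memory
    conv1.zero_pad();                     // grow the activation grid by 2*padding in X and Y
    float a = conv1.act_get(0, 0, 1, 1);  // NCHW indexing into the (now padded) activations
    (void) a;                             // the sketch only demonstrates the call sequence
}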
|
b4926150be92ff9242e9e47cfa3306e2abd70c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file plmc_cuda.cu
* \brief Definitions of the piecewise linear reconstruction functions with
limiting applied in the characteristic variables, as described
in Stone et al., 2008. */
#ifdef CUDA
#ifdef PLMC
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"plmc_cuda.h"
/*! \fn __global__ void PLMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using plm. */
__global__ void PLMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2) {
o1 = 3; o2 = 1; o3 = 2;
}
// declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
// declare other variables to be used
Real a_i;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real d_L_iph, vx_L_iph, vy_L_iph, vz_L_iph, p_L_iph;
Real d_R_imh, vx_R_imh, vy_R_imh, vz_R_imh, p_R_imh;
Real C;
#ifndef VL
Real dtodx = dt/dx;
Real lambda_m, lambda_0, lambda_p;
Real qx;
Real lamdiff;
Real sum_0, sum_1, sum_2, sum_3, sum_4;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_i;
Real ge_L_iph, ge_R_imh;
#ifndef VL
Real sum_ge;
#endif //CTU
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_i[NSCALARS];
Real scalar_L_iph[NSCALARS], scalar_R_imh[NSCALARS];
#ifndef VL
Real sum_scalar[NSCALARS];
#endif //CTU
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId*blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 1; xe = nx-2;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 1; ye = ny-2;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 1; ze = nz-2;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 3-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
#ifdef DE
ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i;
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
#ifdef DE
ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo;
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
#ifdef DE
ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo;
#endif
// calculate the adiabatic sound speed in cell i
a_i = sqrt(gamma*p_i/d_i);
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
#ifdef CTU
lambda_m = vx_i-a_i;
lambda_0 = vx_i;
lambda_p = vx_i+a_i;
#endif
// Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// Van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -d_i * del_vx_L / (2*a_i) + del_p_L / (2*a_i*a_i);
del_a_1_L = del_d_L - del_p_L / (a_i*a_i);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = d_i * del_vx_L / (2*a_i) + del_p_L / (2*a_i*a_i);
del_a_0_R = -d_i * del_vx_R / (2*a_i) + del_p_R / (2*a_i*a_i);
del_a_1_R = del_d_R - del_p_R / (a_i*a_i);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = d_i * del_vx_R / (2*a_i) + del_p_R / (2*a_i*a_i);
del_a_0_C = -d_i * del_vx_C / (2*a_i) + del_p_C / (2*a_i*a_i);
del_a_1_C = del_d_C - del_p_C / (a_i*a_i);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = d_i * del_vx_C / (2*a_i) + del_p_C / (2*a_i*a_i);
del_a_0_G = -d_i * del_vx_G / (2*a_i) + del_p_G / (2*a_i*a_i);
del_a_1_G = del_d_G - del_p_G / (a_i*a_i);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = d_i * del_vx_G / (2*a_i) + del_p_G / (2*a_i*a_i);
// Apply monotonicity constraints to the differences in the characteristic variables
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
del_ge_m_i = 0.0;
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = 0.0;
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin(2.0*lim_slope_a, lim_slope_b);
}
}
#endif
// Project the monotonized difference in the characteristic variables back onto the
// primitive variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a_i*del_a_0_m / d_i + a_i* del_a_4_m / d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a_i*a_i*del_a_0_m + a_i*a_i*del_a_4_m;
// Compute the left and right interface values using the monotonized difference in the
// primitive variables
d_R_imh = d_i - 0.5*del_d_m_i;
vx_R_imh = vx_i - 0.5*del_vx_m_i;
vy_R_imh = vy_i - 0.5*del_vy_m_i;
vz_R_imh = vz_i - 0.5*del_vz_m_i;
p_R_imh = p_i - 0.5*del_p_m_i;
d_L_iph = d_i + 0.5*del_d_m_i;
vx_L_iph = vx_i + 0.5*del_vx_m_i;
vy_L_iph = vy_i + 0.5*del_vy_m_i;
vz_L_iph = vz_i + 0.5*del_vz_m_i;
p_L_iph = p_i + 0.5*del_p_m_i;
#ifdef DE
ge_R_imh = ge_i - 0.5*del_ge_m_i;
ge_L_iph = ge_i + 0.5*del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] = scalar_i[i] - 0.5*del_scalar_m_i[i];
scalar_L_iph[i] = scalar_i[i] + 0.5*del_scalar_m_i[i];
}
#endif
C = d_R_imh + d_L_iph;
d_R_imh = fmax( fmin(d_i, d_imo), d_R_imh );
d_R_imh = fmin( fmax(d_i, d_imo), d_R_imh );
d_L_iph = C - d_R_imh;
d_L_iph = fmax( fmin(d_i, d_ipo), d_L_iph );
d_L_iph = fmin( fmax(d_i, d_ipo), d_L_iph );
d_R_imh = C - d_L_iph;
C = vx_R_imh + vx_L_iph;
vx_R_imh = fmax( fmin(vx_i, vx_imo), vx_R_imh );
vx_R_imh = fmin( fmax(vx_i, vx_imo), vx_R_imh );
vx_L_iph = C - vx_R_imh;
vx_L_iph = fmax( fmin(vx_i, vx_ipo), vx_L_iph );
vx_L_iph = fmin( fmax(vx_i, vx_ipo), vx_L_iph );
vx_R_imh = C - vx_L_iph;
C = vy_R_imh + vy_L_iph;
vy_R_imh = fmax( fmin(vy_i, vy_imo), vy_R_imh );
vy_R_imh = fmin( fmax(vy_i, vy_imo), vy_R_imh );
vy_L_iph = C - vy_R_imh;
vy_L_iph = fmax( fmin(vy_i, vy_ipo), vy_L_iph );
vy_L_iph = fmin( fmax(vy_i, vy_ipo), vy_L_iph );
vy_R_imh = C - vy_L_iph;
C = vz_R_imh + vz_L_iph;
vz_R_imh = fmax( fmin(vz_i, vz_imo), vz_R_imh );
vz_R_imh = fmin( fmax(vz_i, vz_imo), vz_R_imh );
vz_L_iph = C - vz_R_imh;
vz_L_iph = fmax( fmin(vz_i, vz_ipo), vz_L_iph );
vz_L_iph = fmin( fmax(vz_i, vz_ipo), vz_L_iph );
vz_R_imh = C - vz_L_iph;
C = p_R_imh + p_L_iph;
p_R_imh = fmax( fmin(p_i, p_imo), p_R_imh );
p_R_imh = fmin( fmax(p_i, p_imo), p_R_imh );
p_L_iph = C - p_R_imh;
p_L_iph = fmax( fmin(p_i, p_ipo), p_L_iph );
p_L_iph = fmin( fmax(p_i, p_ipo), p_L_iph );
p_R_imh = C - p_L_iph;
del_d_m_i = d_L_iph - d_R_imh;
del_vx_m_i = vx_L_iph - vx_R_imh;
del_vy_m_i = vy_L_iph - vy_R_imh;
del_vz_m_i = vz_L_iph - vz_R_imh;
del_p_m_i = p_L_iph - p_R_imh;
#ifdef DE
C = ge_R_imh + ge_L_iph;
ge_R_imh = fmax( fmin(ge_i, ge_imo), ge_R_imh );
ge_R_imh = fmin( fmax(ge_i, ge_imo), ge_R_imh );
ge_L_iph = C - ge_R_imh;
ge_L_iph = fmax( fmin(ge_i, ge_ipo), ge_L_iph );
ge_L_iph = fmin( fmax(ge_i, ge_ipo), ge_L_iph );
ge_R_imh = C - ge_L_iph;
del_ge_m_i = ge_L_iph - ge_R_imh;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
C = scalar_R_imh[i] + scalar_L_iph[i];
scalar_R_imh[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_R_imh[i] );
scalar_R_imh[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_R_imh[i] );
scalar_L_iph[i] = C - scalar_R_imh[i];
scalar_L_iph[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_L_iph[i] );
scalar_L_iph[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_L_iph[i] );
scalar_R_imh[i] = C - scalar_L_iph[i];
del_scalar_m_i[i] = scalar_L_iph[i] - scalar_R_imh[i];
}
#endif
#ifdef CTU
// Integrate linear interpolation function over domain of dependence
// defined by max(min) eigenvalue
qx = -0.5*fmin(lambda_m, 0)*dtodx;
d_R_imh = d_R_imh + qx * del_d_m_i;
vx_R_imh = vx_R_imh + qx * del_vx_m_i;
vy_R_imh = vy_R_imh + qx * del_vy_m_i;
vz_R_imh = vz_R_imh + qx * del_vz_m_i;
p_R_imh = p_R_imh + qx * del_p_m_i;
qx = 0.5*fmax(lambda_p, 0)*dtodx;
d_L_iph = d_L_iph - qx * del_d_m_i;
vx_L_iph = vx_L_iph - qx * del_vx_m_i;
vy_L_iph = vy_L_iph - qx * del_vy_m_i;
vz_L_iph = vz_L_iph - qx * del_vz_m_i;
p_L_iph = p_L_iph - qx * del_p_m_i;
#ifdef DE
ge_R_imh = ge_R_imh + qx * del_ge_m_i;
ge_L_iph = ge_L_iph - qx * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] = scalar_R_imh[i] + qx * del_scalar_m_i[i];
scalar_L_iph[i] = scalar_L_iph[i] - qx * del_scalar_m_i[i];
}
#endif
// Perform the characteristic tracing
// Stone Eqns 42 & 43
// left-hand interface value, i+1/2
sum_0 = sum_1 = sum_2 = sum_3 = sum_4 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0.0;
}
#endif
if (lambda_m >= 0)
{
lamdiff = lambda_p - lambda_m;
sum_0 += lamdiff * (-d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 - del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (-d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
if (lambda_0 >= 0)
{
lamdiff = lambda_p - lambda_0;
sum_0 += lamdiff * (del_d_m_i - del_p_m_i/(a_i*a_i));
sum_2 += lamdiff * del_vy_m_i;
sum_3 += lamdiff * del_vz_m_i;
#ifdef DE
sum_ge += lamdiff * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += lamdiff * del_scalar_m_i[i];
}
#endif
}
if (lambda_p >= 0)
{
lamdiff = lambda_p - lambda_p;
sum_0 += lamdiff * (d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 + del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
// add the corrections to the initial guesses for the interface values
d_L_iph += 0.5*dtodx*sum_0;
vx_L_iph += 0.5*dtodx*sum_1;
vy_L_iph += 0.5*dtodx*sum_2;
vz_L_iph += 0.5*dtodx*sum_3;
p_L_iph += 0.5*dtodx*sum_4;
#ifdef DE
ge_L_iph += 0.5*dtodx*sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L_iph[i] += 0.5*dtodx*sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_0 = sum_1 = sum_2 = sum_3 = sum_4 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
lamdiff = lambda_m - lambda_m;
sum_0 += lamdiff * (-d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 - del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (-d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
if (lambda_0 <= 0)
{
lamdiff = lambda_m - lambda_0;
sum_0 += lamdiff * (del_d_m_i - del_p_m_i/(a_i*a_i));
sum_2 += lamdiff * del_vy_m_i;
sum_3 += lamdiff * del_vz_m_i;
#ifdef DE
sum_ge += lamdiff * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += lamdiff * del_scalar_m_i[i];
}
#endif
}
if (lambda_p <= 0)
{
lamdiff = lambda_m - lambda_p;
sum_0 += lamdiff * (d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 + del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
// add the corrections
d_R_imh += 0.5*dtodx*sum_0;
vx_R_imh += 0.5*dtodx*sum_1;
vy_R_imh += 0.5*dtodx*sum_2;
vz_R_imh += 0.5*dtodx*sum_3;
p_R_imh += 0.5*dtodx*sum_4;
#ifdef DE
ge_R_imh += 0.5*dtodx*sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] += 0.5*dtodx*sum_scalar[i];
}
#endif
#endif //CTU
// apply minimum constraints
d_R_imh = fmax(d_R_imh, (Real) TINY_NUMBER);
d_L_iph = fmax(d_L_iph, (Real) TINY_NUMBER);
p_R_imh = fmax(p_R_imh, (Real) TINY_NUMBER);
p_L_iph = fmax(p_L_iph, (Real) TINY_NUMBER);
// Convert the left and right states in the primitive to the conserved variables
// send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
dev_bounds_R[ id] = d_R_imh;
dev_bounds_R[o1*n_cells + id] = d_R_imh*vx_R_imh;
dev_bounds_R[o2*n_cells + id] = d_R_imh*vy_R_imh;
dev_bounds_R[o3*n_cells + id] = d_R_imh*vz_R_imh;
dev_bounds_R[4*n_cells + id] = (p_R_imh/(gamma-1.0)) + 0.5*d_R_imh*(vx_R_imh*vx_R_imh + vy_R_imh*vy_R_imh + vz_R_imh*vz_R_imh);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_R_imh*scalar_R_imh[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_R_imh*ge_R_imh;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_L_iph;
dev_bounds_L[o1*n_cells + id] = d_L_iph*vx_L_iph;
dev_bounds_L[o2*n_cells + id] = d_L_iph*vy_L_iph;
dev_bounds_L[o3*n_cells + id] = d_L_iph*vz_L_iph;
dev_bounds_L[4*n_cells + id] = (p_L_iph/(gamma-1.0)) + 0.5*d_L_iph*(vx_L_iph*vx_L_iph + vy_L_iph*vy_L_iph + vz_L_iph*vz_L_iph);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_L_iph*scalar_L_iph[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_L_iph*ge_L_iph;
#endif
}
}
#endif //PLMC
#endif //CUDA
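// --- Editor note (illustrative sketch, not part of the original source): a minimal host-side
// launch for the kernel above. The 256-thread block size and the flat 1D grid are assumptions;
// the project that owns this file may size its launches differently. Guarded so the sketch only
// compiles when the kernel itself does.
#if defined(CUDA) && defined(PLMC)
void plmc_launch_sketch(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R,
                        int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma,
                        int dir, int n_fields)
{
  int n_cells = nx*ny*nz;
  int threads = 256;                               // assumed threads per block
  int blocks  = (n_cells + threads - 1) / threads; // one thread per cell; extras exit on the bounds check
  hipLaunchKernelGGL(PLMC_cuda, dim3(blocks), dim3(threads), 0, 0,
                     dev_conserved, dev_bounds_L, dev_bounds_R,
                     nx, ny, nz, n_ghost, dx, dt, gamma, dir, n_fields);
}
#endif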
| b4926150be92ff9242e9e47cfa3306e2abd70c8d.cu | /*! \file plmc_cuda.cu
* \brief Definitions of the piecewise linear reconstruction functions with
limiting applied in the characteristic variables, as described
in Stone et al., 2008. */
#ifdef CUDA
#ifdef PLMC
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"plmc_cuda.h"
/*! \fn __global__ void PLMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using plm. */
__global__ void PLMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2) {
o1 = 3; o2 = 1; o3 = 2;
}
// declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
// declare other variables to be used
Real a_i;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real d_L_iph, vx_L_iph, vy_L_iph, vz_L_iph, p_L_iph;
Real d_R_imh, vx_R_imh, vy_R_imh, vz_R_imh, p_R_imh;
Real C;
#ifndef VL
Real dtodx = dt/dx;
Real lambda_m, lambda_0, lambda_p;
Real qx;
Real lamdiff;
Real sum_0, sum_1, sum_2, sum_3, sum_4;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_i;
Real ge_L_iph, ge_R_imh;
#ifndef VL
Real sum_ge;
#endif //CTU
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_i[NSCALARS];
Real scalar_L_iph[NSCALARS], scalar_R_imh[NSCALARS];
#ifndef VL
Real sum_scalar[NSCALARS];
#endif //CTU
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId*blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 1; xe = nx-2;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 1; ye = ny-2;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 1; ze = nz-2;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 3-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
#ifdef DE
ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i;
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
#ifdef DE
ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo;
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
#ifdef DE
ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo;
#endif
// calculate the adiabatic sound speed in cell i
a_i = sqrt(gamma*p_i/d_i);
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
#ifdef CTU
lambda_m = vx_i-a_i;
lambda_0 = vx_i;
lambda_p = vx_i+a_i;
#endif
// Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// Van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -d_i * del_vx_L / (2*a_i) + del_p_L / (2*a_i*a_i);
del_a_1_L = del_d_L - del_p_L / (a_i*a_i);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = d_i * del_vx_L / (2*a_i) + del_p_L / (2*a_i*a_i);
del_a_0_R = -d_i * del_vx_R / (2*a_i) + del_p_R / (2*a_i*a_i);
del_a_1_R = del_d_R - del_p_R / (a_i*a_i);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = d_i * del_vx_R / (2*a_i) + del_p_R / (2*a_i*a_i);
del_a_0_C = -d_i * del_vx_C / (2*a_i) + del_p_C / (2*a_i*a_i);
del_a_1_C = del_d_C - del_p_C / (a_i*a_i);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = d_i * del_vx_C / (2*a_i) + del_p_C / (2*a_i*a_i);
del_a_0_G = -d_i * del_vx_G / (2*a_i) + del_p_G / (2*a_i*a_i);
del_a_1_G = del_d_G - del_p_G / (a_i*a_i);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = d_i * del_vx_G / (2*a_i) + del_p_G / (2*a_i*a_i);
// Apply monotonicity constraints to the differences in the characteristic variables
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
del_ge_m_i = 0.0;
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin(2.0*lim_slope_a, lim_slope_b);
}
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = 0.0;
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin(2.0*lim_slope_a, lim_slope_b);
}
}
#endif
// Project the monotonized difference in the characteristic variables back onto the
// primative variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a_i*del_a_0_m / d_i + a_i* del_a_4_m / d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a_i*a_i*del_a_0_m + a_i*a_i*del_a_4_m;
// Compute the left and right interface values using the monotonized difference in the
// primitive variables
d_R_imh = d_i - 0.5*del_d_m_i;
vx_R_imh = vx_i - 0.5*del_vx_m_i;
vy_R_imh = vy_i - 0.5*del_vy_m_i;
vz_R_imh = vz_i - 0.5*del_vz_m_i;
p_R_imh = p_i - 0.5*del_p_m_i;
d_L_iph = d_i + 0.5*del_d_m_i;
vx_L_iph = vx_i + 0.5*del_vx_m_i;
vy_L_iph = vy_i + 0.5*del_vy_m_i;
vz_L_iph = vz_i + 0.5*del_vz_m_i;
p_L_iph = p_i + 0.5*del_p_m_i;
#ifdef DE
ge_R_imh = ge_i - 0.5*del_ge_m_i;
ge_L_iph = ge_i + 0.5*del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] = scalar_i[i] - 0.5*del_scalar_m_i[i];
scalar_L_iph[i] = scalar_i[i] + 0.5*del_scalar_m_i[i];
}
#endif
C = d_R_imh + d_L_iph;
d_R_imh = fmax( fmin(d_i, d_imo), d_R_imh );
d_R_imh = fmin( fmax(d_i, d_imo), d_R_imh );
d_L_iph = C - d_R_imh;
d_L_iph = fmax( fmin(d_i, d_ipo), d_L_iph );
d_L_iph = fmin( fmax(d_i, d_ipo), d_L_iph );
d_R_imh = C - d_L_iph;
C = vx_R_imh + vx_L_iph;
vx_R_imh = fmax( fmin(vx_i, vx_imo), vx_R_imh );
vx_R_imh = fmin( fmax(vx_i, vx_imo), vx_R_imh );
vx_L_iph = C - vx_R_imh;
vx_L_iph = fmax( fmin(vx_i, vx_ipo), vx_L_iph );
vx_L_iph = fmin( fmax(vx_i, vx_ipo), vx_L_iph );
vx_R_imh = C - vx_L_iph;
C = vy_R_imh + vy_L_iph;
vy_R_imh = fmax( fmin(vy_i, vy_imo), vy_R_imh );
vy_R_imh = fmin( fmax(vy_i, vy_imo), vy_R_imh );
vy_L_iph = C - vy_R_imh;
vy_L_iph = fmax( fmin(vy_i, vy_ipo), vy_L_iph );
vy_L_iph = fmin( fmax(vy_i, vy_ipo), vy_L_iph );
vy_R_imh = C - vy_L_iph;
C = vz_R_imh + vz_L_iph;
vz_R_imh = fmax( fmin(vz_i, vz_imo), vz_R_imh );
vz_R_imh = fmin( fmax(vz_i, vz_imo), vz_R_imh );
vz_L_iph = C - vz_R_imh;
vz_L_iph = fmax( fmin(vz_i, vz_ipo), vz_L_iph );
vz_L_iph = fmin( fmax(vz_i, vz_ipo), vz_L_iph );
vz_R_imh = C - vz_L_iph;
C = p_R_imh + p_L_iph;
p_R_imh = fmax( fmin(p_i, p_imo), p_R_imh );
p_R_imh = fmin( fmax(p_i, p_imo), p_R_imh );
p_L_iph = C - p_R_imh;
p_L_iph = fmax( fmin(p_i, p_ipo), p_L_iph );
p_L_iph = fmin( fmax(p_i, p_ipo), p_L_iph );
p_R_imh = C - p_L_iph;
del_d_m_i = d_L_iph - d_R_imh;
del_vx_m_i = vx_L_iph - vx_R_imh;
del_vy_m_i = vy_L_iph - vy_R_imh;
del_vz_m_i = vz_L_iph - vz_R_imh;
del_p_m_i = p_L_iph - p_R_imh;
#ifdef DE
C = ge_R_imh + ge_L_iph;
ge_R_imh = fmax( fmin(ge_i, ge_imo), ge_R_imh );
ge_R_imh = fmin( fmax(ge_i, ge_imo), ge_R_imh );
ge_L_iph = C - ge_R_imh;
ge_L_iph = fmax( fmin(ge_i, ge_ipo), ge_L_iph );
ge_L_iph = fmin( fmax(ge_i, ge_ipo), ge_L_iph );
ge_R_imh = C - ge_L_iph;
del_ge_m_i = ge_L_iph - ge_R_imh;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
C = scalar_R_imh[i] + scalar_L_iph[i];
scalar_R_imh[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_R_imh[i] );
scalar_R_imh[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_R_imh[i] );
scalar_L_iph[i] = C - scalar_R_imh[i];
scalar_L_iph[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_L_iph[i] );
scalar_L_iph[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_L_iph[i] );
scalar_R_imh[i] = C - scalar_L_iph[i];
del_scalar_m_i[i] = scalar_L_iph[i] - scalar_R_imh[i];
}
#endif
#ifdef CTU
// Integrate linear interpolation function over domain of dependence
// defined by max(min) eigenvalue
qx = -0.5*fmin(lambda_m, 0)*dtodx;
d_R_imh = d_R_imh + qx * del_d_m_i;
vx_R_imh = vx_R_imh + qx * del_vx_m_i;
vy_R_imh = vy_R_imh + qx * del_vy_m_i;
vz_R_imh = vz_R_imh + qx * del_vz_m_i;
p_R_imh = p_R_imh + qx * del_p_m_i;
qx = 0.5*fmax(lambda_p, 0)*dtodx;
d_L_iph = d_L_iph - qx * del_d_m_i;
vx_L_iph = vx_L_iph - qx * del_vx_m_i;
vy_L_iph = vy_L_iph - qx * del_vy_m_i;
vz_L_iph = vz_L_iph - qx * del_vz_m_i;
p_L_iph = p_L_iph - qx * del_p_m_i;
#ifdef DE
ge_R_imh = ge_R_imh + qx * del_ge_m_i;
ge_L_iph = ge_L_iph - qx * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] = scalar_R_imh[i] + qx * del_scalar_m_i[i];
scalar_L_iph[i] = scalar_L_iph[i] - qx * del_scalar_m_i[i];
}
#endif
// Perform the characteristic tracing
// Stone Eqns 42 & 43
// left-hand interface value, i+1/2
sum_0 = sum_1 = sum_2 = sum_3 = sum_4 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0.0;
}
#endif
if (lambda_m >= 0)
{
lamdiff = lambda_p - lambda_m;
sum_0 += lamdiff * (-d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 - del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (-d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
if (lambda_0 >= 0)
{
lamdiff = lambda_p - lambda_0;
sum_0 += lamdiff * (del_d_m_i - del_p_m_i/(a_i*a_i));
sum_2 += lamdiff * del_vy_m_i;
sum_3 += lamdiff * del_vz_m_i;
#ifdef DE
sum_ge += lamdiff * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += lamdiff * del_scalar_m_i[i];
}
#endif
}
if (lambda_p >= 0)
{
lamdiff = lambda_p - lambda_p;
sum_0 += lamdiff * (d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 + del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
// add the corrections to the initial guesses for the interface values
d_L_iph += 0.5*dtodx*sum_0;
vx_L_iph += 0.5*dtodx*sum_1;
vy_L_iph += 0.5*dtodx*sum_2;
vz_L_iph += 0.5*dtodx*sum_3;
p_L_iph += 0.5*dtodx*sum_4;
#ifdef DE
ge_L_iph += 0.5*dtodx*sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L_iph[i] += 0.5*dtodx*sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_0 = sum_1 = sum_2 = sum_3 = sum_4 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
lamdiff = lambda_m - lambda_m;
sum_0 += lamdiff * (-d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 - del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (-d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
if (lambda_0 <= 0)
{
lamdiff = lambda_m - lambda_0;
sum_0 += lamdiff * (del_d_m_i - del_p_m_i/(a_i*a_i));
sum_2 += lamdiff * del_vy_m_i;
sum_3 += lamdiff * del_vz_m_i;
#ifdef DE
sum_ge += lamdiff * del_ge_m_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += lamdiff * del_scalar_m_i[i];
}
#endif
}
if (lambda_p <= 0)
{
lamdiff = lambda_m - lambda_p;
sum_0 += lamdiff * (d_i*del_vx_m_i/(2*a_i) + del_p_m_i/(2*a_i*a_i));
sum_1 += lamdiff * (del_vx_m_i/2.0 + del_p_m_i/(2*a_i*d_i));
sum_4 += lamdiff * (d_i*del_vx_m_i*a_i/2.0 + del_p_m_i/2.0);
}
// add the corrections
d_R_imh += 0.5*dtodx*sum_0;
vx_R_imh += 0.5*dtodx*sum_1;
vy_R_imh += 0.5*dtodx*sum_2;
vz_R_imh += 0.5*dtodx*sum_3;
p_R_imh += 0.5*dtodx*sum_4;
#ifdef DE
ge_R_imh += 0.5*dtodx*sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R_imh[i] += 0.5*dtodx*sum_scalar[i];
}
#endif
#endif //CTU
// apply minimum constraints
d_R_imh = fmax(d_R_imh, (Real) TINY_NUMBER);
d_L_iph = fmax(d_L_iph, (Real) TINY_NUMBER);
p_R_imh = fmax(p_R_imh, (Real) TINY_NUMBER);
p_L_iph = fmax(p_L_iph, (Real) TINY_NUMBER);
// Convert the left and right states in the primitive to the conserved variables
// send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
dev_bounds_R[ id] = d_R_imh;
dev_bounds_R[o1*n_cells + id] = d_R_imh*vx_R_imh;
dev_bounds_R[o2*n_cells + id] = d_R_imh*vy_R_imh;
dev_bounds_R[o3*n_cells + id] = d_R_imh*vz_R_imh;
dev_bounds_R[4*n_cells + id] = (p_R_imh/(gamma-1.0)) + 0.5*d_R_imh*(vx_R_imh*vx_R_imh + vy_R_imh*vy_R_imh + vz_R_imh*vz_R_imh);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_R_imh*scalar_R_imh[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_R_imh*ge_R_imh;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_L_iph;
dev_bounds_L[o1*n_cells + id] = d_L_iph*vx_L_iph;
dev_bounds_L[o2*n_cells + id] = d_L_iph*vy_L_iph;
dev_bounds_L[o3*n_cells + id] = d_L_iph*vz_L_iph;
dev_bounds_L[4*n_cells + id] = (p_L_iph/(gamma-1.0)) + 0.5*d_L_iph*(vx_L_iph*vx_L_iph + vy_L_iph*vy_L_iph + vz_L_iph*vz_L_iph);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_L_iph*scalar_L_iph[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_L_iph*ge_L_iph;
#endif
}
}
#endif //PLMC
#endif //CUDA
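// --- Editor note (illustrative sketch, not part of the original source): every slope limit above
// follows the same pattern -- keep the sign of the centered difference and cap its magnitude by
// twice the smaller one-sided difference and by the smaller of the centered and van Leer
// differences. The helper name below is hypothetical. Worked example: del_L = 1.0, del_R = 0.2,
// del_C = 0.6 gives del_G = 2*1.0*0.2/1.2 = 0.333..., so the limited slope is
// min(2*0.2, 0.333...) = 0.333... with positive sign.
#include <math.h>
static inline double limited_slope_sketch(double del_L, double del_R, double del_C, double del_G)
{
  if (del_L * del_R <= 0.0) return 0.0;                 // local extremum: flatten the slope
  double lim_slope_a = fmin(fabs(del_L), fabs(del_R));
  double lim_slope_b = fmin(fabs(del_C), fabs(del_G));
  return (del_C > 0.0 ? 1.0 : -1.0) * fmin(2.0*lim_slope_a, lim_slope_b);
}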
|
9e0abd315f69342088237634c4b2851f6a44a179.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) :
_convNet(convNet), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
void Layer::fprop(PASS_TYPE passType) {
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
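// --- Editor note (descriptive comment, not part of the original source): fprop() and bprop()
// implement a message-counting traversal of the layer DAG. Each time a finished predecessor calls
// this layer's fprop(passType), _rcvdFInputs is bumped, and the forward work runs only once every
// input has reported (_rcvdFInputs == _prev.size()); bprop() mirrors this with _rcvdBInputs
// counted against _numGradProducersNext, so a layer with several gradient-producing consumers
// accumulates all incoming gradients before pushing its own further down. reset() below clears
// both counters so the next pass can repeat the bookkeeping.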
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict)
: Layer(convNet, paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) :
Layer(convNet, paramsDict, trans) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
}
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
_weights.update();
_biases->update();
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients() {
for (int i = 0; i < _weights.getSize(); i++) {
_convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
_convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumRows();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 0, 0, scaleBGrad);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumRows();
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom();
float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
_weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
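// --- Editor note (descriptive comment, not part of the original source): with _trans == true the
// fully-connected layer stores cases as rows, so fpropActs computes Y = X*W + b, bpropActs gives
// dX = dY * W^T, bpropBiases sums dY over the cases, and bpropWeights accumulates X^T * dY into
// the weight increment scaled by eps/numCases (scale 1 during PASS_GC gradient checks), seeding
// the increment with mom times its previous value on the first non-gradient-check update.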
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad)
: WeightLayer(convNet, paramsDict, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice);
cutilCheckMsg("hipMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
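// With shared biases there is one bias per filter, so the activations are
// temporarily viewed as (numFilters x everything else) to broadcast the bias
// across all modules, then reshaped back.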
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // 1 after the first update when partial sums are off, else 0
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
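// When partialSum > 0 the kernel writes one partial gradient per group of
// partialSum modules into _weightGradTmp; those partial rows are summed below
// to form the full weight gradient.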
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
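// When overSample > 1 the sparse-connectivity kernel appears to produce one
// gradient copy per oversampling; the copies are summed below into the true
// activation gradient.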
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
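// Numerically stable softmax: subtract the per-case max before exponentiating
// (softmax is invariant to shifting its inputs), then normalize by the per-case sum.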
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // Nothing happens for input 0; inputs 0 and 1 are max-combined here
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNet, paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNet, paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& images = *_inputs[0];
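// Estimate a local mean around each pixel with average pooling, subtract it
// from the image, and response-normalize the mean-subtracted values.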
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(convNet, paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNet, paramsDict);
} else if (type == "cost.fisher2") {
return *new Fisher2CostLayer(convNet, paramsDict);
} else if (type == "cost.l2-sn") {
return *new L2SNCostLayer(convNet, paramsDict);
} else if (type == "cost.l2-reg") {
return *new L2regCostLayer(convNet, paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
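// _costv[0] is the total negative log-likelihood; _costv[1] is
// numCases - sum(correctProbs), i.e. the classification error count.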
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
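// In that case the combined softmax + logreg gradient reduces to a scaled
// difference of labels and predicted probabilities (see SoftmaxLayer::bpropActs).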
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
/*
* =====================
* Fisher2CostLayer (single network)
* =====================
*/
Fisher2CostLayer::Fisher2CostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
_alpha = pyDictGetFloat(paramsDict, "alpha");
}
void Fisher2CostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& data = *_inputs[1];
int numCases = labels.getNumElements();
float cost = computeFisher2Cost(labels, data, _alpha);
_costv.clear();
_costv.push_back(cost * numCases);
}
}
void Fisher2CostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& data = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
computeFisher2Grad(labels, data, _alpha, target,
scaleTargets == 1, _coeff);
}
}
/*
* =====================
* L2CostLayer for single network
* =====================
*/
L2SNCostLayer::L2SNCostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
_m = pyDictGetFloat(paramsDict, "m");
}
void L2SNCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& data = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& output = getActs();
computeL2SNCost(labels, data, output, _m);
_costv.clear();
_costv.push_back(output.sum());
}
}
void L2SNCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& data = _prev[1]->getActs();
NVMatrix& dataTarget = _prev[1]->getActsGrad();
computeL2SNGrad(labels, data, dataTarget, _m,
scaleTargets == 1, _coeff);
}
}
/*
* =====================
* L2 CostLayer for vector regression
* =====================
*/
L2regCostLayer::L2regCostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
}
void L2regCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& ground = *_inputs[0];
NVMatrix& predict = *_inputs[1];
int numCases = ground.getNumElements();
NVMatrix& output = getActs();
computeL2regCost(ground, predict, output);
_costv.clear();
_costv.push_back(output.sum());
}
}
void L2regCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& ground = *_inputs[0];
NVMatrix& predict = *_inputs[1];
NVMatrix& target = _prev[1]->getActsGrad();
computeL2regGrad(ground, predict, target,
scaleTargets == 1, _coeff);
}
/*
* =====================
* ShiftLayer
* =====================
*/
ShiftLayer::ShiftLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void ShiftLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convShift(*_inputs[0], _filter, getActs(), _channels);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a shift operation?
void ShiftLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
//convShift(v, _filter, tgt1, _channels);
//convShift(tgt1, _filter, _prev[0]->getActsGrad(), _channels);
}
void ShiftLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
} | 9e0abd315f69342088237634c4b2851f6a44a179.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) :
_convNet(convNet), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
void Layer::fprop(PASS_TYPE passType) {
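// Count incoming fprop notifications; the actual computation runs only once
// every input layer has delivered its activations.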
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
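// Backprop runs only after every gradient-producing next layer has accumulated
// its contribution into this layer's activation gradient.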
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict)
: Layer(convNet, paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) :
Layer(convNet, paramsDict, trans) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
}
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
_weights.update();
_biases->update();
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients() {
for (int i = 0; i < _weights.getSize(); i++) {
_convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
_convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumRows();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 0, 0, scaleBGrad);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumRows();
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom();
float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
_weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad)
: WeightLayer(convNet, paramsDict, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice);
cutilCheckMsg("cudaMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // 1 after the first update when partial sums are off, else 0
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // Nothing happens for input 0; inputs 0 and 1 are max-combined here
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNet, paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNet, paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(convNet, paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNet, paramsDict);
} else if (type == "cost.fisher2") {
return *new Fisher2CostLayer(convNet, paramsDict);
} else if (type == "cost.l2-sn") {
return *new L2SNCostLayer(convNet, paramsDict);
} else if (type == "cost.l2-reg") {
return *new L2regCostLayer(convNet, paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
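// (With a softmax input feeding only this cost, that combined gradient is simply
// probs - 1{true label}, so the near-zero probabilities never appear in a denominator.)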
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
/*
* =====================
* Fisher2CostLayer (single network)
* =====================
*/
Fisher2CostLayer::Fisher2CostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
_alpha = pyDictGetFloat(paramsDict, "alpha");
}
void Fisher2CostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& data = *_inputs[1];
int numCases = labels.getNumElements();
float cost = computeFisher2Cost(labels, data, _alpha);
_costv.clear();
_costv.push_back(cost * numCases);
}
}
void Fisher2CostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& data = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
computeFisher2Grad(labels, data, _alpha, target,
scaleTargets == 1, _coeff);
}
}
/*
* =====================
* L2CostLayer for single network
* =====================
*/
L2SNCostLayer::L2SNCostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
_m = pyDictGetFloat(paramsDict, "m");
}
void L2SNCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs (labels and data) together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& data = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& output = getActs();
computeL2SNCost(labels, data, output, _m);
_costv.clear();
_costv.push_back(output.sum());
}
}
void L2SNCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& data = _prev[1]->getActs();
NVMatrix& dataTarget = _prev[1]->getActsGrad();
computeL2SNGrad(labels, data, dataTarget, _m,
scaleTargets == 1, _coeff);
}
}
/*
* =====================
* L2 CostLayer for vector regression
* =====================
*/
L2regCostLayer::L2regCostLayer(ConvNet* convNet, PyObject* paramsDict)
: CostLayer(convNet, paramsDict, false) {
}
void L2regCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs (ground truth and prediction) together
if (inpIdx == 0) {
NVMatrix& ground = *_inputs[0];
NVMatrix& predict = *_inputs[1];
int numCases = ground.getNumElements();
NVMatrix& output = getActs();
computeL2regCost(ground, predict, output);
_costv.clear();
_costv.push_back(output.sum());
}
}
void L2regCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& ground = *_inputs[0];
NVMatrix& predict = *_inputs[1];
NVMatrix& target = _prev[1]->getActsGrad();
computeL2regGrad(ground, predict, target,
scaleTargets == 1, _coeff);
}
/*
* =====================
* ShiftLayer
* =====================
*/
ShiftLayer::ShiftLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void ShiftLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convShift(*_inputs[0], _filter, getActs(), _channels);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a fixed shift filter?
void ShiftLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
//NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
//convShift(v, _filter, tgt1, _channels);
//convShift(tgt1, _filter, _prev[0]->getActsGrad(), _channels);
}
void ShiftLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
} |
b180ffd989de99dbbcb88340e1b1ff1d6765e487.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Jacobian.h"
__constant__ float c_kPy[90];
__constant__ float c_ikPy[90];
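// c_kPy / c_ikPy store the per-pyramid-level intrinsics K and K^-1 (up to 10
// levels, 9 floats each); with a standard pinhole K the entries read below are
// [0]=fx, [4]=fy, [6]=cx, [7]=cy.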
// __device__ float3 mulKVecJ(float* k, float3 vec) {
__device__ float3 mulKVecJ(float k0, float k6, float k4, float k7, float3 vec) {
// return make_float3(k[0]*vec.x+k[6]*vec.z, k[4]*vec.y+k[7]*vec.z, vec.z);
return make_float3(k0*vec.x+k6*vec.z, k4*vec.y+k7*vec.z, vec.z);
}
__device__ float3 mulRVecJ(float* R, float3 vec) {
return make_float3(R[0]*vec.x+R[3]*vec.y+R[6]*vec.z, R[1]*vec.x+R[4]*vec.y+R[7]*vec.z, R[2]*vec.x+R[5]*vec.y+R[8]*vec.z);
}
void setConstMemJ(Eigen::Matrix3f kPy, Eigen::Matrix3f ikPy, int offset) {
hipMemcpyToSymbol (c_kPy, kPy.transpose().data(), 9*sizeof(float), offset);
hipMemcpyToSymbol (c_ikPy, ikPy.transpose().data(), 9*sizeof(float), offset);
}
texture<float, 2, hipReadModeElementType> texX;
texture<float, 2, hipReadModeElementType> texY;
// __global__ void computeJacobian(float *dRef, float *iCrr, float *k, float *ik, int w, int h, float *R, float *t, float *J) {
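// Per-pixel photometric Jacobian: back-project each reference pixel using its
// depth, transform by (R, t), reproject into the current image, sample the
// image gradients from the bound textures, and write the six derivatives
// (translation first, then rotation) into J.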
__global__ void computeJacobian(float *dRef, float *iCrr, int lvl, int w, int h, float *R, float *t, float *J) {
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
// check if within bounds
if (x < w && y < h)
{
size_t idx = x + (size_t)w*y;
float d = dRef[idx];
if (d>0.0f) {
float3 pos = make_float3(d*(float)x,d*(float)y,d);
// pos = mulKVecJ(ik,pos);
pos = mulKVecJ(c_ikPy[lvl*9], c_ikPy[lvl*9+6], c_ikPy[lvl*9+4], c_ikPy[lvl*9+7], pos);
pos = mulRVecJ(R, pos);
pos = make_float3(pos.x+t[0], pos.y+t[1], pos.z+t[2]);
if (pos.z > 0.0f) {
// float3 pos2 = mulKVecJ(k,pos);
float3 pos2 = mulKVecJ(c_kPy[lvl*9], c_kPy[lvl*9+6], c_kPy[lvl*9+4], c_kPy[lvl*9+7], pos);
pos2.x /= pos2.z; pos2.y /= pos2.z;
if ((int)(pos2.x) < w && (int)(pos2.y) < h && (int)(pos2.x) >= 0 && (int)(pos2.y) >= 0) {
// float dX = tex2D(texX, pos2.x+0.5f, pos2.y+0.5f) * k[0];
// float dY = tex2D(texY, pos2.x+0.5f, pos2.y+0.5f) * k[4];
float dX = tex2D(texX, pos2.x+0.5f, pos2.y+0.5f) * c_kPy[lvl*9];
float dY = tex2D(texY, pos2.x+0.5f, pos2.y+0.5f) * c_kPy[lvl*9+4];
// float dX = (iCrr[min((int)(pos2.x+1), w-1) + w*(int)pos2.y]-iCrr[max((int)(pos2.x-1), 0) + w*(int)pos2.y])*0.5f * k[0];
// float dY = (iCrr[(int)pos2.x + w*min((int)(pos2.y+1), h-1)]-iCrr[(int)pos2.x + w*max((int)(pos2.y-1), 0)])*0.5f * k[4];
dX /= pos.z;
dY /= pos.z;
idx *= 6;
J[idx] = -1.0f * (dX);
J[idx+1] = -1.0f * (dY);
J[idx+2] = -1.0f * (-dX*pos.x/pos.z -dY*pos.y/pos.z);
J[idx+3] = -1.0f * (-dX*pos.x*pos.y/pos.z -dY*(pos.z+pos.y*pos.y/pos.z));
J[idx+4] = -1.0f * (dX*(pos.z+pos.x*pos.x/pos.z) +dY*pos.x*pos.y/pos.z);
J[idx+5] = -1.0f * (-dX*pos.y +dY*pos.x);
}
}
}
}
}
// void computeJacobians(float **d_dPyRef, float **d_iPyCrr, float **d_kPy, float **d_ikPy, int lvl, int w, int h, float *d_R, float *d_t, float *d_J, float *d_dX, float *d_dY) {
void computeJacobians(float **d_dPyRef, float **d_iPyCrr, int lvl, int w, int h, float *d_R, float *d_t, float *d_J, float *d_dX, float *d_dY) {
dim3 block = dim3(32, 8, 1);
dim3 grid;
// for (int i=0; i<lvl; i++) {
// w = (w+1)/2;
// h = (h+1)/2;
// }
grid = dim3(((size_t)w+block.x-1)/block.x, ((size_t)h+block.y-1)/block.y, 1);
texX.addressMode[0] = hipAddressModeClamp; texX.addressMode[1] = hipAddressModeClamp;
texX.filterMode = hipFilterModeLinear; texX.normalized = false;
hipChannelFormatDesc descX = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texX, d_dX, &descX, w, h, w*sizeof(d_dX[0]));
texY.addressMode[0] = hipAddressModeClamp; texY.addressMode[1] = hipAddressModeClamp;
texY.filterMode = hipFilterModeLinear; texY.normalized = false;
hipChannelFormatDesc descY = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texY, d_dY, &descY, w, h, w*sizeof(d_dY[0]));
//hipLaunchKernelGGL(( computeJacobian) , dim3(grid), dim3(block), 0, 0, d_dPyRef[lvl], d_iPyCrr[lvl], d_kPy[lvl], d_ikPy[lvl], w, h, d_R, d_t, d_J);
hipLaunchKernelGGL(( computeJacobian) , dim3(grid), dim3(block), 0, 0, d_dPyRef[lvl], d_iPyCrr[lvl], lvl, w, h, d_R, d_t, d_J);
hipUnbindTexture(texX); hipUnbindTexture(texY);
} | b180ffd989de99dbbcb88340e1b1ff1d6765e487.cu | #include "Jacobian.h"
__constant__ float c_kPy[90];
__constant__ float c_ikPy[90];
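// c_kPy / c_ikPy store the per-pyramid-level intrinsics K and K^-1 (up to 10
// levels, 9 floats each); with a standard pinhole K the entries read below are
// [0]=fx, [4]=fy, [6]=cx, [7]=cy.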
// __device__ float3 mulKVecJ(float* k, float3 vec) {
__device__ float3 mulKVecJ(float k0, float k6, float k4, float k7, float3 vec) {
// return make_float3(k[0]*vec.x+k[6]*vec.z, k[4]*vec.y+k[7]*vec.z, vec.z);
return make_float3(k0*vec.x+k6*vec.z, k4*vec.y+k7*vec.z, vec.z);
}
__device__ float3 mulRVecJ(float* R, float3 vec) {
return make_float3(R[0]*vec.x+R[3]*vec.y+R[6]*vec.z, R[1]*vec.x+R[4]*vec.y+R[7]*vec.z, R[2]*vec.x+R[5]*vec.y+R[8]*vec.z);
}
void setConstMemJ(Eigen::Matrix3f kPy, Eigen::Matrix3f ikPy, int offset) {
cudaMemcpyToSymbol (c_kPy, kPy.transpose().data(), 9*sizeof(float), offset);
cudaMemcpyToSymbol (c_ikPy, ikPy.transpose().data(), 9*sizeof(float), offset);
}
texture<float, 2, cudaReadModeElementType> texX;
texture<float, 2, cudaReadModeElementType> texY;
// __global__ void computeJacobian(float *dRef, float *iCrr, float *k, float *ik, int w, int h, float *R, float *t, float *J) {
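// Per-pixel photometric Jacobian: back-project each reference pixel using its
// depth, transform by (R, t), reproject into the current image, sample the
// image gradients from the bound textures, and write the six derivatives
// (translation first, then rotation) into J.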
__global__ void computeJacobian(float *dRef, float *iCrr, int lvl, int w, int h, float *R, float *t, float *J) {
int x = threadIdx.x + blockDim.x*blockIdx.x;
int y = threadIdx.y + blockDim.y*blockIdx.y;
// check if within bounds
if (x < w && y < h)
{
size_t idx = x + (size_t)w*y;
float d = dRef[idx];
if (d>0.0f) {
float3 pos = make_float3(d*(float)x,d*(float)y,d);
// pos = mulKVecJ(ik,pos);
pos = mulKVecJ(c_ikPy[lvl*9], c_ikPy[lvl*9+6], c_ikPy[lvl*9+4], c_ikPy[lvl*9+7], pos);
pos = mulRVecJ(R, pos);
pos = make_float3(pos.x+t[0], pos.y+t[1], pos.z+t[2]);
if (pos.z > 0.0f) {
// float3 pos2 = mulKVecJ(k,pos);
float3 pos2 = mulKVecJ(c_kPy[lvl*9], c_kPy[lvl*9+6], c_kPy[lvl*9+4], c_kPy[lvl*9+7], pos);
pos2.x /= pos2.z; pos2.y /= pos2.z;
if ((int)(pos2.x) < w && (int)(pos2.y) < h && (int)(pos2.x) >= 0 && (int)(pos2.y) >= 0) {
// float dX = tex2D(texX, pos2.x+0.5f, pos2.y+0.5f) * k[0];
// float dY = tex2D(texY, pos2.x+0.5f, pos2.y+0.5f) * k[4];
float dX = tex2D(texX, pos2.x+0.5f, pos2.y+0.5f) * c_kPy[lvl*9];
float dY = tex2D(texY, pos2.x+0.5f, pos2.y+0.5f) * c_kPy[lvl*9+4];
// float dX = (iCrr[min((int)(pos2.x+1), w-1) + w*(int)pos2.y]-iCrr[max((int)(pos2.x-1), 0) + w*(int)pos2.y])*0.5f * k[0];
// float dY = (iCrr[(int)pos2.x + w*min((int)(pos2.y+1), h-1)]-iCrr[(int)pos2.x + w*max((int)(pos2.y-1), 0)])*0.5f * k[4];
dX /= pos.z;
dY /= pos.z;
idx *= 6;
J[idx] = -1.0f * (dX);
J[idx+1] = -1.0f * (dY);
J[idx+2] = -1.0f * (-dX*pos.x/pos.z -dY*pos.y/pos.z);
J[idx+3] = -1.0f * (-dX*pos.x*pos.y/pos.z -dY*(pos.z+pos.y*pos.y/pos.z));
J[idx+4] = -1.0f * (dX*(pos.z+pos.x*pos.x/pos.z) +dY*pos.x*pos.y/pos.z);
J[idx+5] = -1.0f * (-dX*pos.y +dY*pos.x);
}
}
}
}
}
// void computeJacobians(float **d_dPyRef, float **d_iPyCrr, float **d_kPy, float **d_ikPy, int lvl, int w, int h, float *d_R, float *d_t, float *d_J, float *d_dX, float *d_dY) {
void computeJacobians(float **d_dPyRef, float **d_iPyCrr, int lvl, int w, int h, float *d_R, float *d_t, float *d_J, float *d_dX, float *d_dY) {
dim3 block = dim3(32, 8, 1);
dim3 grid;
// for (int i=0; i<lvl; i++) {
// w = (w+1)/2;
// h = (h+1)/2;
// }
grid = dim3(((size_t)w+block.x-1)/block.x, ((size_t)h+block.y-1)/block.y, 1);
texX.addressMode[0] = cudaAddressModeClamp; texX.addressMode[1] = cudaAddressModeClamp;
texX.filterMode = cudaFilterModeLinear; texX.normalized = false;
cudaChannelFormatDesc descX = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texX, d_dX, &descX, w, h, w*sizeof(d_dX[0]));
texY.addressMode[0] = cudaAddressModeClamp; texY.addressMode[1] = cudaAddressModeClamp;
texY.filterMode = cudaFilterModeLinear; texY.normalized = false;
cudaChannelFormatDesc descY = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texY, d_dY, &descY, w, h, w*sizeof(d_dY[0]));
// computeJacobian <<<grid, block>>> (d_dPyRef[lvl], d_iPyCrr[lvl], d_kPy[lvl], d_ikPy[lvl], w, h, d_R, d_t, d_J);
computeJacobian <<<grid, block>>> (d_dPyRef[lvl], d_iPyCrr[lvl], lvl, w, h, d_R, d_t, d_J);
cudaUnbindTexture(texX); cudaUnbindTexture(texY);
} |
2c248a6873e2bef959486aa63755118986bca147.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DevicePartition::If().
*
* Partitions items from a sequence of int keys using a
* selection functor (greater-than)
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_select_if.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_partition.cuh>
#include "../../test/test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
/// Selection functor type
struct GreaterThan
{
int compare;
__host__ __device__ __forceinline__
GreaterThan(int compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const int &a) const {
return (a > compare);
}
};
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem, setting runs of random length chosen from [1..max_segment]
*/
void Initialize(
int *h_in,
int num_items,
int max_segment)
{
int key = 0;
int i = 0;
while (i < num_items)
{
// Randomly select number of repeating occurrences uniformly from [1..max_segment]
unsigned short max_short = (unsigned short) -1;
unsigned short repeat;
RandomBits(repeat);
repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short))));
repeat = CUB_MAX(1, repeat);
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
h_in[j] = key;
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve unique problem
*/
template <typename SelectOp>
int Solve(
int *h_in,
SelectOp select_op,
int *h_reference,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if (select_op(h_in[i]))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
int max_segment = 40; // Maximum segment length
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("maxseg", max_segment);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Allocate host arrays
int *h_in = new int[num_items];
int *h_reference = new int[num_items];
// Randomly choose a pivot index to partition around
unsigned int pivot_index;
unsigned int max_int = (unsigned int) -1;
RandomBits(pivot_index);
pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int))));
printf("Pivot idx: %d\n", pivot_index); fflush(stdout);
// Initialize problem and solution
Initialize(h_in, num_items, max_segment);
GreaterThan select_op(h_in[pivot_index]);
int num_selected = Solve(h_in, select_op, h_reference, num_items);
printf("cub::DevicePartition::If %d items, %d selected (avg run length %d), %d-byte elements\n",
num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int));
fflush(stdout);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice));
// Allocate device output array and num selected
int *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
printf("\t Data %s ", compare ? "FAIL" : "PASS");
compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
| 2c248a6873e2bef959486aa63755118986bca147.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DevicePartition::If().
*
* Partitions items from a sequence of int keys using a
* selection functor (greater-than)
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_select_if.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <cub/util_allocator.cuh>
#include <cub/device/device_partition.cuh>
#include "../../test/test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
/// Selection functor type
struct GreaterThan
{
int compare;
__host__ __device__ __forceinline__
GreaterThan(int compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const int &a) const {
return (a > compare);
}
};
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem, setting runs of random length chosen from [1..max_segment]
*/
void Initialize(
int *h_in,
int num_items,
int max_segment)
{
int key = 0;
int i = 0;
while (i < num_items)
{
// Randomly select number of repeating occurrences uniformly from [1..max_segment]
unsigned short max_short = (unsigned short) -1;
unsigned short repeat;
RandomBits(repeat);
repeat = (unsigned short) ((float(repeat) * (float(max_segment) / float(max_short))));
repeat = CUB_MAX(1, repeat);
int j = i;
while (j < CUB_MIN(i + repeat, num_items))
{
h_in[j] = key;
j++;
}
i = j;
key++;
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve unique problem
*/
template <typename SelectOp>
int Solve(
int *h_in,
SelectOp select_op,
int *h_reference,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if (select_op(h_in[i]))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
int max_segment = 40; // Maximum segment length
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("maxseg", max_segment);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--maxseg=<max segment length>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Allocate host arrays
int *h_in = new int[num_items];
int *h_reference = new int[num_items];
// Randomly choose a pivot index to partition around
unsigned int pivot_index;
unsigned int max_int = (unsigned int) -1;
RandomBits(pivot_index);
pivot_index = (unsigned int) ((float(pivot_index) * (float(num_items - 1) / float(max_int))));
printf("Pivot idx: %d\n", pivot_index); fflush(stdout);
// Initialize problem and solution
Initialize(h_in, num_items, max_segment);
GreaterThan select_op(h_in[pivot_index]);
int num_selected = Solve(h_in, select_op, h_reference, num_items);
printf("cub::DevicePartition::If %d items, %d selected (avg run length %d), %d-byte elements\n",
num_items, num_selected, (num_selected > 0) ? num_items / num_selected : 0, (int) sizeof(int));
fflush(stdout);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// Allocate device output array and num selected
int *d_out = NULL;
int *d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose);
printf("\t Data %s ", compare ? "FAIL" : "PASS");
compare = compare | CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s ", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
|
ac940fabb48e24b44ac30595d60c0a01d5eba7ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
// cuda include
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
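// Returns one uniform float in (0, 1] for the calling thread: the thread's RNG
// state is loaded, sampled once, and written back so the sequence advances.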
__device__ float Grand(hiprandState_t *state){
int index = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t local_state = state[index];
float rand_num = hiprand_uniform(&local_state);
state[index] = local_state;
return rand_num;
}
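// Scales a uniform draw to an integer in roughly [0, limit]; because the
// uniform sample can be exactly 1.0f, the cast can occasionally yield limit + 1.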
__device__ int GrandInt(hiprandState_t *state, int limit){
float rand_num = Grand(state) * (limit + 1);
return (int)rand_num;
}
__global__ void GSrand(hiprandState_t *state, unsigned int seed){
int index = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, index, 0, &state[index]);
}
| ac940fabb48e24b44ac30595d60c0a01d5eba7ce.cu | #include <stdio.h>
#include <stdlib.h>
// cuda include
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
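// Returns one uniform float in (0, 1] for the calling thread: the thread's RNG
// state is loaded, sampled once, and written back so the sequence advances.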
__device__ float Grand(curandState *state){
int index = blockIdx.x * blockDim.x + threadIdx.x;
curandState local_state = state[index];
float rand_num = curand_uniform(&local_state);
state[index] = local_state;
return rand_num;
}
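// Scales a uniform draw to an integer in roughly [0, limit]; because the
// uniform sample can be exactly 1.0f, the cast can occasionally yield limit + 1.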
__device__ int GrandInt(curandState *state, int limit){
float rand_num = Grand(state) * (limit + 1);
return (int)rand_num;
}
__global__ void GSrand(curandState *state, unsigned int seed){
int index = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, index, 0, &state[index]);
}
|
e4dc27b5bf35fe0a299d818920602f4d8110ecb6.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndex.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <algorithm>
#include <limits>
#include <memory>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
/// Size above which we page copies from the CPU to GPU (non-paged
/// memory usage)
constexpr size_t kNonPinnedPageSize = (size_t)256 * 1024 * 1024;
// Default size for which we page add or search
constexpr size_t kAddPageSize = (size_t)256 * 1024 * 1024;
// Or, maximum number of vectors to consider per page of add or search
constexpr size_t kAddVecSize = (size_t)512 * 1024;
// Use a smaller search size, as precomputed code usage on IVFPQ
// requires substantial amounts of memory
// FIXME: parameterize based on algorithm need
constexpr size_t kSearchVecSize = (size_t)32 * 1024;
GpuIndex::GpuIndex(
std::shared_ptr<GpuResources> resources,
int dims,
faiss::MetricType metric,
float metricArg,
GpuIndexConfig config)
: Index(dims, metric),
resources_(resources),
config_(config),
minPagedSize_(kMinPageSize) {
FAISS_THROW_IF_NOT_FMT(
config_.device < getNumDevices(),
"Invalid GPU device %d",
config_.device);
FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions");
FAISS_THROW_IF_NOT_FMT(
config_.memorySpace == MemorySpace::Device ||
(config_.memorySpace == MemorySpace::Unified &&
getFullUnifiedMemSupport(config_.device)),
"Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)",
config.device);
metric_arg = metricArg;
FAISS_ASSERT((bool)resources_);
resources_->initializeForDevice(config_.device);
}
int GpuIndex::getDevice() const {
return config_.device;
}
void GpuIndex::copyFrom(const faiss::Index* index) {
d = index->d;
metric_type = index->metric_type;
metric_arg = index->metric_arg;
ntotal = index->ntotal;
is_trained = index->is_trained;
}
void GpuIndex::copyTo(faiss::Index* index) const {
index->d = d;
index->metric_type = metric_type;
index->metric_arg = metric_arg;
index->ntotal = ntotal;
index->is_trained = is_trained;
}
void GpuIndex::setMinPagingSize(size_t size) {
minPagedSize_ = size;
}
size_t GpuIndex::getMinPagingSize() const {
return minPagedSize_;
}
void GpuIndex::add(Index::idx_t n, const float* x) {
// Pass to add_with_ids
add_with_ids(n, x, nullptr);
}
void GpuIndex::add_with_ids(
Index::idx_t n,
const float* x,
const Index::idx_t* ids) {
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
if (n == 0) {
// nothing to add
return;
}
std::vector<Index::idx_t> generatedIds;
// Generate IDs if we need them
if (!ids && addImplRequiresIDs_()) {
generatedIds = std::vector<Index::idx_t>(n);
for (Index::idx_t i = 0; i < n; ++i) {
generatedIds[i] = this->ntotal + i;
}
}
DeviceScope scope(config_.device);
addPaged_((int)n, x, ids ? ids : generatedIds.data());
}
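// Splits a large add into pages so that no single staging copy exceeds the
// kAddPageSize byte budget or the per-page vector cap, forwarding each page to
// addPage_.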
void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) {
if (n > 0) {
size_t totalSize = (size_t)n * this->d * sizeof(float);
if (totalSize > kAddPageSize || n > kAddVecSize) {
// How many vectors fit into kAddPageSize?
size_t maxNumVecsForPageSize =
kAddPageSize / ((size_t)this->d * sizeof(float));
// Always add at least 1 vector, if we have huge vectors
maxNumVecsForPageSize = ::max(maxNumVecsForPageSize, (size_t)1);
size_t tileSize = ::min((size_t)n, maxNumVecsForPageSize);
tileSize = ::min(tileSize, kSearchVecSize);
for (size_t i = 0; i < (size_t)n; i += tileSize) {
size_t curNum = ::min(tileSize, n - i);
addPage_(
curNum,
x + i * (size_t)this->d,
ids ? ids + i : nullptr);
}
} else {
addPage_(n, x, ids);
}
}
}
void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) {
// At this point, `x` can be resident on CPU or GPU, and `ids` may be
// resident on CPU, GPU or may be null.
//
// Before continuing, we guarantee that all data will be resident on the
// GPU.
auto stream = resources_->getDefaultStreamCurrentDevice();
auto vecs = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
const_cast<float*>(x),
stream,
{n, this->d});
if (ids) {
auto indices = toDeviceTemporary<Index::idx_t, 1>(
resources_.get(),
config_.device,
const_cast<Index::idx_t*>(ids),
stream,
{n});
addImpl_(n, vecs.data(), ids ? indices.data() : nullptr);
} else {
addImpl_(n, vecs.data(), nullptr);
}
}
void GpuIndex::assign(
Index::idx_t n,
const float* x,
Index::idx_t* labels,
Index::idx_t k) const {
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
// Maximum k-selection supported is based on the CUDA SDK
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU index only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
DeviceScope scope(config_.device);
auto stream = resources_->getDefaultStream(config_.device);
// We need to create a throw-away buffer for distances, which we don't use
// but which we do need for the search call
DeviceTensor<float, 2, true> distances(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
// Forward to search
search(n, x, k, distances.data(), labels);
}
void GpuIndex::search(
Index::idx_t n,
const float* x,
Index::idx_t k,
float* distances,
Index::idx_t* labels) const {
FAISS_THROW_IF_NOT(k > 0);
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
// Maximum k-selection supported is based on the CUDA SDK
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU index only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
if (n == 0 || k == 0) {
// nothing to search
return;
}
DeviceScope scope(config_.device);
auto stream = resources_->getDefaultStream(config_.device);
// We guarantee that the searchImpl_ will be called with device-resident
// pointers.
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
distances,
stream,
{(int)n, (int)k});
auto outLabels = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(), config_.device, labels, stream, {(int)n, (int)k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * this->d * sizeof(float);
if (dataSize >= minPagedSize_) {
searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k, outDistances.data(), outLabels.data());
}
// Copy back if necessary
fromDevice<float, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outLabels, labels, stream);
}
void GpuIndex::searchNonPaged_(
int n,
const float* x,
int k,
float* outDistancesData,
Index::idx_t* outIndicesData) const {
auto stream = resources_->getDefaultStream(config_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
const_cast<float*>(x),
stream,
{n, (int)this->d});
searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData);
}
void GpuIndex::searchFromCpuPaged_(
int n,
const float* x,
int k,
float* outDistancesData,
Index::idx_t* outIndicesData) const {
Tensor<float, 2, true> outDistances(outDistancesData, {n, k});
Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k});
// Is pinned memory available?
auto pinnedAlloc = resources_->getPinnedMemory();
int pageSizeInVecs =
(int)((pinnedAlloc.second / 2) / (sizeof(float) * this->d));
if (!pinnedAlloc.first || pageSizeInVecs < 1) {
// Just page without overlapping copy with compute
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kNonPinnedPageSize / (sizeof(float) * this->d)));
for (int cur = 0; cur < n; cur += batchSize) {
int num = ::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * this->d,
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
return;
}
//
// Pinned memory is available, so we can overlap copy with compute.
// We use two pinned memory buffers, and triple-buffer the
// procedure:
//
// 1 CPU copy -> pinned
// 2 pinned copy -> GPU
// 3 GPU compute
//
// 1 2 3 1 2 3 ... (pinned buf A)
// 1 2 3 1 2 ... (pinned buf B)
// 1 2 3 1 ... (pinned buf A)
// time ->
//
auto defaultStream = resources_->getDefaultStream(config_.device);
auto copyStream = resources_->getAsyncCopyStream(config_.device);
FAISS_ASSERT(
(size_t)pageSizeInVecs * this->d <=
(size_t)std::numeric_limits<int>::max());
float* bufPinnedA = (float*)pinnedAlloc.first;
float* bufPinnedB = bufPinnedA + (size_t)pageSizeInVecs * this->d;
float* bufPinned[2] = {bufPinnedA, bufPinnedB};
// Reserve space on the GPU for the destination of the pinned buffer
// copy
DeviceTensor<float, 2, true> bufGpuA(
resources_.get(),
makeTempAlloc(AllocType::Other, defaultStream),
{(int)pageSizeInVecs, (int)this->d});
DeviceTensor<float, 2, true> bufGpuB(
resources_.get(),
makeTempAlloc(AllocType::Other, defaultStream),
{(int)pageSizeInVecs, (int)this->d});
DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB};
// Copy completion events for the pinned buffers
std::unique_ptr<CudaEvent> eventPinnedCopyDone[2];
// Execute completion events for the GPU buffers
std::unique_ptr<CudaEvent> eventGpuExecuteDone[2];
// All offsets are in terms of number of vectors; they remain within
// int bounds (this function only handles at most INT_MAX vectors)
// Current start offset for buffer 1
int cur1 = 0;
int cur1BufIndex = 0;
// Current start offset for buffer 2
int cur2 = -1;
int cur2BufIndex = 0;
// Current start offset for buffer 3
int cur3 = -1;
int cur3BufIndex = 0;
while (cur3 < n) {
// Start async pinned -> GPU copy first (buf 2)
if (cur2 != -1 && cur2 < n) {
// Copy pinned to GPU
int numToCopy = ::min(pageSizeInVecs, n - cur2);
// Make sure any previous execution has completed before continuing
auto& eventPrev = eventGpuExecuteDone[cur2BufIndex];
if (eventPrev.get()) {
eventPrev->streamWaitOnEvent(copyStream);
}
CUDA_VERIFY(hipMemcpyAsync(
bufGpus[cur2BufIndex]->data(),
bufPinned[cur2BufIndex],
(size_t)numToCopy * this->d * sizeof(float),
hipMemcpyHostToDevice,
copyStream));
// Mark a completion event in this stream
eventPinnedCopyDone[cur2BufIndex].reset(new CudaEvent(copyStream));
// We pick up from here
cur3 = cur2;
cur2 += numToCopy;
cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0;
}
if (cur3 != -1 && cur3 < n) {
// Process on GPU
int numToProcess = ::min(pageSizeInVecs, n - cur3);
// Make sure the previous copy has completed before continuing
auto& eventPrev = eventPinnedCopyDone[cur3BufIndex];
FAISS_ASSERT(eventPrev.get());
eventPrev->streamWaitOnEvent(defaultStream);
// Create tensor wrappers
// DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(),
// {numToProcess, this->d});
auto outDistancesSlice =
outDistances.narrowOutermost(cur3, numToProcess);
auto outIndicesSlice =
outIndices.narrowOutermost(cur3, numToProcess);
searchImpl_(
numToProcess,
bufGpus[cur3BufIndex]->data(),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
// Create completion event
eventGpuExecuteDone[cur3BufIndex].reset(
new CudaEvent(defaultStream));
// We pick up from here
cur3BufIndex = (cur3BufIndex == 0) ? 1 : 0;
cur3 += numToProcess;
}
if (cur1 < n) {
// Copy CPU mem to CPU pinned
int numToCopy = ::min(pageSizeInVecs, n - cur1);
// Make sure any previous copy has completed before continuing
auto& eventPrev = eventPinnedCopyDone[cur1BufIndex];
if (eventPrev.get()) {
eventPrev->cpuWaitOnEvent();
}
memcpy(bufPinned[cur1BufIndex],
x + (size_t)cur1 * this->d,
(size_t)numToCopy * this->d * sizeof(float));
// We pick up from here
cur2 = cur1;
cur1 += numToCopy;
cur1BufIndex = (cur1BufIndex == 0) ? 1 : 0;
}
}
}
void GpuIndex::compute_residual(
const float* x,
float* residual,
Index::idx_t key) const {
FAISS_THROW_MSG("compute_residual not implemented for this type of index");
}
void GpuIndex::compute_residual_n(
Index::idx_t n,
const float* xs,
float* residuals,
const Index::idx_t* keys) const {
FAISS_THROW_MSG(
"compute_residual_n not implemented for this type of index");
}
std::shared_ptr<GpuResources> GpuIndex::getResources() {
return resources_;
}
} // namespace gpu
} // namespace faiss
| e4dc27b5bf35fe0a299d818920602f4d8110ecb6.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndex.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <algorithm>
#include <limits>
#include <memory>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
/// Size above which we page copies from the CPU to GPU (non-paged
/// memory usage)
constexpr size_t kNonPinnedPageSize = (size_t)256 * 1024 * 1024;
// Default size for which we page add or search
constexpr size_t kAddPageSize = (size_t)256 * 1024 * 1024;
// Or, maximum number of vectors to consider per page of add or search
constexpr size_t kAddVecSize = (size_t)512 * 1024;
// Use a smaller search size, as precomputed code usage on IVFPQ
// requires substantial amounts of memory
// FIXME: parameterize based on algorithm need
constexpr size_t kSearchVecSize = (size_t)32 * 1024;
GpuIndex::GpuIndex(
std::shared_ptr<GpuResources> resources,
int dims,
faiss::MetricType metric,
float metricArg,
GpuIndexConfig config)
: Index(dims, metric),
resources_(resources),
config_(config),
minPagedSize_(kMinPageSize) {
FAISS_THROW_IF_NOT_FMT(
config_.device < getNumDevices(),
"Invalid GPU device %d",
config_.device);
FAISS_THROW_IF_NOT_MSG(dims > 0, "Invalid number of dimensions");
FAISS_THROW_IF_NOT_FMT(
config_.memorySpace == MemorySpace::Device ||
(config_.memorySpace == MemorySpace::Unified &&
getFullUnifiedMemSupport(config_.device)),
"Device %d does not support full CUDA 8 Unified Memory (CC 6.0+)",
config.device);
metric_arg = metricArg;
FAISS_ASSERT((bool)resources_);
resources_->initializeForDevice(config_.device);
}
int GpuIndex::getDevice() const {
return config_.device;
}
void GpuIndex::copyFrom(const faiss::Index* index) {
d = index->d;
metric_type = index->metric_type;
metric_arg = index->metric_arg;
ntotal = index->ntotal;
is_trained = index->is_trained;
}
void GpuIndex::copyTo(faiss::Index* index) const {
index->d = d;
index->metric_type = metric_type;
index->metric_arg = metric_arg;
index->ntotal = ntotal;
index->is_trained = is_trained;
}
void GpuIndex::setMinPagingSize(size_t size) {
minPagedSize_ = size;
}
size_t GpuIndex::getMinPagingSize() const {
return minPagedSize_;
}
void GpuIndex::add(Index::idx_t n, const float* x) {
// Pass to add_with_ids
add_with_ids(n, x, nullptr);
}
void GpuIndex::add_with_ids(
Index::idx_t n,
const float* x,
const Index::idx_t* ids) {
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
if (n == 0) {
// nothing to add
return;
}
std::vector<Index::idx_t> generatedIds;
// Generate IDs if we need them
if (!ids && addImplRequiresIDs_()) {
generatedIds = std::vector<Index::idx_t>(n);
for (Index::idx_t i = 0; i < n; ++i) {
generatedIds[i] = this->ntotal + i;
}
}
DeviceScope scope(config_.device);
addPaged_((int)n, x, ids ? ids : generatedIds.data());
}
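// Splits a large add into pages so that no single staging copy exceeds the
// kAddPageSize byte budget or the per-page vector cap, forwarding each page to
// addPage_.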
void GpuIndex::addPaged_(int n, const float* x, const Index::idx_t* ids) {
if (n > 0) {
size_t totalSize = (size_t)n * this->d * sizeof(float);
if (totalSize > kAddPageSize || n > kAddVecSize) {
// How many vectors fit into kAddPageSize?
size_t maxNumVecsForPageSize =
kAddPageSize / ((size_t)this->d * sizeof(float));
// Always add at least 1 vector, if we have huge vectors
maxNumVecsForPageSize = std::max(maxNumVecsForPageSize, (size_t)1);
size_t tileSize = std::min((size_t)n, maxNumVecsForPageSize);
tileSize = std::min(tileSize, kSearchVecSize);
for (size_t i = 0; i < (size_t)n; i += tileSize) {
size_t curNum = std::min(tileSize, n - i);
addPage_(
curNum,
x + i * (size_t)this->d,
ids ? ids + i : nullptr);
}
} else {
addPage_(n, x, ids);
}
}
}
void GpuIndex::addPage_(int n, const float* x, const Index::idx_t* ids) {
// At this point, `x` can be resident on CPU or GPU, and `ids` may be
// resident on CPU, GPU or may be null.
//
// Before continuing, we guarantee that all data will be resident on the
// GPU.
auto stream = resources_->getDefaultStreamCurrentDevice();
auto vecs = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
const_cast<float*>(x),
stream,
{n, this->d});
if (ids) {
auto indices = toDeviceTemporary<Index::idx_t, 1>(
resources_.get(),
config_.device,
const_cast<Index::idx_t*>(ids),
stream,
{n});
addImpl_(n, vecs.data(), ids ? indices.data() : nullptr);
} else {
addImpl_(n, vecs.data(), nullptr);
}
}
void GpuIndex::assign(
Index::idx_t n,
const float* x,
Index::idx_t* labels,
Index::idx_t k) const {
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
// Maximum k-selection supported is based on the CUDA SDK
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU index only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
DeviceScope scope(config_.device);
auto stream = resources_->getDefaultStream(config_.device);
// We need to create a throw-away buffer for distances, which we don't use
// but which we do need for the search call
DeviceTensor<float, 2, true> distances(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
// Forward to search
search(n, x, k, distances.data(), labels);
}
void GpuIndex::search(
Index::idx_t n,
const float* x,
Index::idx_t k,
float* distances,
Index::idx_t* labels) const {
FAISS_THROW_IF_NOT(k > 0);
FAISS_THROW_IF_NOT_MSG(this->is_trained, "Index not trained");
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %d indices",
std::numeric_limits<int>::max());
// Maximum k-selection supported is based on the CUDA SDK
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU index only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
if (n == 0 || k == 0) {
// nothing to search
return;
}
DeviceScope scope(config_.device);
auto stream = resources_->getDefaultStream(config_.device);
// We guarantee that the searchImpl_ will be called with device-resident
// pointers.
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
distances,
stream,
{(int)n, (int)k});
auto outLabels = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(), config_.device, labels, stream, {(int)n, (int)k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * this->d * sizeof(float);
if (dataSize >= minPagedSize_) {
searchFromCpuPaged_(n, x, k, outDistances.data(), outLabels.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k, outDistances.data(), outLabels.data());
}
// Copy back if necessary
fromDevice<float, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outLabels, labels, stream);
}
void GpuIndex::searchNonPaged_(
int n,
const float* x,
int k,
float* outDistancesData,
Index::idx_t* outIndicesData) const {
auto stream = resources_->getDefaultStream(config_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<float, 2>(
resources_.get(),
config_.device,
const_cast<float*>(x),
stream,
{n, (int)this->d});
searchImpl_(n, vecs.data(), k, outDistancesData, outIndicesData);
}
void GpuIndex::searchFromCpuPaged_(
int n,
const float* x,
int k,
float* outDistancesData,
Index::idx_t* outIndicesData) const {
Tensor<float, 2, true> outDistances(outDistancesData, {n, k});
Tensor<Index::idx_t, 2, true> outIndices(outIndicesData, {n, k});
// Is pinned memory available?
auto pinnedAlloc = resources_->getPinnedMemory();
int pageSizeInVecs =
(int)((pinnedAlloc.second / 2) / (sizeof(float) * this->d));
if (!pinnedAlloc.first || pageSizeInVecs < 1) {
// Just page without overlapping copy with compute
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kNonPinnedPageSize / (sizeof(float) * this->d)));
for (int cur = 0; cur < n; cur += batchSize) {
int num = std::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * this->d,
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
return;
}
//
// Pinned memory is available, so we can overlap copy with compute.
// We use two pinned memory buffers, and triple-buffer the
// procedure:
//
// 1 CPU copy -> pinned
// 2 pinned copy -> GPU
// 3 GPU compute
//
// 1 2 3 1 2 3 ... (pinned buf A)
// 1 2 3 1 2 ... (pinned buf B)
// 1 2 3 1 ... (pinned buf A)
// time ->
//
auto defaultStream = resources_->getDefaultStream(config_.device);
auto copyStream = resources_->getAsyncCopyStream(config_.device);
FAISS_ASSERT(
(size_t)pageSizeInVecs * this->d <=
(size_t)std::numeric_limits<int>::max());
float* bufPinnedA = (float*)pinnedAlloc.first;
float* bufPinnedB = bufPinnedA + (size_t)pageSizeInVecs * this->d;
float* bufPinned[2] = {bufPinnedA, bufPinnedB};
// Reserve space on the GPU for the destination of the pinned buffer
// copy
DeviceTensor<float, 2, true> bufGpuA(
resources_.get(),
makeTempAlloc(AllocType::Other, defaultStream),
{(int)pageSizeInVecs, (int)this->d});
DeviceTensor<float, 2, true> bufGpuB(
resources_.get(),
makeTempAlloc(AllocType::Other, defaultStream),
{(int)pageSizeInVecs, (int)this->d});
DeviceTensor<float, 2, true>* bufGpus[2] = {&bufGpuA, &bufGpuB};
// Copy completion events for the pinned buffers
std::unique_ptr<CudaEvent> eventPinnedCopyDone[2];
// Execute completion events for the GPU buffers
std::unique_ptr<CudaEvent> eventGpuExecuteDone[2];
// All offsets are in terms of number of vectors; they remain within
// int bounds (this function only handles at most INT_MAX vectors)
// Current start offset for buffer 1
int cur1 = 0;
int cur1BufIndex = 0;
// Current start offset for buffer 2
int cur2 = -1;
int cur2BufIndex = 0;
// Current start offset for buffer 3
int cur3 = -1;
int cur3BufIndex = 0;
while (cur3 < n) {
// Start async pinned -> GPU copy first (buf 2)
if (cur2 != -1 && cur2 < n) {
// Copy pinned to GPU
int numToCopy = std::min(pageSizeInVecs, n - cur2);
// Make sure any previous execution has completed before continuing
auto& eventPrev = eventGpuExecuteDone[cur2BufIndex];
if (eventPrev.get()) {
eventPrev->streamWaitOnEvent(copyStream);
}
CUDA_VERIFY(cudaMemcpyAsync(
bufGpus[cur2BufIndex]->data(),
bufPinned[cur2BufIndex],
(size_t)numToCopy * this->d * sizeof(float),
cudaMemcpyHostToDevice,
copyStream));
// Mark a completion event in this stream
eventPinnedCopyDone[cur2BufIndex].reset(new CudaEvent(copyStream));
// We pick up from here
cur3 = cur2;
cur2 += numToCopy;
cur2BufIndex = (cur2BufIndex == 0) ? 1 : 0;
}
if (cur3 != -1 && cur3 < n) {
// Process on GPU
int numToProcess = std::min(pageSizeInVecs, n - cur3);
// Make sure the previous copy has completed before continuing
auto& eventPrev = eventPinnedCopyDone[cur3BufIndex];
FAISS_ASSERT(eventPrev.get());
eventPrev->streamWaitOnEvent(defaultStream);
// Create tensor wrappers
// DeviceTensor<float, 2, true> input(bufGpus[cur3BufIndex]->data(),
// {numToProcess, this->d});
auto outDistancesSlice =
outDistances.narrowOutermost(cur3, numToProcess);
auto outIndicesSlice =
outIndices.narrowOutermost(cur3, numToProcess);
searchImpl_(
numToProcess,
bufGpus[cur3BufIndex]->data(),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
// Create completion event
eventGpuExecuteDone[cur3BufIndex].reset(
new CudaEvent(defaultStream));
// We pick up from here
cur3BufIndex = (cur3BufIndex == 0) ? 1 : 0;
cur3 += numToProcess;
}
if (cur1 < n) {
// Copy CPU mem to CPU pinned
int numToCopy = std::min(pageSizeInVecs, n - cur1);
// Make sure any previous copy has completed before continuing
auto& eventPrev = eventPinnedCopyDone[cur1BufIndex];
if (eventPrev.get()) {
eventPrev->cpuWaitOnEvent();
}
memcpy(bufPinned[cur1BufIndex],
x + (size_t)cur1 * this->d,
(size_t)numToCopy * this->d * sizeof(float));
// We pick up from here
cur2 = cur1;
cur1 += numToCopy;
cur1BufIndex = (cur1BufIndex == 0) ? 1 : 0;
}
}
}
void GpuIndex::compute_residual(
const float* x,
float* residual,
Index::idx_t key) const {
FAISS_THROW_MSG("compute_residual not implemented for this type of index");
}
void GpuIndex::compute_residual_n(
Index::idx_t n,
const float* xs,
float* residuals,
const Index::idx_t* keys) const {
FAISS_THROW_MSG(
"compute_residual_n not implemented for this type of index");
}
std::shared_ptr<GpuResources> GpuIndex::getResources() {
return resources_;
}
} // namespace gpu
} // namespace faiss
|
32b171803cff29717e9f4a614ce4a08cd18438e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "FilmGradeKernelD.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *p_Input = NULL;
hipMalloc(&p_Input, XSIZE*YSIZE);
float *p_Output = NULL;
hipMalloc(&p_Output, XSIZE*YSIZE);
int p_Width = XSIZE;
int p_Height = YSIZE;
float p_Pivot = 1;
int p_Display = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
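// iXSIZE/iYSIZE are the image dimensions rounded up to multiples of the block
// size so the grid below covers every pixel.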
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((FilmGradeKernelD), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Output, p_Width, p_Height, p_Pivot, p_Display);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((FilmGradeKernelD), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Output, p_Width, p_Height, p_Pivot, p_Display);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((FilmGradeKernelD), dim3(gridBlock), dim3(threadBlock), 0, 0, p_Input, p_Output, p_Width, p_Height, p_Pivot, p_Display);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 32b171803cff29717e9f4a614ce4a08cd18438e7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "FilmGradeKernelD.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *p_Input = NULL;
cudaMalloc(&p_Input, XSIZE*YSIZE);
float *p_Output = NULL;
cudaMalloc(&p_Output, XSIZE*YSIZE);
int p_Width = XSIZE;
int p_Height = YSIZE;
float p_Pivot = 1;
int p_Display = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
FilmGradeKernelD<<<gridBlock,threadBlock>>>(p_Input,p_Output,p_Width,p_Height,p_Pivot,p_Display);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
FilmGradeKernelD<<<gridBlock,threadBlock>>>(p_Input,p_Output,p_Width,p_Height,p_Pivot,p_Display);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
FilmGradeKernelD<<<gridBlock,threadBlock>>>(p_Input,p_Output,p_Width,p_Height,p_Pivot,p_Display);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f6eae09516d5d0d6f198d93998d7aa579a67930d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Written By Ashwin Raghav
Twitter @ashwinraghav
blog.ashwinraghav.com
github.com/ashwinraghav
If you want to copy the code, by all means DO
*/
#ifndef _KERNEL_H_
#define _KERNEL_H_
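// Device-side strcmp equivalent: returns 0 when the two strings are equal,
// otherwise the difference between the first mismatching characters.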
__device__ int str_compare(const char * s1, const char*s2)
{
while((*s1 && *s2) && (*s1++ == *s2++));
return *(--s1) - *(--s2);
}
__global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_graph_visited, bool* g_graph_dest, int* g_cost, bool *g_over, int no_of_nodes, char* node_type, bool final_step)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_dest[tid] = true;
g_graph_mask[tid]=false;
g_graph_visited[tid]=true;
bool matching_type = str_compare(g_graph_nodes[tid].type, node_type);
if(matching_type == 0){
if(final_step)
g_graph_dest[tid] = true;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_graph_mask[id]=true;
//Change the loop stop value such that loop continues
*g_over=true;
}
}
}
}
}
#endif
| f6eae09516d5d0d6f198d93998d7aa579a67930d.cu | /*
Written By Ashwin Raghav
Twitter @ashwinraghav
blog.ashwinraghav.com
github.com/ashwinraghav
If you want to copy the code, by all means DO
*/
#ifndef _KERNEL_H_
#define _KERNEL_H_
__device__ int str_compare(const char * s1, const char*s2)
{
while((*s1 && *s2) && (*s1++ == *s2++));
return *(--s1) - *(--s2);
}
__global__ void Kernel( Node* g_graph_nodes, int* g_graph_edges, bool* g_graph_mask, bool* g_graph_visited, bool* g_graph_dest, int* g_cost, bool *g_over, int no_of_nodes, char* node_type, bool final_step)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid<no_of_nodes && g_graph_mask[tid])
{
g_graph_dest[tid] = true;
g_graph_mask[tid]=false;
g_graph_visited[tid]=true;
bool matching_type = str_compare(g_graph_nodes[tid].type, node_type);
if(matching_type == 0){
if(final_step)
g_graph_dest[tid] = true;
for(int i=g_graph_nodes[tid].starting; i<(g_graph_nodes[tid].no_of_edges + g_graph_nodes[tid].starting); i++)
{
int id = g_graph_edges[i];
if(!g_graph_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_graph_mask[id]=true;
//Change the loop stop value such that loop continues
*g_over=true;
}
}
}
}
}
#endif
|
88f3ff8d31f887a8fecea968b9ffbb584fcbdaac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ransac.h"
#include "timing.h"
const int BLOCK_SIZE = 128;
static dim3 blocksPerGrid;
__global__
void kernComputeDiffs(int N, glm::vec2 v, PointDelta * pointDeltas) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointDeltas[idx].dist = glm::distance(v, pointDeltas[idx].delta);
return;
}
__global__
void kernGeneratePointDeltas(int N, glm::vec2 * pointDiffs, PointDelta * pointDeltas) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointDeltas[idx].origPos = idx;
pointDeltas[idx].delta = pointDiffs[idx];
return;
}
__global__
void kernSetPointGroup(int N, PointDelta * pointDeltas, bool * pointGroup) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointGroup[pointDeltas[idx].origPos] = true;
return;
}
struct SortByDist {
__host__ __device__
bool operator()(const PointDelta & d1, const PointDelta & d2) {
return d1.dist < d2.dist;
}
};
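// Reduction functor for thrust::reduce below: only the accumulated delta field
// of the result is meaningful; dist and origPos are left unset.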
struct AddDelta {
__host__ __device__
PointDelta operator()(const PointDelta & d1, const PointDelta & d2) {
PointDelta pd;
pd.delta = d1.delta + d2.delta;
return pd;
}
};
RansacSeparator::RansacSeparator(int N) {
this->N = N;
blocksPerGrid = dim3((this->N + BLOCK_SIZE - 1) / BLOCK_SIZE);
hipMalloc(&this->devPointDeltas, N * sizeof(PointDelta));
hipMalloc(&this->devPointDiffs, N * sizeof(glm::vec2));
hipMalloc(&this->devPointGroup, N * sizeof(bool));
this->thrust_devPointDeltas = thrust::device_pointer_cast(this->devPointDeltas);
}
RansacSeparator::~RansacSeparator() {
hipFree(this->devPointDiffs);
hipFree(this->devPointDeltas);
hipFree(this->devPointGroup);
}
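// Iterative trimmed mean: each pass measures every delta's distance to the
// current mean vector, sorts by that distance, and re-averages only the
// closest THRESHOLD_N deltas to produce the next mean (the inlier estimate).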
void RansacSeparator::computeDiffs(PointDelta & tempDelta, glm::vec2 & meanVector, int THRESHOLD_N, int ITERATIONS, float SCALE_THRESHOLD_N) {
for (int i = 0; i < ITERATIONS; i++) {
hipLaunchKernelGGL(( kernComputeDiffs), dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, this->N, meanVector, this->devPointDeltas);
hipDeviceSynchronize();
thrust::sort(this->thrust_devPointDeltas, this->thrust_devPointDeltas + this->N, SortByDist());
tempDelta.delta = glm::vec2(0.0f, 0.0f);
// sum and take the mean
tempDelta = thrust::reduce(this->thrust_devPointDeltas, this->thrust_devPointDeltas + THRESHOLD_N, tempDelta, AddDelta());
meanVector = tempDelta.delta * SCALE_THRESHOLD_N;
}
}
pair<glm::vec2,glm::vec2> RansacSeparator::separate(bool * pointGroup, glm::vec2 * pointDiffs, float THRESHOLD, int ITERATIONS) {
hipMemcpy(this->devPointDiffs, pointDiffs, this->N * sizeof(glm::vec2), hipMemcpyHostToDevice);
hipDeviceSynchronize();
TIMEINIT
TIMEIT((hipLaunchKernelGGL(kernGeneratePointDeltas, dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, this->N, this->devPointDiffs, this->devPointDeltas)), "Generating Deltas")
hipDeviceSynchronize();
int THRESHOLD_N = THRESHOLD * (float)this->N;
printf("threshold: %d out of %d\n", THRESHOLD_N, this->N);
float SCALE_THRESHOLD_N = 1.0f / (float)THRESHOLD_N;
float SCALE_REMAINDER = 1.0f / (float) (this->N - THRESHOLD_N);
PointDelta tempDelta;
glm::vec2 meanVector(0.0f, 0.0f);
TIMEIT(computeDiffs(tempDelta, meanVector, THRESHOLD_N, ITERATIONS, SCALE_THRESHOLD_N), "Computing Diffs")
tempDelta.delta = glm::vec2(0.0f, 0.0f);
// this is the remaining part of the vectors
tempDelta = thrust::reduce(this->thrust_devPointDeltas + THRESHOLD_N, this->thrust_devPointDeltas + this->N, tempDelta, AddDelta());
tempDelta.delta *= SCALE_REMAINDER;
printf("%f %f, %f %f\n", meanVector.x, meanVector.y, tempDelta.delta.x, tempDelta.delta.y);
hipMemset(this->devPointGroup, 0, this->N * sizeof(bool));
TIMEIT((hipLaunchKernelGGL(kernSetPointGroup, dim3(blocksPerGrid), dim3(BLOCK_SIZE), 0, 0, THRESHOLD_N, this->devPointDeltas, this->devPointGroup)), "Set Point Group")
TIMEEND
hipDeviceSynchronize();
hipMemcpy(pointGroup, this->devPointGroup, this->N * sizeof(bool), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// split into two vectors
return make_pair(meanVector, tempDelta.delta);
}
| 88f3ff8d31f887a8fecea968b9ffbb584fcbdaac.cu | #include "ransac.h"
#include "timing.h"
const int BLOCK_SIZE = 128;
static dim3 blocksPerGrid;
__global__
void kernComputeDiffs(int N, glm::vec2 v, PointDelta * pointDeltas) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointDeltas[idx].dist = glm::distance(v, pointDeltas[idx].delta);
return;
}
__global__
void kernGeneratePointDeltas(int N, glm::vec2 * pointDiffs, PointDelta * pointDeltas) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointDeltas[idx].origPos = idx;
pointDeltas[idx].delta = pointDiffs[idx];
return;
}
__global__
void kernSetPointGroup(int N, PointDelta * pointDeltas, bool * pointGroup) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
pointGroup[pointDeltas[idx].origPos] = true;
return;
}
struct SortByDist {
__host__ __device__
bool operator()(const PointDelta & d1, const PointDelta & d2) {
return d1.dist < d2.dist;
}
};
struct AddDelta {
__host__ __device__
PointDelta operator()(const PointDelta & d1, const PointDelta & d2) {
PointDelta pd;
pd.delta = d1.delta + d2.delta;
return pd;
}
};
RansacSeparator::RansacSeparator(int N) {
this->N = N;
blocksPerGrid = dim3((this->N + BLOCK_SIZE - 1) / BLOCK_SIZE);
cudaMalloc(&this->devPointDeltas, N * sizeof(PointDelta));
cudaMalloc(&this->devPointDiffs, N * sizeof(glm::vec2));
cudaMalloc(&this->devPointGroup, N * sizeof(bool));
this->thrust_devPointDeltas = thrust::device_pointer_cast(this->devPointDeltas);
}
RansacSeparator::~RansacSeparator() {
cudaFree(this->devPointDiffs);
cudaFree(this->devPointDeltas);
cudaFree(this->devPointGroup);
}
void RansacSeparator::computeDiffs(PointDelta & tempDelta, glm::vec2 & meanVector, int THRESHOLD_N, int ITERATIONS, float SCALE_THRESHOLD_N) {
for (int i = 0; i < ITERATIONS; i++) {
kernComputeDiffs<<<blocksPerGrid, BLOCK_SIZE>>>(this->N, meanVector, this->devPointDeltas);
cudaDeviceSynchronize();
thrust::sort(this->thrust_devPointDeltas, this->thrust_devPointDeltas + this->N, SortByDist());
tempDelta.delta = glm::vec2(0.0f, 0.0f);
// sum and take the mean
tempDelta = thrust::reduce(this->thrust_devPointDeltas, this->thrust_devPointDeltas + THRESHOLD_N, tempDelta, AddDelta());
meanVector = tempDelta.delta * SCALE_THRESHOLD_N;
}
}
pair<glm::vec2,glm::vec2> RansacSeparator::separate(bool * pointGroup, glm::vec2 * pointDiffs, float THRESHOLD, int ITERATIONS) {
cudaMemcpy(this->devPointDiffs, pointDiffs, this->N * sizeof(glm::vec2), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
TIMEINIT
TIMEIT((kernGeneratePointDeltas<<<blocksPerGrid, BLOCK_SIZE>>>(this->N, this->devPointDiffs, this->devPointDeltas)), "Generating Deltas")
cudaDeviceSynchronize();
int THRESHOLD_N = THRESHOLD * (float)this->N;
printf("threshold: %d out of %d\n", THRESHOLD_N, this->N);
float SCALE_THRESHOLD_N = 1.0f / (float)THRESHOLD_N;
float SCALE_REMAINDER = 1.0f / (float) (this->N - THRESHOLD_N);
PointDelta tempDelta;
glm::vec2 meanVector(0.0f, 0.0f);
TIMEIT(computeDiffs(tempDelta, meanVector, THRESHOLD_N, ITERATIONS, SCALE_THRESHOLD_N), "Computing Diffs")
tempDelta.delta = glm::vec2(0.0f, 0.0f);
// this is the remaining part of the vectors
tempDelta = thrust::reduce(this->thrust_devPointDeltas + THRESHOLD_N, this->thrust_devPointDeltas + this->N, tempDelta, AddDelta());
tempDelta.delta *= SCALE_REMAINDER;
printf("%f %f, %f %f\n", meanVector.x, meanVector.y, tempDelta.delta.x, tempDelta.delta.y);
cudaMemset(this->devPointGroup, 0, this->N * sizeof(bool));
TIMEIT((kernSetPointGroup<<<blocksPerGrid, BLOCK_SIZE>>>(THRESHOLD_N, this->devPointDeltas, this->devPointGroup)), "Set Point Group")
TIMEEND
cudaDeviceSynchronize();
cudaMemcpy(pointGroup, this->devPointGroup, this->N * sizeof(bool), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// split into two vectors
return make_pair(meanVector, tempDelta.delta);
}
|
431e695738d09dca559d4f98ff9fc2555365d90c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "slicekernel.cuh"
#include "params.h"
#include "kernel_hip.cuh"
#include <iostream>
using std::cout;
using std::endl;
std::ostream& operator<<(std::ostream& os, const SliceParameters p)
{
os << "dx: " << p.dx << ", dy: " << p.dy << ", dz: " << p.dz
<< "theta: " << p.theta << ", phi: " << p.phi << ", psi: " << p.psi;
return os;
}
std::ostream& operator<<(std::ostream& os, const BufferParameters p)
{
os << "height: " << p.height << ", width: " << p.width;
return os;
}
extern texture<unsigned char, hipTextureType3D, hipReadModeNormalizedFloat> texVolume;
void invoke_slice_kernel(float *buffer, BufferParameters bp, SliceParameters sp, canonicalOrientation c)
{
cout << "pretending to invoke kernel" << endl;
float* buffer_dev;
checkCudaErrors( hipMalloc( &buffer_dev, bp.height*bp.width*sizeof(float)) );
int dim = 1024;
dim3 grids(dim/16, dim/16);
dim3 threads(16,16);
cout << "launching kernel" << endl;
cout << "kernel parameters: " << sp << endl << bp << endl;
hipLaunchKernelGGL(( slice_kernel), dim3(grids),dim3(threads), 0, 0, buffer_dev, bp, sp, c );
cout << "kernel launched" << endl;
checkCudaErrors( hipMemcpy( buffer, buffer_dev, bp.height*bp.width*sizeof(float), hipMemcpyDeviceToHost) );
checkCudaErrors( hipFree( buffer_dev ) );
}
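// Axis-aligned slice: each thread samples the 3D texture at normalized
// coordinates (i/width + dx, j/height + dy, dz). The rotation angles
// (theta/phi/psi) and the canonicalOrientation argument are not applied here.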
__global__
void slice_kernel(float *buffer, BufferParameters bp, SliceParameters sp, canonicalOrientation c)
{
int j = threadIdx.y + blockIdx.y * blockDim.y;
int i = threadIdx.x + blockIdx.x * blockDim.x;
// int offset = i + j * blockDim.x * gridDim.x; // ???
int offset = j*bp.height+i;
if(j<bp.height && i<bp.width){
float3 pos;
pos.z = 0;
pos.y = ((float)j)/((float)bp.height);
pos.x = ((float)i)/((float)bp.width);
pos.x += sp.dx;
pos.y += sp.dy;
pos.z += sp.dz;
float sample = tex3D(texVolume, pos.x, pos.y, pos.z);
buffer[offset] = sample;
// buffer[offset] = (pos.x + pos.y)/2;
}
}
| 431e695738d09dca559d4f98ff9fc2555365d90c.cu | #include "slicekernel.cuh"
#include "params.h"
#include "kernel.cuh"
#include <iostream>
using std::cout;
using std::endl;
std::ostream& operator<<(std::ostream& os, const SliceParameters p)
{
os << "dx: " << p.dx << ", dy: " << p.dy << ", dz: " << p.dz
<< "theta: " << p.theta << ", phi: " << p.phi << ", psi: " << p.psi;
return os;
}
std::ostream& operator<<(std::ostream& os, const BufferParameters p)
{
os << "height: " << p.height << ", width: " << p.width;
return os;
}
extern texture<unsigned char, cudaTextureType3D, cudaReadModeNormalizedFloat> texVolume;
void invoke_slice_kernel(float *buffer, BufferParameters bp, SliceParameters sp, canonicalOrientation c)
{
cout << "pretending to invoke kernel" << endl;
float* buffer_dev;
checkCudaErrors( cudaMalloc( &buffer_dev, bp.height*bp.width*sizeof(float)) );
int dim = 1024;
dim3 grids(dim/16, dim/16);
dim3 threads(16,16);
cout << "launching kernel" << endl;
cout << "kernel parameters: " << sp << endl << bp << endl;
slice_kernel<<<grids,threads>>>( buffer_dev, bp, sp, c );
cout << "kernel launched" << endl;
checkCudaErrors( cudaMemcpy( buffer, buffer_dev, bp.height*bp.width*sizeof(float), cudaMemcpyDeviceToHost) );
checkCudaErrors( cudaFree( buffer_dev ) );
}
__global__
void slice_kernel(float *buffer, BufferParameters bp, SliceParameters sp, canonicalOrientation c)
{
int j = threadIdx.y + blockIdx.y * blockDim.y;
int i = threadIdx.x + blockIdx.x * blockDim.x;
// int offset = i + j * blockDim.x * gridDim.x; // ???
int offset = j*bp.height+i;
if(j<bp.height && i<bp.width){
float3 pos;
pos.z = 0;
pos.y = ((float)j)/((float)bp.height);
pos.x = ((float)i)/((float)bp.width);
pos.x += sp.dx;
pos.y += sp.dy;
pos.z += sp.dz;
float sample = tex3D(texVolume, pos.x, pos.y, pos.z);
buffer[offset] = sample;
// buffer[offset] = (pos.x + pos.y)/2;
}
}
|
cd8015945971bf1c578cbad242ab5e06be79e30c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda/cuda_headers.hpp>
namespace cuda
{
/**
* Kernel Volume: Compute TSDF Volume in device (GPU)
* depth_map: current depth image input
* color_map : current color image input
* tsdf_volume : output of tsdf volume
* color_volume : output of color volume
* rvect, tvec: transformation from current camera to base camera
* Detail can be found in Volumetric Representation chapter (listing 2)
* http://people.inf.ethz.ch/otmarh/download/Papers/p559-izadi(KinectFusion).pdf
*
*/
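// Per-voxel update performed by integrateKernel below, for sdf >= -truncation_distance:
//   new_tsdf = min(1, sdf / truncation_distance)
//   tsdf <- (weight * tsdf + new_tsdf) / (weight + 1),  weight <- weight + 1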
__global__ void initializeVolumeKernel(PtrStepSz<float> tsdf_volume, PtrStepSz<float> weight_volume, const Eigen::Vector3i dims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
if (x >= dims(0) || y >= dims(1) || z >= dims(2)) return;
tsdf_volume.ptr(z * dims(1) + y)[x] = 0.0;
weight_volume.ptr(z * dims(1) + y)[x] = 0.0;
}
void initializeVolume(DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float>& weight_volume, const Eigen::Vector3i& dims)
{
const int num_blocks_x = DIV_CEILING(dims(0), THREAD_3D_UNIT);
const int num_blocks_y = DIV_CEILING(dims(1), THREAD_3D_UNIT);
const int num_blocks_z = DIV_CEILING(dims(2), THREAD_3D_UNIT);
const dim3 blocks(num_blocks_x, num_blocks_y, num_blocks_z);
const dim3 threads(THREAD_3D_UNIT, THREAD_3D_UNIT, THREAD_3D_UNIT);
hipLaunchKernelGGL(( initializeVolumeKernel), dim3(blocks), dim3(threads), 0, 0, tsdf_volume, weight_volume, dims);
CheckCuda(hipDeviceSynchronize());
CheckCuda(hipGetLastError());
}
__global__ void integrateKernel(const PtrStepSz<unsigned short> depth_image, PtrStepSz<float> tsdf_volume, PtrStepSz<float> weight_volume, const Eigen::Vector3i dims, float voxel_length,
const float depth_scale, const CameraIntrinsicCuda cam_params, const float truncation_distance, const Eigen::Matrix4f world_to_cam)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= dims(0) || y >= dims(1) || z >= dims(2)) return;
// Convert to voxel grid to global coordinate
const float3 global_voxel = make_float3((static_cast<float>(x) + 0.5f) * voxel_length, (static_cast<float>(y) + 0.5f) * voxel_length, (static_cast<float>(z) + 0.5f) * voxel_length);
// convert voxel from global to local camera coordinate
const Eigen::Vector3f camera_voxel = (world_to_cam * Eigen::Vector4f(global_voxel.x, global_voxel.y, global_voxel.z, 1.0)).head<3>();
if (camera_voxel(2) <= 0) return;
// projection
const int2 uv = make_int2(__float2int_rn(camera_voxel(0) / camera_voxel(2) * cam_params.fx_ + cam_params.cx_), __float2int_rn(camera_voxel(1) / camera_voxel(2) * cam_params.fy_ + cam_params.cy_));
if (uv.x < 0 || uv.x >= depth_image.cols || uv.y < 0 || uv.y >= depth_image.rows) return;
const float depth = depth_image.ptr(uv.y)[uv.x] * depth_scale;
if (depth <= 0.0001 || depth > 5.0) return;
const float sdf = (depth - camera_voxel(2));
if (sdf >= -truncation_distance)
{
const float new_tsdf = fmin(1.f, sdf / truncation_distance);
const float current_tsdf = tsdf_volume.ptr(z * dims(1) + y)[x];
const short current_weight = weight_volume.ptr(z * dims(1) + y)[x];
const float add_weight = 1;
const float updated_tsdf = (current_weight * current_tsdf + add_weight * new_tsdf) / (current_weight + add_weight);
const float new_weight = current_weight + add_weight;
// const float new_weight = min(current_weight + add_weight, 128.0f);
tsdf_volume.ptr(z * dims(1) + y)[x] = updated_tsdf;
weight_volume.ptr(z * dims(1) + y)[x] = new_weight;
}
}
void integrateTsdfVolume(const DeviceArray2D<unsigned short>& depth_map, DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float>& weight_volume, const Eigen::Vector3i& dims, const float voxel_length,
const float truncated_distance, const CameraIntrinsicCuda& cam_params, const Eigen::Matrix4f& world_to_cam, const float depth_scale)
{
const int num_blocks_x = DIV_CEILING(dims(0), THREAD_3D_UNIT);
const int num_blocks_y = DIV_CEILING(dims(1), THREAD_3D_UNIT);
const int num_blocks_z = DIV_CEILING(dims(2), THREAD_3D_UNIT);
const dim3 blocks(num_blocks_x, num_blocks_y, num_blocks_z);
const dim3 threads(THREAD_3D_UNIT, THREAD_3D_UNIT, THREAD_3D_UNIT);
hipLaunchKernelGGL(( integrateKernel), dim3(blocks), dim3(threads), 0, 0, depth_map, tsdf_volume, weight_volume, dims, voxel_length, depth_scale, cam_params, truncated_distance, world_to_cam);
CheckCuda(hipDeviceSynchronize());
CheckCuda(hipGetLastError());
}
/*
__device__ __forceinline__
float trilinearInterpolation(const float3& point,
const PtrStepSz<float>& volume,
const int& volume_size)
{
int3 point_in_grid = make_int3(__float2int_rn(point.x),
__float2int_rn(point.y),
__float2int_rn(point.z));
const float vx = (__int2float_rn(point_in_grid.x) + 0.5f);
const float vy = (__int2float_rn(point_in_grid.y) + 0.5f);
const float vz = (__int2float_rn(point_in_grid.z) + 0.5f);
point_in_grid.x = (point.x < vx) ? (point_in_grid.x - 1) : point_in_grid.x;
point_in_grid.y = (point.y < vy) ? (point_in_grid.y - 1) : point_in_grid.y;
point_in_grid.z = (point.z < vz) ? (point_in_grid.z - 1) : point_in_grid.z;
const float a = (point.x - (__int2float_rn(point_in_grid.x) + 0.5f));
const float b = (point.y - (__int2float_rn(point_in_grid.y) + 0.5f));
const float c = (point.z - (__int2float_rn(point_in_grid.z) + 0.5f));
return volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y)[point_in_grid.x] * (1 - a) * (1 - b) * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y)[point_in_grid.x] * (1 - a) * (1 - b) * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y + 1)[point_in_grid.x] * (1 - a) * b * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y + 1)[point_in_grid.x] * (1 - a) * b * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y)[point_in_grid.x + 1] * a * (1 - b) * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y)[point_in_grid.x + 1] * a * (1 - b) * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y + 1)[point_in_grid.x + 1] * a * b * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y + 1)[point_in_grid.x + 1] * a * b * c;
}
__device__ __forceinline__
void getMaxMin(const float volume_range,const float3& origin,
const float3& direction, float& max_range, float& min_range)
{
float txmin = ((direction.x > 0 ? 0.f : volume_range) - origin.x) / direction.x;
float tymin = ((direction.y > 0 ? 0.f : volume_range) - origin.y) / direction.y;
float tzmin = ((direction.z > 0 ? 0.f : volume_range) - origin.z) / direction.z;
min_range = fmax(fmax(txmin, tymin), tzmin);
float txmax = ((direction.x > 0 ? volume_range : 0.f) - origin.x) / direction.x;
float tymax = ((direction.y > 0 ? volume_range : 0.f) - origin.y) / direction.y;
float tzmax = ((direction.z > 0 ? volume_range : 0.f) - origin.z) / direction.z;
max_range = fmin(fmin(txmax, tymax), tzmax);
}
__global__
void kernelRayCasting(const PtrStepSz<float> tsdf_volume,
PtrStepSz<float3> model_vertex,
PtrStepSz<float3> model_normal,
const int volume_size, const float voxel_scale,
const CameraParameters cam_parameters,
const float truncation_distance,
const float ray_step,
const mat33 cam_to_world_rot,
const float3 cam_to_world_trans)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= model_vertex.cols || y >= model_vertex.rows)
return;
model_vertex.ptr(y)[x] = make_float3(0, 0, 0);
model_normal.ptr(y)[x] = make_float3(0, 0, 0);
const float volume_max = volume_size * voxel_scale;
const float3 pixel_position= make_float3(
(x - cam_parameters.c_x) / cam_parameters.focal_x,
(y - cam_parameters.c_y) / cam_parameters.focal_y,
1.f);
float3 ray_direction = (cam_to_world_rot * pixel_position);
ray_direction = normalized(ray_direction);
float min_range, max_range;
getMaxMin(volume_max,cam_to_world_trans,ray_direction,max_range,min_range);
float ray_length = fmax(min_range,0.f);
if (ray_length >= max_range)
return;
ray_length += voxel_scale;
float3 grid = (cam_to_world_trans + (ray_direction * ray_length)) / voxel_scale;
float tsdf = tsdf_volume.ptr(
__float2int_rd(grid.z) * volume_size + __float2int_rd(grid.y))[__float2int_rd(grid.x)];
const float max_search_length = ray_length + volume_max * sqrt(2.f);
for (; ray_length < max_search_length; ray_length += truncation_distance * ray_step) {
grid = ((cam_to_world_trans + (ray_direction * (ray_length + truncation_distance * ray_step))) / voxel_scale);
if (grid.x < 1 || grid.x >= volume_size - 1 || grid.y < 1 ||
grid.y >= volume_size - 1 ||
grid.z < 1 || grid.z >= volume_size - 1)
continue;
const float previous_tsdf = tsdf;
tsdf = tsdf_volume.ptr(
__float2int_rd(grid.z) * volume_size + __float2int_rd(grid.y))[__float2int_rd(
grid.x)];
if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
break;
if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
const float t_star =
ray_length - truncation_distance * ray_step * previous_tsdf / (tsdf - previous_tsdf);
const float3 vertex = cam_to_world_trans + ray_direction * t_star;
const float3 location_in_grid = (vertex / voxel_scale);
if (location_in_grid.x < 1 | location_in_grid.x >= volume_size - 1 ||
location_in_grid.y < 1 || location_in_grid.y >= volume_size - 1 ||
location_in_grid.z < 1 || location_in_grid.z >= volume_size - 1)
break;
//Compute normal
float3 normal, shifted;
shifted = location_in_grid;
shifted.x += 1;
if (shifted.x >= volume_size - 1)
break;
const float Fx1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.x -= 1;
if (shifted.x < 1)
break;
const float Fx2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.x = (Fx1 - Fx2);
shifted = location_in_grid;
shifted.y += 1;
if (shifted.y >= volume_size - 1)
break;
const float Fy1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.y -= 1;
if (shifted.y < 1)
break;
const float Fy2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.y = (Fy1 - Fy2);
shifted = location_in_grid;
shifted.z += 1;
if (shifted.z >= volume_size - 1)
break;
const float Fz1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.z -= 1;
if (shifted.z < 1)
break;
const float Fz2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.z = (Fz1 - Fz2);
if (norm(normal) == 0)
break;
normal = normalized(normal);
// printf("%f %f %f \n",vertex.x(), vertex.y(), vertex.z());
model_vertex.ptr(y)[x] = make_float3(vertex.x, vertex.y, vertex.z);
model_normal.ptr(y)[x] = make_float3(normal.x, normal.y, normal.z);
break;
}
}
}
void hostRayCasting(const DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float3>& model_vertex,
DeviceArray2D<float3>& model_normal,
const CameraParameters& cam_params,const float truncation_distance,
const int volume_res, const float voxel_size, const float ray_step,
const mat33& cam_to_world_rot,const float3& cam_to_world_trans)
{
dim3 block(32,8);
dim3 grid((model_vertex.cols() + block.x - 1) / block.x,
(model_vertex.rows() + block.y - 1) / block.y);
kernelRayCasting<<<grid,block>>>(tsdf_volume,model_vertex,model_normal,volume_res,
voxel_size,cam_params,truncation_distance, ray_step,
cam_to_world_rot,cam_to_world_trans);
CudaSafeCall ( hipGetLastError () );
CudaSafeCall (hipDeviceSynchronize ());
}
*/
} // namespace cuda
| cd8015945971bf1c578cbad242ab5e06be79e30c.cu | #include <cuda/cuda_headers.hpp>
namespace cuda
{
/**
* Kernel Volume: Compute TSDF Volume in device (GPU)
* depth_map: current depth image input
* color_map : current color image input
* tsdf_volume : output of tsdf volume
* color_volume : output of color volume
* rvect, tvec: transformation from current camera to base camera
* Detail can be found in Volumetric Representation chapter (listing 2)
* http://people.inf.ethz.ch/otmarh/download/Papers/p559-izadi(KinectFusion).pdf
*
*/
__global__ void initializeVolumeKernel(PtrStepSz<float> tsdf_volume, PtrStepSz<float> weight_volume, const Eigen::Vector3i dims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int z = threadIdx.z + blockIdx.z * blockDim.z;
if (x >= dims(0) || y >= dims(1) || z >= dims(2)) return;
tsdf_volume.ptr(z * dims(1) + y)[x] = 0.0;
weight_volume.ptr(z * dims(1) + y)[x] = 0.0;
}
void initializeVolume(DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float>& weight_volume, const Eigen::Vector3i& dims)
{
const int num_blocks_x = DIV_CEILING(dims(0), THREAD_3D_UNIT);
const int num_blocks_y = DIV_CEILING(dims(1), THREAD_3D_UNIT);
const int num_blocks_z = DIV_CEILING(dims(2), THREAD_3D_UNIT);
const dim3 blocks(num_blocks_x, num_blocks_y, num_blocks_z);
const dim3 threads(THREAD_3D_UNIT, THREAD_3D_UNIT, THREAD_3D_UNIT);
initializeVolumeKernel<<<blocks, threads>>>(tsdf_volume, weight_volume, dims);
CheckCuda(cudaDeviceSynchronize());
CheckCuda(cudaGetLastError());
}
__global__ void integrateKernel(const PtrStepSz<unsigned short> depth_image, PtrStepSz<float> tsdf_volume, PtrStepSz<float> weight_volume, const Eigen::Vector3i dims, float voxel_length,
const float depth_scale, const CameraIntrinsicCuda cam_params, const float truncation_distance, const Eigen::Matrix4f world_to_cam)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int z = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= dims(0) || y >= dims(1) || z >= dims(2)) return;
// Convert to voxel grid to global coordinate
const float3 global_voxel = make_float3((static_cast<float>(x) + 0.5f) * voxel_length, (static_cast<float>(y) + 0.5f) * voxel_length, (static_cast<float>(z) + 0.5f) * voxel_length);
// convert voxel from global to local camera coordinate
const Eigen::Vector3f camera_voxel = (world_to_cam * Eigen::Vector4f(global_voxel.x, global_voxel.y, global_voxel.z, 1.0)).head<3>();
if (camera_voxel(2) <= 0) return;
// projection
const int2 uv = make_int2(__float2int_rn(camera_voxel(0) / camera_voxel(2) * cam_params.fx_ + cam_params.cx_), __float2int_rn(camera_voxel(1) / camera_voxel(2) * cam_params.fy_ + cam_params.cy_));
if (uv.x < 0 || uv.x >= depth_image.cols || uv.y < 0 || uv.y >= depth_image.rows) return;
const float depth = depth_image.ptr(uv.y)[uv.x] * depth_scale;
if (depth <= 0.0001 || depth > 5.0) return;
const float sdf = (depth - camera_voxel(2));
if (sdf >= -truncation_distance)
{
const float new_tsdf = fmin(1.f, sdf / truncation_distance);
const float current_tsdf = tsdf_volume.ptr(z * dims(1) + y)[x];
const short current_weight = weight_volume.ptr(z * dims(1) + y)[x];
const float add_weight = 1;
const float updated_tsdf = (current_weight * current_tsdf + add_weight * new_tsdf) / (current_weight + add_weight);
const float new_weight = current_weight + add_weight;
// const float new_weight = min(current_weight + add_weight, 128.0f);
tsdf_volume.ptr(z * dims(1) + y)[x] = updated_tsdf;
weight_volume.ptr(z * dims(1) + y)[x] = new_weight;
}
}
void integrateTsdfVolume(const DeviceArray2D<unsigned short>& depth_map, DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float>& weight_volume, const Eigen::Vector3i& dims, const float voxel_length,
const float truncated_distance, const CameraIntrinsicCuda& cam_params, const Eigen::Matrix4f& world_to_cam, const float depth_scale)
{
const int num_blocks_x = DIV_CEILING(dims(0), THREAD_3D_UNIT);
const int num_blocks_y = DIV_CEILING(dims(1), THREAD_3D_UNIT);
const int num_blocks_z = DIV_CEILING(dims(2), THREAD_3D_UNIT);
const dim3 blocks(num_blocks_x, num_blocks_y, num_blocks_z);
const dim3 threads(THREAD_3D_UNIT, THREAD_3D_UNIT, THREAD_3D_UNIT);
integrateKernel<<<blocks, threads>>>(depth_map, tsdf_volume, weight_volume, dims, voxel_length, depth_scale, cam_params, truncated_distance, world_to_cam);
CheckCuda(cudaDeviceSynchronize());
CheckCuda(cudaGetLastError());
}
/*
__device__ __forceinline__
float trilinearInterpolation(const float3& point,
const PtrStepSz<float>& volume,
const int& volume_size)
{
int3 point_in_grid = make_int3(__float2int_rn(point.x),
__float2int_rn(point.y),
__float2int_rn(point.z));
const float vx = (__int2float_rn(point_in_grid.x) + 0.5f);
const float vy = (__int2float_rn(point_in_grid.y) + 0.5f);
const float vz = (__int2float_rn(point_in_grid.z) + 0.5f);
point_in_grid.x = (point.x < vx) ? (point_in_grid.x - 1) : point_in_grid.x;
point_in_grid.y = (point.y < vy) ? (point_in_grid.y - 1) : point_in_grid.y;
point_in_grid.z = (point.z < vz) ? (point_in_grid.z - 1) : point_in_grid.z;
const float a = (point.x - (__int2float_rn(point_in_grid.x) + 0.5f));
const float b = (point.y - (__int2float_rn(point_in_grid.y) + 0.5f));
const float c = (point.z - (__int2float_rn(point_in_grid.z) + 0.5f));
return volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y)[point_in_grid.x] * (1 - a) * (1 - b) * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y)[point_in_grid.x] * (1 - a) * (1 - b) * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y + 1)[point_in_grid.x] * (1 - a) * b * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y + 1)[point_in_grid.x] * (1 - a) * b * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y)[point_in_grid.x + 1] * a * (1 - b) * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y)[point_in_grid.x + 1] * a * (1 - b) * c +
volume.ptr((point_in_grid.z) * volume_size + point_in_grid.y + 1)[point_in_grid.x + 1] * a * b * (1 - c) +
volume.ptr((point_in_grid.z + 1) * volume_size + point_in_grid.y + 1)[point_in_grid.x + 1] * a * b * c;
}
__device__ __forceinline__
void getMaxMin(const float volume_range,const float3& origin,
const float3& direction, float& max_range, float& min_range)
{
float txmin = ((direction.x > 0 ? 0.f : volume_range) - origin.x) / direction.x;
float tymin = ((direction.y > 0 ? 0.f : volume_range) - origin.y) / direction.y;
float tzmin = ((direction.z > 0 ? 0.f : volume_range) - origin.z) / direction.z;
min_range = fmax(fmax(txmin, tymin), tzmin);
float txmax = ((direction.x > 0 ? volume_range : 0.f) - origin.x) / direction.x;
float tymax = ((direction.y > 0 ? volume_range : 0.f) - origin.y) / direction.y;
float tzmax = ((direction.z > 0 ? volume_range : 0.f) - origin.z) / direction.z;
max_range = fmin(fmin(txmax, tymax), tzmax);
}
__global__
void kernelRayCasting(const PtrStepSz<float> tsdf_volume,
PtrStepSz<float3> model_vertex,
PtrStepSz<float3> model_normal,
const int volume_size, const float voxel_scale,
const CameraParameters cam_parameters,
const float truncation_distance,
const float ray_step,
const mat33 cam_to_world_rot,
const float3 cam_to_world_trans)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= model_vertex.cols || y >= model_vertex.rows)
return;
model_vertex.ptr(y)[x] = make_float3(0, 0, 0);
model_normal.ptr(y)[x] = make_float3(0, 0, 0);
const float volume_max = volume_size * voxel_scale;
const float3 pixel_position= make_float3(
(x - cam_parameters.c_x) / cam_parameters.focal_x,
(y - cam_parameters.c_y) / cam_parameters.focal_y,
1.f);
float3 ray_direction = (cam_to_world_rot * pixel_position);
ray_direction = normalized(ray_direction);
float min_range, max_range;
getMaxMin(volume_max,cam_to_world_trans,ray_direction,max_range,min_range);
float ray_length = fmax(min_range,0.f);
if (ray_length >= max_range)
return;
ray_length += voxel_scale;
float3 grid = (cam_to_world_trans + (ray_direction * ray_length)) / voxel_scale;
float tsdf = tsdf_volume.ptr(
__float2int_rd(grid.z) * volume_size + __float2int_rd(grid.y))[__float2int_rd(grid.x)];
const float max_search_length = ray_length + volume_max * sqrt(2.f);
for (; ray_length < max_search_length; ray_length += truncation_distance * ray_step) {
grid = ((cam_to_world_trans + (ray_direction * (ray_length + truncation_distance * ray_step))) / voxel_scale);
if (grid.x < 1 || grid.x >= volume_size - 1 || grid.y < 1 ||
grid.y >= volume_size - 1 ||
grid.z < 1 || grid.z >= volume_size - 1)
continue;
const float previous_tsdf = tsdf;
tsdf = tsdf_volume.ptr(
__float2int_rd(grid.z) * volume_size + __float2int_rd(grid.y))[__float2int_rd(
grid.x)];
if (previous_tsdf < 0.f && tsdf > 0.f) //Zero crossing from behind
break;
if (previous_tsdf > 0.f && tsdf < 0.f) { //Zero crossing
const float t_star =
ray_length - truncation_distance * ray_step * previous_tsdf / (tsdf - previous_tsdf);
const float3 vertex = cam_to_world_trans + ray_direction * t_star;
const float3 location_in_grid = (vertex / voxel_scale);
if (location_in_grid.x < 1 | location_in_grid.x >= volume_size - 1 ||
location_in_grid.y < 1 || location_in_grid.y >= volume_size - 1 ||
location_in_grid.z < 1 || location_in_grid.z >= volume_size - 1)
break;
//Compute normal
float3 normal, shifted;
shifted = location_in_grid;
shifted.x += 1;
if (shifted.x >= volume_size - 1)
break;
const float Fx1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.x -= 1;
if (shifted.x < 1)
break;
const float Fx2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.x = (Fx1 - Fx2);
shifted = location_in_grid;
shifted.y += 1;
if (shifted.y >= volume_size - 1)
break;
const float Fy1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.y -= 1;
if (shifted.y < 1)
break;
const float Fy2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.y = (Fy1 - Fy2);
shifted = location_in_grid;
shifted.z += 1;
if (shifted.z >= volume_size - 1)
break;
const float Fz1 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
shifted = location_in_grid;
shifted.z -= 1;
if (shifted.z < 1)
break;
const float Fz2 = trilinearInterpolation(shifted, tsdf_volume, volume_size);
normal.z = (Fz1 - Fz2);
if (norm(normal) == 0)
break;
normal = normalized(normal);
// printf("%f %f %f \n",vertex.x(), vertex.y(), vertex.z());
model_vertex.ptr(y)[x] = make_float3(vertex.x, vertex.y, vertex.z);
model_normal.ptr(y)[x] = make_float3(normal.x, normal.y, normal.z);
break;
}
}
}
void hostRayCasting(const DeviceArray2D<float>& tsdf_volume, DeviceArray2D<float3>& model_vertex,
DeviceArray2D<float3>& model_normal,
const CameraParameters& cam_params,const float truncation_distance,
const int volume_res, const float voxel_size, const float ray_step,
const mat33& cam_to_world_rot,const float3& cam_to_world_trans)
{
dim3 block(32,8);
dim3 grid((model_vertex.cols() + block.x - 1) / block.x,
(model_vertex.rows() + block.y - 1) / block.y);
kernelRayCasting<<<grid,block>>>(tsdf_volume,model_vertex,model_normal,volume_res,
voxel_size,cam_params,truncation_distance, ray_step,
cam_to_world_rot,cam_to_world_trans);
CudaSafeCall ( cudaGetLastError () );
CudaSafeCall (cudaDeviceSynchronize ());
}
*/
} // namespace cuda
|
7bd507a55ccd34be341aedf1251b586d27a8a8f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _et_line_backproject_attenuated_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#include "_et_line_backproject_attenuated_gpu.h"
#include "_et_line_backproject_attenuated_gpu_kernels.cu"
void et_line_backproject_attenuated_gpu(float **d_sinogram, float **d_backprojection, float **d_attenuation, int cam, nifti_image *backprojection)
{
int3 backprojection_size = make_int3(backprojection->nx,backprojection->ny,backprojection->nz);
CUDA_SAFE_CALL(hipMemcpyToSymbol(c_backprojection_size,&backprojection_size,sizeof(int3)));
const unsigned int grid = (unsigned int)ceil(backprojection->nx*backprojection->ny/(float)BLOCK);
dim3 B(BLOCK,1,1);
dim3 G(grid,1,1);
float *d_sinogram_ptr = (*d_sinogram) + cam * backprojection->nx * backprojection->ny;
hipLaunchKernelGGL(( et_line_backproject_attenuated_gpu_kernel) , dim3(G),dim3(B), 0, 0, d_sinogram_ptr, *d_backprojection, *d_attenuation);
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
| 7bd507a55ccd34be341aedf1251b586d27a8a8f4.cu | /*
* _et_line_backproject_attenuated_gpu.cu
*
* NiftyRec
* Stefano Pedemonte, May 2012.
* CMIC - Centre for Medical Image Computing
* UCL - University College London.
* Released under BSD licence, see LICENSE.txt
*/
#include "_et_line_backproject_attenuated_gpu.h"
#include "_et_line_backproject_attenuated_gpu_kernels.cu"
void et_line_backproject_attenuated_gpu(float **d_sinogram, float **d_backprojection, float **d_attenuation, int cam, nifti_image *backprojection)
{
int3 backprojection_size = make_int3(backprojection->nx,backprojection->ny,backprojection->nz);
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_backprojection_size,&backprojection_size,sizeof(int3)));
const unsigned int grid = (unsigned int)ceil(backprojection->nx*backprojection->ny/(float)BLOCK);
dim3 B(BLOCK,1,1);
dim3 G(grid,1,1);
float *d_sinogram_ptr = (*d_sinogram) + cam * backprojection->nx * backprojection->ny;
et_line_backproject_attenuated_gpu_kernel <<<G,B>>> (d_sinogram_ptr, *d_backprojection, *d_attenuation);
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
|
fade481cd6a3852a9a7af69c61052156d5cb4d6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kmeans.h"
#include <vector>
#include <cstdio>
#include "./SecureCudaArray.hu"
#include <cmath>
#include <iostream>
/*
Kmeans algorithm
Assignment step:
Assign each observation to the cluster with the nearest mean: that with the least squared Euclidean distance.
This is done by get_nearest_centroid functions
Update step:
Recalculate means (centroids) for observations assigned to each cluster.
This is done by get_new_centroid functions
Also created "_shared" versions of these functions to speed up computations by using the architectural caches on GPUs
Unfortunately the version of Cuda my university uses only allows for C-style coding, therefore some of this code is less modularized than I would like it to be.
*/
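// Note on the GPU mapping: the assignment step does O(N * k * dim) work per
// iteration; per-cluster coordinate sums and point counts are accumulated with
// atomicAdd so the update step can form each new centroid as sum / count and
// compare it with the previous centroid against the convergence threshold.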
__device__ float distance(float* __restrict__ a, float* __restrict__ b, int dim) {
float sum = 0;
for(int i = 0; i < dim; i++)
{
sum += (a[i] - b[i]) * (a[i] - b[i]);
}
return sqrt(sum);
}
__global__ void get_nearest_centroid(float* __restrict__ data,
float* __restrict__ centroids,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
int* __restrict__ cluster_assignment,
int N,
int k,
int dim) {
// similar to prefix sum, every thread gets all the data and works on their specific portion of it
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
float* p = data + (idx * dim);
float min_centroid_dist = distance(centroids, p, dim);
int chosen = 0;
for(int i = 1; i < k; ++i) {
float dist = distance(centroids + (dim * i), p, dim);
if (dist < min_centroid_dist) {
chosen = i;
min_centroid_dist = dist;
}
}
for(int i = 0; i < dim; ++i) {
atomicAdd((centroid_sum + (chosen * dim) + i), p[i]);
}
atomicAdd(¢roid_count[chosen], 1);
cluster_assignment[idx] = chosen;
}
}
__global__ void get_nearest_centroid_shared(float* __restrict__ data,
float* __restrict__ centroids_global,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
int* __restrict__ cluster_assignment,
int N,
int k,
int dim) {
extern __shared__ float centroids[];
// similar to prefix sum, every thread gets all the data and works on their specific portion of it
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= N) return;
// use first few threads to copy over centroids
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[(threadIdx.x * dim) + i] = centroids_global[(threadIdx.x * dim) + i];
}
}
__syncthreads();
// doesn't make sense to copy over point to shared memory because it's specific to thread
// instead copy it over to local memory to prevent global memory accesses
float p[1024];
for(int i = 0; i < dim; ++i) {
p[i] = *(data + (idx * dim) + i);
}
float min_centroid_dist = distance(centroids, p, dim);
int chosen = 0;
for(int i = 1; i < k; ++i) {
float dist = distance(centroids + (dim * i), p, dim);
if (dist < min_centroid_dist) {
chosen = i;
min_centroid_dist = dist;
}
}
for(int i = 0; i < dim; ++i) {
atomicAdd((centroid_sum + (chosen * dim) + i), p[i]);
}
atomicAdd(¢roid_count[chosen], 1);
cluster_assignment[idx] = chosen;
}
__global__ void get_new_centroids_shared(float* __restrict__ centroids_global,
float* __restrict__ centroid_sum_global,
int* __restrict__ centroid_count_global,
bool* __restrict__ repeat,
float threshold,
int k,
int dim) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > k) return;
// 3D array with array 0 being centroids, array 1 being centroid sums, array 2 being centroid counts
extern __shared__ float centroids[];
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[threadIdx.x * dim + i] = centroids_global[threadIdx.x * dim + i];
centroids[(k * dim) + threadIdx.x * dim + i] = centroid_sum_global[threadIdx.x * dim + i];
}
centroids[(2 * k * dim) + threadIdx.x] = centroid_count_global[threadIdx.x];
}
__syncthreads();
for(int i = 0; i < dim; ++i) {
// easy shared mem optimization
float cur_centroid = centroids[idx * dim + i];
centroids[idx * dim + i] = centroids[(k * dim) + idx * dim + i] / (float) centroids[(2 * k * dim) + idx];
if(abs(cur_centroid - centroids[idx * dim + i]) > threshold) {
*repeat = true;
}
}
__syncthreads();
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids_global[threadIdx.x * dim + i] = centroids[threadIdx.x * dim + i];
centroid_sum_global[threadIdx.x * dim + i] = centroids[(k * dim) + threadIdx.x * dim + i];
}
centroid_count_global[threadIdx.x] = centroids[(2 * k * dim) + threadIdx.x];
}
__syncthreads();
}
__global__ void get_new_centroids(float* __restrict__ centroids,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
bool* __restrict__ repeat,
float threshold,
int k,
int dim) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < k) {
for(int i = 0; i < dim; ++i) {
float cur_centroid = *(centroids + (idx * dim) + i);
*(centroids + (idx * dim) + i) = *(centroid_sum + (idx * dim) + i) / (float) centroid_count[idx];
if (abs(cur_centroid - *(centroids + (idx * dim) + i)) > threshold) {
*repeat = true;
}
}
}
}
__global__ void find_nearest_centroid(float* __restrict__ data,
float* __restrict__ centroids_global,
float* __restrict__ min_dist,
int N,
int k,
int dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
extern __shared__ float centroids[];
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[(threadIdx.x * dim) + i] = centroids_global[(threadIdx.x * dim) + i];
}
}
__syncthreads();
float* p = data + (idx * dim);
float min_centroid_dist = distance(centroids, p, dim);
for(int i = 1; i < k; ++i) {
min_centroid_dist = min(distance(centroids + (dim * i), p, dim), min_centroid_dist);
}
min_dist[idx] = min_centroid_dist;
}
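// k-means++ seeding helper: returns, for every point, the distance to its
// nearest centroid among the k supplied so far (the D(x) term a caller can use
// to weight the choice of the next seed).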
vector<float> kmeans_plus_plus(vector<Point> points, Point* centroids, int k) {
int N = points.size();
int dim = points[0].coord.size();
float flat_points[N * dim];
for(int i = 0; i < N; ++i) {
for(int j = 0; j < dim; ++j) {
flat_points[i * dim + j] = points[i].coord[j];
}
}
SecureCudaArray<float>* cuda_data = new SecureCudaArray<float>(N * dim);
cuda_data->set(flat_points, N * dim);
float flat_centroids[k * dim];
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
flat_centroids[i * dim + j] = centroids[i].coord[j];
}
}
SecureCudaArray<float>* cuda_centroids = new SecureCudaArray<float>(k * dim);
cuda_centroids->set(flat_centroids, k * dim);
SecureCudaArray<float>* cuda_min_dist = new SecureCudaArray<float>(N);
const int threads = 1024;
const int blocks = (N + threads - 1) / threads;
hipLaunchKernelGGL(( find_nearest_centroid), dim3(blocks), dim3(threads), k * dim * sizeof(float), 0, cuda_data->getData(),
cuda_centroids->getData(),
cuda_min_dist->getData(),
N,
k,
dim);
float min_dist_device[N];
cuda_min_dist->get(min_dist_device, N);
delete cuda_min_dist;
delete cuda_centroids;
delete cuda_data;
return vector<float>(min_dist_device, min_dist_device + N);
}
KMeans kmeans_cuda(vector<Point> points, Point* centroids, int k, int max_iterations, float threshold, bool shared) {
// **** Memory ops
int N = points.size();
int dim = points[0].coord.size();
float* flat_points = (float*) malloc(sizeof(float) * N * dim);
for(int i = 0; i < N; ++i) {
for(int j = 0; j < dim; ++j) {
flat_points[i * dim + j] = points[i].coord[j];
}
}
SecureCudaArray<float>* cuda_data = new SecureCudaArray<float>(N * dim);
cuda_data->set(flat_points, N * dim);
free(flat_points);
float flat_centroids[k * dim];
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
flat_centroids[i * dim + j] = centroids[i].coord[j];
}
}
SecureCudaArray<float>* cuda_centroids = new SecureCudaArray<float>(k * dim);
cuda_centroids->set(flat_centroids, k * dim);
SecureCudaArray<int>* centroid_counts = new SecureCudaArray<int>(k);
SecureCudaArray<int>* cluster_assignment = new SecureCudaArray<int>(N);
SecureCudaArray<float>* centroid_sum = new SecureCudaArray<float>(k * dim);
cluster_assignment->fillZeroes();
// End of memory ops
const int threads = 1024;
const int blocks = (N + threads - 1) / threads;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// *** Main loop
int iterations = 0;
float total_time = 0;
for(int i = 0; i < max_iterations; ++i) {
centroid_counts->fillZeroes();
centroid_sum->fillZeroes();
hipEventRecord(start);
if (shared) {
hipLaunchKernelGGL(( get_nearest_centroid_shared), dim3(blocks), dim3(threads), k * dim * sizeof(float), 0, cuda_data->getData(),
cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
cluster_assignment->getData(),
N,
k,
dim);
} else {
hipLaunchKernelGGL(( get_nearest_centroid), dim3(blocks), dim3(threads), 0, 0, cuda_data->getData(),
cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
cluster_assignment->getData(),
N,
k,
dim);
}
hipDeviceSynchronize();
bool* repeat;
hipMalloc(&repeat, sizeof(bool));
hipMemset(repeat, 0, sizeof(bool)); // clear the convergence flag; the kernels only ever set it to true
if (shared) {
hipLaunchKernelGGL(( get_new_centroids_shared), dim3(1), dim3(k), ((k * dim * 2) + k) * sizeof(float), 0, cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
repeat,
threshold,
k,
dim);
} else {
hipLaunchKernelGGL(( get_new_centroids), dim3(1), dim3(k), 0, 0, cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
repeat,
threshold,
k,
dim);
}
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
total_time += milliseconds;
iterations++;
bool repeat_loop;
hipMemcpy(&repeat_loop, repeat, sizeof(bool), hipMemcpyDeviceToHost);
hipFree(repeat); // the flag is re-allocated at the top of each iteration
if (!repeat_loop) {
break;
}
}
cuda_centroids->get(flat_centroids, k * dim);
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
centroids[i].coord[j] = flat_centroids[i * dim + j];
}
}
int assignments[N];
cluster_assignment->get(assignments, N);
delete cluster_assignment;
delete cuda_centroids;
delete cuda_data;
delete centroid_counts;
delete centroid_sum;
return KMeans{vector<Point>(centroids, centroids + k), vector<int>(assignments, assignments + N), total_time, iterations};
} | fade481cd6a3852a9a7af69c61052156d5cb4d6e.cu | #include "kmeans.h"
#include <vector>
#include <cstdio>
#include "./SecureCudaArray.hu"
#include <cmath>
#include <iostream>
/*
Kmeans algorithm
Assignment step:
Assign each observation to the cluster with the nearest mean: that with the least squared Euclidean distance.
This is done by get_nearest_centroid functions
Update step:
Recalculate means (centroids) for observations assigned to each cluster.
This is done by get_new_centroid functions
Also created "_shared" versions of these functions to speed up computations by using the architectural caches on GPUs
Unfortunately the version of Cuda my university uses only allows for C-style coding, therefore some of this code is less modularized than I would like it to be.
*/
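// The *_shared kernel variants stage the k centroids (and, for the update step,
// the per-cluster sums and counts) in shared memory: the first k threads of each
// block perform the copy and __syncthreads() publishes it, avoiding repeated
// global-memory reads of the centroid table.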
__device__ float distance(float* __restrict__ a, float* __restrict__ b, int dim) {
float sum = 0;
for(int i = 0; i < dim; i++)
{
sum += (a[i] - b[i]) * (a[i] - b[i]);
}
return sqrt(sum);
}
__global__ void get_nearest_centroid(float* __restrict__ data,
float* __restrict__ centroids,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
int* __restrict__ cluster_assignment,
int N,
int k,
int dim) {
// similar to prefix sum, every thread gets all the data and works on their specific portion of it
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
float* p = data + (idx * dim);
float min_centroid_dist = distance(centroids, p, dim);
int chosen = 0;
for(int i = 1; i < k; ++i) {
float dist = distance(centroids + (dim * i), p, dim);
if (dist < min_centroid_dist) {
chosen = i;
min_centroid_dist = dist;
}
}
for(int i = 0; i < dim; ++i) {
atomicAdd((centroid_sum + (chosen * dim) + i), p[i]);
}
atomicAdd(¢roid_count[chosen], 1);
cluster_assignment[idx] = chosen;
}
}
__global__ void get_nearest_centroid_shared(float* __restrict__ data,
float* __restrict__ centroids_global,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
int* __restrict__ cluster_assignment,
int N,
int k,
int dim) {
extern __shared__ float centroids[];
    // one thread per data point: find this point's nearest centroid and atomically accumulate the point
    // into that centroid's running sum and count
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= N) return;
// use first few threads to copy over centroids
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[(threadIdx.x * dim) + i] = centroids_global[(threadIdx.x * dim) + i];
}
}
__syncthreads();
// doesn't make sense to copy over point to shared memory because it's specific to thread
// instead copy it over to local memory to prevent global memory accesses
float p[1024];
for(int i = 0; i < dim; ++i) {
p[i] = *(data + (idx * dim) + i);
}
float min_centroid_dist = distance(centroids, p, dim);
int chosen = 0;
for(int i = 1; i < k; ++i) {
float dist = distance(centroids + (dim * i), p, dim);
if (dist < min_centroid_dist) {
chosen = i;
min_centroid_dist = dist;
}
}
for(int i = 0; i < dim; ++i) {
atomicAdd((centroid_sum + (chosen * dim) + i), p[i]);
}
    atomicAdd(&centroid_count[chosen], 1);
cluster_assignment[idx] = chosen;
}
__global__ void get_new_centroids_shared(float* __restrict__ centroids_global,
float* __restrict__ centroid_sum_global,
int* __restrict__ centroid_count_global,
bool* __restrict__ repeat,
float threshold,
int k,
int dim) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx > k) return;
    // flat shared buffer laid out as three sections: [0, k*dim) holds the current centroids,
    // [k*dim, 2*k*dim) the centroid sums, and [2*k*dim, 2*k*dim + k) the centroid counts
extern __shared__ float centroids[];
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[threadIdx.x * dim + i] = centroids_global[threadIdx.x * dim + i];
centroids[(k * dim) + threadIdx.x * dim + i] = centroid_sum_global[threadIdx.x * dim + i];
}
centroids[(2 * k * dim) + threadIdx.x] = centroid_count_global[threadIdx.x];
}
__syncthreads();
for(int i = 0; i < dim; ++i) {
// easy shared mem optimization
float cur_centroid = centroids[idx * dim + i];
centroids[idx * dim + i] = centroids[(k * dim) + idx * dim + i] / (float) centroids[(2 * k * dim) + idx];
if(abs(cur_centroid - centroids[idx * dim + i]) > threshold) {
*repeat = true;
}
}
__syncthreads();
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids_global[threadIdx.x * dim + i] = centroids[threadIdx.x * dim + i];
centroid_sum_global[threadIdx.x * dim + i] = centroids[(k * dim) + threadIdx.x * dim + i];
}
centroid_count_global[threadIdx.x] = centroids[(2 * k * dim) + threadIdx.x];
}
__syncthreads();
}
__global__ void get_new_centroids(float* __restrict__ centroids,
float* __restrict__ centroid_sum,
int* __restrict__ centroid_count,
bool* __restrict__ repeat,
float threshold,
int k,
int dim) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < k) {
for(int i = 0; i < dim; ++i) {
float cur_centroid = *(centroids + (idx * dim) + i);
*(centroids + (idx * dim) + i) = *(centroid_sum + (idx * dim) + i) / (float) centroid_count[idx];
if (abs(cur_centroid - *(centroids + (idx * dim) + i)) > threshold) {
*repeat = true;
}
}
}
}
__global__ void find_nearest_centroid(float* __restrict__ data,
float* __restrict__ centroids_global,
float* __restrict__ min_dist,
int N,
int k,
int dim) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
extern __shared__ float centroids[];
if (threadIdx.x < k) {
for(int i = 0; i < dim; ++i) {
centroids[(threadIdx.x * dim) + i] = centroids_global[(threadIdx.x * dim) + i];
}
}
__syncthreads();
float* p = data + (idx * dim);
float min_centroid_dist = distance(centroids, p, dim);
for(int i = 1; i < k; ++i) {
min_centroid_dist = min(distance(centroids + (dim * i), p, dim), min_centroid_dist);
}
min_dist[idx] = min_centroid_dist;
}
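// k-means++ helper: returns each point's distance to its nearest centroid chosen so far
// (intended as the sampling weight when picking the next seed).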
vector<float> kmeans_plus_plus(vector<Point> points, Point* centroids, int k) {
int N = points.size();
int dim = points[0].coord.size();
float flat_points[N * dim];
for(int i = 0; i < N; ++i) {
for(int j = 0; j < dim; ++j) {
flat_points[i * dim + j] = points[i].coord[j];
}
}
SecureCudaArray<float>* cuda_data = new SecureCudaArray<float>(N * dim);
cuda_data->set(flat_points, N * dim);
float flat_centroids[k * dim];
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
flat_centroids[i * dim + j] = centroids[i].coord[j];
}
}
SecureCudaArray<float>* cuda_centroids = new SecureCudaArray<float>(k * dim);
cuda_centroids->set(flat_centroids, k * dim);
SecureCudaArray<float>* cuda_min_dist = new SecureCudaArray<float>(N);
const int threads = 1024;
const int blocks = (N + threads - 1) / threads;
    find_nearest_centroid<<<blocks, threads, k * dim * sizeof(float)>>>(cuda_data->getData(),
cuda_centroids->getData(),
cuda_min_dist->getData(),
N,
k,
dim);
float min_dist_device[N];
cuda_min_dist->get(min_dist_device, N);
delete cuda_min_dist;
delete cuda_centroids;
delete cuda_data;
return vector<float>(min_dist_device, min_dist_device + N);
}
KMeans kmeans_cuda(vector<Point> points, Point* centroids, int k, int max_iterations, float threshold, bool shared) {
// **** Memory ops
int N = points.size();
int dim = points[0].coord.size();
float* flat_points = (float*) malloc(sizeof(float) * N * dim);
for(int i = 0; i < N; ++i) {
for(int j = 0; j < dim; ++j) {
flat_points[i * dim + j] = points[i].coord[j];
}
}
SecureCudaArray<float>* cuda_data = new SecureCudaArray<float>(N * dim);
cuda_data->set(flat_points, N * dim);
free(flat_points);
float flat_centroids[k * dim];
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
flat_centroids[i * dim + j] = centroids[i].coord[j];
}
}
SecureCudaArray<float>* cuda_centroids = new SecureCudaArray<float>(k * dim);
cuda_centroids->set(flat_centroids, k * dim);
SecureCudaArray<int>* centroid_counts = new SecureCudaArray<int>(k);
SecureCudaArray<int>* cluster_assignment = new SecureCudaArray<int>(N);
SecureCudaArray<float>* centroid_sum = new SecureCudaArray<float>(k * dim);
cluster_assignment->fillZeroes();
// End of memory ops
const int threads = 1024;
const int blocks = (N + threads - 1) / threads;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// *** Main loop
int iterations = 0;
float total_time = 0;
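    // Each iteration: (1) assign every point to its nearest centroid, accumulating per-centroid sums and
    // counts, then (2) recompute the centroids and flag whether any of them moved by more than the threshold.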
for(int i = 0; i < max_iterations; ++i) {
centroid_counts->fillZeroes();
centroid_sum->fillZeroes();
cudaEventRecord(start);
if (shared) {
get_nearest_centroid_shared<<<blocks, threads, k * dim * sizeof(float)>>>(cuda_data->getData(),
cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
cluster_assignment->getData(),
N,
k,
dim);
} else {
get_nearest_centroid<<<blocks, threads>>>(cuda_data->getData(),
cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
cluster_assignment->getData(),
N,
k,
dim);
}
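        // wait for the assignment kernel to finish before the update kernels read centroid_sum / centroid_counts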
cudaDeviceSynchronize();
        bool* repeat;
        cudaMalloc(&repeat, sizeof(bool));
        cudaMemset(repeat, 0, sizeof(bool));
if (shared) {
get_new_centroids_shared<<<1, k, ((k * dim * 2) + k) * sizeof(float)>>>(cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
repeat,
threshold,
k,
dim);
} else {
get_new_centroids<<<1, k>>>(cuda_centroids->getData(),
centroid_sum->getData(),
centroid_counts->getData(),
repeat,
threshold,
k,
dim);
}
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
total_time += milliseconds;
iterations++;
bool repeat_loop;
        cudaMemcpy(&repeat_loop, repeat, sizeof(bool), cudaMemcpyDeviceToHost);
        cudaFree(repeat);
if (!repeat_loop) {
break;
}
}
cuda_centroids->get(flat_centroids, k * dim);
for(int i = 0; i < k; ++i) {
for(int j = 0; j < dim; ++j) {
centroids[i].coord[j] = flat_centroids[i * dim + j];
}
}
int assignments[N];
cluster_assignment->get(assignments, N);
delete cluster_assignment;
delete cuda_centroids;
delete cuda_data;
delete centroid_counts;
delete centroid_sum;
return KMeans{vector<Point>(centroids, centroids + k), vector<int>(assignments, assignments + N), total_time, iterations};
} |
73a0278748cdcc1c4278067321b3f5fb85432e11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/common.h"
#include <stdio.h>
#include <iostream>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU! 1\n");
}
int main(int argc, char **argv)
{
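    // launch configuration: one block of ten threads, so the kernel body (and its printf) runs ten times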
hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
printf("Hello World from CPU!\n");
CHECK(hipDeviceReset());
char c;
std::cin>>c;
return 0;
}
| 73a0278748cdcc1c4278067321b3f5fb85432e11.cu | #include "../common/common.h"
#include <stdio.h>
#include <iostream>
/*
* A simple introduction to programming in CUDA. This program prints "Hello
 * World from GPU!" from 10 CUDA threads running on the GPU.
*/
__global__ void helloFromGPU()
{
printf("Hello World from GPU! 1\n");
}
int main(int argc, char **argv)
{
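    // launch configuration: one block of ten threads, so the kernel body (and its printf) runs ten times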
helloFromGPU<<<1, 10>>>();
printf("Hello World from CPU!\n");
CHECK(cudaDeviceReset());
char c;
std::cin>>c;
return 0;
}
|
f844dfb5bb87ecfeb5ca182d18e5c8c17e1d930b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include <time.h>
#include "freeglut.h"
#include <mutex>
#include <thread>
#include <chrono>
/* macros */
#define IX(i,j) ((i)+(N+2)*(j))
#define SWAP(x0,x) {float * tmp=x0;x0=x;x=tmp;}
#define MIN(x, y) (x > y) ? y : x
#define MAX(x, y) (x < y) ? y : x
static int allocate_data(void);
__global__ void add_source(float* x, float* s, float dt, int size)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x);
if (idx < size) {
x[idx] += dt * s[idx];
}
}
__device__ void set_bnd(int N, int b, float* x, int index, int elementsPerThread)
{
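    // Apply boundary conditions along the domain edges: b==1 negates the horizontal component at the
    // left/right walls, b==2 negates the vertical component at the top/bottom walls, any other b just
    // copies the adjacent interior value; the thread with index 0 then averages the four corners.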
int i = index + 1;
if (i > N + 1) {
return;
}
while (i < (index + elementsPerThread) && i <= N + 1) {
x[IX(0, i)] = b == 1 ? -x[IX(1, i)] : x[IX(1, i)];
x[IX(N + 1, i)] = b == 1 ? -x[IX(N, i)] : x[IX(N, i)];
x[IX(i, 0)] = b == 2 ? -x[IX(i, 1)] : x[IX(i, 1)];
x[IX(i, N + 1)] = b == 2 ? -x[IX(i, N)] : x[IX(i, N)];
i++;
}
__syncthreads();
if (index == 0) {
x[IX(0, 0)] = 0.5f * (x[IX(1, 0)] + x[IX(0, 1)]);
x[IX(0, N + 1)] = 0.5f * (x[IX(1, N + 1)] + x[IX(0, N)]);
x[IX(N + 1, 0)] = 0.5f * (x[IX(N, 0)] + x[IX(N + 1, 1)]);
x[IX(N + 1, N + 1)] = 0.5f * (x[IX(N, N + 1)] + x[IX(N + 1, N)]);
}
}
__global__ void lin_solve(int N, int b, float* x, float* x0, float a, float c, int elementPerThread)
{
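    // Iterative relaxation solver: 20 sweeps of x = (x0 + a * sum(neighbours)) / c over the interior
    // cells, re-applying the boundary conditions after each sweep; used for both the diffusion steps
    // and the pressure (Poisson) solve inside the projection.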
int i, j, k, idxNew;
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
for (k = 0; k < 20; k++)
{
idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
j = idxNew / N + 1;
i = idxNew % N + 1;
x[IX(i, j)] = (x0[IX(i, j)] + a * (x[IX(i - 1, j)] + x[IX(i + 1, j)] + x[IX(i, j - 1)] + x[IX(i, j + 1)])) / c;
idxNew++;
}
__syncthreads();
set_bnd(N, b, x, idx, elementPerThread);
}
}
__global__ void advect(int N, int b, float* d, float* d0, float* u, float* v, float dt, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
dt0 = dt * N;
x = i - dt0 * u[IX(i, j)]; y = j - dt0 * v[IX(i, j)];
if (x < 0.5f) x = 0.5f; if (x > N + 0.5f) x = N + 0.5f; i0 = (int)x; i1 = i0 + 1;
if (y < 0.5f) y = 0.5f; if (y > N + 0.5f) y = N + 0.5f; j0 = (int)y; j1 = j0 + 1;
s1 = x - i0; s0 = 1 - s1; t1 = y - j0; t0 = 1 - t1;
d[IX(i, j)] = s0 * (t0 * d0[IX(i0, j0)] + t1 * d0[IX(i0, j1)]) +
s1 * (t0 * d0[IX(i1, j0)] + t1 * d0[IX(i1, j1)]);
idxNew++;
}
__syncthreads();
set_bnd(N, b, d, idx, elementPerThread);
}
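// The pressure projection is split across kernels: project1 computes the velocity divergence and clears p,
// lin_solve then relaxes the Poisson equation for p, and project3 subtracts the pressure gradient so the
// resulting velocity field is (approximately) divergence-free.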
__global__ void project1(int N, float* u, float* v, float* p, float* div, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
div[IX(i, j)] = -0.5f * (u[IX(i + 1, j)] - u[IX(i - 1, j)] + v[IX(i, j + 1)] - v[IX(i, j - 1)]) / N;
p[IX(i, j)] = 0;
idxNew++;
}
__syncthreads();
set_bnd(N, 0, div, idx, elementPerThread);
set_bnd(N, 0, p, idx, elementPerThread);
}
__global__ void project3(int N, float* u, float* v, float* p, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
u[IX(i, j)] -= 0.5f * N * (p[IX(i + 1, j)] - p[IX(i - 1, j)]);
v[IX(i, j)] -= 0.5f * N * (p[IX(i, j + 1)] - p[IX(i, j - 1)]);
idxNew++;
}
__syncthreads();
set_bnd(N, 1, u, idx, elementPerThread);
set_bnd(N, 2, v, idx, elementPerThread);
}
/* global variables */
hipStream_t stream1 = NULL, stream2 = NULL, stream3 = NULL;
// timing variables
auto begin = std::chrono::system_clock::now();
auto end = std::chrono::system_clock::now();
volatile static int N, size;
volatile static float dt, diff, visc, force, source;
volatile static int display_velocity = 0, cuda_streams = 0, shoot_liquid = 0, show_commands=1;
float numIntervals = 100;
float dtMax = 5;
float dtMin = 0.01;
float dt_del = (dtMax - dtMin) / numIntervals;
float viscMax = 1;
float viscMin = 0;
float visc_del = (viscMax - viscMin) / numIntervals;
float diffMax = 1.;
float diffMin = 0;
float diff_del = (diffMax - diffMin) / numIntervals;
float fluidAmountMax = 1000.;
float fluidAmountMin = 1.;
float fluidAmount_del = (fluidAmountMax - fluidAmountMin) / numIntervals;
float forceAmountMax = 5;
float forceAmountMin = 0.1;
float forceAmount_del = (forceAmountMax - forceAmountMin) / numIntervals;
// Parallelize computation :: TODO update values to match
int num_threads_source;
int num_blocks_source;
int num_threads;
int elementsPerThread;
// CPU variables
static float* u, * v, * dens;
static float* u_userInput, * v_userInput, * dens_userInput;
volatile int flag = 0;
std::mutex * guimutexPtr;
// GPU variables
float* u_cuda, * v_cuda, * u_prev_cuda, * v_prev_cuda;
float* p_cuda, * div_cuda;
float* dens_cuda, * dens_prev_cuda;
static int win_id;
static int win_x, win_y;
static int mouse_down[3];
static int omx, omy, mx, my;
static float xtext, ytext;
/*
----------------------------------------------------------------------
free/clear/allocate simulation data
----------------------------------------------------------------------
*/
static void free_data(void)
{
if (u_userInput) free(u_userInput);
if (v_userInput) free(v_userInput);
if (dens_userInput) free(dens_userInput);
if (u) free(u);
if (v) free(v);
if (dens) free(dens);
if (u_cuda) hipFree(u_cuda);
if (v_cuda) hipFree(v_cuda);
if (u_prev_cuda) hipFree(u_prev_cuda);
if (v_prev_cuda) hipFree(v_prev_cuda);
if (p_cuda) hipFree(p_cuda);
if (div_cuda) hipFree(div_cuda);
if (dens_cuda) hipFree(dens_cuda);
if (dens_prev_cuda) hipFree(dens_prev_cuda);
}
static void clear_data(void)
{
hipMemset(u_cuda, 0, sizeof(float) * size);
hipMemset(v_cuda, 0, sizeof(float) * size);
hipMemset(p_cuda, 0, sizeof(float) * size);
hipMemset(div_cuda, 0, sizeof(float) * size);
hipMemset(dens_cuda, 0, sizeof(float) * size);
hipMemset(u_prev_cuda, 0, sizeof(float) * size);
hipMemset(v_prev_cuda, 0, sizeof(float) * size);
hipMemset(dens_prev_cuda, 0, sizeof(float) * size);
for (int i = 0; i < size; i++) {
u_userInput[i] = v_userInput[i] = dens_userInput[i] = 0.0f;
}
}
static void destroy_streams(void) {
// Destroy cuda streams
if (stream1 == stream2 && stream2 == stream3) {
hipStreamDestroy(stream1);
}
else {
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
}
stream1 = NULL;
stream2 = NULL;
stream3 = NULL;
}
static void create_streams(void) {
if (stream1 != NULL) {
return;
}
if (cuda_streams) {
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
}
else {
hipStreamCreate(&stream1);
stream2 = stream1;
stream3 = stream1;
}
}
static void set_numThreads(int newValue) {
if (newValue < 1) {
newValue = 1;
}
if (newValue > 1024) {
newValue = 1024;
}
num_threads = newValue;
    elementsPerThread = (N * N + num_threads - 1) / num_threads; // ceiling division so every grid cell is covered
}
static int set_gridSize(int newValue) {
if (newValue < 32) {
newValue = 32;
}
if (newValue > 1000) {
newValue = 1000;
}
if (N == newValue) {
return 1;
}
//hipDeviceReset();
hipDeviceSynchronize();
const std::lock_guard<std::mutex> lock(*guimutexPtr);
free_data();
N = newValue;
size = (N + 2) * (N + 2);
num_threads_source = (N + 2);
num_blocks_source = (N + 2);
set_numThreads(num_threads);
return allocate_data();
}
static int allocate_data(void)
{
// Allocate space for device copies
u = (float*)malloc(size * sizeof(float));
v = (float*)malloc(size * sizeof(float));
dens = (float*)malloc(size * sizeof(float));
u_userInput = (float*)malloc(size * sizeof(float));
v_userInput = (float*)malloc(size * sizeof(float));
dens_userInput = (float*)malloc(size * sizeof(float));
// gpu copies
hipMalloc(&u_cuda, sizeof(float) * size);
hipMalloc(&v_cuda, sizeof(float) * size);
hipMalloc(&p_cuda, sizeof(float) * size);
hipMalloc(&div_cuda, sizeof(float) * size);
hipMalloc(&u_prev_cuda, sizeof(float) * size);
hipMalloc(&v_prev_cuda, sizeof(float) * size);
hipMalloc(&dens_cuda, sizeof(float) * size);
hipMalloc(&dens_prev_cuda, sizeof(float) * size);
if (!u_userInput || !v_userInput || !dens_userInput || !dens || !u || !v || !u_cuda || !v_cuda || !u_prev_cuda || !v_prev_cuda || !dens_cuda || !dens_prev_cuda) {
fprintf(stderr, "cannot allocate data\n");
return (0);
}
return (1);
}
/*
----------------------------------------------------------------------
OpenGL specific drawing routines
----------------------------------------------------------------------
*/
static void pre_display(void)
{
glViewport(0, 0, win_x, win_y);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, 1.0, 0.0, 1.0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
static void post_display(void)
{
glutSwapBuffers();
}
static void draw_velocity(void)
{
int i, j;
float x, y, h;
h = 1.0f / N;
hipMemcpy(u, u_cuda, sizeof(float) * size, hipMemcpyDeviceToHost);
hipMemcpy(v, v_cuda, sizeof(float) * size, hipMemcpyDeviceToHost);
glColor3f(1.0f, 1.0f, 1.0f);
glLineWidth(1.0f);
glBegin(GL_LINES);
for (i = 1; i <= N; i++) {
x = (i - 0.5f) * h;
for (j = 1; j <= N; j++) {
y = (j - 0.5f) * h;
glVertex2f(x, y);
glVertex2f(x + u[IX(i, j)], y + v[IX(i, j)]);
}
}
glEnd();
}
static void draw_density(void)
{
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f / N;
hipMemcpy(dens, dens_cuda, sizeof(float) * size, hipMemcpyDeviceToHost);
glBegin(GL_QUADS);
for (i = 0; i <= N; i++) {
x = (i - 0.5f) * h;
for (j = 0; j <= N; j++) {
y = (j - 0.5f) * h;
d00 = dens[IX(i, j)];
d01 = dens[IX(i, j + 1)];
d10 = dens[IX(i + 1, j)];
d11 = dens[IX(i + 1, j + 1)];
glColor3f(d00, d00, d00 * 0); glVertex2f(x, y);
glColor3f(d10, d10, d10 * 0); glVertex2f(x + h, y);
glColor3f(d11, d11, d11 * 0); glVertex2f(x + h, y + h);
glColor3f(d01, d01, d01 * 0); glVertex2f(x, y + h);
}
}
glEnd();
}
/*
----------------------------------------------------------------------
relates mouse movements to forces sources
----------------------------------------------------------------------
*/
static void get_from_UI(float* d, float* u, float* v)
{
hipMemcpy(u_prev_cuda, u_userInput, sizeof(float) * size, hipMemcpyHostToDevice);
hipMemcpy(v_prev_cuda, v_userInput, sizeof(float) * size, hipMemcpyHostToDevice);;
hipMemcpy(dens_prev_cuda, dens_userInput, sizeof(float) * size, hipMemcpyHostToDevice);
return;
}
static void get_from_UI_CPU(void)
{
int i, j;
if (shoot_liquid) {
i = (N + 2) / 5;
j = (N + 2) / 5;
u_userInput[IX(i, j)] = force;
v_userInput[IX(i, j)] = force;
dens_userInput[IX(i, j)] = source;
}
if (!mouse_down[0] && !mouse_down[2]) return;
i = (int)((mx / (float)win_x) * N + 1);
j = (int)(((win_y - my) / (float)win_y) * N + 1);
if (i<1 || i>N || j<1 || j>N) return;
if (mouse_down[0]) {
u_userInput[IX(i, j)] += force * (mx - omx);
v_userInput[IX(i, j)] += force * (omy - my);
}
if (mouse_down[2]) {
dens_userInput[IX(i - 1, j)] = source;
dens_userInput[IX(i + 1, j)] = source;
dens_userInput[IX(i, j + 1)] = source;
dens_userInput[IX(i, j - 1)] = source;
dens_userInput[IX(i, j)] = source;
}
omx = mx;
omy = my;
return;
}
/*
----------------------------------------------------------------------
GLUT callback routines
----------------------------------------------------------------------
*/
static void Sliders()
{
const unsigned char a[50] = "Controls";
const unsigned char b[50] = "Time Step [a]";
const unsigned char c[50] = "Viscosity [s]";
    const unsigned char d[50] = "Diffusion [d]";
const unsigned char e[50] = "Fluid Amount [f]";
const unsigned char f[50] = "Force Amount [g]";
const unsigned char g[50] = "# threads: ";
const unsigned char h[50] = "grid size: ";
unsigned char numThreads[50];
strcpy((char*) numThreads, std::to_string(num_threads).c_str());
const unsigned char* numThreads_const = (const unsigned char*)numThreads;
unsigned char gridSize[50];
strcpy((char*)gridSize, std::to_string(N).c_str());
const unsigned char* gridSize_const = (const unsigned char*)gridSize;
const unsigned char cudaStreams[50] = "CUDA streams enabled";
const unsigned char dispenser[50] = "Jet enabled";
const unsigned char* aPtr = a;
const unsigned char* bPtr = b;
const unsigned char* cPtr = c;
const unsigned char* dPtr = d;
const unsigned char* ePtr = e;
const unsigned char* fPtr = f;
const unsigned char* gPtr = g;
const unsigned char* hPtr = h;
const unsigned char* streamsPtr = cudaStreams;
const unsigned char* jetPtr = dispenser;
// Drawing Sliders Text Fields
glColor3f(1.0, 1.0, 1.0);
glRasterPos2f(0.78125, 0.94921875);
glutBitmapString(GLUT_BITMAP_9_BY_15, aPtr);
glRasterPos2f(0.64453125, 0.90625000);
glutBitmapString(GLUT_BITMAP_8_BY_13, bPtr);
glRasterPos2f(0.64453125, 0.86718750);
glutBitmapString(GLUT_BITMAP_8_BY_13, cPtr);
glRasterPos2f(0.64453125, 0.82812500);
glutBitmapString(GLUT_BITMAP_8_BY_13, dPtr);
glRasterPos2f(0.64453125, 0.7890625);
glutBitmapString(GLUT_BITMAP_8_BY_13, ePtr);
glRasterPos2f(0.64453125, 0.7500000);
glutBitmapString(GLUT_BITMAP_8_BY_13, fPtr);
glRasterPos2f(0.78125 + 0.05, 0.7109375);
glutBitmapString(GLUT_BITMAP_8_BY_13, gPtr);
glRasterPos2f(0.78125 + 0.05 + 0.1, 0.7109375);
glutBitmapString(GLUT_BITMAP_8_BY_13, numThreads_const);
glRasterPos2f(0.78125 + 0.05, 0.6718750 + 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, hPtr);
glRasterPos2f(0.78125 + 0.05 + 0.1, 0.6718750 + 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, gridSize_const);
if (cuda_streams) {
glRasterPos2f(0.78125, 0.6718750);
glutBitmapString(GLUT_BITMAP_8_BY_13, streamsPtr);
}
if (shoot_liquid) {
glRasterPos2f(0.78125, 0.6718750 - 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, jetPtr);
}
glRasterPos2f(0., 0.);
glBegin(GL_LINES);
glColor3f(1.0, 1.0, 1.0);
// Draw slider boxes.
for (int i = 0; i < 5; i++)
{
// Compute heights.
float heightTop = 1. - (38. + (float)i * 20.) / 512.;
float heightBottom = 1. - (49. + (float)i * 20.) / 512.;
glVertex2d(0.83984375, heightTop);
glVertex2d(0.99609375, heightTop);
glVertex2d(0.83984375, heightBottom);
glVertex2d(0.99609375, heightBottom);
glVertex2d(0.83984375, heightTop);
glVertex2d(0.83984375, heightBottom);
glVertex2d(0.99609375, heightTop);
glVertex2d(0.99609375, heightBottom);
}
// Fill In Sliders
float sliderStart = 0.83984375;
float sliderEnd = 0.99609375;
// Variable bounds.
// Compute dynamic slider fill.
float dtSliderEnd = ((dt / dtMax) * 0.15625) + sliderStart;
float viscSliderEnd = ((visc / viscMax) * 0.15625) + sliderStart;
float diffSliderEnd = ((diff / diffMax) * 0.15625) + sliderStart;
float fluidAmountSliderEnd = ((source / fluidAmountMax) * 0.15625) + sliderStart;
float forceAmountSliderEnd = ((force / forceAmountMax) * 0.15625) + sliderStart;
for (float i = sliderStart; i <= sliderEnd; i += 0.001)
{
float heightTop = 0.0;
float heightBottom = 0.0;
if (i <= dtSliderEnd)
{
heightTop = 1. - (38. + 0. * 20.) / 512.;
heightBottom = 1. - (49. + 0. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= viscSliderEnd)
{
heightTop = 1. - (38. + 1. * 20.) / 512.;
heightBottom = 1. - (49. + 1. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= diffSliderEnd)
{
heightTop = 1. - (38. + 2. * 20.) / 512.;
heightBottom = 1. - (49. + 2. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= fluidAmountSliderEnd)
{
heightTop = 1. - (38. + 3. * 20.) / 512.;
heightBottom = 1. - (49. + 3. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= forceAmountSliderEnd)
{
heightTop = 1. - (38. + 4. * 20.) / 512.;
heightBottom = 1. - (49. + 4. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
}
glEnd();
}
static void key_func(unsigned char key, int x, int y)
{
switch (key)
{
case 'c':
case 'C':
clear_data();
break;
case 'q':
case 'Q':
show_commands = !show_commands;
break;
case 'v':
case 'V':
display_velocity = !display_velocity;
break;
case 'a':
dt = MIN(dtMax, dt+ dt_del);
printf("dt is now %f\n", dt);
break;
case 'A':
dt = MAX(dtMin, dt - dt_del);
printf("dt is now %f\n", dt);
break;
case 's':
visc = MIN(viscMax, visc + visc_del);
printf("visc is now %f\n", visc);
break;
case 'S':
visc = MAX(viscMin, visc - visc_del);
printf("visc is now %f\n", visc);
break;
case 'd':
diff = MIN(diffMax, diff + diff_del);
printf("diff is now %f\n", diff);
break;
case 'D':
diff = MAX(diffMin, diff - diff_del);
printf("diff is now %f\n", diff);
break;
case 'f':
source = MIN(fluidAmountMax, source + fluidAmount_del);
printf("fluidAmount is now %f\n", source);
break;
case 'F':
source = MAX(fluidAmountMin, source - fluidAmount_del);
printf("fluidAmount is now %f\n", source);
break;
case 'g':
force = MIN(forceAmountMax, force + forceAmount_del);
printf("forceAmount is now %f\n", force);
break;
case 'G':
force = MAX(forceAmountMin, force - forceAmount_del);
printf("forceAmount is now %f\n", force);
break;
case 'e':
case 'E':
destroy_streams();
cuda_streams = !cuda_streams;
create_streams();
break;
case 'w':
case 'W':
shoot_liquid = !shoot_liquid;
break;
case '1':
set_numThreads(num_threads * 2);
break;
case '2':
set_numThreads(num_threads / 2);
break;
case '3':
if (!set_gridSize(N * 2)) exit(1);
clear_data();
break;
case '4':
if (!set_gridSize(N / 2)) exit(1);
clear_data();
break;
}
}
static void mouse_func(int button, int state, int x, int y)
{
omx = mx = x;
    omy = my = y;
mouse_down[button] = state == GLUT_DOWN;
}
static void motion_func(int x, int y)
{
mx = x;
my = y;
}
static void reshape_func(int width, int height)
{
glutSetWindow(win_id);
glutReshapeWindow(width, height);
win_x = width;
win_y = height;
}
static void idle_func(void)
{
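    // One simulation step: inject the user-supplied sources, diffuse velocity and density, project the
    // velocity field, advect velocity and project again, then advect the density with the updated
    // velocity field (stable-fluids-style add source / diffuse / project / advect sequence).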
// have copy of host data
get_from_UI(dens_prev_cuda, u_prev_cuda, v_prev_cuda);
hipDeviceSynchronize();
////// Velocity timestep parallelization
add_source << < num_blocks_source, num_threads_source >> > (u_cuda, u_prev_cuda, dt, size);
add_source << < num_blocks_source, num_threads_source >> > (v_cuda, v_prev_cuda, dt, size);
add_source << < num_blocks_source, num_threads_source >> > (dens_cuda, dens_prev_cuda, dt, size);
for (int i = 0; i < size; i++) {
u_userInput[i] = v_userInput[i] = dens_userInput[i] = 0.0f;
}
SWAP(u_prev_cuda, u_cuda);
SWAP(v_prev_cuda, v_cuda);
SWAP(dens_prev_cuda, dens_cuda);
hipDeviceSynchronize();
//// diffuse
float a = dt * visc * N * N;
lin_solve << < 1, num_threads, 0, stream1 >> > (N, 1, u_cuda, u_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse u_cuda
lin_solve << < 1, num_threads, 0, stream2 >> > (N, 2, v_cuda, v_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse v_cuda
a = dt * diff * N * N;
lin_solve << < 1, num_threads, 0, stream3 >> > (N, 0, dens_cuda, dens_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse dens_cuda
hipDeviceSynchronize();
// projection step (no swapping beforehand)
project1 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, div_cuda, elementsPerThread);
lin_solve << < 1, num_threads >> > (N, 0, p_cuda, div_cuda, 1, 4, elementsPerThread);
project3 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, elementsPerThread);
SWAP(u_prev_cuda, u_cuda);
SWAP(v_prev_cuda, v_cuda);
hipDeviceSynchronize();
advect << < 1, num_threads, 0, stream1 >> > (N, 1, u_cuda, u_prev_cuda, u_prev_cuda, v_prev_cuda, dt, elementsPerThread);
advect << < 1, num_threads, 0, stream2 >> > (N, 2, v_cuda, v_prev_cuda, u_prev_cuda, v_prev_cuda, dt, elementsPerThread);
hipDeviceSynchronize();
// projection step (no swapping beforehand)
project1 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, div_cuda, elementsPerThread);
lin_solve << < 1, num_threads >> > (N, 0, p_cuda, div_cuda, 1, 4, elementsPerThread);
project3 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, elementsPerThread);
SWAP(dens_prev_cuda, dens_cuda);
hipDeviceSynchronize();
// Density timestep parallelization
advect << < 1, num_threads >> > (N, 0, dens_cuda, dens_prev_cuda, u_cuda, v_cuda, dt, elementsPerThread);
glutSetWindow(win_id);
glutPostRedisplay();
}
static void display_func(void)
{
pre_display();
if (display_velocity) draw_velocity();
else draw_density();
end = std::chrono::system_clock::now();
auto dur = end - begin;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
printf("framerate: %f\n", 1000.0 / ms);
begin = std::chrono::system_clock::now();
if (show_commands) Sliders();
post_display();
}
/*
----------------------------------------------------------------------
open_glut_window --- open a glut compatible window and set callbacks
----------------------------------------------------------------------
*/
static void open_glut_window(void)
{
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(0, 0);
glutInitWindowSize(win_x, win_y);
win_id = glutCreateWindow("Fluid Simulation");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
pre_display();
glutKeyboardFunc(key_func);
glutMouseFunc(mouse_func);
glutMotionFunc(motion_func);
// glutReshapeFunc ( reshape_func );
glutIdleFunc(idle_func);
glutDisplayFunc(display_func);
}
/*
----------------------------------------------------------------------
main --- main routine
----------------------------------------------------------------------
*/
int main(int argc, char** argv)
{
glutInit(&argc, argv);
    if (argc != 1 && argc != 7) {
fprintf(stderr, "usage : %s N dt diff visc force source\n", argv[0]);
fprintf(stderr, "where:\n"); \
fprintf(stderr, "\t N : grid resolution\n");
fprintf(stderr, "\t dt : time step\n");
fprintf(stderr, "\t diff : diffusion rate of the density\n");
fprintf(stderr, "\t visc : viscosity of the fluid\n");
fprintf(stderr, "\t force : scales the mouse movement that generate a force\n");
fprintf(stderr, "\t source : amount of density that will be deposited\n");
exit(1);
}
if (argc == 1) {
//N = 128;
dt = 1.0f;
diff = 0.0f;
visc = 0.0f;
force = 1.0f;
source = 100.0f;
fprintf(stderr, "Using defaults : N=%d dt=%g diff=%g visc=%g force = %g source=%g\n",
N, dt, diff, visc, force, source);
}
else {
//N = atoi(argv[1]);
dt = atof(argv[2]);
diff = atof(argv[3]);
visc = atof(argv[4]);
force = atof(argv[5]);
source = atof(argv[6]);
}
printf("\n\nHow to use this demo:\n\n");
printf("\t Add densities with the right mouse button\n");
printf("\t Add velocities with the left mouse button and dragging the mouse\n");
printf("\t Toggle density/velocity display with the 'v' key\n");
printf("\t Clear the simulation by pressing the 'c' key\n");
printf("\t Quit by pressing the 'q' key\n");
set_numThreads(1024);
display_velocity = 0;
std::mutex guimutex;
guimutexPtr = &guimutex;
if (!set_gridSize(256)) exit(1);
clear_data();
create_streams();
    // The mutex serializes access to the shared user-input arrays between the polling thread created below and the GLUT callbacks.
std::thread t1([&guimutex]() {
while (!flag) {
{
const std::lock_guard<std::mutex> lock(guimutex);
get_from_UI_CPU();
}
std::this_thread::sleep_for(std::chrono::milliseconds(25));
}
});
win_x = 800;
win_y = 800;
open_glut_window();
glutMainLoop();
//stop GUI thread
flag = 1;
t1.join();
// Free Device space
hipFree(u_cuda);
hipFree(v_cuda);
hipFree(u_prev_cuda);
hipFree(dens_cuda);
hipFree(dens_prev_cuda);
hipFree(v_prev_cuda);
destroy_streams();
exit(0);
} | f844dfb5bb87ecfeb5ca182d18e5c8c17e1d930b.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include <time.h>
#include "freeglut.h"
#include <mutex>
#include <thread>
#include <chrono>
/* macros */
#define IX(i,j) ((i)+(N+2)*(j))
#define SWAP(x0,x) {float * tmp=x0;x0=x;x=tmp;}
#define MIN(x, y) (x > y) ? y : x
#define MAX(x, y) (x < y) ? y : x
static int allocate_data(void);
__global__ void add_source(float* x, float* s, float dt, int size)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x);
if (idx < size) {
x[idx] += dt * s[idx];
}
}
__device__ void set_bnd(int N, int b, float* x, int index, int elementsPerThread)
{
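    // Apply boundary conditions along the domain edges: b==1 negates the horizontal component at the
    // left/right walls, b==2 negates the vertical component at the top/bottom walls, any other b just
    // copies the adjacent interior value; the thread with index 0 then averages the four corners.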
int i = index + 1;
if (i > N + 1) {
return;
}
while (i < (index + elementsPerThread) && i <= N + 1) {
x[IX(0, i)] = b == 1 ? -x[IX(1, i)] : x[IX(1, i)];
x[IX(N + 1, i)] = b == 1 ? -x[IX(N, i)] : x[IX(N, i)];
x[IX(i, 0)] = b == 2 ? -x[IX(i, 1)] : x[IX(i, 1)];
x[IX(i, N + 1)] = b == 2 ? -x[IX(i, N)] : x[IX(i, N)];
i++;
}
__syncthreads();
if (index == 0) {
x[IX(0, 0)] = 0.5f * (x[IX(1, 0)] + x[IX(0, 1)]);
x[IX(0, N + 1)] = 0.5f * (x[IX(1, N + 1)] + x[IX(0, N)]);
x[IX(N + 1, 0)] = 0.5f * (x[IX(N, 0)] + x[IX(N + 1, 1)]);
x[IX(N + 1, N + 1)] = 0.5f * (x[IX(N, N + 1)] + x[IX(N + 1, N)]);
}
}
__global__ void lin_solve(int N, int b, float* x, float* x0, float a, float c, int elementPerThread)
{
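    // Iterative relaxation solver: 20 sweeps of x = (x0 + a * sum(neighbours)) / c over the interior
    // cells, re-applying the boundary conditions after each sweep; used for both the diffusion steps
    // and the pressure (Poisson) solve inside the projection.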
int i, j, k, idxNew;
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
for (k = 0; k < 20; k++)
{
idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
j = idxNew / N + 1;
i = idxNew % N + 1;
x[IX(i, j)] = (x0[IX(i, j)] + a * (x[IX(i - 1, j)] + x[IX(i + 1, j)] + x[IX(i, j - 1)] + x[IX(i, j + 1)])) / c;
idxNew++;
}
__syncthreads();
set_bnd(N, b, x, idx, elementPerThread);
}
}
__global__ void advect(int N, int b, float* d, float* d0, float* u, float* v, float dt, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
int i0, j0, i1, j1;
float x, y, s0, t0, s1, t1, dt0;
dt0 = dt * N;
x = i - dt0 * u[IX(i, j)]; y = j - dt0 * v[IX(i, j)];
if (x < 0.5f) x = 0.5f; if (x > N + 0.5f) x = N + 0.5f; i0 = (int)x; i1 = i0 + 1;
if (y < 0.5f) y = 0.5f; if (y > N + 0.5f) y = N + 0.5f; j0 = (int)y; j1 = j0 + 1;
s1 = x - i0; s0 = 1 - s1; t1 = y - j0; t0 = 1 - t1;
d[IX(i, j)] = s0 * (t0 * d0[IX(i0, j0)] + t1 * d0[IX(i0, j1)]) +
s1 * (t0 * d0[IX(i1, j0)] + t1 * d0[IX(i1, j1)]);
idxNew++;
}
__syncthreads();
set_bnd(N, b, d, idx, elementPerThread);
}
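// The pressure projection is split across kernels: project1 computes the velocity divergence and clears p,
// lin_solve then relaxes the Poisson equation for p, and project3 subtracts the pressure gradient so the
// resulting velocity field is (approximately) divergence-free.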
__global__ void project1(int N, float* u, float* v, float* p, float* div, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
div[IX(i, j)] = -0.5f * (u[IX(i + 1, j)] - u[IX(i - 1, j)] + v[IX(i, j + 1)] - v[IX(i, j - 1)]) / N;
p[IX(i, j)] = 0;
idxNew++;
}
__syncthreads();
set_bnd(N, 0, div, idx, elementPerThread);
set_bnd(N, 0, p, idx, elementPerThread);
}
__global__ void project3(int N, float* u, float* v, float* p, int elementPerThread)
{
int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elementPerThread;
if (idx >= N * N) {
return;
}
int idxNew = idx;
while (idxNew < idx + elementPerThread && idxNew < N * N) {
int j = idxNew / N + 1;
int i = idxNew % N + 1;
u[IX(i, j)] -= 0.5f * N * (p[IX(i + 1, j)] - p[IX(i - 1, j)]);
v[IX(i, j)] -= 0.5f * N * (p[IX(i, j + 1)] - p[IX(i, j - 1)]);
idxNew++;
}
__syncthreads();
set_bnd(N, 1, u, idx, elementPerThread);
set_bnd(N, 2, v, idx, elementPerThread);
}
/* global variables */
cudaStream_t stream1 = NULL, stream2 = NULL, stream3 = NULL;
// timing variables
auto begin = std::chrono::system_clock::now();
auto end = std::chrono::system_clock::now();
volatile static int N, size;
volatile static float dt, diff, visc, force, source;
volatile static int display_velocity = 0, cuda_streams = 0, shoot_liquid = 0, show_commands=1;
float numIntervals = 100;
float dtMax = 5;
float dtMin = 0.01;
float dt_del = (dtMax - dtMin) / numIntervals;
float viscMax = 1;
float viscMin = 0;
float visc_del = (viscMax - viscMin) / numIntervals;
float diffMax = 1.;
float diffMin = 0;
float diff_del = (diffMax - diffMin) / numIntervals;
float fluidAmountMax = 1000.;
float fluidAmountMin = 1.;
float fluidAmount_del = (fluidAmountMax - fluidAmountMin) / numIntervals;
float forceAmountMax = 5;
float forceAmountMin = 0.1;
float forceAmount_del = (forceAmountMax - forceAmountMin) / numIntervals;
// Parallelize computation :: TODO update values to match
int num_threads_source;
int num_blocks_source;
int num_threads;
int elementsPerThread;
// CPU variables
static float* u, * v, * dens;
static float* u_userInput, * v_userInput, * dens_userInput;
volatile int flag = 0;
std::mutex * guimutexPtr;
// GPU variables
float* u_cuda, * v_cuda, * u_prev_cuda, * v_prev_cuda;
float* p_cuda, * div_cuda;
float* dens_cuda, * dens_prev_cuda;
static int win_id;
static int win_x, win_y;
static int mouse_down[3];
static int omx, omy, mx, my;
static float xtext, ytext;
/*
----------------------------------------------------------------------
free/clear/allocate simulation data
----------------------------------------------------------------------
*/
static void free_data(void)
{
if (u_userInput) free(u_userInput);
if (v_userInput) free(v_userInput);
if (dens_userInput) free(dens_userInput);
if (u) free(u);
if (v) free(v);
if (dens) free(dens);
if (u_cuda) cudaFree(u_cuda);
if (v_cuda) cudaFree(v_cuda);
if (u_prev_cuda) cudaFree(u_prev_cuda);
if (v_prev_cuda) cudaFree(v_prev_cuda);
if (p_cuda) cudaFree(p_cuda);
if (div_cuda) cudaFree(div_cuda);
if (dens_cuda) cudaFree(dens_cuda);
if (dens_prev_cuda) cudaFree(dens_prev_cuda);
}
static void clear_data(void)
{
cudaMemset(u_cuda, 0, sizeof(float) * size);
cudaMemset(v_cuda, 0, sizeof(float) * size);
cudaMemset(p_cuda, 0, sizeof(float) * size);
cudaMemset(div_cuda, 0, sizeof(float) * size);
cudaMemset(dens_cuda, 0, sizeof(float) * size);
cudaMemset(u_prev_cuda, 0, sizeof(float) * size);
cudaMemset(v_prev_cuda, 0, sizeof(float) * size);
cudaMemset(dens_prev_cuda, 0, sizeof(float) * size);
for (int i = 0; i < size; i++) {
u_userInput[i] = v_userInput[i] = dens_userInput[i] = 0.0f;
}
}
static void destroy_streams(void) {
// Destroy cuda streams
if (stream1 == stream2 && stream2 == stream3) {
cudaStreamDestroy(stream1);
}
else {
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
}
stream1 = NULL;
stream2 = NULL;
stream3 = NULL;
}
static void create_streams(void) {
if (stream1 != NULL) {
return;
}
if (cuda_streams) {
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
}
else {
cudaStreamCreate(&stream1);
stream2 = stream1;
stream3 = stream1;
}
}
static void set_numThreads(int newValue) {
if (newValue < 1) {
newValue = 1;
}
if (newValue > 1024) {
newValue = 1024;
}
num_threads = newValue;
    elementsPerThread = (N * N + num_threads - 1) / num_threads; // ceiling division so every grid cell is covered
}
static int set_gridSize(int newValue) {
if (newValue < 32) {
newValue = 32;
}
if (newValue > 1000) {
newValue = 1000;
}
if (N == newValue) {
return 1;
}
//cudaDeviceReset();
cudaDeviceSynchronize();
const std::lock_guard<std::mutex> lock(*guimutexPtr);
free_data();
N = newValue;
size = (N + 2) * (N + 2);
num_threads_source = (N + 2);
num_blocks_source = (N + 2);
set_numThreads(num_threads);
return allocate_data();
}
static int allocate_data(void)
{
// Allocate space for device copies
u = (float*)malloc(size * sizeof(float));
v = (float*)malloc(size * sizeof(float));
dens = (float*)malloc(size * sizeof(float));
u_userInput = (float*)malloc(size * sizeof(float));
v_userInput = (float*)malloc(size * sizeof(float));
dens_userInput = (float*)malloc(size * sizeof(float));
// gpu copies
cudaMalloc(&u_cuda, sizeof(float) * size);
cudaMalloc(&v_cuda, sizeof(float) * size);
cudaMalloc(&p_cuda, sizeof(float) * size);
cudaMalloc(&div_cuda, sizeof(float) * size);
cudaMalloc(&u_prev_cuda, sizeof(float) * size);
cudaMalloc(&v_prev_cuda, sizeof(float) * size);
cudaMalloc(&dens_cuda, sizeof(float) * size);
cudaMalloc(&dens_prev_cuda, sizeof(float) * size);
if (!u_userInput || !v_userInput || !dens_userInput || !dens || !u || !v || !u_cuda || !v_cuda || !u_prev_cuda || !v_prev_cuda || !dens_cuda || !dens_prev_cuda) {
fprintf(stderr, "cannot allocate data\n");
return (0);
}
return (1);
}
/*
----------------------------------------------------------------------
OpenGL specific drawing routines
----------------------------------------------------------------------
*/
static void pre_display(void)
{
glViewport(0, 0, win_x, win_y);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0.0, 1.0, 0.0, 1.0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
static void post_display(void)
{
glutSwapBuffers();
}
static void draw_velocity(void)
{
int i, j;
float x, y, h;
h = 1.0f / N;
cudaMemcpy(u, u_cuda, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaMemcpy(v, v_cuda, sizeof(float) * size, cudaMemcpyDeviceToHost);
glColor3f(1.0f, 1.0f, 1.0f);
glLineWidth(1.0f);
glBegin(GL_LINES);
for (i = 1; i <= N; i++) {
x = (i - 0.5f) * h;
for (j = 1; j <= N; j++) {
y = (j - 0.5f) * h;
glVertex2f(x, y);
glVertex2f(x + u[IX(i, j)], y + v[IX(i, j)]);
}
}
glEnd();
}
static void draw_density(void)
{
int i, j;
float x, y, h, d00, d01, d10, d11;
h = 1.0f / N;
cudaMemcpy(dens, dens_cuda, sizeof(float) * size, cudaMemcpyDeviceToHost);
glBegin(GL_QUADS);
for (i = 0; i <= N; i++) {
x = (i - 0.5f) * h;
for (j = 0; j <= N; j++) {
y = (j - 0.5f) * h;
d00 = dens[IX(i, j)];
d01 = dens[IX(i, j + 1)];
d10 = dens[IX(i + 1, j)];
d11 = dens[IX(i + 1, j + 1)];
glColor3f(d00, d00, d00 * 0); glVertex2f(x, y);
glColor3f(d10, d10, d10 * 0); glVertex2f(x + h, y);
glColor3f(d11, d11, d11 * 0); glVertex2f(x + h, y + h);
glColor3f(d01, d01, d01 * 0); glVertex2f(x, y + h);
}
}
glEnd();
}
/*
----------------------------------------------------------------------
relates mouse movements to forces sources
----------------------------------------------------------------------
*/
static void get_from_UI(float* d, float* u, float* v)
{
cudaMemcpy(u_prev_cuda, u_userInput, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemcpy(v_prev_cuda, v_userInput, sizeof(float) * size, cudaMemcpyHostToDevice);;
cudaMemcpy(dens_prev_cuda, dens_userInput, sizeof(float) * size, cudaMemcpyHostToDevice);
return;
}
static void get_from_UI_CPU(void)
{
int i, j;
if (shoot_liquid) {
i = (N + 2) / 5;
j = (N + 2) / 5;
u_userInput[IX(i, j)] = force;
v_userInput[IX(i, j)] = force;
dens_userInput[IX(i, j)] = source;
}
if (!mouse_down[0] && !mouse_down[2]) return;
i = (int)((mx / (float)win_x) * N + 1);
j = (int)(((win_y - my) / (float)win_y) * N + 1);
if (i<1 || i>N || j<1 || j>N) return;
if (mouse_down[0]) {
u_userInput[IX(i, j)] += force * (mx - omx);
v_userInput[IX(i, j)] += force * (omy - my);
}
if (mouse_down[2]) {
dens_userInput[IX(i - 1, j)] = source;
dens_userInput[IX(i + 1, j)] = source;
dens_userInput[IX(i, j + 1)] = source;
dens_userInput[IX(i, j - 1)] = source;
dens_userInput[IX(i, j)] = source;
}
omx = mx;
omy = my;
return;
}
/*
----------------------------------------------------------------------
GLUT callback routines
----------------------------------------------------------------------
*/
static void Sliders()
{
const unsigned char a[50] = "Controls";
const unsigned char b[50] = "Time Step [a]";
const unsigned char c[50] = "Viscosity [s]";
    const unsigned char d[50] = "Diffusion [d]";
const unsigned char e[50] = "Fluid Amount [f]";
const unsigned char f[50] = "Force Amount [g]";
const unsigned char g[50] = "# threads: ";
const unsigned char h[50] = "grid size: ";
unsigned char numThreads[50];
strcpy((char*) numThreads, std::to_string(num_threads).c_str());
const unsigned char* numThreads_const = (const unsigned char*)numThreads;
unsigned char gridSize[50];
strcpy((char*)gridSize, std::to_string(N).c_str());
const unsigned char* gridSize_const = (const unsigned char*)gridSize;
const unsigned char cudaStreams[50] = "CUDA streams enabled";
const unsigned char dispenser[50] = "Jet enabled";
const unsigned char* aPtr = a;
const unsigned char* bPtr = b;
const unsigned char* cPtr = c;
const unsigned char* dPtr = d;
const unsigned char* ePtr = e;
const unsigned char* fPtr = f;
const unsigned char* gPtr = g;
const unsigned char* hPtr = h;
const unsigned char* streamsPtr = cudaStreams;
const unsigned char* jetPtr = dispenser;
// Drawing Sliders Text Fields
glColor3f(1.0, 1.0, 1.0);
glRasterPos2f(0.78125, 0.94921875);
glutBitmapString(GLUT_BITMAP_9_BY_15, aPtr);
glRasterPos2f(0.64453125, 0.90625000);
glutBitmapString(GLUT_BITMAP_8_BY_13, bPtr);
glRasterPos2f(0.64453125, 0.86718750);
glutBitmapString(GLUT_BITMAP_8_BY_13, cPtr);
glRasterPos2f(0.64453125, 0.82812500);
glutBitmapString(GLUT_BITMAP_8_BY_13, dPtr);
glRasterPos2f(0.64453125, 0.7890625);
glutBitmapString(GLUT_BITMAP_8_BY_13, ePtr);
glRasterPos2f(0.64453125, 0.7500000);
glutBitmapString(GLUT_BITMAP_8_BY_13, fPtr);
glRasterPos2f(0.78125 + 0.05, 0.7109375);
glutBitmapString(GLUT_BITMAP_8_BY_13, gPtr);
glRasterPos2f(0.78125 + 0.05 + 0.1, 0.7109375);
glutBitmapString(GLUT_BITMAP_8_BY_13, numThreads_const);
glRasterPos2f(0.78125 + 0.05, 0.6718750 + 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, hPtr);
glRasterPos2f(0.78125 + 0.05 + 0.1, 0.6718750 + 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, gridSize_const);
if (cuda_streams) {
glRasterPos2f(0.78125, 0.6718750);
glutBitmapString(GLUT_BITMAP_8_BY_13, streamsPtr);
}
if (shoot_liquid) {
glRasterPos2f(0.78125, 0.6718750 - 0.025);
glutBitmapString(GLUT_BITMAP_8_BY_13, jetPtr);
}
glRasterPos2f(0., 0.);
glBegin(GL_LINES);
glColor3f(1.0, 1.0, 1.0);
// Draw slider boxes.
for (int i = 0; i < 5; i++)
{
// Compute heights.
float heightTop = 1. - (38. + (float)i * 20.) / 512.;
float heightBottom = 1. - (49. + (float)i * 20.) / 512.;
glVertex2d(0.83984375, heightTop);
glVertex2d(0.99609375, heightTop);
glVertex2d(0.83984375, heightBottom);
glVertex2d(0.99609375, heightBottom);
glVertex2d(0.83984375, heightTop);
glVertex2d(0.83984375, heightBottom);
glVertex2d(0.99609375, heightTop);
glVertex2d(0.99609375, heightBottom);
}
// Fill In Sliders
float sliderStart = 0.83984375;
float sliderEnd = 0.99609375;
// Variable bounds.
// Compute dynamic slider fill.
float dtSliderEnd = ((dt / dtMax) * 0.15625) + sliderStart;
float viscSliderEnd = ((visc / viscMax) * 0.15625) + sliderStart;
float diffSliderEnd = ((diff / diffMax) * 0.15625) + sliderStart;
float fluidAmountSliderEnd = ((source / fluidAmountMax) * 0.15625) + sliderStart;
float forceAmountSliderEnd = ((force / forceAmountMax) * 0.15625) + sliderStart;
for (float i = sliderStart; i <= sliderEnd; i += 0.001)
{
float heightTop = 0.0;
float heightBottom = 0.0;
if (i <= dtSliderEnd)
{
heightTop = 1. - (38. + 0. * 20.) / 512.;
heightBottom = 1. - (49. + 0. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= viscSliderEnd)
{
heightTop = 1. - (38. + 1. * 20.) / 512.;
heightBottom = 1. - (49. + 1. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= diffSliderEnd)
{
heightTop = 1. - (38. + 2. * 20.) / 512.;
heightBottom = 1. - (49. + 2. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= fluidAmountSliderEnd)
{
heightTop = 1. - (38. + 3. * 20.) / 512.;
heightBottom = 1. - (49. + 3. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
if (i <= forceAmountSliderEnd)
{
heightTop = 1. - (38. + 4. * 20.) / 512.;
heightBottom = 1. - (49. + 4. * 20.) / 512.;
glVertex2d(i, heightTop);
glVertex2d(i, heightBottom);
}
}
glEnd();
}
static void key_func(unsigned char key, int x, int y)
{
switch (key)
{
case 'c':
case 'C':
clear_data();
break;
case 'q':
case 'Q':
show_commands = !show_commands;
break;
case 'v':
case 'V':
display_velocity = !display_velocity;
break;
case 'a':
dt = MIN(dtMax, dt+ dt_del);
printf("dt is now %f\n", dt);
break;
case 'A':
dt = MAX(dtMin, dt - dt_del);
printf("dt is now %f\n", dt);
break;
case 's':
visc = MIN(viscMax, visc + visc_del);
printf("visc is now %f\n", visc);
break;
case 'S':
visc = MAX(viscMin, visc - visc_del);
printf("visc is now %f\n", visc);
break;
case 'd':
diff = MIN(diffMax, diff + diff_del);
printf("diff is now %f\n", diff);
break;
case 'D':
diff = MAX(diffMin, diff - diff_del);
printf("diff is now %f\n", diff);
break;
case 'f':
source = MIN(fluidAmountMax, source + fluidAmount_del);
printf("fluidAmount is now %f\n", source);
break;
case 'F':
source = MAX(fluidAmountMin, source - fluidAmount_del);
printf("fluidAmount is now %f\n", source);
break;
case 'g':
force = MIN(forceAmountMax, force + forceAmount_del);
printf("forceAmount is now %f\n", force);
break;
case 'G':
force = MAX(forceAmountMin, force - forceAmount_del);
printf("forceAmount is now %f\n", force);
break;
case 'e':
case 'E':
destroy_streams();
cuda_streams = !cuda_streams;
create_streams();
break;
case 'w':
case 'W':
shoot_liquid = !shoot_liquid;
break;
case '1':
set_numThreads(num_threads * 2);
break;
case '2':
set_numThreads(num_threads / 2);
break;
case '3':
if (!set_gridSize(N * 2)) exit(1);
clear_data();
break;
case '4':
if (!set_gridSize(N / 2)) exit(1);
clear_data();
break;
}
}
static void mouse_func(int button, int state, int x, int y)
{
omx = mx = x;
    omy = my = y;
mouse_down[button] = state == GLUT_DOWN;
}
static void motion_func(int x, int y)
{
mx = x;
my = y;
}
static void reshape_func(int width, int height)
{
glutSetWindow(win_id);
glutReshapeWindow(width, height);
win_x = width;
win_y = height;
}
static void idle_func(void)
{
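    // One simulation step: inject the user-supplied sources, diffuse velocity and density, project the
    // velocity field, advect velocity and project again, then advect the density with the updated
    // velocity field (stable-fluids-style add source / diffuse / project / advect sequence).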
// have copy of host data
get_from_UI(dens_prev_cuda, u_prev_cuda, v_prev_cuda);
cudaDeviceSynchronize();
////// Velocity timestep parallelization
add_source << < num_blocks_source, num_threads_source >> > (u_cuda, u_prev_cuda, dt, size);
add_source << < num_blocks_source, num_threads_source >> > (v_cuda, v_prev_cuda, dt, size);
add_source << < num_blocks_source, num_threads_source >> > (dens_cuda, dens_prev_cuda, dt, size);
for (int i = 0; i < size; i++) {
u_userInput[i] = v_userInput[i] = dens_userInput[i] = 0.0f;
}
SWAP(u_prev_cuda, u_cuda);
SWAP(v_prev_cuda, v_cuda);
SWAP(dens_prev_cuda, dens_cuda);
cudaDeviceSynchronize();
//// diffuse
float a = dt * visc * N * N;
lin_solve << < 1, num_threads, 0, stream1 >> > (N, 1, u_cuda, u_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse u_cuda
lin_solve << < 1, num_threads, 0, stream2 >> > (N, 2, v_cuda, v_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse v_cuda
a = dt * diff * N * N;
lin_solve << < 1, num_threads, 0, stream3 >> > (N, 0, dens_cuda, dens_prev_cuda, a, 1 + 4 * a, elementsPerThread); // diffuse dens_cuda
cudaDeviceSynchronize();
// projection step (no swapping beforehand)
project1 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, div_cuda, elementsPerThread);
lin_solve << < 1, num_threads >> > (N, 0, p_cuda, div_cuda, 1, 4, elementsPerThread);
project3 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, elementsPerThread);
SWAP(u_prev_cuda, u_cuda);
SWAP(v_prev_cuda, v_cuda);
cudaDeviceSynchronize();
advect << < 1, num_threads, 0, stream1 >> > (N, 1, u_cuda, u_prev_cuda, u_prev_cuda, v_prev_cuda, dt, elementsPerThread);
advect << < 1, num_threads, 0, stream2 >> > (N, 2, v_cuda, v_prev_cuda, u_prev_cuda, v_prev_cuda, dt, elementsPerThread);
cudaDeviceSynchronize();
// projection step (no swapping beforehand)
project1 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, div_cuda, elementsPerThread);
lin_solve << < 1, num_threads >> > (N, 0, p_cuda, div_cuda, 1, 4, elementsPerThread);
project3 << < 1, num_threads >> > (N, u_cuda, v_cuda, p_cuda, elementsPerThread);
SWAP(dens_prev_cuda, dens_cuda);
cudaDeviceSynchronize();
// Density timestep parallelization
advect << < 1, num_threads >> > (N, 0, dens_cuda, dens_prev_cuda, u_cuda, v_cuda, dt, elementsPerThread);
glutSetWindow(win_id);
glutPostRedisplay();
}
static void display_func(void)
{
pre_display();
if (display_velocity) draw_velocity();
else draw_density();
end = std::chrono::system_clock::now();
auto dur = end - begin;
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
printf("framerate: %f\n", 1000.0 / ms);
begin = std::chrono::system_clock::now();
if (show_commands) Sliders();
post_display();
}
/*
----------------------------------------------------------------------
open_glut_window --- open a glut compatible window and set callbacks
----------------------------------------------------------------------
*/
static void open_glut_window(void)
{
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowPosition(0, 0);
glutInitWindowSize(win_x, win_y);
win_id = glutCreateWindow("Fluid Simulation");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT);
glutSwapBuffers();
pre_display();
glutKeyboardFunc(key_func);
glutMouseFunc(mouse_func);
glutMotionFunc(motion_func);
// glutReshapeFunc ( reshape_func );
glutIdleFunc(idle_func);
glutDisplayFunc(display_func);
}
/*
----------------------------------------------------------------------
main --- main routine
----------------------------------------------------------------------
*/
int main(int argc, char** argv)
{
glutInit(&argc, argv);
    if (argc != 1 && argc != 7) {
fprintf(stderr, "usage : %s N dt diff visc force source\n", argv[0]);
fprintf(stderr, "where:\n"); \
fprintf(stderr, "\t N : grid resolution\n");
fprintf(stderr, "\t dt : time step\n");
fprintf(stderr, "\t diff : diffusion rate of the density\n");
fprintf(stderr, "\t visc : viscosity of the fluid\n");
fprintf(stderr, "\t force : scales the mouse movement that generate a force\n");
fprintf(stderr, "\t source : amount of density that will be deposited\n");
exit(1);
}
if (argc == 1) {
//N = 128;
dt = 1.0f;
diff = 0.0f;
visc = 0.0f;
force = 1.0f;
source = 100.0f;
fprintf(stderr, "Using defaults : N=%d dt=%g diff=%g visc=%g force = %g source=%g\n",
N, dt, diff, visc, force, source);
}
else {
//N = atoi(argv[1]);
dt = atof(argv[2]);
diff = atof(argv[3]);
visc = atof(argv[4]);
force = atof(argv[5]);
source = atof(argv[6]);
}
printf("\n\nHow to use this demo:\n\n");
printf("\t Add densities with the right mouse button\n");
printf("\t Add velocities with the left mouse button and dragging the mouse\n");
printf("\t Toggle density/velocity display with the 'v' key\n");
printf("\t Clear the simulation by pressing the 'c' key\n");
printf("\t Quit by pressing the 'q' key\n");
set_numThreads(1024);
display_velocity = 0;
std::mutex guimutex;
guimutexPtr = &guimutex;
if (!set_gridSize(256)) exit(1);
clear_data();
create_streams();
    // The mutex serializes access to the shared user-input arrays between the polling thread created below and the GLUT callbacks.
std::thread t1([&guimutex]() {
while (!flag) {
{
const std::lock_guard<std::mutex> lock(guimutex);
get_from_UI_CPU();
}
std::this_thread::sleep_for(std::chrono::milliseconds(25));
}
});
win_x = 800;
win_y = 800;
open_glut_window();
glutMainLoop();
//stop GUI thread
flag = 1;
t1.join();
// Free Device space
cudaFree(u_cuda);
cudaFree(v_cuda);
cudaFree(u_prev_cuda);
cudaFree(dens_cuda);
cudaFree(dens_prev_cuda);
cudaFree(v_prev_cuda);
destroy_streams();
exit(0);
} |
e4ab475e135c7741fed444aa5aedf2e33bb780e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <stdlib.h> //for rand(),malloc, free()
#include <windows.h> //for QueryPerformanceCounter()
#define _DEBUG //for debug mode
//#define NDEBUG //for release mode
#if defined(NDEBUG)
//code for release mode
#define CUDA_CHECK(x) (x)
#else
//code for debug mode
#define CUDA_CHECK(x) do{\
(x);\
hipError_t e = hipGetLastError();\
if (hipSuccess != e) {\
printf("cuda failure %s at %s:%d\n",\
hipGetErrorString(e),\
__FILE__, __LINE__);\
exit(1);\
}\
} while (0)
#endif
const int WIDTH = 1024;//matrix size
const int TILE_WIDTH = 32;
const int GRID_WIDTH = (WIDTH / TILE_WIDTH);
//shared memory version
__global__ void matmul(float* g_C, const float* g_A, const float* g_B, const int width) {
__shared__ float s_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_B[TILE_WIDTH][TILE_WIDTH];
int by = blockIdx.y; int bx = blockIdx.x;
int ty = threadIdx.y; int tx = threadIdx.x;
int gy = by * TILE_WIDTH + ty;
int gx = bx * TILE_WIDTH + tx;
float sum = 0.0f;
for (register int m = 0; m < width / TILE_WIDTH; m++) {
s_A[ty][tx] = g_A[gy * width + (m * TILE_WIDTH + tx)];
s_B[ty][tx] = g_B[(m * TILE_WIDTH + ty) * width + gx];
__syncthreads();
		//use the shared memory tiles to accumulate the partial sum
for (register int k = 0; k < TILE_WIDTH; k++) {
sum += s_A[ty][k] * s_B[k][tx];
}
		__syncthreads();// sync once more after the tiles are consumed: every thread must be done with s_A/s_B before the next iteration overwrites them
}
g_C[gy * width + gx] = sum;
}
//global memory version (naive reference kernel, not launched by main)
__global__ void matmul_global(float* c, const float* a, const float* b, const int width) {
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0F;
for (register int k = 0; k < width; k++) {
float lhs = a[y * width + k];
float rhs = b[k * width + x];
sum += lhs * rhs;
}
c[y * width + x] = sum;
}
void genData(float* ptr, unsigned int size) {
while (size--) {
*ptr++ = (float)(rand() % 1000) / 1000.0F;
}
}
int main() {
float* pA = NULL;
float* pB = NULL;
float* pC = NULL;
long long cntStart, cntEnd, freq;
QueryPerformanceFrequency((LARGE_INTEGER*)(&freq));
//malloc memories on the host-side
pA = (float*)malloc(WIDTH * WIDTH * sizeof(float));
pB = (float*)malloc(WIDTH * WIDTH * sizeof(float));
pC = (float*)malloc(WIDTH * WIDTH * sizeof(float));
//generate source data
genData(pA, WIDTH * WIDTH);
genData(pB, WIDTH * WIDTH);
//CUDA: allocate device memory
float* pAdev = NULL;
float* pBdev = NULL;
float* pCdev = NULL;
CUDA_CHECK(hipMalloc((void**)&pAdev, WIDTH * WIDTH * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&pBdev, WIDTH * WIDTH * sizeof(float)));
CUDA_CHECK(hipMalloc((void**)&pCdev, WIDTH * WIDTH * sizeof(float)));
//copy host -> device
CUDA_CHECK(hipMemcpy(pAdev, pA, WIDTH * WIDTH * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(pBdev, pB, WIDTH * WIDTH * sizeof(float), hipMemcpyHostToDevice));
//CUDA_CHECK(hipMemcpy(pCdev, pC, WIDTH * WIDTH * sizeof(float), hipMemcpyHostToDevice));
//start the timer
QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart));
//launch the kernel
dim3 dimGrid(GRID_WIDTH, GRID_WIDTH, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
matmul << <dimGrid, dimBlock >> > (pCdev, pAdev, pBdev, WIDTH);
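	//the launch is asynchronous; wait for the kernel to finish so the timer measures execution, not just launch overhead
	CUDA_CHECK(hipDeviceSynchronize());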
//end the timer
QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd));
CUDA_CHECK(hipPeekAtLastError());
printf("elapsed time=%f msec\n", (double)(cntEnd - cntStart) * 1000.0 / (double)(freq));
	//copy device -> host
CUDA_CHECK(hipMemcpy(pC, pCdev, WIDTH * WIDTH * sizeof(float), hipMemcpyDeviceToHost));
//free device memory
CUDA_CHECK(hipFree(pAdev));
CUDA_CHECK(hipFree(pBdev));
CUDA_CHECK(hipFree(pCdev));
//print
int i, j;
i = 0; j = 0;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
i = WIDTH / 2; j = WIDTH / 2;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
i = WIDTH - 1; j = WIDTH - 1;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
//done
return 0;
}
| e4ab475e135c7741fed444aa5aedf2e33bb780e0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <stdlib.h> //for rand(),malloc, free()
#include <windows.h> //for QueryPerformanceCounter()
#define _DEBUG //for debug mode
//#define NDEBUG //for release mode
#if defined(NDEBUG)
//code for release mode
#define CUDA_CHECK(x) (x)
#else
//code for debug mode
#define CUDA_CHECK(x) do{\
(x);\
cudaError_t e = cudaGetLastError();\
if (cudaSuccess != e) {\
printf("cuda failure %s at %s:%d\n",\
cudaGetErrorString(e),\
__FILE__, __LINE__);\
exit(1);\
}\
} while (0)
#endif
const int WIDTH = 1024;//matrix size
const int TILE_WIDTH = 32;
const int GRID_WIDTH = (WIDTH / TILE_WIDTH);
//shared memory version
__global__ void matmul(float* g_C, const float* g_A, const float* g_B, const int width) {
__shared__ float s_A[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_B[TILE_WIDTH][TILE_WIDTH];
int by = blockIdx.y; int bx = blockIdx.x;
int ty = threadIdx.y; int tx = threadIdx.x;
int gy = by * TILE_WIDTH + ty;
int gx = bx * TILE_WIDTH + tx;
float sum = 0.0f;
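	//march over the K dimension one TILE_WIDTH-wide tile at a time: each pass stages one
	//TILE_WIDTH x TILE_WIDTH tile of A and B in shared memory, so the block reuses every loaded element TILE_WIDTH times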
for (register int m = 0; m < width / TILE_WIDTH; m++) {
s_A[ty][tx] = g_A[gy * width + (m * TILE_WIDTH + tx)];
s_B[ty][tx] = g_B[(m * TILE_WIDTH + ty) * width + gx];
__syncthreads();
		//use the shared memory tiles to accumulate the partial sum
for (register int k = 0; k < TILE_WIDTH; k++) {
sum += s_A[ty][k] * s_B[k][tx];
}
		__syncthreads();// sync once more after the tiles are consumed: every thread must be done with s_A/s_B before the next iteration overwrites them
}
g_C[gy * width + gx] = sum;
}
//global memory version (naive reference kernel, not launched by main)
__global__ void matmul_global(float* c, const float* a, const float* b, const int width) {
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0F;
for (register int k = 0; k < width; k++) {
float lhs = a[y * width + k];
float rhs = b[k * width + x];
sum += lhs * rhs;
}
c[y * width + x] = sum;
}
void genData(float* ptr, unsigned int size) {
while (size--) {
*ptr++ = (float)(rand() % 1000) / 1000.0F;
}
}
int main() {
float* pA = NULL;
float* pB = NULL;
float* pC = NULL;
long long cntStart, cntEnd, freq;
QueryPerformanceFrequency((LARGE_INTEGER*)(&freq));
//malloc memories on the host-side
pA = (float*)malloc(WIDTH * WIDTH * sizeof(float));
pB = (float*)malloc(WIDTH * WIDTH * sizeof(float));
pC = (float*)malloc(WIDTH * WIDTH * sizeof(float));
//generate source data
genData(pA, WIDTH * WIDTH);
genData(pB, WIDTH * WIDTH);
//CUDA: allocate device memory
float* pAdev = NULL;
float* pBdev = NULL;
float* pCdev = NULL;
CUDA_CHECK(cudaMalloc((void**)&pAdev, WIDTH * WIDTH * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&pBdev, WIDTH * WIDTH * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&pCdev, WIDTH * WIDTH * sizeof(float)));
//copy host -> device
CUDA_CHECK(cudaMemcpy(pAdev, pA, WIDTH * WIDTH * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(pBdev, pB, WIDTH * WIDTH * sizeof(float), cudaMemcpyHostToDevice));
//CUDA_CHECK(cudaMemcpy(pCdev, pC, WIDTH * WIDTH * sizeof(float), cudaMemcpyHostToDevice));
//start the timer
QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart));
//launch the kernel
dim3 dimGrid(GRID_WIDTH, GRID_WIDTH, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
matmul << <dimGrid, dimBlock >> > (pCdev, pAdev, pBdev, WIDTH);
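	//the launch is asynchronous; wait for the kernel to finish so the timer measures execution, not just launch overhead
	CUDA_CHECK(cudaDeviceSynchronize());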
//end the timer
QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd));
CUDA_CHECK(cudaPeekAtLastError());
printf("elapsed time=%f msec\n", (double)(cntEnd - cntStart) * 1000.0 / (double)(freq));
	//copy device -> host
CUDA_CHECK(cudaMemcpy(pC, pCdev, WIDTH * WIDTH * sizeof(float), cudaMemcpyDeviceToHost));
//free device memory
CUDA_CHECK(cudaFree(pAdev));
CUDA_CHECK(cudaFree(pBdev));
CUDA_CHECK(cudaFree(pCdev));
//print
int i, j;
i = 0; j = 0;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
i = WIDTH / 2; j = WIDTH / 2;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
i = WIDTH - 1; j = WIDTH - 1;
printf("c[%4d][%4d]=%f\n", i, j, pC[i * WIDTH + j]);
//done
return 0;
}
|
07573c60ca17063ab7158d13d1987848afa057e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#ifndef __HIPCC__
#include <stdlib.h>
#include <math.h>
#include <hmpprt/Grouplet.h>
#include <hmpprt/HostTypes.h>
#include <hmpprt/Context.h>
#include <hmpprt/CUDAGrid.h>
#include <hmpprt/CUDAModule.h>
#include <hmpprt/DeviceManager.h>
#include <hmpperr/hmpperr.h>
#include <openacci/openacci_c.h>
#ifdef _WIN32
# define CDLT_API __declspec(dllexport)
#else /* ! _WIN32 */
# define CDLT_API
#endif /* _WIN32 */
#else // ! __HIPCC__
#include <hmpprt/HostTypes.h>
#include <hmpprt/CUDAIntrinsics.h>
extern __shared__ int64_t hmpp_sharedmem[];
#endif // __HIPCC__
#ifndef __HIPCC__
#else
#endif
#define HMPPCG_SIMD_LENGTH 32
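// HMPP-generated codelets for one Laplacian-pyramid level: pad the source with extrapolated
// borders, smooth it with a separable 5-tap [1 4 6 4 1]/16 filter (horizontal then vertical),
// downsample by 2, upsample back to full size, and store the residual Source - UpsampleDst
// as the signed 16-bit Laplacian layer.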
# 75 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_79(hmpprt::s32 height_8, hmpprt::s32 width_11, hmpprt::s32* pSrc_padding_3, hmpprt::u08* pBufL_cp_4)
;
#endif // __HIPCC__
# 75 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_79_internal_1(hmpprt::s32 height_19, hmpprt::s32 width_20, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_8)
;
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_79_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_79_parallel_region_1(hmpprt::s32 height_1, hmpprt::u08* pBufL_cp_1, hmpprt::s32* pSrc_padding_1, hmpprt::s32 width_1);
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_55(hmpprt::s32 height_9, hmpprt::s32 width_12, hmpprt::u08* Source_3, hmpprt::s32* pSrc_padding_2)
;
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_55_internal_1(hmpprt::s32 height_20, hmpprt::s32 width_21, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_4)
;
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_55_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_55_parallel_region_1(hmpprt::u08* Source_1, hmpprt::s32 height_2, hmpprt::s32* pSrc_padding, hmpprt::s32 width_2);
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_247(hmpprt::s32 height_10, hmpprt::s32 width_13, hmpprt::u08* Source_2, hmpprt::s16* LaplacianLayer_1, hmpprt::u08* UpsampleDst_5)
;
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_247_internal_1(hmpprt::s32 height_15, hmpprt::s32 width_22, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> LaplacianLayer_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_9)
;
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_247_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_247_parallel_region_1(hmpprt::s16* LaplacianLayer, hmpprt::u08* Source, hmpprt::u08* UpsampleDst_1, hmpprt::s32 height_3, hmpprt::s32 width_3);
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_230(hmpprt::s32 width_4, hmpprt::s32 halfWidth_5, hmpprt::u08* DownsampleDst_5, hmpprt::u08* UpsampleDst_6)
;
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_230_internal_1(hmpprt::s32 width_27, hmpprt::s32 halfWidth_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_10)
;
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_230_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_230_parallel_region_1(hmpprt::u08* DownsampleDst_1, hmpprt::u08* UpsampleDst_2, hmpprt::s32 halfWidth_1);
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_202(hmpprt::s32 width_14, hmpprt::s32 halfWidth_10, hmpprt::s32 halfHeight_8, hmpprt::u08* DownsampleDst_10, hmpprt::u08* UpsampleDst_11)
;
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_202_internal_1(hmpprt::s32 width_23, hmpprt::s32 halfWidth_6, hmpprt::s32 halfHeight_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_7)
;
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_202_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_202_parallel_region_1(hmpprt::u08* DownsampleDst_2, hmpprt::u08* UpsampleDst_3, hmpprt::s32 halfHeight_1, hmpprt::s32 halfWidth_2, hmpprt::s32 width_5);
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_188(hmpprt::s32 height_11, hmpprt::s32 width_15, hmpprt::s32 halfWidth_7, hmpprt::s32 halfHeight_2, hmpprt::u08* DownsampleDst_11, hmpprt::u08* UpsampleDst_4)
;
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_188_internal_1(hmpprt::s32 height_16, hmpprt::s32 width_24, hmpprt::s32 halfWidth_11, hmpprt::s32 halfHeight_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_8)
;
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_188_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_188_parallel_region_1(hmpprt::u08* DownsampleDst_3, hmpprt::u08* UpsampleDst, hmpprt::s32 halfHeight_5, hmpprt::s32 halfWidth_3, hmpprt::s32 height_4, hmpprt::s32 width_6);
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_159(hmpprt::s32 width_16, hmpprt::s32 halfWidth_4, hmpprt::s32 halfHeight_3, hmpprt::u08* FilterDst_3, hmpprt::u08* DownsampleDst_4)
;
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_159_internal_1(hmpprt::s32 width_25, hmpprt::s32 halfWidth_8, hmpprt::s32 halfHeight_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_8)
;
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_159_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_159_parallel_region_1(hmpprt::u08* DownsampleDst, hmpprt::u08* FilterDst_1, hmpprt::s32 halfHeight, hmpprt::s32 halfWidth, hmpprt::s32 width_7);
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_136(hmpprt::s32 height_5, hmpprt::s32 width_8, hmpprt::u08* FilterDst_2, hmpprt::s32* pSrc_padding2_6)
;
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_136_internal_1(hmpprt::s32 height_18, hmpprt::s32 width_28, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_1)
;
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_136_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_136_parallel_region_1(hmpprt::u08* FilterDst, hmpprt::s32 height_12, hmpprt::s32* pSrc_padding2_4, hmpprt::s32 width_17);
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_114(hmpprt::s32 height_17, hmpprt::s32 width_18, hmpprt::s32* pSrc_padding2_7, hmpprt::u08* pBufL_cp_2)
;
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_114_internal_1(hmpprt::s32 height_13, hmpprt::s32 width_26, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_6)
;
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_114_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_114_parallel_region_1(hmpprt::s32 height_6, hmpprt::u08* pBufL_cp_5, hmpprt::s32* pSrc_padding2_5, hmpprt::s32 width_9);
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_100(hmpprt::s32 height_14, hmpprt::s32 width_10, hmpprt::s32* pSrc_padding2_3, hmpprt::u08* pBufL_cp_7)
;
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_100_internal_1(hmpprt::s32 height, hmpprt::s32 width_19, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_3)
;
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_100_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_100_parallel_region_1(hmpprt::s32 height_7, hmpprt::u08* pBufL_cp, hmpprt::s32* pSrc_padding2, hmpprt::s32 width);
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_100_parallel_region_1(hmpprt::s32 height_7, hmpprt::u08* pBufL_cp, hmpprt::s32* pSrc_padding2, hmpprt::s32 width)
{
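 // copy the horizontally filtered intermediate pBufL_cp (height x width) into the interior of
 // pSrc_padding2 ((height+4) x (width+4)), leaving a 2-pixel border for the vertical filter pass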
# 99 "laplacian.cpp"
{
# 103 "laplacian.cpp"
hmpprt::s32 iter_per_gang_10;
# 103 "laplacian.cpp"
hmpprt::s32 first_gang_iter_10;
# 103 "laplacian.cpp"
hmpprt::s32 last_gang_iter_10;
# 103 "laplacian.cpp"
iter_per_gang_10 = ((1 + (height_7 * width - 1) / 192) > 256 ? (1 + (height_7 * width - 1) / 192) : 256);
# 103 "laplacian.cpp"
first_gang_iter_10 = (hmpprt::gr_gbidx()) * iter_per_gang_10;
# 103 "laplacian.cpp"
last_gang_iter_10 = ((first_gang_iter_10 + iter_per_gang_10 - 1) < (height_7 * width - 1) ? (first_gang_iter_10 + iter_per_gang_10 - 1) : (height_7 * width - 1));
# 103 "laplacian.cpp"
hmpprt::s32 h_9;
# 103 "laplacian.cpp"
# 107 "laplacian.cpp"
for (h_9 = first_gang_iter_10 + (hmpprt::gr_btidy()) ; h_9 <= last_gang_iter_10 ; h_9 = h_9 + (hmpprt::gr_btnumy()))
{
# 103 "laplacian.cpp"
hmpprt::s32 h_4;
# 106 "laplacian.cpp"
hmpprt::s32 w_5;
# 107 "laplacian.cpp"
w_5 = h_9 % width;
# 107 "laplacian.cpp"
h_4 = h_9 / width;
# 107 "laplacian.cpp"
*(pSrc_padding2 + ((h_4 + 2) * (width + 4) + w_5 + 2)) = (hmpprt::s32 ) (*(pBufL_cp + (h_4 * width + w_5)));
}
# 97 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_100_internal_1(hmpprt::s32 height, hmpprt::s32 width_19, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_3)
{
# 97 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height), "height_7");
__hmppcg_call.addLocalParameter(&pBufL_cp_3, 8, "pBufL_cp");
__hmppcg_call.addLocalParameter(&pSrc_padding2_8, 8, "pSrc_padding2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_19), "width");
__hmppcg_call.launch(hmpp_acc_region_main_100_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 97 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_100(hmpprt::s32 height_14, hmpprt::s32 width_10, hmpprt::s32* pSrc_padding2_3, hmpprt::u08* pBufL_cp_7)
{
# 114 "laplacian.cpp"
(hmpp_acc_region_main_100_internal_1(height_14, width_10, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_7)));
}
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_114_parallel_region_1(hmpprt::s32 height_6, hmpprt::u08* pBufL_cp_5, hmpprt::s32* pSrc_padding2_5, hmpprt::s32 width_9)
{
# 116 "laplacian.cpp"
{
# 117 "laplacian.cpp"
hmpprt::s32 iter_per_gang_9;
# 117 "laplacian.cpp"
hmpprt::s32 first_gang_iter_9;
# 117 "laplacian.cpp"
hmpprt::s32 last_gang_iter_9;
# 117 "laplacian.cpp"
iter_per_gang_9 = ((1 + (width_9 - 1) / 192) > 256 ? (1 + (width_9 - 1) / 192) : 256);
# 117 "laplacian.cpp"
first_gang_iter_9 = (hmpprt::gr_gbidx()) * iter_per_gang_9;
# 117 "laplacian.cpp"
last_gang_iter_9 = ((first_gang_iter_9 + iter_per_gang_9 - 1) < (width_9 - 1) ? (first_gang_iter_9 + iter_per_gang_9 - 1) : (width_9 - 1));
# 117 "laplacian.cpp"
hmpprt::s32 w_6;
# 117 "laplacian.cpp"
# 118 "laplacian.cpp"
for (w_6 = first_gang_iter_9 + (hmpprt::gr_btidy()) ; w_6 <= last_gang_iter_9 ; w_6 = w_6 + (hmpprt::gr_btnumy()))
{
# 119 "laplacian.cpp"
*(pSrc_padding2_5 + (width_9 + 4 + (w_6 + 2))) = 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + (width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + (3 * width_9 + (w_6 + 2) - 2)));
# 120 "laplacian.cpp"
*(pSrc_padding2_5 + (w_6 + 2)) = 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + (3 * width_9 + (w_6 + 2) - 2))) - 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + (width_9 + (w_6 + 2) - 2))) + 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + w_6)) - (hmpprt::s32 ) (*(pBufL_cp_5 + (2 * width_9 + (w_6 + 2) - 2)));
# 123 "laplacian.cpp"
*(pSrc_padding2_5 + ((height_6 + 2) * (width_9 + 4) + (w_6 + 2))) = 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 2) * width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 4) * width_9 + (w_6 + 2) - 2)));
# 124 "laplacian.cpp"
*(pSrc_padding2_5 + ((height_6 + 3) * (width_9 + 4) + (w_6 + 2))) = 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 4) * width_9 + (w_6 + 2) - 2))) - 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 2) * width_9 + (w_6 + 2) - 2))) + 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 1) * width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 3) * width_9 + (w_6 + 2) - 2)));
}
# 114 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_114_internal_1(hmpprt::s32 height_13, hmpprt::s32 width_26, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_6)
{
# 114 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_13), "height_6");
__hmppcg_call.addLocalParameter(&pBufL_cp_6, 8, "pBufL_cp_5");
__hmppcg_call.addLocalParameter(&pSrc_padding2_2, 8, "pSrc_padding2_5");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_26), "width_9");
__hmppcg_call.launch(hmpp_acc_region_main_114_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 114 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_114(hmpprt::s32 height_17, hmpprt::s32 width_18, hmpprt::s32* pSrc_padding2_7, hmpprt::u08* pBufL_cp_2)
{
# 132 "laplacian.cpp"
(hmpp_acc_region_main_114_internal_1(height_17, width_18, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_7), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_2)));
}
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_136_parallel_region_1(hmpprt::u08* FilterDst, hmpprt::s32 height_12, hmpprt::s32* pSrc_padding2_4, hmpprt::s32 width_17)
{
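 // vertical pass of the separable 5-tap filter: each output pixel is
 // (p[h] + 4*p[h+1] + 6*p[h+2] + 4*p[h+3] + p[h+4] + 8) >> 4 down a column of the padded buffer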
# 134 "laplacian.cpp"
{
# 139 "laplacian.cpp"
hmpprt::s32 iter_per_gang_8;
# 139 "laplacian.cpp"
hmpprt::s32 first_gang_iter_8;
# 139 "laplacian.cpp"
hmpprt::s32 last_gang_iter_8;
# 139 "laplacian.cpp"
iter_per_gang_8 = ((1 + (width_17 * height_12 - 1) / 192) > 256 ? (1 + (width_17 * height_12 - 1) / 192) : 256);
# 139 "laplacian.cpp"
first_gang_iter_8 = (hmpprt::gr_gbidx()) * iter_per_gang_8;
# 139 "laplacian.cpp"
last_gang_iter_8 = ((first_gang_iter_8 + iter_per_gang_8 - 1) < (width_17 * height_12 - 1) ? (first_gang_iter_8 + iter_per_gang_8 - 1) : (width_17 * height_12 - 1));
# 139 "laplacian.cpp"
hmpprt::s32 w_10;
# 139 "laplacian.cpp"
# 142 "laplacian.cpp"
for (w_10 = first_gang_iter_8 + (hmpprt::gr_btidy()) ; w_10 <= last_gang_iter_8 ; w_10 = w_10 + (hmpprt::gr_btnumy()))
{
# 139 "laplacian.cpp"
hmpprt::s32 w_7;
# 141 "laplacian.cpp"
hmpprt::s32 h_5;
# 143 "laplacian.cpp"
h_5 = w_10 % height_12;
# 143 "laplacian.cpp"
w_7 = w_10 / height_12;
# 143 "laplacian.cpp"
*(FilterDst + (h_5 * width_17 + w_7)) = (hmpprt::u08 ) (*(pSrc_padding2_4 + (h_5 * (width_17 + 4) + (w_7 + 2))) + (*(pSrc_padding2_4 + ((h_5 + 1) * (width_17 + 4) + (w_7 + 2))) << 2) + 6 * *(pSrc_padding2_4 + ((h_5 + 2) * (width_17 + 4) + (w_7 + 2))) + (*(pSrc_padding2_4 + ((h_5 + 3) * (width_17 + 4) + (w_7 + 2))) << 2) + *(pSrc_padding2_4 + ((h_5 + 4) * (width_17 + 4) + (w_7 + 2))) + 8 >> 4);
}
# 132 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_136_internal_1(hmpprt::s32 height_18, hmpprt::s32 width_28, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_1)
{
# 132 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&FilterDst_4, 8, "FilterDst");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_18), "height_12");
__hmppcg_call.addLocalParameter(&pSrc_padding2_1, 8, "pSrc_padding2_4");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_28), "width_17");
__hmppcg_call.launch(hmpp_acc_region_main_136_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 132 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_136(hmpprt::s32 height_5, hmpprt::s32 width_8, hmpprt::u08* FilterDst_2, hmpprt::s32* pSrc_padding2_6)
{
# 154 "laplacian.cpp"
(hmpp_acc_region_main_136_internal_1(height_5, width_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (FilterDst_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_6)));
}
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_159_parallel_region_1(hmpprt::u08* DownsampleDst, hmpprt::u08* FilterDst_1, hmpprt::s32 halfHeight, hmpprt::s32 halfWidth, hmpprt::s32 width_7)
{
# 156 "laplacian.cpp"
{
# 162 "laplacian.cpp"
hmpprt::s32 iter_per_gang_7;
# 162 "laplacian.cpp"
hmpprt::s32 first_gang_iter_7;
# 162 "laplacian.cpp"
hmpprt::s32 last_gang_iter_7;
# 162 "laplacian.cpp"
iter_per_gang_7 = ((1 + (halfHeight * halfWidth - 1) / 192) > 256 ? (1 + (halfHeight * halfWidth - 1) / 192) : 256);
# 162 "laplacian.cpp"
first_gang_iter_7 = (hmpprt::gr_gbidx()) * iter_per_gang_7;
# 162 "laplacian.cpp"
last_gang_iter_7 = ((first_gang_iter_7 + iter_per_gang_7 - 1) < (halfHeight * halfWidth - 1) ? (first_gang_iter_7 + iter_per_gang_7 - 1) : (halfHeight * halfWidth - 1));
# 162 "laplacian.cpp"
hmpprt::s32 y_7;
# 162 "laplacian.cpp"
# 166 "laplacian.cpp"
for (y_7 = first_gang_iter_7 + (hmpprt::gr_btidy()) ; y_7 <= last_gang_iter_7 ; y_7 = y_7 + (hmpprt::gr_btnumy()))
{
# 162 "laplacian.cpp"
hmpprt::s32 y_3;
# 165 "laplacian.cpp"
hmpprt::s32 x_5;
# 167 "laplacian.cpp"
x_5 = y_7 % halfWidth;
# 167 "laplacian.cpp"
y_3 = y_7 / halfWidth;
# 167 "laplacian.cpp"
*(DownsampleDst + (y_3 * halfWidth + x_5)) = *(FilterDst_1 + ((y_3 << 1) * width_7 + (x_5 << 1)));
}
# 154 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_159_internal_1(hmpprt::s32 width_25, hmpprt::s32 halfWidth_8, hmpprt::s32 halfHeight_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_8)
{
# 154 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_8, 8, "DownsampleDst");
__hmppcg_call.addLocalParameter(&FilterDst_5, 8, "FilterDst_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_7), "halfHeight");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_8), "halfWidth");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_25), "width_7");
__hmppcg_call.launch(hmpp_acc_region_main_159_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 154 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_159(hmpprt::s32 width_16, hmpprt::s32 halfWidth_4, hmpprt::s32 halfHeight_3, hmpprt::u08* FilterDst_3, hmpprt::u08* DownsampleDst_4)
{
# 175 "laplacian.cpp"
(hmpp_acc_region_main_159_internal_1(width_16, halfWidth_4, halfHeight_3, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (FilterDst_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_4)));
}
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_188_parallel_region_1(hmpprt::u08* DownsampleDst_3, hmpprt::u08* UpsampleDst, hmpprt::s32 halfHeight_5, hmpprt::s32 halfWidth_3, hmpprt::s32 height_4, hmpprt::s32 width_6)
{
# 177 "laplacian.cpp"
{
# 191 "laplacian.cpp"
hmpprt::s32 iter_per_gang_6;
# 191 "laplacian.cpp"
hmpprt::s32 first_gang_iter_6;
# 191 "laplacian.cpp"
hmpprt::s32 last_gang_iter_6;
# 191 "laplacian.cpp"
iter_per_gang_6 = ((1 + (halfWidth_3 - 2) / 192) > 256 ? (1 + (halfWidth_3 - 2) / 192) : 256);
# 191 "laplacian.cpp"
first_gang_iter_6 = (hmpprt::gr_gbidx()) * iter_per_gang_6;
# 191 "laplacian.cpp"
last_gang_iter_6 = ((first_gang_iter_6 + iter_per_gang_6 - 1) < (halfWidth_3 - 2) ? (first_gang_iter_6 + iter_per_gang_6 - 1) : (halfWidth_3 - 2));
# 191 "laplacian.cpp"
hmpprt::s32 x_6;
# 191 "laplacian.cpp"
# 192 "laplacian.cpp"
for (x_6 = first_gang_iter_6 + (hmpprt::gr_btidy()) ; x_6 <= last_gang_iter_6 ; x_6 = x_6 + (hmpprt::gr_btnumy()))
{
# 193 "laplacian.cpp"
*(UpsampleDst + ((height_4 - 1) * width_6 + 2 * (x_6 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1)))) + 1 >> 1);
# 194 "laplacian.cpp"
*(UpsampleDst + ((height_4 - 1) * width_6 + 2 * (x_6 + 1))) = *(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1)));
}
# 175 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_188_internal_1(hmpprt::s32 height_16, hmpprt::s32 width_24, hmpprt::s32 halfWidth_11, hmpprt::s32 halfHeight_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_8)
{
# 175 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_7, 8, "DownsampleDst_3");
__hmppcg_call.addLocalParameter(&UpsampleDst_8, 8, "UpsampleDst");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_6), "halfHeight_5");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_11), "halfWidth_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_16), "height_4");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_24), "width_6");
__hmppcg_call.launch(hmpp_acc_region_main_188_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 175 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_188(hmpprt::s32 height_11, hmpprt::s32 width_15, hmpprt::s32 halfWidth_7, hmpprt::s32 halfHeight_2, hmpprt::u08* DownsampleDst_11, hmpprt::u08* UpsampleDst_4)
{
# 201 "laplacian.cpp"
(hmpp_acc_region_main_188_internal_1(height_11, width_15, halfWidth_7, halfHeight_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_11), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_4)));
}
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_202_parallel_region_1(hmpprt::u08* DownsampleDst_2, hmpprt::u08* UpsampleDst_3, hmpprt::s32 halfHeight_1, hmpprt::s32 halfWidth_2, hmpprt::s32 width_5)
{
# 203 "laplacian.cpp"
{
# 205 "laplacian.cpp"
hmpprt::s32 iter_per_gang_5;
# 205 "laplacian.cpp"
hmpprt::s32 first_gang_iter_5;
# 205 "laplacian.cpp"
hmpprt::s32 last_gang_iter_5;
# 205 "laplacian.cpp"
iter_per_gang_5 = 1 + - (1 - (halfHeight_1 - 1)) / 192;
# 205 "laplacian.cpp"
first_gang_iter_5 = (hmpprt::gr_gbidx()) * iter_per_gang_5;
# 205 "laplacian.cpp"
last_gang_iter_5 = ((first_gang_iter_5 + iter_per_gang_5 - 1) < ( - (1 - (halfHeight_1 - 1))) ? (first_gang_iter_5 + iter_per_gang_5 - 1) : ( - (1 - (halfHeight_1 - 1))));
# 205 "laplacian.cpp"
hmpprt::s32 y_4;
# 205 "laplacian.cpp"
# 206 "laplacian.cpp"
for (y_4 = first_gang_iter_5 ; y_4 <= last_gang_iter_5 ; y_4 = y_4 + 1)
{
# 209 "laplacian.cpp"
hmpprt::u08 tmp_18;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_19;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_20;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_21;
# 209 "laplacian.cpp"
hmpprt::s32 end_6;
# 209 "laplacian.cpp"
hmpprt::s32 x_7;
# 209 "laplacian.cpp"
# 210 "laplacian.cpp"
# 210 "laplacian.cpp"
for (x_7 = (hmpprt::gr_btidy()), end_6 = halfWidth_2 - 2 ; x_7 <= end_6 ; x_7 = x_7 + (hmpprt::gr_btnumy()))
{
# 211 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + 2 * (x_7 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1)))) + 2 >> 2);
# 212 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + 2 * (x_7 + 1))) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1)))) + 1 >> 1);
# 214 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + 2 * (x_7 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + 1 >> 1);
# 215 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + 2 * (x_7 + 1))) = *(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)));
}
# 201 "laplacian.cpp"
# 201 "laplacian.cpp"
tmp_18 = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + halfWidth_2 - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + halfWidth_2 - 1))) + 1 >> 1);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + width_5 - 1)) = tmp_18;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_19 = *(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + halfWidth_2 - 1));
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + width_5 - 1)) = tmp_19;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_20 = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + (halfHeight_1 - 1 - y_4) * halfWidth_2)) + (hmpprt::s32 ) (*(DownsampleDst_2 + (halfHeight_1 - 1 - y_4 - 1) * halfWidth_2)) + 1 >> 1);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) - 1) * width_5) = tmp_20;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_21 = *(DownsampleDst_2 + (halfHeight_1 - 1 - y_4) * halfWidth_2);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + 2 * (halfHeight_1 - 1 - y_4) * width_5) = tmp_21;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
}
# 201 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_202_internal_1(hmpprt::s32 width_23, hmpprt::s32 halfWidth_6, hmpprt::s32 halfHeight_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_7)
{
# 201 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_6, 8, "DownsampleDst_2");
__hmppcg_call.addLocalParameter(&UpsampleDst_7, 8, "UpsampleDst_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_4), "halfHeight_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_6), "halfWidth_2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_23), "width_5");
__hmppcg_call.launch(hmpp_acc_region_main_202_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 201 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_202(hmpprt::s32 width_14, hmpprt::s32 halfWidth_10, hmpprt::s32 halfHeight_8, hmpprt::u08* DownsampleDst_10, hmpprt::u08* UpsampleDst_11)
{
# 229 "laplacian.cpp"
(hmpp_acc_region_main_202_internal_1(width_14, halfWidth_10, halfHeight_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_10), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_11)));
}
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_230_parallel_region_1(hmpprt::u08* DownsampleDst_1, hmpprt::u08* UpsampleDst_2, hmpprt::s32 halfWidth_1)
{
# 231 "laplacian.cpp"
{
# 233 "laplacian.cpp"
hmpprt::s32 iter_per_gang_4;
# 233 "laplacian.cpp"
hmpprt::s32 first_gang_iter_4;
# 233 "laplacian.cpp"
hmpprt::s32 last_gang_iter_4;
# 233 "laplacian.cpp"
iter_per_gang_4 = ((1 + - (1 - (halfWidth_1 - 1)) / 192) > 256 ? (1 + - (1 - (halfWidth_1 - 1)) / 192) : 256);
# 233 "laplacian.cpp"
first_gang_iter_4 = (hmpprt::gr_gbidx()) * iter_per_gang_4;
# 233 "laplacian.cpp"
last_gang_iter_4 = ((first_gang_iter_4 + iter_per_gang_4 - 1) < ( - (1 - (halfWidth_1 - 1))) ? (first_gang_iter_4 + iter_per_gang_4 - 1) : ( - (1 - (halfWidth_1 - 1))));
# 233 "laplacian.cpp"
hmpprt::s32 x_8;
# 233 "laplacian.cpp"
# 234 "laplacian.cpp"
for (x_8 = first_gang_iter_4 + (hmpprt::gr_btidy()) ; x_8 <= last_gang_iter_4 ; x_8 = x_8 + (hmpprt::gr_btnumy()))
{
# 235 "laplacian.cpp"
*(UpsampleDst_2 + 2 * (halfWidth_1 - 1 - x_8)) = *(DownsampleDst_1 + (halfWidth_1 - 1 - x_8));
# 236 "laplacian.cpp"
*(UpsampleDst_2 + (2 * (halfWidth_1 - 1 - x_8) - 1)) = (hmpprt::u08 ) (((hmpprt::s32 ) (*(DownsampleDst_1 + (halfWidth_1 - 1 - x_8 - 1))) + (hmpprt::s32 ) (*(DownsampleDst_1 + (halfWidth_1 - 1 - x_8))) + 1) / 2);
}
# 229 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_230_internal_1(hmpprt::s32 width_27, hmpprt::s32 halfWidth_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_10)
{
# 229 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_9, 8, "DownsampleDst_1");
__hmppcg_call.addLocalParameter(&UpsampleDst_10, 8, "UpsampleDst_2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_9), "halfWidth_1");
__hmppcg_call.launch(hmpp_acc_region_main_230_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 229 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_230(hmpprt::s32 width_4, hmpprt::s32 halfWidth_5, hmpprt::u08* DownsampleDst_5, hmpprt::u08* UpsampleDst_6)
{
# 243 "laplacian.cpp"
(hmpp_acc_region_main_230_internal_1(width_4, halfWidth_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_5), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_6)));
}
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_247_parallel_region_1(hmpprt::s16* LaplacianLayer, hmpprt::u08* Source, hmpprt::u08* UpsampleDst_1, hmpprt::s32 height_3, hmpprt::s32 width_3)
{
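 // Laplacian residual: LaplacianLayer[y][x] = Source[y][x] - UpsampleDst[y][x], stored as signed 16-bit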
# 245 "laplacian.cpp"
{
# 250 "laplacian.cpp"
hmpprt::s32 iter_per_gang_3;
# 250 "laplacian.cpp"
hmpprt::s32 first_gang_iter_3;
# 250 "laplacian.cpp"
hmpprt::s32 last_gang_iter_3;
# 250 "laplacian.cpp"
iter_per_gang_3 = ((1 + (height_3 * width_3 - 1) / 192) > 256 ? (1 + (height_3 * width_3 - 1) / 192) : 256);
# 250 "laplacian.cpp"
first_gang_iter_3 = (hmpprt::gr_gbidx()) * iter_per_gang_3;
# 250 "laplacian.cpp"
last_gang_iter_3 = ((first_gang_iter_3 + iter_per_gang_3 - 1) < (height_3 * width_3 - 1) ? (first_gang_iter_3 + iter_per_gang_3 - 1) : (height_3 * width_3 - 1));
# 250 "laplacian.cpp"
hmpprt::s32 y_6;
# 250 "laplacian.cpp"
# 254 "laplacian.cpp"
for (y_6 = first_gang_iter_3 + (hmpprt::gr_btidy()) ; y_6 <= last_gang_iter_3 ; y_6 = y_6 + (hmpprt::gr_btnumy()))
{
# 250 "laplacian.cpp"
hmpprt::s32 y_5;
# 253 "laplacian.cpp"
hmpprt::s32 x_9;
# 255 "laplacian.cpp"
x_9 = y_6 % width_3;
# 255 "laplacian.cpp"
y_5 = y_6 / width_3;
# 255 "laplacian.cpp"
*(LaplacianLayer + (y_5 * width_3 + x_9)) = (hmpprt::s16 ) ((hmpprt::s32 ) ((hmpprt::s16 ) (*(Source + (y_5 * width_3 + x_9)))) - (hmpprt::s32 ) ((hmpprt::s16 ) (*(UpsampleDst_1 + (y_5 * width_3 + x_9)))));
}
# 243 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_247_internal_1(hmpprt::s32 height_15, hmpprt::s32 width_22, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> LaplacianLayer_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_9)
{
# 243 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&LaplacianLayer_2, 8, "LaplacianLayer");
__hmppcg_call.addLocalParameter(&Source_4, 8, "Source");
__hmppcg_call.addLocalParameter(&UpsampleDst_9, 8, "UpsampleDst_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_15), "height_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_22), "width_3");
__hmppcg_call.launch(hmpp_acc_region_main_247_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 243 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_247(hmpprt::s32 height_10, hmpprt::s32 width_13, hmpprt::u08* Source_2, hmpprt::s16* LaplacianLayer_1, hmpprt::u08* UpsampleDst_5)
{
# 12 "laplacian.cpp"
(hmpp_acc_region_main_247_internal_1(height_10, width_13, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (Source_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> (LaplacianLayer_1), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_5)));
}
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_55_parallel_region_1(hmpprt::u08* Source_1, hmpprt::s32 height_2, hmpprt::s32* pSrc_padding, hmpprt::s32 width_2)
{
# 14 "laplacian.cpp"
{
# 58 "laplacian.cpp"
hmpprt::s32 iter_per_gang_2;
# 58 "laplacian.cpp"
hmpprt::s32 first_gang_iter_2;
# 58 "laplacian.cpp"
hmpprt::s32 last_gang_iter_2;
# 58 "laplacian.cpp"
iter_per_gang_2 = 1 + (height_2 - 1) / 192;
# 58 "laplacian.cpp"
first_gang_iter_2 = (hmpprt::gr_gbidx()) * iter_per_gang_2;
# 58 "laplacian.cpp"
last_gang_iter_2 = ((first_gang_iter_2 + iter_per_gang_2 - 1) < (height_2 - 1) ? (first_gang_iter_2 + iter_per_gang_2 - 1) : (height_2 - 1));
# 58 "laplacian.cpp"
hmpprt::s32 h_6;
# 58 "laplacian.cpp"
# 59 "laplacian.cpp"
for (h_6 = first_gang_iter_2 ; h_6 <= last_gang_iter_2 ; h_6 = h_6 + 1)
{
# 67 "laplacian.cpp"
hmpprt::s32 tmp_22;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_23;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_24;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_25;
# 67 "laplacian.cpp"
tmp_22 = 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 1))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 3)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + 1)) = tmp_22;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_23 = 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 3))) - 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 1))) + 2 * (hmpprt::s32 ) (*(Source_1 + h_6 * width_2)) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 2)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + (h_6 + 2) * (width_2 + 4)) = tmp_23;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_24 = 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 2))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 4)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + width_2 + 2)) = tmp_24;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_25 = 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 4))) - 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 2))) + 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 1))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 3)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + width_2 + 3)) = tmp_25;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
hmpprt::s32 end_10;
# 67 "laplacian.cpp"
hmpprt::s32 w_8;
# 67 "laplacian.cpp"
# 68 "laplacian.cpp"
# 68 "laplacian.cpp"
for (w_8 = (hmpprt::gr_btidy()), end_10 = width_2 - 1 ; w_8 <= end_10 ; w_8 = w_8 + (hmpprt::gr_btnumy()))
{
# 68 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + w_8 + 2)) = (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + w_8)));
}
# 12 "laplacian.cpp"
}
# 12 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_55_internal_1(hmpprt::s32 height_20, hmpprt::s32 width_21, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_4)
{
# 12 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&Source_5, 8, "Source_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_20), "height_2");
__hmppcg_call.addLocalParameter(&pSrc_padding_4, 8, "pSrc_padding");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_21), "width_2");
__hmppcg_call.launch(hmpp_acc_region_main_55_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 12 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_55(hmpprt::s32 height_9, hmpprt::s32 width_12, hmpprt::u08* Source_3, hmpprt::s32* pSrc_padding_2)
{
# 75 "laplacian.cpp"
(hmpp_acc_region_main_55_internal_1(height_9, width_12, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (Source_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding_2)));
}
#endif // __HIPCC__
# 75 "laplacian.cpp"
#ifdef __HIPCC__
extern "C" __global__ void hmpp_acc_region_main_79_parallel_region_1(hmpprt::s32 height_1, hmpprt::u08* pBufL_cp_1, hmpprt::s32* pSrc_padding_1, hmpprt::s32 width_1)
{
# 77 "laplacian.cpp"
{
# 82 "laplacian.cpp"
hmpprt::s32 iter_per_gang_1;
# 82 "laplacian.cpp"
hmpprt::s32 first_gang_iter_1;
# 82 "laplacian.cpp"
hmpprt::s32 last_gang_iter_1;
# 82 "laplacian.cpp"
iter_per_gang_1 = ((1 + (height_1 * width_1 - 1) / 192) > 256 ? (1 + (height_1 * width_1 - 1) / 192) : 256);
# 82 "laplacian.cpp"
first_gang_iter_1 = (hmpprt::gr_gbidx()) * iter_per_gang_1;
# 82 "laplacian.cpp"
last_gang_iter_1 = ((first_gang_iter_1 + iter_per_gang_1 - 1) < (height_1 * width_1 - 1) ? (first_gang_iter_1 + iter_per_gang_1 - 1) : (height_1 * width_1 - 1));
# 82 "laplacian.cpp"
hmpprt::s32 h_8;
# 82 "laplacian.cpp"
# 85 "laplacian.cpp"
for (h_8 = first_gang_iter_1 + (hmpprt::gr_btidy()) ; h_8 <= last_gang_iter_1 ; h_8 = h_8 + (hmpprt::gr_btnumy()))
{
# 82 "laplacian.cpp"
hmpprt::s32 h_7;
# 84 "laplacian.cpp"
hmpprt::s32 w_9;
# 86 "laplacian.cpp"
w_9 = h_8 % width_1;
# 86 "laplacian.cpp"
h_7 = h_8 / width_1;
# 86 "laplacian.cpp"
*(pBufL_cp_1 + (h_7 * width_1 + w_9)) = (hmpprt::u08 ) (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + w_9)) + (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + w_9 + 1)) << 2) + 6 * *(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 2))) + (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 3))) << 2) + *(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 4))) + 8 >> 4);
}
# 75 "laplacian.cpp"
}
}
#endif // __HIPCC__
# 75 "laplacian.cpp"
#ifndef __HIPCC__
void hmpp_acc_region_main_79_internal_1(hmpprt::s32 height_19, hmpprt::s32 width_20, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_8)
{
# 75 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_19), "height_1");
__hmppcg_call.addLocalParameter(&pBufL_cp_8, 8, "pBufL_cp_1");
__hmppcg_call.addLocalParameter(&pSrc_padding_5, 8, "pSrc_padding_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_20), "width_1");
__hmppcg_call.launch(hmpp_acc_region_main_79_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __HIPCC__
# 75 "laplacian.cpp"
#ifndef __HIPCC__
extern "C" CDLT_API void hmpp_acc_region_main_79(hmpprt::s32 height_8, hmpprt::s32 width_11, hmpprt::s32* pSrc_padding_3, hmpprt::u08* pBufL_cp_4)
{
# 1 "<preprocessor>"
(hmpp_acc_region_main_79_internal_1(height_8, width_11, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_4)));
}
#endif // __HIPCC__
#ifndef __HIPCC__
extern "C" const char * hmpprt_cuda_get_gpu_code();
static hmpprt::CUDAModule * hmpprt_module = 0;
static int hmpprt_uses = 0;
extern "C" CDLT_API void * hmpprt_init()
{
try
{
if (hmpprt_uses++ == 0)
{
hmpprt_module = new hmpprt::CUDAModule(hmpprt_cuda_get_gpu_code());
hmpp_acc_region_main_100_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_100_parallel_region_1");
hmpp_acc_region_main_114_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_114_parallel_region_1");
hmpp_acc_region_main_136_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_136_parallel_region_1");
hmpp_acc_region_main_159_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_159_parallel_region_1");
hmpp_acc_region_main_188_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_188_parallel_region_1");
hmpp_acc_region_main_202_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_202_parallel_region_1");
hmpp_acc_region_main_230_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_230_parallel_region_1");
hmpp_acc_region_main_247_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_247_parallel_region_1");
hmpp_acc_region_main_55_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_55_parallel_region_1");
hmpp_acc_region_main_79_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_79_parallel_region_1");
}
hmpprt::Context::getInstance()->getGrouplet()->setTarget(hmpprt::CUDA);
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_100", "prototype hmpp_acc_region_main_100(height: s32, width: s32, pSrc_padding2: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_114", "prototype hmpp_acc_region_main_114(height: s32, width: s32, pSrc_padding2: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_136", "prototype hmpp_acc_region_main_136(height: s32, width: s32, FilterDst: ^cudaglob u8, pSrc_padding2: ^cudaglob s32)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_159", "prototype hmpp_acc_region_main_159(width: s32, halfWidth: s32, halfHeight: s32, FilterDst: ^cudaglob u8, DownsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_188", "prototype hmpp_acc_region_main_188(height: s32, width: s32, halfWidth: s32, halfHeight: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_202", "prototype hmpp_acc_region_main_202(width: s32, halfWidth: s32, halfHeight: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_230", "prototype hmpp_acc_region_main_230(width: s32, halfWidth: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_247", "prototype hmpp_acc_region_main_247(height: s32, width: s32, Source: ^cudaglob u8, LaplacianLayer: ^cudaglob s16, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_55", "prototype hmpp_acc_region_main_55(height: s32, width: s32, Source: ^cudaglob u8, pSrc_padding: ^cudaglob s32)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_79", "prototype hmpp_acc_region_main_79(height: s32, width: s32, pSrc_padding: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_init()\n");
abort();
}
return 0;
}
#endif // __HIPCC__
#ifndef __HIPCC__
extern "C" CDLT_API void * hmpprt_fini()
{
try
{
if (--hmpprt_uses == 0)
{
delete hmpp_acc_region_main_100_parallel_region_1;
delete hmpp_acc_region_main_114_parallel_region_1;
delete hmpp_acc_region_main_136_parallel_region_1;
delete hmpp_acc_region_main_159_parallel_region_1;
delete hmpp_acc_region_main_188_parallel_region_1;
delete hmpp_acc_region_main_202_parallel_region_1;
delete hmpp_acc_region_main_230_parallel_region_1;
delete hmpp_acc_region_main_247_parallel_region_1;
delete hmpp_acc_region_main_55_parallel_region_1;
delete hmpp_acc_region_main_79_parallel_region_1;
delete hmpprt_module;
hmpprt_module = 0;
}
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_fini()\n");
abort();
}
return 0;
}
#endif // __HIPCC__
// footer
| 07573c60ca17063ab7158d13d1987848afa057e7.cu |
#include <stdio.h>
#ifndef __CUDACC__
#include <stdlib.h>
#include <math.h>
#include <hmpprt/Grouplet.h>
#include <hmpprt/HostTypes.h>
#include <hmpprt/Context.h>
#include <hmpprt/CUDAGrid.h>
#include <hmpprt/CUDAModule.h>
#include <hmpprt/DeviceManager.h>
#include <hmpperr/hmpperr.h>
#include <openacci/openacci_c.h>
#ifdef _WIN32
# define CDLT_API __declspec(dllexport)
#else /* ! _WIN32 */
# define CDLT_API
#endif /* _WIN32 */
#else // ! __CUDACC__
#include <hmpprt/HostTypes.h>
#include <hmpprt/CUDAIntrinsics.h>
extern __shared__ int64_t hmpp_sharedmem[];
#endif // __CUDACC__
#ifndef __CUDACC__
#else
#endif
#define HMPPCG_SIMD_LENGTH 32
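// HMPP-generated codelets for one Laplacian-pyramid level: pad the source with extrapolated
// borders, smooth it with a separable 5-tap [1 4 6 4 1]/16 filter (horizontal then vertical),
// downsample by 2, upsample back to full size, and store the residual Source - UpsampleDst
// as the signed 16-bit Laplacian layer.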
# 75 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_79(hmpprt::s32 height_8, hmpprt::s32 width_11, hmpprt::s32* pSrc_padding_3, hmpprt::u08* pBufL_cp_4)
;
#endif // __CUDACC__
# 75 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_79_internal_1(hmpprt::s32 height_19, hmpprt::s32 width_20, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_8)
;
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_79_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_79_parallel_region_1(hmpprt::s32 height_1, hmpprt::u08* pBufL_cp_1, hmpprt::s32* pSrc_padding_1, hmpprt::s32 width_1);
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_55(hmpprt::s32 height_9, hmpprt::s32 width_12, hmpprt::u08* Source_3, hmpprt::s32* pSrc_padding_2)
;
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_55_internal_1(hmpprt::s32 height_20, hmpprt::s32 width_21, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_4)
;
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_55_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_55_parallel_region_1(hmpprt::u08* Source_1, hmpprt::s32 height_2, hmpprt::s32* pSrc_padding, hmpprt::s32 width_2);
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_247(hmpprt::s32 height_10, hmpprt::s32 width_13, hmpprt::u08* Source_2, hmpprt::s16* LaplacianLayer_1, hmpprt::u08* UpsampleDst_5)
;
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_247_internal_1(hmpprt::s32 height_15, hmpprt::s32 width_22, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> LaplacianLayer_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_9)
;
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_247_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_247_parallel_region_1(hmpprt::s16* LaplacianLayer, hmpprt::u08* Source, hmpprt::u08* UpsampleDst_1, hmpprt::s32 height_3, hmpprt::s32 width_3);
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_230(hmpprt::s32 width_4, hmpprt::s32 halfWidth_5, hmpprt::u08* DownsampleDst_5, hmpprt::u08* UpsampleDst_6)
;
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_230_internal_1(hmpprt::s32 width_27, hmpprt::s32 halfWidth_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_10)
;
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_230_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_230_parallel_region_1(hmpprt::u08* DownsampleDst_1, hmpprt::u08* UpsampleDst_2, hmpprt::s32 halfWidth_1);
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_202(hmpprt::s32 width_14, hmpprt::s32 halfWidth_10, hmpprt::s32 halfHeight_8, hmpprt::u08* DownsampleDst_10, hmpprt::u08* UpsampleDst_11)
;
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_202_internal_1(hmpprt::s32 width_23, hmpprt::s32 halfWidth_6, hmpprt::s32 halfHeight_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_7)
;
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_202_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_202_parallel_region_1(hmpprt::u08* DownsampleDst_2, hmpprt::u08* UpsampleDst_3, hmpprt::s32 halfHeight_1, hmpprt::s32 halfWidth_2, hmpprt::s32 width_5);
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_188(hmpprt::s32 height_11, hmpprt::s32 width_15, hmpprt::s32 halfWidth_7, hmpprt::s32 halfHeight_2, hmpprt::u08* DownsampleDst_11, hmpprt::u08* UpsampleDst_4)
;
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_188_internal_1(hmpprt::s32 height_16, hmpprt::s32 width_24, hmpprt::s32 halfWidth_11, hmpprt::s32 halfHeight_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_8)
;
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_188_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_188_parallel_region_1(hmpprt::u08* DownsampleDst_3, hmpprt::u08* UpsampleDst, hmpprt::s32 halfHeight_5, hmpprt::s32 halfWidth_3, hmpprt::s32 height_4, hmpprt::s32 width_6);
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_159(hmpprt::s32 width_16, hmpprt::s32 halfWidth_4, hmpprt::s32 halfHeight_3, hmpprt::u08* FilterDst_3, hmpprt::u08* DownsampleDst_4)
;
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_159_internal_1(hmpprt::s32 width_25, hmpprt::s32 halfWidth_8, hmpprt::s32 halfHeight_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_8)
;
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_159_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_159_parallel_region_1(hmpprt::u08* DownsampleDst, hmpprt::u08* FilterDst_1, hmpprt::s32 halfHeight, hmpprt::s32 halfWidth, hmpprt::s32 width_7);
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_136(hmpprt::s32 height_5, hmpprt::s32 width_8, hmpprt::u08* FilterDst_2, hmpprt::s32* pSrc_padding2_6)
;
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_136_internal_1(hmpprt::s32 height_18, hmpprt::s32 width_28, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_1)
;
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_136_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_136_parallel_region_1(hmpprt::u08* FilterDst, hmpprt::s32 height_12, hmpprt::s32* pSrc_padding2_4, hmpprt::s32 width_17);
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_114(hmpprt::s32 height_17, hmpprt::s32 width_18, hmpprt::s32* pSrc_padding2_7, hmpprt::u08* pBufL_cp_2)
;
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_114_internal_1(hmpprt::s32 height_13, hmpprt::s32 width_26, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_6)
;
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_114_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_114_parallel_region_1(hmpprt::s32 height_6, hmpprt::u08* pBufL_cp_5, hmpprt::s32* pSrc_padding2_5, hmpprt::s32 width_9);
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_100(hmpprt::s32 height_14, hmpprt::s32 width_10, hmpprt::s32* pSrc_padding2_3, hmpprt::u08* pBufL_cp_7)
;
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_100_internal_1(hmpprt::s32 height, hmpprt::s32 width_19, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_3)
;
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
static hmpprt::CUDAGrid * hmpp_acc_region_main_100_parallel_region_1 = 0;
#else
extern "C" __global__ void hmpp_acc_region_main_100_parallel_region_1(hmpprt::s32 height_7, hmpprt::u08* pBufL_cp, hmpprt::s32* pSrc_padding2, hmpprt::s32 width);
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_100_parallel_region_1(hmpprt::s32 height_7, hmpprt::u08* pBufL_cp, hmpprt::s32* pSrc_padding2, hmpprt::s32 width)
{
# 99 "laplacian.cpp"
{
# 103 "laplacian.cpp"
hmpprt::s32 iter_per_gang_10;
# 103 "laplacian.cpp"
hmpprt::s32 first_gang_iter_10;
# 103 "laplacian.cpp"
hmpprt::s32 last_gang_iter_10;
# 103 "laplacian.cpp"
iter_per_gang_10 = ((1 + (height_7 * width - 1) / 192) > 256 ? (1 + (height_7 * width - 1) / 192) : 256);
# 103 "laplacian.cpp"
first_gang_iter_10 = (hmpprt::gr_gbidx()) * iter_per_gang_10;
# 103 "laplacian.cpp"
last_gang_iter_10 = ((first_gang_iter_10 + iter_per_gang_10 - 1) < (height_7 * width - 1) ? (first_gang_iter_10 + iter_per_gang_10 - 1) : (height_7 * width - 1));
# 103 "laplacian.cpp"
hmpprt::s32 h_9;
# 103 "laplacian.cpp"
# 107 "laplacian.cpp"
for (h_9 = first_gang_iter_10 + (hmpprt::gr_btidy()) ; h_9 <= last_gang_iter_10 ; h_9 = h_9 + (hmpprt::gr_btnumy()))
{
# 103 "laplacian.cpp"
hmpprt::s32 h_4;
# 106 "laplacian.cpp"
hmpprt::s32 w_5;
# 107 "laplacian.cpp"
w_5 = h_9 % width;
# 107 "laplacian.cpp"
h_4 = h_9 / width;
# 107 "laplacian.cpp"
*(pSrc_padding2 + ((h_4 + 2) * (width + 4) + w_5 + 2)) = (hmpprt::s32 ) (*(pBufL_cp + (h_4 * width + w_5)));
}
# 97 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_100_internal_1(hmpprt::s32 height, hmpprt::s32 width_19, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_3)
{
# 97 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height), "height_7");
__hmppcg_call.addLocalParameter(&pBufL_cp_3, 8, "pBufL_cp");
__hmppcg_call.addLocalParameter(&pSrc_padding2_8, 8, "pSrc_padding2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_19), "width");
__hmppcg_call.launch(hmpp_acc_region_main_100_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 97 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_100(hmpprt::s32 height_14, hmpprt::s32 width_10, hmpprt::s32* pSrc_padding2_3, hmpprt::u08* pBufL_cp_7)
{
# 114 "laplacian.cpp"
(hmpp_acc_region_main_100_internal_1(height_14, width_10, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_7)));
}
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_114_parallel_region_1(hmpprt::s32 height_6, hmpprt::u08* pBufL_cp_5, hmpprt::s32* pSrc_padding2_5, hmpprt::s32 width_9)
{
# 116 "laplacian.cpp"
{
# 117 "laplacian.cpp"
hmpprt::s32 iter_per_gang_9;
# 117 "laplacian.cpp"
hmpprt::s32 first_gang_iter_9;
# 117 "laplacian.cpp"
hmpprt::s32 last_gang_iter_9;
# 117 "laplacian.cpp"
iter_per_gang_9 = ((1 + (width_9 - 1) / 192) > 256 ? (1 + (width_9 - 1) / 192) : 256);
# 117 "laplacian.cpp"
first_gang_iter_9 = (hmpprt::gr_gbidx()) * iter_per_gang_9;
# 117 "laplacian.cpp"
last_gang_iter_9 = ((first_gang_iter_9 + iter_per_gang_9 - 1) < (width_9 - 1) ? (first_gang_iter_9 + iter_per_gang_9 - 1) : (width_9 - 1));
# 117 "laplacian.cpp"
hmpprt::s32 w_6;
# 117 "laplacian.cpp"
# 118 "laplacian.cpp"
for (w_6 = first_gang_iter_9 + (hmpprt::gr_btidy()) ; w_6 <= last_gang_iter_9 ; w_6 = w_6 + (hmpprt::gr_btnumy()))
{
# 119 "laplacian.cpp"
*(pSrc_padding2_5 + (width_9 + 4 + (w_6 + 2))) = 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + (width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + (3 * width_9 + (w_6 + 2) - 2)));
# 120 "laplacian.cpp"
*(pSrc_padding2_5 + (w_6 + 2)) = 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + (3 * width_9 + (w_6 + 2) - 2))) - 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + (width_9 + (w_6 + 2) - 2))) + 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + w_6)) - (hmpprt::s32 ) (*(pBufL_cp_5 + (2 * width_9 + (w_6 + 2) - 2)));
# 123 "laplacian.cpp"
*(pSrc_padding2_5 + ((height_6 + 2) * (width_9 + 4) + (w_6 + 2))) = 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 2) * width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 4) * width_9 + (w_6 + 2) - 2)));
# 124 "laplacian.cpp"
*(pSrc_padding2_5 + ((height_6 + 3) * (width_9 + 4) + (w_6 + 2))) = 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 4) * width_9 + (w_6 + 2) - 2))) - 4 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 2) * width_9 + (w_6 + 2) - 2))) + 2 * (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 1) * width_9 + (w_6 + 2) - 2))) - (hmpprt::s32 ) (*(pBufL_cp_5 + ((height_6 - 3) * width_9 + (w_6 + 2) - 2)));
}
# 114 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_114_internal_1(hmpprt::s32 height_13, hmpprt::s32 width_26, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_6)
{
# 114 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_13), "height_6");
__hmppcg_call.addLocalParameter(&pBufL_cp_6, 8, "pBufL_cp_5");
__hmppcg_call.addLocalParameter(&pSrc_padding2_2, 8, "pSrc_padding2_5");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_26), "width_9");
__hmppcg_call.launch(hmpp_acc_region_main_114_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 114 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_114(hmpprt::s32 height_17, hmpprt::s32 width_18, hmpprt::s32* pSrc_padding2_7, hmpprt::u08* pBufL_cp_2)
{
# 132 "laplacian.cpp"
(hmpp_acc_region_main_114_internal_1(height_17, width_18, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_7), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_2)));
}
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_136_parallel_region_1(hmpprt::u08* FilterDst, hmpprt::s32 height_12, hmpprt::s32* pSrc_padding2_4, hmpprt::s32 width_17)
{
# 134 "laplacian.cpp"
{
# 139 "laplacian.cpp"
hmpprt::s32 iter_per_gang_8;
# 139 "laplacian.cpp"
hmpprt::s32 first_gang_iter_8;
# 139 "laplacian.cpp"
hmpprt::s32 last_gang_iter_8;
# 139 "laplacian.cpp"
iter_per_gang_8 = ((1 + (width_17 * height_12 - 1) / 192) > 256 ? (1 + (width_17 * height_12 - 1) / 192) : 256);
# 139 "laplacian.cpp"
first_gang_iter_8 = (hmpprt::gr_gbidx()) * iter_per_gang_8;
# 139 "laplacian.cpp"
last_gang_iter_8 = ((first_gang_iter_8 + iter_per_gang_8 - 1) < (width_17 * height_12 - 1) ? (first_gang_iter_8 + iter_per_gang_8 - 1) : (width_17 * height_12 - 1));
# 139 "laplacian.cpp"
hmpprt::s32 w_10;
# 139 "laplacian.cpp"
# 142 "laplacian.cpp"
for (w_10 = first_gang_iter_8 + (hmpprt::gr_btidy()) ; w_10 <= last_gang_iter_8 ; w_10 = w_10 + (hmpprt::gr_btnumy()))
{
# 139 "laplacian.cpp"
hmpprt::s32 w_7;
# 141 "laplacian.cpp"
hmpprt::s32 h_5;
# 143 "laplacian.cpp"
h_5 = w_10 % height_12;
# 143 "laplacian.cpp"
w_7 = w_10 / height_12;
# 143 "laplacian.cpp"
*(FilterDst + (h_5 * width_17 + w_7)) = (hmpprt::u08 ) (*(pSrc_padding2_4 + (h_5 * (width_17 + 4) + (w_7 + 2))) + (*(pSrc_padding2_4 + ((h_5 + 1) * (width_17 + 4) + (w_7 + 2))) << 2) + 6 * *(pSrc_padding2_4 + ((h_5 + 2) * (width_17 + 4) + (w_7 + 2))) + (*(pSrc_padding2_4 + ((h_5 + 3) * (width_17 + 4) + (w_7 + 2))) << 2) + *(pSrc_padding2_4 + ((h_5 + 4) * (width_17 + 4) + (w_7 + 2))) + 8 >> 4);
}
# 132 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_136_internal_1(hmpprt::s32 height_18, hmpprt::s32 width_28, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding2_1)
{
# 132 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&FilterDst_4, 8, "FilterDst");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_18), "height_12");
__hmppcg_call.addLocalParameter(&pSrc_padding2_1, 8, "pSrc_padding2_4");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_28), "width_17");
__hmppcg_call.launch(hmpp_acc_region_main_136_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 132 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_136(hmpprt::s32 height_5, hmpprt::s32 width_8, hmpprt::u08* FilterDst_2, hmpprt::s32* pSrc_padding2_6)
{
# 154 "laplacian.cpp"
(hmpp_acc_region_main_136_internal_1(height_5, width_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (FilterDst_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding2_6)));
}
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_159_parallel_region_1(hmpprt::u08* DownsampleDst, hmpprt::u08* FilterDst_1, hmpprt::s32 halfHeight, hmpprt::s32 halfWidth, hmpprt::s32 width_7)
{
# 156 "laplacian.cpp"
{
# 162 "laplacian.cpp"
hmpprt::s32 iter_per_gang_7;
# 162 "laplacian.cpp"
hmpprt::s32 first_gang_iter_7;
# 162 "laplacian.cpp"
hmpprt::s32 last_gang_iter_7;
# 162 "laplacian.cpp"
iter_per_gang_7 = ((1 + (halfHeight * halfWidth - 1) / 192) > 256 ? (1 + (halfHeight * halfWidth - 1) / 192) : 256);
# 162 "laplacian.cpp"
first_gang_iter_7 = (hmpprt::gr_gbidx()) * iter_per_gang_7;
# 162 "laplacian.cpp"
last_gang_iter_7 = ((first_gang_iter_7 + iter_per_gang_7 - 1) < (halfHeight * halfWidth - 1) ? (first_gang_iter_7 + iter_per_gang_7 - 1) : (halfHeight * halfWidth - 1));
# 162 "laplacian.cpp"
hmpprt::s32 y_7;
# 162 "laplacian.cpp"
# 166 "laplacian.cpp"
for (y_7 = first_gang_iter_7 + (hmpprt::gr_btidy()) ; y_7 <= last_gang_iter_7 ; y_7 = y_7 + (hmpprt::gr_btnumy()))
{
# 162 "laplacian.cpp"
hmpprt::s32 y_3;
# 165 "laplacian.cpp"
hmpprt::s32 x_5;
# 167 "laplacian.cpp"
x_5 = y_7 % halfWidth;
# 167 "laplacian.cpp"
y_3 = y_7 / halfWidth;
# 167 "laplacian.cpp"
*(DownsampleDst + (y_3 * halfWidth + x_5)) = *(FilterDst_1 + ((y_3 << 1) * width_7 + (x_5 << 1)));
}
# 154 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_159_internal_1(hmpprt::s32 width_25, hmpprt::s32 halfWidth_8, hmpprt::s32 halfHeight_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> FilterDst_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_8)
{
# 154 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_8, 8, "DownsampleDst");
__hmppcg_call.addLocalParameter(&FilterDst_5, 8, "FilterDst_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_7), "halfHeight");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_8), "halfWidth");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_25), "width_7");
__hmppcg_call.launch(hmpp_acc_region_main_159_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 154 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_159(hmpprt::s32 width_16, hmpprt::s32 halfWidth_4, hmpprt::s32 halfHeight_3, hmpprt::u08* FilterDst_3, hmpprt::u08* DownsampleDst_4)
{
# 175 "laplacian.cpp"
(hmpp_acc_region_main_159_internal_1(width_16, halfWidth_4, halfHeight_3, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (FilterDst_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_4)));
}
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_188_parallel_region_1(hmpprt::u08* DownsampleDst_3, hmpprt::u08* UpsampleDst, hmpprt::s32 halfHeight_5, hmpprt::s32 halfWidth_3, hmpprt::s32 height_4, hmpprt::s32 width_6)
{
# 177 "laplacian.cpp"
{
# 191 "laplacian.cpp"
hmpprt::s32 iter_per_gang_6;
# 191 "laplacian.cpp"
hmpprt::s32 first_gang_iter_6;
# 191 "laplacian.cpp"
hmpprt::s32 last_gang_iter_6;
# 191 "laplacian.cpp"
iter_per_gang_6 = ((1 + (halfWidth_3 - 2) / 192) > 256 ? (1 + (halfWidth_3 - 2) / 192) : 256);
# 191 "laplacian.cpp"
first_gang_iter_6 = (hmpprt::gr_gbidx()) * iter_per_gang_6;
# 191 "laplacian.cpp"
last_gang_iter_6 = ((first_gang_iter_6 + iter_per_gang_6 - 1) < (halfWidth_3 - 2) ? (first_gang_iter_6 + iter_per_gang_6 - 1) : (halfWidth_3 - 2));
# 191 "laplacian.cpp"
hmpprt::s32 x_6;
# 191 "laplacian.cpp"
# 192 "laplacian.cpp"
for (x_6 = first_gang_iter_6 + (hmpprt::gr_btidy()) ; x_6 <= last_gang_iter_6 ; x_6 = x_6 + (hmpprt::gr_btnumy()))
{
# 193 "laplacian.cpp"
*(UpsampleDst + ((height_4 - 1) * width_6 + 2 * (x_6 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1)))) + 1 >> 1);
# 194 "laplacian.cpp"
*(UpsampleDst + ((height_4 - 1) * width_6 + 2 * (x_6 + 1))) = *(DownsampleDst_3 + ((halfHeight_5 - 1) * halfWidth_3 + (x_6 + 1)));
}
# 175 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_188_internal_1(hmpprt::s32 height_16, hmpprt::s32 width_24, hmpprt::s32 halfWidth_11, hmpprt::s32 halfHeight_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_7, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_8)
{
# 175 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_7, 8, "DownsampleDst_3");
__hmppcg_call.addLocalParameter(&UpsampleDst_8, 8, "UpsampleDst");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_6), "halfHeight_5");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_11), "halfWidth_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_16), "height_4");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_24), "width_6");
__hmppcg_call.launch(hmpp_acc_region_main_188_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 175 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_188(hmpprt::s32 height_11, hmpprt::s32 width_15, hmpprt::s32 halfWidth_7, hmpprt::s32 halfHeight_2, hmpprt::u08* DownsampleDst_11, hmpprt::u08* UpsampleDst_4)
{
# 201 "laplacian.cpp"
(hmpp_acc_region_main_188_internal_1(height_11, width_15, halfWidth_7, halfHeight_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_11), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_4)));
}
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_202_parallel_region_1(hmpprt::u08* DownsampleDst_2, hmpprt::u08* UpsampleDst_3, hmpprt::s32 halfHeight_1, hmpprt::s32 halfWidth_2, hmpprt::s32 width_5)
{
# 203 "laplacian.cpp"
{
# 205 "laplacian.cpp"
hmpprt::s32 iter_per_gang_5;
# 205 "laplacian.cpp"
hmpprt::s32 first_gang_iter_5;
# 205 "laplacian.cpp"
hmpprt::s32 last_gang_iter_5;
# 205 "laplacian.cpp"
iter_per_gang_5 = 1 + - (1 - (halfHeight_1 - 1)) / 192;
# 205 "laplacian.cpp"
first_gang_iter_5 = (hmpprt::gr_gbidx()) * iter_per_gang_5;
# 205 "laplacian.cpp"
last_gang_iter_5 = ((first_gang_iter_5 + iter_per_gang_5 - 1) < ( - (1 - (halfHeight_1 - 1))) ? (first_gang_iter_5 + iter_per_gang_5 - 1) : ( - (1 - (halfHeight_1 - 1))));
# 205 "laplacian.cpp"
hmpprt::s32 y_4;
# 205 "laplacian.cpp"
# 206 "laplacian.cpp"
for (y_4 = first_gang_iter_5 ; y_4 <= last_gang_iter_5 ; y_4 = y_4 + 1)
{
# 209 "laplacian.cpp"
hmpprt::u08 tmp_18;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_19;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_20;
# 209 "laplacian.cpp"
hmpprt::u08 tmp_21;
# 209 "laplacian.cpp"
hmpprt::s32 end_6;
# 209 "laplacian.cpp"
hmpprt::s32 x_7;
# 209 "laplacian.cpp"
# 210 "laplacian.cpp"
# 210 "laplacian.cpp"
for (x_7 = (hmpprt::gr_btidy()), end_6 = halfWidth_2 - 2 ; x_7 <= end_6 ; x_7 = x_7 + (hmpprt::gr_btnumy()))
{
# 211 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + 2 * (x_7 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1)))) + 2 >> 2);
# 212 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + 2 * (x_7 + 1))) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + (x_7 + 1)))) + 1 >> 1);
# 214 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + 2 * (x_7 + 1) - 1)) = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1) - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)))) + 1 >> 1);
# 215 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + 2 * (x_7 + 1))) = *(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + (x_7 + 1)));
}
# 201 "laplacian.cpp"
# 201 "laplacian.cpp"
tmp_18 = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + halfWidth_2 - 1))) + (hmpprt::s32 ) (*(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4 - 1) * halfWidth_2 + halfWidth_2 - 1))) + 1 >> 1);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + ((2 * (halfHeight_1 - 1 - y_4) - 1) * width_5 + width_5 - 1)) = tmp_18;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_19 = *(DownsampleDst_2 + ((halfHeight_1 - 1 - y_4) * halfWidth_2 + halfWidth_2 - 1));
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) * width_5 + width_5 - 1)) = tmp_19;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_20 = (hmpprt::u08 ) ((hmpprt::s32 ) (*(DownsampleDst_2 + (halfHeight_1 - 1 - y_4) * halfWidth_2)) + (hmpprt::s32 ) (*(DownsampleDst_2 + (halfHeight_1 - 1 - y_4 - 1) * halfWidth_2)) + 1 >> 1);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + (2 * (halfHeight_1 - 1 - y_4) - 1) * width_5) = tmp_20;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
# 201 "laplacian.cpp"
tmp_21 = *(DownsampleDst_2 + (halfHeight_1 - 1 - y_4) * halfWidth_2);
# 201 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 201 "laplacian.cpp"
*(UpsampleDst_3 + 2 * (halfHeight_1 - 1 - y_4) * width_5) = tmp_21;
}
# 201 "laplacian.cpp"
(hmpprt::gr_barrier());
}
# 201 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_202_internal_1(hmpprt::s32 width_23, hmpprt::s32 halfWidth_6, hmpprt::s32 halfHeight_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_6, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_7)
{
# 201 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_6, 8, "DownsampleDst_2");
__hmppcg_call.addLocalParameter(&UpsampleDst_7, 8, "UpsampleDst_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfHeight_4), "halfHeight_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_6), "halfWidth_2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_23), "width_5");
__hmppcg_call.launch(hmpp_acc_region_main_202_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 201 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_202(hmpprt::s32 width_14, hmpprt::s32 halfWidth_10, hmpprt::s32 halfHeight_8, hmpprt::u08* DownsampleDst_10, hmpprt::u08* UpsampleDst_11)
{
# 229 "laplacian.cpp"
(hmpp_acc_region_main_202_internal_1(width_14, halfWidth_10, halfHeight_8, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_10), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_11)));
}
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_230_parallel_region_1(hmpprt::u08* DownsampleDst_1, hmpprt::u08* UpsampleDst_2, hmpprt::s32 halfWidth_1)
{
# 231 "laplacian.cpp"
{
# 233 "laplacian.cpp"
hmpprt::s32 iter_per_gang_4;
# 233 "laplacian.cpp"
hmpprt::s32 first_gang_iter_4;
# 233 "laplacian.cpp"
hmpprt::s32 last_gang_iter_4;
# 233 "laplacian.cpp"
iter_per_gang_4 = ((1 + - (1 - (halfWidth_1 - 1)) / 192) > 256 ? (1 + - (1 - (halfWidth_1 - 1)) / 192) : 256);
# 233 "laplacian.cpp"
first_gang_iter_4 = (hmpprt::gr_gbidx()) * iter_per_gang_4;
# 233 "laplacian.cpp"
last_gang_iter_4 = ((first_gang_iter_4 + iter_per_gang_4 - 1) < ( - (1 - (halfWidth_1 - 1))) ? (first_gang_iter_4 + iter_per_gang_4 - 1) : ( - (1 - (halfWidth_1 - 1))));
# 233 "laplacian.cpp"
hmpprt::s32 x_8;
# 233 "laplacian.cpp"
# 234 "laplacian.cpp"
for (x_8 = first_gang_iter_4 + (hmpprt::gr_btidy()) ; x_8 <= last_gang_iter_4 ; x_8 = x_8 + (hmpprt::gr_btnumy()))
{
# 235 "laplacian.cpp"
*(UpsampleDst_2 + 2 * (halfWidth_1 - 1 - x_8)) = *(DownsampleDst_1 + (halfWidth_1 - 1 - x_8));
# 236 "laplacian.cpp"
*(UpsampleDst_2 + (2 * (halfWidth_1 - 1 - x_8) - 1)) = (hmpprt::u08 ) (((hmpprt::s32 ) (*(DownsampleDst_1 + (halfWidth_1 - 1 - x_8 - 1))) + (hmpprt::s32 ) (*(DownsampleDst_1 + (halfWidth_1 - 1 - x_8))) + 1) / 2);
}
# 229 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_230_internal_1(hmpprt::s32 width_27, hmpprt::s32 halfWidth_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> DownsampleDst_9, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_10)
{
# 229 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&DownsampleDst_9, 8, "DownsampleDst_1");
__hmppcg_call.addLocalParameter(&UpsampleDst_10, 8, "UpsampleDst_2");
__hmppcg_call.addLocalParameter((hmpprt::s32) (halfWidth_9), "halfWidth_1");
__hmppcg_call.launch(hmpp_acc_region_main_230_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 229 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_230(hmpprt::s32 width_4, hmpprt::s32 halfWidth_5, hmpprt::u08* DownsampleDst_5, hmpprt::u08* UpsampleDst_6)
{
# 243 "laplacian.cpp"
(hmpp_acc_region_main_230_internal_1(width_4, halfWidth_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (DownsampleDst_5), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_6)));
}
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_247_parallel_region_1(hmpprt::s16* LaplacianLayer, hmpprt::u08* Source, hmpprt::u08* UpsampleDst_1, hmpprt::s32 height_3, hmpprt::s32 width_3)
{
# 245 "laplacian.cpp"
{
# 250 "laplacian.cpp"
hmpprt::s32 iter_per_gang_3;
# 250 "laplacian.cpp"
hmpprt::s32 first_gang_iter_3;
# 250 "laplacian.cpp"
hmpprt::s32 last_gang_iter_3;
# 250 "laplacian.cpp"
iter_per_gang_3 = ((1 + (height_3 * width_3 - 1) / 192) > 256 ? (1 + (height_3 * width_3 - 1) / 192) : 256);
# 250 "laplacian.cpp"
first_gang_iter_3 = (hmpprt::gr_gbidx()) * iter_per_gang_3;
# 250 "laplacian.cpp"
last_gang_iter_3 = ((first_gang_iter_3 + iter_per_gang_3 - 1) < (height_3 * width_3 - 1) ? (first_gang_iter_3 + iter_per_gang_3 - 1) : (height_3 * width_3 - 1));
# 250 "laplacian.cpp"
hmpprt::s32 y_6;
# 250 "laplacian.cpp"
# 254 "laplacian.cpp"
for (y_6 = first_gang_iter_3 + (hmpprt::gr_btidy()) ; y_6 <= last_gang_iter_3 ; y_6 = y_6 + (hmpprt::gr_btnumy()))
{
# 250 "laplacian.cpp"
hmpprt::s32 y_5;
# 253 "laplacian.cpp"
hmpprt::s32 x_9;
# 255 "laplacian.cpp"
x_9 = y_6 % width_3;
# 255 "laplacian.cpp"
y_5 = y_6 / width_3;
# 255 "laplacian.cpp"
*(LaplacianLayer + (y_5 * width_3 + x_9)) = (hmpprt::s16 ) ((hmpprt::s32 ) ((hmpprt::s16 ) (*(Source + (y_5 * width_3 + x_9)))) - (hmpprt::s32 ) ((hmpprt::s16 ) (*(UpsampleDst_1 + (y_5 * width_3 + x_9)))));
}
# 243 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_247_internal_1(hmpprt::s32 height_15, hmpprt::s32 width_22, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_4, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> LaplacianLayer_2, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> UpsampleDst_9)
{
# 243 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&LaplacianLayer_2, 8, "LaplacianLayer");
__hmppcg_call.addLocalParameter(&Source_4, 8, "Source");
__hmppcg_call.addLocalParameter(&UpsampleDst_9, 8, "UpsampleDst_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_15), "height_3");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_22), "width_3");
__hmppcg_call.launch(hmpp_acc_region_main_247_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 243 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_247(hmpprt::s32 height_10, hmpprt::s32 width_13, hmpprt::u08* Source_2, hmpprt::s16* LaplacianLayer_1, hmpprt::u08* UpsampleDst_5)
{
# 12 "laplacian.cpp"
(hmpp_acc_region_main_247_internal_1(height_10, width_13, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (Source_2), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s16> (LaplacianLayer_1), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (UpsampleDst_5)));
}
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_55_parallel_region_1(hmpprt::u08* Source_1, hmpprt::s32 height_2, hmpprt::s32* pSrc_padding, hmpprt::s32 width_2)
{
# 14 "laplacian.cpp"
{
# 58 "laplacian.cpp"
hmpprt::s32 iter_per_gang_2;
# 58 "laplacian.cpp"
hmpprt::s32 first_gang_iter_2;
# 58 "laplacian.cpp"
hmpprt::s32 last_gang_iter_2;
# 58 "laplacian.cpp"
iter_per_gang_2 = 1 + (height_2 - 1) / 192;
# 58 "laplacian.cpp"
first_gang_iter_2 = (hmpprt::gr_gbidx()) * iter_per_gang_2;
# 58 "laplacian.cpp"
last_gang_iter_2 = ((first_gang_iter_2 + iter_per_gang_2 - 1) < (height_2 - 1) ? (first_gang_iter_2 + iter_per_gang_2 - 1) : (height_2 - 1));
# 58 "laplacian.cpp"
hmpprt::s32 h_6;
# 58 "laplacian.cpp"
# 59 "laplacian.cpp"
for (h_6 = first_gang_iter_2 ; h_6 <= last_gang_iter_2 ; h_6 = h_6 + 1)
{
# 67 "laplacian.cpp"
hmpprt::s32 tmp_22;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_23;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_24;
# 67 "laplacian.cpp"
hmpprt::s32 tmp_25;
# 67 "laplacian.cpp"
tmp_22 = 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 1))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 3)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + 1)) = tmp_22;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_23 = 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 3))) - 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 1))) + 2 * (hmpprt::s32 ) (*(Source_1 + h_6 * width_2)) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + 2)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + (h_6 + 2) * (width_2 + 4)) = tmp_23;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_24 = 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 2))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 4)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + width_2 + 2)) = tmp_24;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
tmp_25 = 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 4))) - 4 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 2))) + 2 * (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 1))) - (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + width_2 - 3)));
# 67 "laplacian.cpp"
if ((hmpprt::gr_btidy()) == 0)
{
# 67 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + width_2 + 3)) = tmp_25;
}
# 67 "laplacian.cpp"
(hmpprt::gr_barrier());
# 67 "laplacian.cpp"
hmpprt::s32 end_10;
# 67 "laplacian.cpp"
hmpprt::s32 w_8;
# 67 "laplacian.cpp"
# 68 "laplacian.cpp"
# 68 "laplacian.cpp"
for (w_8 = (hmpprt::gr_btidy()), end_10 = width_2 - 1 ; w_8 <= end_10 ; w_8 = w_8 + (hmpprt::gr_btnumy()))
{
# 68 "laplacian.cpp"
*(pSrc_padding + ((h_6 + 2) * (width_2 + 4) + w_8 + 2)) = (hmpprt::s32 ) (*(Source_1 + (h_6 * width_2 + w_8)));
}
# 12 "laplacian.cpp"
}
# 12 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_55_internal_1(hmpprt::s32 height_20, hmpprt::s32 width_21, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> Source_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_4)
{
# 12 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter(&Source_5, 8, "Source_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_20), "height_2");
__hmppcg_call.addLocalParameter(&pSrc_padding_4, 8, "pSrc_padding");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_21), "width_2");
__hmppcg_call.launch(hmpp_acc_region_main_55_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 12 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_55(hmpprt::s32 height_9, hmpprt::s32 width_12, hmpprt::u08* Source_3, hmpprt::s32* pSrc_padding_2)
{
# 75 "laplacian.cpp"
(hmpp_acc_region_main_55_internal_1(height_9, width_12, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (Source_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding_2)));
}
#endif // __CUDACC__
# 75 "laplacian.cpp"
#ifdef __CUDACC__
extern "C" __global__ void hmpp_acc_region_main_79_parallel_region_1(hmpprt::s32 height_1, hmpprt::u08* pBufL_cp_1, hmpprt::s32* pSrc_padding_1, hmpprt::s32 width_1)
{
# 77 "laplacian.cpp"
{
# 82 "laplacian.cpp"
hmpprt::s32 iter_per_gang_1;
# 82 "laplacian.cpp"
hmpprt::s32 first_gang_iter_1;
# 82 "laplacian.cpp"
hmpprt::s32 last_gang_iter_1;
# 82 "laplacian.cpp"
iter_per_gang_1 = ((1 + (height_1 * width_1 - 1) / 192) > 256 ? (1 + (height_1 * width_1 - 1) / 192) : 256);
# 82 "laplacian.cpp"
first_gang_iter_1 = (hmpprt::gr_gbidx()) * iter_per_gang_1;
# 82 "laplacian.cpp"
last_gang_iter_1 = ((first_gang_iter_1 + iter_per_gang_1 - 1) < (height_1 * width_1 - 1) ? (first_gang_iter_1 + iter_per_gang_1 - 1) : (height_1 * width_1 - 1));
# 82 "laplacian.cpp"
hmpprt::s32 h_8;
# 82 "laplacian.cpp"
# 85 "laplacian.cpp"
for (h_8 = first_gang_iter_1 + (hmpprt::gr_btidy()) ; h_8 <= last_gang_iter_1 ; h_8 = h_8 + (hmpprt::gr_btnumy()))
{
# 82 "laplacian.cpp"
hmpprt::s32 h_7;
# 84 "laplacian.cpp"
hmpprt::s32 w_9;
# 86 "laplacian.cpp"
w_9 = h_8 % width_1;
# 86 "laplacian.cpp"
h_7 = h_8 / width_1;
# 86 "laplacian.cpp"
*(pBufL_cp_1 + (h_7 * width_1 + w_9)) = (hmpprt::u08 ) (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + w_9)) + (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + w_9 + 1)) << 2) + 6 * *(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 2))) + (*(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 3))) << 2) + *(pSrc_padding_1 + ((h_7 + 2) * (width_1 + 4) + (w_9 + 4))) + 8 >> 4);
}
# 75 "laplacian.cpp"
}
}
#endif // __CUDACC__
# 75 "laplacian.cpp"
#ifndef __CUDACC__
void hmpp_acc_region_main_79_internal_1(hmpprt::s32 height_19, hmpprt::s32 width_20, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> pSrc_padding_5, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> pBufL_cp_8)
{
# 75 "laplacian.cpp"
if (1)
{
hmpprt::CUDAGridCall __hmppcg_call;
__hmppcg_call.setSizeX(192);
__hmppcg_call.setSizeY(1);
__hmppcg_call.setBlockSizeX(1);
__hmppcg_call.setBlockSizeY(256);
__hmppcg_call.addLocalParameter((hmpprt::s32) (height_19), "height_1");
__hmppcg_call.addLocalParameter(&pBufL_cp_8, 8, "pBufL_cp_1");
__hmppcg_call.addLocalParameter(&pSrc_padding_5, 8, "pSrc_padding_1");
__hmppcg_call.addLocalParameter((hmpprt::s32) (width_20), "width_1");
__hmppcg_call.launch(hmpp_acc_region_main_79_parallel_region_1, hmpprt::Context::getInstance()->getCUDADevice());
}
;
}
#endif // __CUDACC__
# 75 "laplacian.cpp"
#ifndef __CUDACC__
extern "C" CDLT_API void hmpp_acc_region_main_79(hmpprt::s32 height_8, hmpprt::s32 width_11, hmpprt::s32* pSrc_padding_3, hmpprt::u08* pBufL_cp_4)
{
# 1 "<preprocessor>"
(hmpp_acc_region_main_79_internal_1(height_8, width_11, hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::s32> (pSrc_padding_3), hmpprt::DevicePtr<hmpprt::MS_CUDA_GLOB,hmpprt::u08> (pBufL_cp_4)));
}
#endif // __CUDACC__
#ifndef __CUDACC__
extern "C" const char * hmpprt_cuda_get_gpu_code();
static hmpprt::CUDAModule * hmpprt_module = 0;
static int hmpprt_uses = 0;
extern "C" CDLT_API void * hmpprt_init()
{
try
{
if (hmpprt_uses++ == 0)
{
hmpprt_module = new hmpprt::CUDAModule(hmpprt_cuda_get_gpu_code());
hmpp_acc_region_main_100_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_100_parallel_region_1");
hmpp_acc_region_main_114_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_114_parallel_region_1");
hmpp_acc_region_main_136_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_136_parallel_region_1");
hmpp_acc_region_main_159_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_159_parallel_region_1");
hmpp_acc_region_main_188_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_188_parallel_region_1");
hmpp_acc_region_main_202_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_202_parallel_region_1");
hmpp_acc_region_main_230_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_230_parallel_region_1");
hmpp_acc_region_main_247_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_247_parallel_region_1");
hmpp_acc_region_main_55_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_55_parallel_region_1");
hmpp_acc_region_main_79_parallel_region_1 = new hmpprt::CUDAGrid(hmpprt_module, "hmpp_acc_region_main_79_parallel_region_1");
}
hmpprt::Context::getInstance()->getGrouplet()->setTarget(hmpprt::CUDA);
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_100", "prototype hmpp_acc_region_main_100(height: s32, width: s32, pSrc_padding2: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_114", "prototype hmpp_acc_region_main_114(height: s32, width: s32, pSrc_padding2: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_136", "prototype hmpp_acc_region_main_136(height: s32, width: s32, FilterDst: ^cudaglob u8, pSrc_padding2: ^cudaglob s32)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_159", "prototype hmpp_acc_region_main_159(width: s32, halfWidth: s32, halfHeight: s32, FilterDst: ^cudaglob u8, DownsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_188", "prototype hmpp_acc_region_main_188(height: s32, width: s32, halfWidth: s32, halfHeight: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_202", "prototype hmpp_acc_region_main_202(width: s32, halfWidth: s32, halfHeight: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_230", "prototype hmpp_acc_region_main_230(width: s32, halfWidth: s32, DownsampleDst: ^cudaglob u8, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_247", "prototype hmpp_acc_region_main_247(height: s32, width: s32, Source: ^cudaglob u8, LaplacianLayer: ^cudaglob s16, UpsampleDst: ^cudaglob u8)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_55", "prototype hmpp_acc_region_main_55(height: s32, width: s32, Source: ^cudaglob u8, pSrc_padding: ^cudaglob s32)");
hmpprt::Context::getInstance()->getGrouplet()->addSignature("hmpp_acc_region_main_79", "prototype hmpp_acc_region_main_79(height: s32, width: s32, pSrc_padding: ^cudaglob s32, pBufL_cp: ^cudaglob u8)");
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_init()\n");
abort();
}
return 0;
}
#endif // __CUDACC__
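#ifndef __CUDACC__
/* Editor's sketch (hypothetical, not emitted by the HMPP code generator): a minimal
 * host-side driver showing how the exported codelets above could be chained for one
 * filtering step, assuming d_Source, d_pSrc_padding and d_pBufL_cp are CUDA device
 * buffers of the sizes the codelets expect (width*height bytes for the images and
 * (width+4)*(height+4) s32 values for the padded buffer). In a real OpenHMPP
 * application the HMPP runtime loads this grouplet and drives init/fini itself. */
extern "C" CDLT_API void * hmpprt_fini(); // defined later in this file
static void example_run_filter_step(hmpprt::s32 height, hmpprt::s32 width,
                                    hmpprt::u08 * d_Source,
                                    hmpprt::s32 * d_pSrc_padding,
                                    hmpprt::u08 * d_pBufL_cp)
{
  if (hmpprt_init() != 0) return; // a non-zero return is a cloned hmpperr::Error
  // Pad the source rows with extrapolated borders (acc region at laplacian.cpp:55).
  hmpp_acc_region_main_55(height, width, d_Source, d_pSrc_padding);
  // Horizontal 1-4-6-4-1 (/16) filter from the padded buffer (acc region at laplacian.cpp:79).
  hmpp_acc_region_main_79(height, width, d_pSrc_padding, d_pBufL_cp);
  hmpprt_fini(); // release the CUDA module and grids
}
#endif // __CUDACC__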
#ifndef __CUDACC__
extern "C" CDLT_API void * hmpprt_fini()
{
try
{
if (--hmpprt_uses == 0)
{
delete hmpp_acc_region_main_100_parallel_region_1;
delete hmpp_acc_region_main_114_parallel_region_1;
delete hmpp_acc_region_main_136_parallel_region_1;
delete hmpp_acc_region_main_159_parallel_region_1;
delete hmpp_acc_region_main_188_parallel_region_1;
delete hmpp_acc_region_main_202_parallel_region_1;
delete hmpp_acc_region_main_230_parallel_region_1;
delete hmpp_acc_region_main_247_parallel_region_1;
delete hmpp_acc_region_main_55_parallel_region_1;
delete hmpp_acc_region_main_79_parallel_region_1;
delete hmpprt_module;
hmpprt_module = 0;
}
}
catch (hmpperr::Error & e)
{
return e.clone();
}
catch(...)
{
fprintf(stderr,"Unexpected error in hmpprt_fini()\n");
abort();
}
return 0;
}
#endif // __CUDACC__
// footer
|
32db84fb77e50ae61cd30cbaf6362a85a759e771.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Triangle/triangle intersection test routine,
* by Tomas Moller, 1997.
* See article "A Fast Triangle-Triangle Intersection Test",
* Journal of Graphics Tools, 2(2), 1997
*
* Updated June 1999: removed the divisions -- a little faster now!
* Updated October 1999: added {} to CROSS and SUB macros
*
* int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3],
* float U0[3],float U1[3],float U2[3])
*
* parameters: vertices of triangle 1: V0,V1,V2
* vertices of triangle 2: U0,U1,U2
* result : returns 1 if the triangles intersect, otherwise 0
*
*/
#include <math.h>
#include <device_launch_parameters.h>
#include "TriangleTriangleIntersect.cuh"
#define FABS(x) (float(fabs(x))) /* implement as is fastest on your machine */
/* if USE_EPSILON_TEST is true then we do a check:
if |dv|<EPSILON then dv=0.0;
else no check is done (which is less robust)
*/
#define USE_EPSILON_TEST TRUE
#define EPSILON 0.000001
/* some macros */
#define CROSS(dest,v1,v2){ \
dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \
dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \
dest[2]=v1[0]*v2[1]-v1[1]*v2[0];}
#define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])
#define SUB(dest,v1,v2){ \
dest[0]=v1[0]-v2[0]; \
dest[1]=v1[1]-v2[1]; \
dest[2]=v1[2]-v2[2];}
/* sort so that a<=b */
#define SORT(a,b) \
if(a>b) \
{ \
float c; \
c=a; \
a=b; \
b=c; \
}
/* this edge to edge test is based on Franklin Antonio's gem:
"Faster Line Segment Intersection", in Graphics Gems III,
pp. 199-202 */
#define EDGE_EDGE_TEST(V0,U0,U1) \
Bx=U0[i0]-U1[i0]; \
By=U0[i1]-U1[i1]; \
Cx=V0[i0]-U0[i0]; \
Cy=V0[i1]-U0[i1]; \
f=Ay*Bx-Ax*By; \
d=By*Cx-Bx*Cy; \
if((f>0 && d>=0 && d<=f) || (f<0 && d<=0 && d>=f)) \
{ \
e=Ax*Cy-Ay*Cx; \
if(f>0) \
{ \
if(e>=0 && e<=f) return 1; \
} \
else \
{ \
if(e<=0 && e>=f) return 1; \
} \
}
#define EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2) \
{ \
float Ax,Ay,Bx,By,Cx,Cy,e,d,f; \
Ax=V1[i0]-V0[i0]; \
Ay=V1[i1]-V0[i1]; \
/* test edge U0,U1 against V0,V1 */ \
EDGE_EDGE_TEST(V0,U0,U1); \
/* test edge U1,U2 against V0,V1 */ \
EDGE_EDGE_TEST(V0,U1,U2); \
  /* test edge U2,U0 against V0,V1 */          \
EDGE_EDGE_TEST(V0,U2,U0); \
}
#define POINT_IN_TRI(V0,U0,U1,U2) \
{ \
float a,b,c,d0,d1,d2; \
  /* is T1 completely inside T2? */            \
/* check if V0 is inside tri(U0,U1,U2) */ \
a=U1[i1]-U0[i1]; \
b=-(U1[i0]-U0[i0]); \
c=-a*U0[i0]-b*U0[i1]; \
d0=a*V0[i0]+b*V0[i1]+c; \
\
a=U2[i1]-U1[i1]; \
b=-(U2[i0]-U1[i0]); \
c=-a*U1[i0]-b*U1[i1]; \
d1=a*V0[i0]+b*V0[i1]+c; \
\
a=U0[i1]-U2[i1]; \
b=-(U0[i0]-U2[i0]); \
c=-a*U2[i0]-b*U2[i1]; \
d2=a*V0[i0]+b*V0[i1]+c; \
if(d0*d1>0.0) \
{ \
if(d0*d2>0.0) return 1; \
} \
}
#define NEWCOMPUTE_INTERVALS(VV0,VV1,VV2,D0,D1,D2,D0D1,D0D2,A,B,C,X0,X1) \
{ \
if(D0D1>0.0f) \
{ \
/* here we know that D0D2<=0.0 */ \
/* that is D0, D1 are on the same side, D2 on the other or on the plane */ \
A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \
} \
else if(D0D2>0.0f)\
{ \
/* here we know that d0d1<=0.0 */ \
A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \
} \
else if(D1*D2>0.0f || D0!=0.0f) \
{ \
/* here we know that d0d1<=0.0 or that D0!=0.0 */ \
A=VV0; B=(VV1-VV0)*D0; C=(VV2-VV0)*D0; X0=D0-D1; X1=D0-D2; \
} \
else if(D1!=0.0f) \
{ \
A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \
} \
else if(D2!=0.0f) \
{ \
A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \
} \
else \
{ \
/* triangles are coplanar */ \
return coplanar_tri_tri(N1,V0,V1,V2,U0,U1,U2); \
} \
}
__host__ __device__ int NoDivTriTriIsect(float V0[3], float V1[3], float V2[3],
float U0[3], float U1[3], float U2[3])
{
float E1[3], E2[3];
float N1[3], N2[3], d1, d2;
float du0, du1, du2, dv0, dv1, dv2;
float D[3];
float isect1[2], isect2[2];
float du0du1, du0du2, dv0dv1, dv0dv2;
short index;
float vp0, vp1, vp2;
float up0, up1, up2;
float bb, cc, max;
/* compute plane equation of triangle(V0,V1,V2) */
SUB(E1, V1, V0);
SUB(E2, V2, V0);
CROSS(N1, E1, E2);
d1 = -DOT(N1, V0);
/* plane equation 1: N1.X+d1=0 */
/* put U0,U1,U2 into plane equation 1 to compute signed distances to the plane*/
du0 = DOT(N1, U0) + d1;
du1 = DOT(N1, U1) + d1;
du2 = DOT(N1, U2) + d1;
/* coplanarity robustness check */
#if USE_EPSILON_TEST==TRUE
if (FABS(du0) < EPSILON) du0 = 0.0;
if (FABS(du1) < EPSILON) du1 = 0.0;
if (FABS(du2) < EPSILON) du2 = 0.0;
#endif
du0du1 = du0 * du1;
du0du2 = du0 * du2;
if (du0du1 > 0.0f && du0du2 > 0.0f) /* same sign on all of them + not equal 0 ? */
return 0; /* no intersection occurs */
/* compute plane of triangle (U0,U1,U2) */
SUB(E1, U1, U0);
SUB(E2, U2, U0);
CROSS(N2, E1, E2);
d2 = -DOT(N2, U0);
/* plane equation 2: N2.X+d2=0 */
/* put V0,V1,V2 into plane equation 2 */
dv0 = DOT(N2, V0) + d2;
dv1 = DOT(N2, V1) + d2;
dv2 = DOT(N2, V2) + d2;
#if USE_EPSILON_TEST==TRUE
if (FABS(dv0) < EPSILON) dv0 = 0.0;
if (FABS(dv1) < EPSILON) dv1 = 0.0;
if (FABS(dv2) < EPSILON) dv2 = 0.0;
#endif
dv0dv1 = dv0 * dv1;
dv0dv2 = dv0 * dv2;
if (dv0dv1 > 0.0f && dv0dv2 > 0.0f) /* same sign on all of them + not equal 0 ? */
return 0; /* no intersection occurs */
/* compute direction of intersection line */
CROSS(D, N1, N2);
/* compute and index to the largest component of D */
max = (float)FABS(D[0]);
index = 0;
bb = (float)FABS(D[1]);
cc = (float)FABS(D[2]);
if (bb > max) max = bb, index = 1;
if (cc > max) max = cc, index = 2;
/* this is the simplified projection onto L*/
vp0 = V0[index];
vp1 = V1[index];
vp2 = V2[index];
up0 = U0[index];
up1 = U1[index];
up2 = U2[index];
/* compute interval for triangle 1 */
float a, b, c, x0, x1;
NEWCOMPUTE_INTERVALS(vp0, vp1, vp2, dv0, dv1, dv2, dv0dv1, dv0dv2, a, b, c, x0, x1);
/* compute interval for triangle 2 */
float d, e, f, y0, y1;
NEWCOMPUTE_INTERVALS(up0, up1, up2, du0, du1, du2, du0du1, du0du2, d, e, f, y0, y1);
float xx, yy, xxyy, tmp;
xx = x0 * x1;
yy = y0 * y1;
xxyy = xx * yy;
tmp = a * xxyy;
isect1[0] = tmp + b * x1 * yy;
isect1[1] = tmp + c * x0 * yy;
tmp = d * xxyy;
isect2[0] = tmp + e * xx * y1;
isect2[1] = tmp + f * xx * y0;
SORT(isect1[0], isect1[1]);
SORT(isect2[0], isect2[1]);
if (isect1[1] < isect2[0] || isect2[1] < isect1[0]) return 0;
return 1;
}
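/* Editor's sketch (hypothetical, not part of the original routine): a minimal host-side
 * check showing how NoDivTriTriIsect() can be called directly; the function is
 * __host__ __device__, so the same call also works inside a kernel. The vertex values
 * below are made up for the example: triangle B's vertical edge passes through the
 * interior of triangle A, so the first test should report an intersection, while
 * triangle C is shifted away along z and should not. */
__host__ int example_tri_tri_host_check()
{
	float A0[3] = { 0.0f, 0.0f, 0.0f }, A1[3] = { 1.0f, 0.0f, 0.0f }, A2[3] = { 0.0f, 1.0f, 0.0f };
	float B0[3] = { 0.25f, 0.25f, -1.0f }, B1[3] = { 0.25f, 0.25f, 1.0f }, B2[3] = { 1.0f, 1.0f, 0.0f };
	float C0[3] = { 0.25f, 0.25f, 4.0f }, C1[3] = { 0.25f, 0.25f, 6.0f }, C2[3] = { 1.0f, 1.0f, 5.0f };
	int hit = NoDivTriTriIsect(A0, A1, A2, B0, B1, B2);  // expected: 1 (triangles intersect)
	int miss = NoDivTriTriIsect(A0, A1, A2, C0, C1, C2); // expected: 0 (no intersection)
	return (hit == 1 && miss == 0) ? 0 : 1;              // 0 when both outcomes match the expectation
}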
__host__ __device__ int coplanar_tri_tri(float N[3], float V0[3], float V1[3], float V2[3],
float U0[3], float U1[3], float U2[3])
{
float A[3];
short i0, i1;
/* first project onto an axis-aligned plane, that maximizes the area */
/* of the triangles, compute indices: i0,i1. */
A[0] = FABS(N[0]);
A[1] = FABS(N[1]);
A[2] = FABS(N[2]);
if (A[0] > A[1])
{
if (A[0] > A[2])
{
i0 = 1; /* A[0] is greatest */
i1 = 2;
}
else
{
i0 = 0; /* A[2] is greatest */
i1 = 1;
}
}
else /* A[0]<=A[1] */
{
if (A[2] > A[1])
{
i0 = 0; /* A[2] is greatest */
i1 = 1;
}
else
{
i0 = 0; /* A[1] is greatest */
i1 = 2;
}
}
/* test all edges of triangle 1 against the edges of triangle 2 */
EDGE_AGAINST_TRI_EDGES(V0, V1, U0, U1, U2);
EDGE_AGAINST_TRI_EDGES(V1, V2, U0, U1, U2);
EDGE_AGAINST_TRI_EDGES(V2, V0, U0, U1, U2);
/* finally, test if tri1 is totally contained in tri2 or vice versa */
POINT_IN_TRI(V0, U0, U1, U2);
POINT_IN_TRI(U0, V0, V1, V2);
return 0;
}
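/* Editor's sketch (hypothetical glue code, not part of the original file): one possible
 * host-side launch of the triangle_triangle_GPU kernel defined further below, using the
 * HIP runtime. Buffer names, the 256-thread block size, the hipMemset of the result
 * buffer and the error handling are illustrative assumptions; the mesh data would come
 * from the caller. Note that the __device__ flag 'intersect' declared just below
 * persists between launches, so a real driver would reset it (e.g. with
 * hipMemcpyToSymbol) before each call. */
__global__ void triangle_triangle_GPU(int3* cudaInsideTriangles, float3* cudaInsideVertices,
	int3* cudaOutsideTriangles, float3* cudaOutsideVertices,
	int* cudaIntersectionsPerInsideTriangle, int numberOfInsideTriangles, int numberOfOutsideTriangles);
__host__ int example_launch_triangle_triangle(const int3* h_inTris, const float3* h_inVerts, int numInTris, int numInVerts,
	const int3* h_outTris, const float3* h_outVerts, int numOutTris, int numOutVerts,
	int* h_intersectionsPerInsideTriangle /* numInTris ints, filled on return */)
{
	int3* d_inTris = nullptr; float3* d_inVerts = nullptr;
	int3* d_outTris = nullptr; float3* d_outVerts = nullptr;
	int* d_counts = nullptr;
	hipMalloc((void**)&d_inTris, numInTris * sizeof(int3));
	hipMalloc((void**)&d_inVerts, numInVerts * sizeof(float3));
	hipMalloc((void**)&d_outTris, numOutTris * sizeof(int3));
	hipMalloc((void**)&d_outVerts, numOutVerts * sizeof(float3));
	hipMalloc((void**)&d_counts, numInTris * sizeof(int));
	hipMemcpy(d_inTris, h_inTris, numInTris * sizeof(int3), hipMemcpyHostToDevice);
	hipMemcpy(d_inVerts, h_inVerts, numInVerts * sizeof(float3), hipMemcpyHostToDevice);
	hipMemcpy(d_outTris, h_outTris, numOutTris * sizeof(int3), hipMemcpyHostToDevice);
	hipMemcpy(d_outVerts, h_outVerts, numOutVerts * sizeof(float3), hipMemcpyHostToDevice);
	hipMemset(d_counts, 0, numInTris * sizeof(int)); // threads that exit early leave their slot untouched
	int threadsPerBlock = 256;                                        // assumed block size
	int blocks = (numInTris + threadsPerBlock - 1) / threadsPerBlock; // one thread per inside triangle
	hipLaunchKernelGGL(triangle_triangle_GPU, dim3(blocks), dim3(threadsPerBlock), 0, 0,
		d_inTris, d_inVerts, d_outTris, d_outVerts, d_counts, numInTris, numOutTris);
	hipMemcpy(h_intersectionsPerInsideTriangle, d_counts, numInTris * sizeof(int), hipMemcpyDeviceToHost);
	hipError_t status = hipGetLastError();
	hipFree(d_inTris); hipFree(d_inVerts); hipFree(d_outTris); hipFree(d_outVerts); hipFree(d_counts);
	return status == hipSuccess ? 0 : 1;
}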
__device__ int intersect = false;
__global__ void triangle_triangle_GPU(int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, int* cudaIntersectionsPerInsideTriangle, int numberOfInsideTriangles, int numberOfOutsideTriangles) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < numberOfInsideTriangles)
{
float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[tid].x].x, cudaInsideVertices[cudaInsideTriangles[tid].x].y, cudaInsideVertices[cudaInsideTriangles[tid].x].z };
float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[tid].y].x, cudaInsideVertices[cudaInsideTriangles[tid].y].y, cudaInsideVertices[cudaInsideTriangles[tid].y].z };
float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[tid].z].x, cudaInsideVertices[cudaInsideTriangles[tid].z].y, cudaInsideVertices[cudaInsideTriangles[tid].z].z };
int numberOfIntersections = 0;
for (int i = 0; i < numberOfOutsideTriangles; i++)
{
if (!intersect) {
float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].x].x, cudaOutsideVertices[cudaOutsideTriangles[i].x].y, cudaOutsideVertices[cudaOutsideTriangles[i].x].z };
float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].y].x, cudaOutsideVertices[cudaOutsideTriangles[i].y].y, cudaOutsideVertices[cudaOutsideTriangles[i].y].z };
float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].z].x, cudaOutsideVertices[cudaOutsideTriangles[i].z].y, cudaOutsideVertices[cudaOutsideTriangles[i].z].z };
float t, u, v;
if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1)
{
numberOfIntersections++;
intersect = true;
				//cudaIntersectionsPerInsideTriangle[tid] = 1; // Faster if you leave this out when the meshes do not intersect ==> it makes things slower when the meshes lie inside each other
}
				if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Faster if you leave this out when the meshes do not intersect
}
else {
return;
}
}
//printf("numberOfIntersections = %d\n", numberOfIntersections);
cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections;
}
} | 32db84fb77e50ae61cd30cbaf6362a85a759e771.cu | /* Triangle/triangle intersection test routine,
* by Tomas Moller, 1997.
* See article "A Fast Triangle-Triangle Intersection Test",
* Journal of Graphics Tools, 2(2), 1997
*
* Updated June 1999: removed the divisions -- a little faster now!
* Updated October 1999: added {} to CROSS and SUB macros
*
* int NoDivTriTriIsect(float V0[3],float V1[3],float V2[3],
* float U0[3],float U1[3],float U2[3])
*
* parameters: vertices of triangle 1: V0,V1,V2
* vertices of triangle 2: U0,U1,U2
* result : returns 1 if the triangles intersect, otherwise 0
*
*/
#include <math.h>
#include <device_launch_parameters.h>
#include "TriangleTriangleIntersect.cuh"
#define FABS(x) (float(fabs(x))) /* implement as is fastest on your machine */
/* if USE_EPSILON_TEST is true then we do a check:
if |dv|<EPSILON then dv=0.0;
else no check is done (which is less robust)
*/
#define USE_EPSILON_TEST TRUE
#define EPSILON 0.000001
/* some macros */
#define CROSS(dest,v1,v2){ \
dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \
dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \
dest[2]=v1[0]*v2[1]-v1[1]*v2[0];}
#define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])
#define SUB(dest,v1,v2){ \
dest[0]=v1[0]-v2[0]; \
dest[1]=v1[1]-v2[1]; \
dest[2]=v1[2]-v2[2];}
/* sort so that a<=b */
#define SORT(a,b) \
if(a>b) \
{ \
float c; \
c=a; \
a=b; \
b=c; \
}
/* this edge to edge test is based on Franklin Antonio's gem:
"Faster Line Segment Intersection", in Graphics Gems III,
pp. 199-202 */
#define EDGE_EDGE_TEST(V0,U0,U1) \
Bx=U0[i0]-U1[i0]; \
By=U0[i1]-U1[i1]; \
Cx=V0[i0]-U0[i0]; \
Cy=V0[i1]-U0[i1]; \
f=Ay*Bx-Ax*By; \
d=By*Cx-Bx*Cy; \
if((f>0 && d>=0 && d<=f) || (f<0 && d<=0 && d>=f)) \
{ \
e=Ax*Cy-Ay*Cx; \
if(f>0) \
{ \
if(e>=0 && e<=f) return 1; \
} \
else \
{ \
if(e<=0 && e>=f) return 1; \
} \
}
#define EDGE_AGAINST_TRI_EDGES(V0,V1,U0,U1,U2) \
{ \
float Ax,Ay,Bx,By,Cx,Cy,e,d,f; \
Ax=V1[i0]-V0[i0]; \
Ay=V1[i1]-V0[i1]; \
/* test edge U0,U1 against V0,V1 */ \
EDGE_EDGE_TEST(V0,U0,U1); \
/* test edge U1,U2 against V0,V1 */ \
EDGE_EDGE_TEST(V0,U1,U2); \
  /* test edge U2,U0 against V0,V1 */          \
EDGE_EDGE_TEST(V0,U2,U0); \
}
#define POINT_IN_TRI(V0,U0,U1,U2) \
{ \
float a,b,c,d0,d1,d2; \
  /* is T1 completely inside T2? */            \
/* check if V0 is inside tri(U0,U1,U2) */ \
a=U1[i1]-U0[i1]; \
b=-(U1[i0]-U0[i0]); \
c=-a*U0[i0]-b*U0[i1]; \
d0=a*V0[i0]+b*V0[i1]+c; \
\
a=U2[i1]-U1[i1]; \
b=-(U2[i0]-U1[i0]); \
c=-a*U1[i0]-b*U1[i1]; \
d1=a*V0[i0]+b*V0[i1]+c; \
\
a=U0[i1]-U2[i1]; \
b=-(U0[i0]-U2[i0]); \
c=-a*U2[i0]-b*U2[i1]; \
d2=a*V0[i0]+b*V0[i1]+c; \
if(d0*d1>0.0) \
{ \
if(d0*d2>0.0) return 1; \
} \
}
#define NEWCOMPUTE_INTERVALS(VV0,VV1,VV2,D0,D1,D2,D0D1,D0D2,A,B,C,X0,X1) \
{ \
if(D0D1>0.0f) \
{ \
/* here we know that D0D2<=0.0 */ \
/* that is D0, D1 are on the same side, D2 on the other or on the plane */ \
A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \
} \
else if(D0D2>0.0f)\
{ \
/* here we know that d0d1<=0.0 */ \
A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \
} \
else if(D1*D2>0.0f || D0!=0.0f) \
{ \
/* here we know that d0d1<=0.0 or that D0!=0.0 */ \
A=VV0; B=(VV1-VV0)*D0; C=(VV2-VV0)*D0; X0=D0-D1; X1=D0-D2; \
} \
else if(D1!=0.0f) \
{ \
A=VV1; B=(VV0-VV1)*D1; C=(VV2-VV1)*D1; X0=D1-D0; X1=D1-D2; \
} \
else if(D2!=0.0f) \
{ \
A=VV2; B=(VV0-VV2)*D2; C=(VV1-VV2)*D2; X0=D2-D0; X1=D2-D1; \
} \
else \
{ \
/* triangles are coplanar */ \
return coplanar_tri_tri(N1,V0,V1,V2,U0,U1,U2); \
} \
}
__host__ __device__ int NoDivTriTriIsect(float V0[3], float V1[3], float V2[3],
float U0[3], float U1[3], float U2[3])
{
float E1[3], E2[3];
float N1[3], N2[3], d1, d2;
float du0, du1, du2, dv0, dv1, dv2;
float D[3];
float isect1[2], isect2[2];
float du0du1, du0du2, dv0dv1, dv0dv2;
short index;
float vp0, vp1, vp2;
float up0, up1, up2;
float bb, cc, max;
/* compute plane equation of triangle(V0,V1,V2) */
SUB(E1, V1, V0);
SUB(E2, V2, V0);
CROSS(N1, E1, E2);
d1 = -DOT(N1, V0);
/* plane equation 1: N1.X+d1=0 */
/* put U0,U1,U2 into plane equation 1 to compute signed distances to the plane*/
du0 = DOT(N1, U0) + d1;
du1 = DOT(N1, U1) + d1;
du2 = DOT(N1, U2) + d1;
/* coplanarity robustness check */
#if USE_EPSILON_TEST==TRUE
if (FABS(du0) < EPSILON) du0 = 0.0;
if (FABS(du1) < EPSILON) du1 = 0.0;
if (FABS(du2) < EPSILON) du2 = 0.0;
#endif
du0du1 = du0 * du1;
du0du2 = du0 * du2;
if (du0du1 > 0.0f && du0du2 > 0.0f) /* same sign on all of them + not equal 0 ? */
return 0; /* no intersection occurs */
/* compute plane of triangle (U0,U1,U2) */
SUB(E1, U1, U0);
SUB(E2, U2, U0);
CROSS(N2, E1, E2);
d2 = -DOT(N2, U0);
/* plane equation 2: N2.X+d2=0 */
/* put V0,V1,V2 into plane equation 2 */
dv0 = DOT(N2, V0) + d2;
dv1 = DOT(N2, V1) + d2;
dv2 = DOT(N2, V2) + d2;
#if USE_EPSILON_TEST==TRUE
if (FABS(dv0) < EPSILON) dv0 = 0.0;
if (FABS(dv1) < EPSILON) dv1 = 0.0;
if (FABS(dv2) < EPSILON) dv2 = 0.0;
#endif
dv0dv1 = dv0 * dv1;
dv0dv2 = dv0 * dv2;
if (dv0dv1 > 0.0f && dv0dv2 > 0.0f) /* same sign on all of them + not equal 0 ? */
return 0; /* no intersection occurs */
/* compute direction of intersection line */
CROSS(D, N1, N2);
/* compute an index to the largest component of D */
max = (float)FABS(D[0]);
index = 0;
bb = (float)FABS(D[1]);
cc = (float)FABS(D[2]);
if (bb > max) max = bb, index = 1;
if (cc > max) max = cc, index = 2;
/* this is the simplified projection onto L*/
vp0 = V0[index];
vp1 = V1[index];
vp2 = V2[index];
up0 = U0[index];
up1 = U1[index];
up2 = U2[index];
/* compute interval for triangle 1 */
float a, b, c, x0, x1;
NEWCOMPUTE_INTERVALS(vp0, vp1, vp2, dv0, dv1, dv2, dv0dv1, dv0dv2, a, b, c, x0, x1);
/* compute interval for triangle 2 */
float d, e, f, y0, y1;
NEWCOMPUTE_INTERVALS(up0, up1, up2, du0, du1, du2, du0du1, du0du2, d, e, f, y0, y1);
float xx, yy, xxyy, tmp;
xx = x0 * x1;
yy = y0 * y1;
xxyy = xx * yy;
tmp = a * xxyy;
isect1[0] = tmp + b * x1 * yy;
isect1[1] = tmp + c * x0 * yy;
tmp = d * xxyy;
isect2[0] = tmp + e * xx * y1;
isect2[1] = tmp + f * xx * y0;
SORT(isect1[0], isect1[1]);
SORT(isect2[0], isect2[1]);
if (isect1[1] < isect2[0] || isect2[1] < isect1[0]) return 0;
return 1;
}
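/* Editorial example (not part of the original routine): a minimal host-side
 * sketch of the interface documented in the header comment above. The two
 * triangles are made up for illustration; the first lies in the z = 0 plane
 * and the second pierces it at (0.25, 0.25, 0), so the expected result is 1. */
static void example_tri_tri_usage()
{
    float V0[3] = { 0.f, 0.f, 0.f }, V1[3] = { 1.f, 0.f, 0.f }, V2[3] = { 0.f, 1.f, 0.f };
    float U0[3] = { 0.25f, 0.25f, -1.f }, U1[3] = { 0.25f, 0.25f, 1.f }, U2[3] = { 1.f, 1.f, 1.f };
    int hit = NoDivTriTriIsect(V0, V1, V2, U0, U1, U2); /* expected: 1 */
    (void)hit; /* this function is illustrative only */
}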
__host__ __device__ int coplanar_tri_tri(float N[3], float V0[3], float V1[3], float V2[3],
float U0[3], float U1[3], float U2[3])
{
float A[3];
short i0, i1;
/* first project onto an axis-aligned plane, that maximizes the area */
/* of the triangles, compute indices: i0,i1. */
A[0] = FABS(N[0]);
A[1] = FABS(N[1]);
A[2] = FABS(N[2]);
if (A[0] > A[1])
{
if (A[0] > A[2])
{
i0 = 1; /* A[0] is greatest */
i1 = 2;
}
else
{
i0 = 0; /* A[2] is greatest */
i1 = 1;
}
}
else /* A[0]<=A[1] */
{
if (A[2] > A[1])
{
i0 = 0; /* A[2] is greatest */
i1 = 1;
}
else
{
i0 = 0; /* A[1] is greatest */
i1 = 2;
}
}
/* test all edges of triangle 1 against the edges of triangle 2 */
EDGE_AGAINST_TRI_EDGES(V0, V1, U0, U1, U2);
EDGE_AGAINST_TRI_EDGES(V1, V2, U0, U1, U2);
EDGE_AGAINST_TRI_EDGES(V2, V0, U0, U1, U2);
/* finally, test if tri1 is totally contained in tri2 or vice versa */
POINT_IN_TRI(V0, U0, U1, U2);
POINT_IN_TRI(U0, V0, V1, V2);
return 0;
}
__device__ int intersect = false;
__global__ void triangle_triangle_GPU(int3* cudaInsideTriangles, float3* cudaInsideVertices, int3* cudaOutsideTriangles, float3* cudaOutsideVertices, int* cudaIntersectionsPerInsideTriangle, int numberOfInsideTriangles, int numberOfOutsideTriangles) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < numberOfInsideTriangles)
{
float vert1_1[3] = { cudaInsideVertices[cudaInsideTriangles[tid].x].x, cudaInsideVertices[cudaInsideTriangles[tid].x].y, cudaInsideVertices[cudaInsideTriangles[tid].x].z };
float vert1_2[3] = { cudaInsideVertices[cudaInsideTriangles[tid].y].x, cudaInsideVertices[cudaInsideTriangles[tid].y].y, cudaInsideVertices[cudaInsideTriangles[tid].y].z };
float vert1_3[3] = { cudaInsideVertices[cudaInsideTriangles[tid].z].x, cudaInsideVertices[cudaInsideTriangles[tid].z].y, cudaInsideVertices[cudaInsideTriangles[tid].z].z };
int numberOfIntersections = 0;
for (int i = 0; i < numberOfOutsideTriangles; i++)
{
if (!intersect) {
float vert2_1[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].x].x, cudaOutsideVertices[cudaOutsideTriangles[i].x].y, cudaOutsideVertices[cudaOutsideTriangles[i].x].z };
float vert2_2[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].y].x, cudaOutsideVertices[cudaOutsideTriangles[i].y].y, cudaOutsideVertices[cudaOutsideTriangles[i].y].z };
float vert2_3[3] = { cudaOutsideVertices[cudaOutsideTriangles[i].z].x, cudaOutsideVertices[cudaOutsideTriangles[i].z].y, cudaOutsideVertices[cudaOutsideTriangles[i].z].z };
float t, u, v;
if (NoDivTriTriIsect(vert1_1, vert1_2, vert1_3, vert2_1, vert2_2, vert2_3) == 1)
{
numberOfIntersections++;
intersect = true;
//cudaIntersectionsPerInsideTriangle[tid] = 1; // Faster if you leave this out when the meshes do not intersect ==> doing it here makes it slower when the meshes lie inside each other
}
if(intersect){ cudaIntersectionsPerInsideTriangle[tid] = 1; } // Faster if you leave this out when the meshes do not intersect
}
else {
return;
}
}
//printf("numberOfIntersections = %d\n", numberOfIntersections);
cudaIntersectionsPerInsideTriangle[tid] = numberOfIntersections;
}
} |
39056f137e8a7b234c58e82ed16a03e6c866d53e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
#define CUDA_CHECK_RETURN(value) {\
hipError_t _m_cudaStat = value;\
if (_m_cudaStat != hipSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
__global__ void transpose(float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + bx * blockDim.x;
int j = ty + by * blockDim.y;
B[j * n + i] = A[i * n + j];
}
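// Editorial note (added for clarity, not in the original source): each thread
// reads A[i * n + j] and writes B[j * n + i]. Within a warp, i (derived from
// threadIdx.x) is the fast-varying index, so the writes to B are contiguous
// while the reads from A are strided by n; a shared-memory tile is the usual
// way to make both sides coalesced. Note also that main() launches exactly
// size/threads blocks per dimension and the kernel has no bounds check, so
// size is assumed to be a multiple of the block dimensions.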
void InitMatrix(float *A, float *B, int size)
{
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
int k = size * i + j;
A[k] = k;
B[k] = 0;
}
}
void printMatrix(float *C, int size)
{
for (int i = 0; i < size * size; i++)
cout << C[i] << "\t";
cout << endl;
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
int main(int argc, char* argv[])
{
if (argc != 4) {
cout << "launch parameters: [matrix size] [threads_x] [threads_y]" << endl;
return 1;
}
int size = atoi(argv[1]);
int threads_per_block_x = atoi(argv[2]);
int threads_per_block_y = atoi(argv[3]);
srand(time(NULL));
float *A = new float[size * size];
float *B = new float[size * size];
float *dev_A, *dev_B;
hipMalloc((void**)&dev_A, size * size * sizeof(float));
hipMalloc((void**)&dev_B, size * size * sizeof(float));
InitMatrix(A, B, size);
dim3 threads(threads_per_block_x, threads_per_block_y);
dim3 blocks(size / threads.x, size / threads.y);
hipMemcpy(dev_A, A, size * size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_B, B, size * size * sizeof(float), hipMemcpyHostToDevice);
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( transpose) , dim3(blocks), dim3(threads) , 0, 0, dev_A, dev_B, size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipGetLastError());
hipEventElapsedTime(&elapsedTime, start, stop);
hipMemcpy(B, dev_B, size * size * sizeof(float), hipMemcpyDeviceToHost);
printMatrix(B, size);
cout << "time: " << elapsedTime << " ms" << endl;
delete [] A; delete [] B;
hipEventDestroy(start); hipEventDestroy(stop);
hipFree(dev_A); hipFree(dev_B);
return 0;
}
| 39056f137e8a7b234c58e82ed16a03e6c866d53e.cu | #include <iostream>
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
using namespace std;
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
__global__ void transpose(float *A, float *B, int n)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx + bx * blockDim.x;
int j = ty + by * blockDim.y;
B[j * n + i] = A[i * n + j];
}
void InitMatrix(float *A, float *B, int size)
{
for (int i = 0; i < size; i++)
for (int j = 0; j < size; j++) {
int k = size * i + j;
A[k] = k;
B[k] = 0;
}
}
void printMatrix(float *C, int size)
{
for (int i = 0; i < size * size; i++)
cout << C[i] << "\t";
cout << endl;
}
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
int main(int argc, char* argv[])
{
if (argc != 4) {
cout << "launch parameters: [matrix size] [threads_x] [threads_y]" << endl;
return 1;
}
int size = atoi(argv[1]);
int threads_per_block_x = atoi(argv[2]);
int threads_per_block_y = atoi(argv[3]);
srand(time(NULL));
float *A = new float[size * size];
float *B = new float[size * size];
float *dev_A, *dev_B;
cudaMalloc((void**)&dev_A, size * size * sizeof(float));
cudaMalloc((void**)&dev_B, size * size * sizeof(float));
InitMatrix(A, B, size);
dim3 threads(threads_per_block_x, threads_per_block_y);
dim3 blocks(size / threads.x, size / threads.y);
cudaMemcpy(dev_A, A, size * size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, B, size * size * sizeof(float), cudaMemcpyHostToDevice);
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
transpose <<< blocks, threads >>> (dev_A, dev_B, size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaGetLastError());
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(B, dev_B, size * size * sizeof(float), cudaMemcpyDeviceToHost);
printMatrix(B, size);
cout << "time: " << elapsedTime << " ms" << endl;
delete [] A; delete [] B;
cudaEventDestroy(start); cudaEventDestroy(stop);
cudaFree(dev_A); cudaFree(dev_B);
return 0;
}
|
876087896ff9e7e79f174de2c00c11db07107bf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "constants.h"
//#include "boost/date_time/posix_time/posix_time.hpp"
using namespace std;
using namespace PARAMS;
//Pre: x is defined.
//Post: Converts x from an image array pixel index to
// the real part of the complex graph location that the
// pixel represents.
inline __device__ float pixelXToComplexReal(uint x, int w) {
return(((x / ((float) w)) * SIZE) + START_X - (SIZE / 2.f));
}
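/* Editorial worked example (values assumed, not taken from constants.h): with
   SIZE = 3.0f and START_X = -0.5f, a pixel column x in an image of width w maps
   to real = (x / w) * 3 - 0.5 - 1.5, so x = 0 -> -2.0 and x = w -> +1.0, i.e.
   the classic Mandelbrot real-axis window [-2, 1]. */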
//Pre: y is defined.
//Post: Converts y from an image array pixel index to the
// imaginary part of the complex graph location that the
// pixel represents.
// NOTE: The y axis is inverted. (i.e. y = 0 is top of image)
inline __device__ float pixelYToComplexImag(uint y, int h, float vert_size) {
return((-(y/((float) h)) * vert_size) + START_Y + (vert_size / 2.f));
}
//Pre: x and y are defined and are the matrix indices of an image.
// w, h are width and height of image.
// max_depth is the maximum recursive formula depth.
// vert_size is the complex vertical size of the window.
//Post: Computes the pixel value for the Mandelbrot set at
// the given pixel.
inline __device__ uchar getPixelValue(uint x, uint y, int w, int h,
int max_depth, float vert_size) {
float real = pixelXToComplexReal(x, w);
float imag = pixelYToComplexImag(y, h, vert_size);
float init_real = real;
float init_imag = imag;
int i;
for(i = 0; i < max_depth; i++) {
if(ABS(real, imag) > EXCEED_VALUE)
break;
float oldReal = real;
real = MAND_REAL(real, imag, init_real);
imag = MAND_IMAG(oldReal, imag, init_imag);
}
uchar value = (uchar) ((i / ((float)max_depth)) * COLOR_MAX);
return(value);
}
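/* Editorial note: MAND_REAL, MAND_IMAG, ABS and EXCEED_VALUE come from
   constants.h, which is not shown here. For the standard escape-time iteration
   z <- z^2 + c they would presumably expand to real*real - imag*imag + init_real
   and 2*oldReal*imag + init_imag, with ABS(real, imag) measuring the magnitude
   of z against EXCEED_VALUE. The returned byte scales the iteration count at
   which z escaped into the range [0, COLOR_MAX]. */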
//Pre: image is defined and has length lenImage.
// w, h are width and height of image.
// max_depth is the maximum recursive formula depth.
// vert_size is the complex vertical size of the window.
//Post: Modifies the elements in image to be a grayscale Mandelbrot
// image.
__global__ void mand(uchar * image, int lenImage, int w, int h,
int depth, float vert_size) {
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i < lenImage) {
int x = i % w;
int y = i / w;
image[i] = getPixelValue(x, y, w, h, depth, vert_size);
}
}
void printASCIISet(uchar * image) {
int row = 0;
int col = 0;
for(int i = 0; i < height * width; i++) {
if(image[i] > 225)
cout << "O ";
else if(image[i] > 50)
cout << "o ";
else if(image[i] > 5)
cout << ". ";
else
cout << " ";
col++;
if(col == width) {
cout << endl;
row++;
col = 0;
}
//cout << (int) image[row][col] << endl;
}
}
//#define BLOCK_SIZE 16
#define NUM_THREADS 512
#include <fstream>
void writeToFile(uchar * image, char * filename) {
ofstream out(filename);
out << "P6\n";
out << width << ' ' << height << endl;
out << "255" << endl;
// unsigned char curr;
// unsigned int count = 0;
for(int i = 0; i < width * height; i++) {
out << image[i] << image[i] << image[i];
// if(count == 0) {
// curr = image[i];
// count = 1;
// } else if(curr != image[i]) {
// out << count << endl;
// out << (int) curr << endl;
// count = 0;
// } else {
// ++count;
// }
}
// out << 0 << endl;
}
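/* Editorial note: the "P6" header declares a binary RGB PPM, and the loop above
   writes the single grayscale byte three times per pixel, producing a gray RGB
   image of width * height pixels. The commented-out code looks like the remains
   of an earlier run-length-encoded output path. */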
int main(int argc, char ** argv) {
if(argc == 4) {
setParams(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
}
//Uncomment to get rid of timing the runtime initialization:
//===
// int * dummy;
// hipMalloc(&dummy, 0);
// hipFree(dummy);
//===
// boost::posix_time::ptime t1(boost::posix_time::microsec_clock::local_time());
int lenImage = height * width;
//Create a greyscale image on HOST:
uchar * image = (uchar*) malloc(sizeof(uchar) * lenImage);
//Create a greyscale image on GPU:
uchar * gpuImage;
hipMalloc(&gpuImage, sizeof(uchar) * lenImage);
//define the number of blocks:
int numBlocks = (lenImage / NUM_THREADS) + 1;
//Activate kernel
// cerr << "blocks: " << numBlocks << endl;
hipLaunchKernelGGL(( mand), dim3(numBlocks), dim3(NUM_THREADS), 0, 0, gpuImage, lenImage, width, height,
max_depth, vert_size);
//Copy mand image back to host
hipMemcpy(image, gpuImage, sizeof(uchar) * lenImage,
hipMemcpyDeviceToHost);
//TEMPORARY CHECK:
//===
//printASCIISet(image);
//===
//WRITE IMAGE:
//===
// cerr << "Saving...................." << endl;
// writeToFile(image, (char*) "mand.ppm");
//===
//Cleanup...
free(image); // image was allocated with malloc(), so free() (not delete[]) is the matching deallocation
hipFree(gpuImage);
//Compute time:
// boost::posix_time::ptime t2(boost::posix_time::microsec_clock::local_time());
// boost::posix_time::time_duration duration = t2 - t1;
// long micro = duration.total_microseconds();
// double sec = micro / 1000000.;
// cout << sec << endl;
return(0);
}
| 876087896ff9e7e79f174de2c00c11db07107bf8.cu | #include <iostream>
#include "constants.h"
//#include "boost/date_time/posix_time/posix_time.hpp"
using namespace std;
using namespace PARAMS;
//Pre: x is defined.
//Post: Converts x from an image array pixel index to
// the real part of the complex graph location that the
// pixel represents.
inline __device__ float pixelXToComplexReal(uint x, int w) {
return(((x / ((float) w)) * SIZE) + START_X - (SIZE / 2.f));
}
//Pre: y is defined.
//Post: Converts y from an image array pixel index to the
// imaginary part of the complex graph location that the
// pixel represents.
// NOTE: The y axis is inverted. (i.e. y = 0 is top of image)
inline __device__ float pixelYToComplexImag(uint y, int h, float vert_size) {
return((-(y/((float) h)) * vert_size) + START_Y + (vert_size / 2.f));
}
//Pre: x and y are defined and are the matrix indices of an image.
// w, h are width and height of image.
// max_depth is the maximum recursive formula depth.
// vert_size is the complex vertical size of the window.
//Post: Computes the pixel value for the Mandelbrot set at
// the given pixel.
inline __device__ uchar getPixelValue(uint x, uint y, int w, int h,
int max_depth, float vert_size) {
float real = pixelXToComplexReal(x, w);
float imag = pixelYToComplexImag(y, h, vert_size);
float init_real = real;
float init_imag = imag;
int i;
for(i = 0; i < max_depth; i++) {
if(ABS(real, imag) > EXCEED_VALUE)
break;
float oldReal = real;
real = MAND_REAL(real, imag, init_real);
imag = MAND_IMAG(oldReal, imag, init_imag);
}
uchar value = (uchar) ((i / ((float)max_depth)) * COLOR_MAX);
return(value);
}
//Pre: image is defined and has length lenImage.
// w, h are width and height of image.
// max_depth is the maximum recursive formula depth.
// vert_size is the complex vertical size of the window.
//Post: Modifies the elements in image to be a grayscale Mandelbrot
// image.
__global__ void mand(uchar * image, int lenImage, int w, int h,
int depth, float vert_size) {
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i < lenImage) {
int x = i % w;
int y = i / w;
image[i] = getPixelValue(x, y, w, h, depth, vert_size);
}
}
void printASCIISet(uchar * image) {
int row = 0;
int col = 0;
for(int i = 0; i < height * width; i++) {
if(image[i] > 225)
cout << "O ";
else if(image[i] > 50)
cout << "o ";
else if(image[i] > 5)
cout << ". ";
else
cout << " ";
col++;
if(col == width) {
cout << endl;
row++;
col = 0;
}
//cout << (int) image[row][col] << endl;
}
}
//#define BLOCK_SIZE 16
#define NUM_THREADS 512
#include <fstream>
void writeToFile(uchar * image, char * filename) {
ofstream out(filename);
out << "P6\n";
out << width << ' ' << height << endl;
out << "255" << endl;
// unsigned char curr;
// unsigned int count = 0;
for(int i = 0; i < width * height; i++) {
out << image[i] << image[i] << image[i];
// if(count == 0) {
// curr = image[i];
// count = 1;
// } else if(curr != image[i]) {
// out << count << endl;
// out << (int) curr << endl;
// count = 0;
// } else {
// ++count;
// }
}
// out << 0 << endl;
}
int main(int argc, char ** argv) {
if(argc == 4) {
setParams(atoi(argv[1]), atoi(argv[2]), atoi(argv[3]));
}
//Uncomment to get rid of timing the runtime initialization:
//===
// int * dummy;
// cudaMalloc(&dummy, 0);
// cudaFree(dummy);
//===
// boost::posix_time::ptime t1(boost::posix_time::microsec_clock::local_time());
int lenImage = height * width;
//Create a greyscale image on HOST:
uchar * image = (uchar*) malloc(sizeof(uchar) * lenImage);
//Create a greyscale image on GPU:
uchar * gpuImage;
cudaMalloc(&gpuImage, sizeof(uchar) * lenImage);
//define the number of blocks:
int numBlocks = (lenImage / NUM_THREADS) + 1;
//Activate kernel
// cerr << "blocks: " << numBlocks << endl;
mand<<<numBlocks, NUM_THREADS>>> (gpuImage, lenImage, width, height,
max_depth, vert_size);
//Copy mand image back to host
cudaMemcpy(image, gpuImage, sizeof(uchar) * lenImage,
cudaMemcpyDeviceToHost);
//TEMPORARY CHECK:
//===
//printASCIISet(image);
//===
//WRITE IMAGE:
//===
// cerr << "Saving...................." << endl;
// writeToFile(image, (char*) "mand.ppm");
//===
//Cleanup...
free(image); // image was allocated with malloc(), so free() (not delete[]) is the matching deallocation
cudaFree(gpuImage);
//Compute time:
// boost::posix_time::ptime t2(boost::posix_time::microsec_clock::local_time());
// boost::posix_time::time_duration duration = t2 - t1;
// long micro = duration.total_microseconds();
// double sec = micro / 1000000.;
// cout << sec << endl;
return(0);
}
|
5021e1a1703c1bdc1a96f159f299bb453e37edab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgeqr2_kernels.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
static double neg_one = MAGMA_D_NEG_ONE;
static double one = MAGMA_D_ONE;
static double zero = MAGMA_D_ZERO;
__global__ void
dgeqrf_copy_upper_kernel_batched(
int n, int nb,
double **dV_array, int ldv,
double **dR_array, int ldr)
{
double *dV = dV_array[blockIdx.x];
double *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
if( tid < n && column < n)
{
for(int i=column; i<n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
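/* Editorial worked example (added for clarity): with nb = 2 and n = 6, the
   thread for row tid = 3 computes column = ((3 / 2) + 1) * 2 = 4 and copies
   columns 4 and 5 of its row, i.e. everything to the right of the 2x2 diagonal
   block that contains row 3. */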
void dgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
double **dV_array, magma_int_t ldv,
double **dR_array, magma_int_t ldr,
magma_int_t batchCount, magma_queue_t queue)
{
/*
copy, for each row of dV, the entries to the right of its nb-by-nb diagonal block (the upper block-triangular part) into dR
*/
if( nb >= n) return ;
hipLaunchKernelGGL(( dgeqrf_copy_upper_kernel_batched), dim3(batchCount), dim3(n), 0, queue, n, nb, dV_array, ldv, dR_array, ldr);
}
extern "C" magma_int_t
magma_dlarfb_dgemm_batched(
hipblasHandle_t myhandle,
magma_int_t m, magma_int_t n, magma_int_t k,
double **dV_array, magma_int_t ldv,
double **dT_array, magma_int_t ldt,
double **dA_array, magma_int_t lda,
double **W_array, magma_int_t ldw,
double **W2_array, magma_int_t ldw2,
magma_int_t batchCount, magma_queue_t queue)
{
// W is workspace; the size of W is nb * n
// W = V^H * A. V is stored in A(i:m, i:ib)
if( m <=0 || n <= 0 || k <=0 ) return 1;
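/* Editorial note (added for clarity, not in the original source): the three
   batched GEMMs below apply the block reflector accumulated in T, i.e.
       A <- A - V * (T^H * (V^H * A)) = (I - V T^H V^H) * A,
   written out as W = V^H A, W2 = T^H W, A = A - V W2. */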
#if 1 // CUBLAS is faster than MAGMABLAS by 17GFLOP/S at size 512 batchCount = 2000
hipblasDgemmBatched(myhandle, HIPBLAS_OP_C, HIPBLAS_OP_N, k, n, m,
&one, (const double**) dV_array, ldv,
(const double**) dA_array, lda,
&zero, W_array, ldw, batchCount );
// W2 = T^H * W
hipblasDgemmBatched(myhandle, HIPBLAS_OP_C, HIPBLAS_OP_N, k, n, k,
&one, (const double**) dT_array, ldt,
(const double**) W_array, ldw,
&zero, W2_array, ldw2, batchCount );
// A = A - V * W2
hipblasDgemmBatched(myhandle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k,
&neg_one, (const double**) dV_array, ldv,
(const double**) W2_array, ldw2,
&one, dA_array, lda, batchCount );
#else
magmablas_dgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, m,
one, (const double**) dV_array, ldv,
(const double**) dA_array, lda,
zero, W_array, ldw, batchCount );
// W2 = T^H * W
magmablas_dgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, k,
one, (const double**) dT_array, ldt,
(const double**) W_array, ldw,
zero, W2_array, ldw2, batchCount );
// A = A - V * W2
magmablas_dgemm_batched(MagmaNoTrans, MagmaNoTrans, m, n, k,
neg_one, (const double**) dV_array, ldv,
(const double**) W2_array, ldw2,
one, dA_array, lda, batchCount );
#endif
return 0;
}
| 5021e1a1703c1bdc1a96f159f299bb453e37edab.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2011
@author Azzam Haidar
@author Tingxing Dong
@generated from zgeqr2_kernels.cu normal z -> d, Fri Jan 30 19:00:10 2015
*/
#include "common_magma.h"
#include "batched_kernel_param.h"
static double neg_one = MAGMA_D_NEG_ONE;
static double one = MAGMA_D_ONE;
static double zero = MAGMA_D_ZERO;
__global__ void
dgeqrf_copy_upper_kernel_batched(
int n, int nb,
double **dV_array, int ldv,
double **dR_array, int ldr)
{
double *dV = dV_array[blockIdx.x];
double *dR = dR_array[blockIdx.x];
int tid = threadIdx.x;
int column = (tid / nb + 1) * nb;
if( tid < n && column < n)
{
for(int i=column; i<n; i++)
{
dR[tid + i * ldr] = dV[tid + i * ldv];
}
}
}
void dgeqrf_copy_upper_batched(
magma_int_t n, magma_int_t nb,
double **dV_array, magma_int_t ldv,
double **dR_array, magma_int_t ldr,
magma_int_t batchCount, magma_queue_t queue)
{
/*
copy, for each row of dV, the entries to the right of its nb-by-nb diagonal block (the upper block-triangular part) into dR
*/
if( nb >= n) return ;
dgeqrf_copy_upper_kernel_batched<<<batchCount, n, 0, queue>>>(n, nb, dV_array, ldv, dR_array, ldr);
}
extern "C" magma_int_t
magma_dlarfb_dgemm_batched(
cublasHandle_t myhandle,
magma_int_t m, magma_int_t n, magma_int_t k,
double **dV_array, magma_int_t ldv,
double **dT_array, magma_int_t ldt,
double **dA_array, magma_int_t lda,
double **W_array, magma_int_t ldw,
double **W2_array, magma_int_t ldw2,
magma_int_t batchCount, magma_queue_t queue)
{
// W is workspace; the size of W is nb * n
// W = V^H * A. V is stored in A(i:m, i:ib)
if( m <=0 || n <= 0 || k <=0 ) return 1;
#if 1 // CUBLAS is faster than MAGMABLAS by 17GFLOP/S at size 512 batchCount = 2000
cublasDgemmBatched(myhandle, CUBLAS_OP_C, CUBLAS_OP_N, k, n, m,
&one, (const double**) dV_array, ldv,
(const double**) dA_array, lda,
&zero, W_array, ldw, batchCount );
// W2 = T^H * W
cublasDgemmBatched(myhandle, CUBLAS_OP_C, CUBLAS_OP_N, k, n, k,
&one, (const double**) dT_array, ldt,
(const double**) W_array, ldw,
&zero, W2_array, ldw2, batchCount );
// A = A - V * W2
cublasDgemmBatched(myhandle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
&neg_one, (const double**) dV_array, ldv,
(const double**) W2_array, ldw2,
&one, dA_array, lda, batchCount );
#else
magmablas_dgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, m,
one, (const double**) dV_array, ldv,
(const double**) dA_array, lda,
zero, W_array, ldw, batchCount );
// W2 = T^H * W
magmablas_dgemm_batched(MagmaConjTrans, MagmaNoTrans, k, n, k,
one, (const double**) dT_array, ldt,
(const double**) W_array, ldw,
zero, W2_array, ldw2, batchCount );
// A = A - V * W2
magmablas_dgemm_batched(MagmaNoTrans, MagmaNoTrans, m, n, k,
neg_one, (const double**) dV_array, ldv,
(const double**) W2_array, ldw2,
one, dA_array, lda, batchCount );
#endif
return 0;
}
|
3f1c15083766d8a99b52f0822e1e96be97cc907f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sp.h"
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#ifndef BAD_TOPOLOGY_LABEL
#define BAD_TOPOLOGY_LABEL -2
#endif
#ifndef NUM_OF_CHANNELS
#define NUM_OF_CHANNELS 3
#endif
#ifndef USE_COUNTS
#define USE_COUNTS 1
#endif
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
/*
* Authors:
* Oren Freifeld, [email protected]
* Yixin Li, Email: [email protected]
*/
__global__ void find_border_pixels( int* seg, bool* border, int nPts, int xdim, int ydim, const int single_border){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx>=nPts)
return;
border[idx]=0; // init
int x = idx % xdim;
int y = idx / xdim;
int C = seg[idx]; // center
int N,S,E,W; // north, south, east,west
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init
if (y>1){
N = seg[idx-xdim]; // above
}
if (x>1){
W = seg[idx-1]; // left
}
if (y<ydim-1){
S = seg[idx+xdim]; // below
}
if (x<xdim-1){
E = seg[idx+1]; // right
}
// If the nbr is different from the central pixel and is not out-of-bounds,
// then it is a border pixel.
if ((N>=0 && C!=N) || (S>=0 && C!=S) || (E>=0 && C!=E) || (W>=0 && C!=W) ){
if (single_border){
if (N>=0 && C>N) border[idx]=1;
if (S>=0 && C>S) border[idx]=1;
if (E>=0 && C>E) border[idx]=1;
if (W>=0 && C>W) border[idx]=1;
}else{
border[idx]=1;
}
}
return;
}
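// Editorial note (added for clarity): with single_border == 0 every pixel that
// touches a different label is flagged, so a boundary between two superpixels
// is marked on both of its sides; with single_border != 0 only the side that
// carries the larger label (C > neighbour) is flagged, so each boundary is
// marked once.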
/*
* Update the superpixel labels for pixels
* that are on the boundary of the superpixels
* and on the (xmod3, ymod3) position of 3*3 block
*/
__global__ void update_seg_subset(
double* img, int* seg, const bool* border,
const int * counts, const double * log_counts,
const double* mu_i, const double* mu_s,
const double* J_i, const double* J_s,
const double* logdet_Sigma_i, const double* logdet_Sigma_s,
const int nPts,
const int xdim, const int ydim,
const int xmod3, const int ymod3,
const int nSuperpixels,
const bool calculate_cov,
const int s_std, const int i_std,
const double prior_weight)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx>=nPts)
return;
if (border[idx]==0) return;
int x = idx % xdim;
if (x % 3 != xmod3) return;
int y = idx / xdim;
if (y % 3 != ymod3) return;
const bool x_greater_than_1 = x>1;
const bool y_greater_than_1 = y>1;
const bool x_smaller_than_xdim_minus_1 = x<xdim-1;
const bool y_smaller_than_ydim_minus_1 = y<ydim-1;
int C = seg[idx]; // center
int N,S,E,W; // north, south, east,west
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[8];
double* imgC = img + idx * NUM_OF_CHANNELS;
// means
const double* mu_i_N;
const double* mu_i_S;
const double* mu_i_E;
const double* mu_i_W;
const double* mu_s_N;
const double* mu_s_S;
const double* mu_s_E;
const double* mu_s_W;
// Inv Cov
const double* J_i_N;
const double* J_i_S;
const double* J_i_E;
const double* J_i_W;
const double* J_s_N;
const double* J_s_S;
const double* J_s_E;
const double* J_s_W;
bool isNvalid = 0;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0;
// In the implementation below, if the label of the center pixel is
// different from the labels of all its (4-conn) nbrs -- that is, it is
// a single-pixel superpixel -- then we allow it to die off despite the fact
// that this "changes the connectivity" of this superpixel.
if (x_greater_than_1){
N = seg[idx-xdim]; // the label, above
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,N);
isNvalid=ischangbale_by_nbrs(nbrs);
if (isNvalid){
mu_i_N = mu_i + N * NUM_OF_CHANNELS;
mu_s_N = mu_s + N * 2;
if (calculate_cov){
J_i_N = J_i + N * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_N = J_s + N * 4;
}
}
else{
N=BAD_TOPOLOGY_LABEL;
if (N==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (y_greater_than_1){
W = seg[idx-1]; // left
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,W);
isWvalid=ischangbale_by_nbrs(nbrs);
if (isWvalid){
mu_i_W = mu_i + W * NUM_OF_CHANNELS;
mu_s_W = mu_s + W * 2;
if (calculate_cov){
J_i_W = J_i + W * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_W = J_s + W * 4;
}
}
else{
W=BAD_TOPOLOGY_LABEL;
if (W==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (y_smaller_than_ydim_minus_1){
S = seg[idx+xdim]; // below
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,S);
isSvalid=ischangbale_by_nbrs(nbrs);
if (isSvalid){
mu_i_S = mu_i + S * NUM_OF_CHANNELS;
mu_s_S = mu_s + S * 2;
if (calculate_cov){
J_i_S = J_i + S * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_S = J_s + S * 4;
}
}
else{
S=BAD_TOPOLOGY_LABEL;
if (S==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (x_smaller_than_xdim_minus_1){
E = seg[idx+1]; // right
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,E);
isEvalid=ischangbale_by_nbrs(nbrs);
if (isEvalid){
mu_i_E = mu_i + E * NUM_OF_CHANNELS;
mu_s_E = mu_s + E * 2;
if (calculate_cov){
J_i_E = J_i + E * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_E = J_s + E * 4;
}
}
else{
E=BAD_TOPOLOGY_LABEL;
if (E==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
double pt[2];
pt[0]=(double)x;
pt[1]=(double)y;
//---------------
// log-likelihood (ignoring constants)
//---------------
double resN = cal_posterior(isNvalid, N, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_N, mu_s_N, J_i_N, J_s_N, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
double resS = cal_posterior(isSvalid, S, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_S, mu_s_S, J_i_S, J_s_S, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
double resE = cal_posterior(isEvalid, E, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_E, mu_s_E, J_i_E, J_s_E, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std , false);
double resW = cal_posterior(isWvalid, W, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_W, mu_s_W, J_i_W, J_s_W, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
bool all_are_valid = (isNvalid || N==OUT_OF_BOUNDS_LABEL) &&
(isSvalid || S==OUT_OF_BOUNDS_LABEL) &&
(isEvalid || E==OUT_OF_BOUNDS_LABEL) &&
(isWvalid || W==OUT_OF_BOUNDS_LABEL);
if (!all_are_valid) return;
//double res_max = -1; // some small negative number (use when working with the likelihood, l)
double res_max = log(.000000000000000001); // (use when working with the log-likelihood, ll)
int arg_max = C; // i.e., no change
// In the tests below, the order matters:
// E.g., testing (res_max<resN && isNvalid) is wrong!
// The reason is that if N is not valid, then resN (and hence the test res_max<resN) has no meaning.
// The correct test is thus isNvalid && res_max<resN.
if (isNvalid && res_max<resN ){
res_max=resN;
arg_max=N;
}
if (isSvalid && res_max<resS ){
res_max=resS;
arg_max=S;
}
if (isEvalid && res_max<resE){
res_max=resE;
arg_max=E;
}
if (isWvalid && res_max<resW){
res_max=resW;
arg_max=W;
}
// update seg
seg[idx]=arg_max;
return;
}
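/* Editorial sketch (not part of the original source): one plausible way a host
 * could drive the kernel above. The 3x3 (xmod3, ymod3) sweep guarantees that
 * two pixels updated in the same pass are at least three apart in x and y, so
 * no pixel reads a neighbour label that another thread is rewriting in that
 * pass. All names and the block size below are assumptions for illustration. */
static void update_seg_all_subsets_sketch(
        double* img, int* seg, const bool* border,
        const int* counts, const double* log_counts,
        const double* mu_i, const double* mu_s,
        const double* J_i, const double* J_s,
        const double* logdet_Sigma_i, const double* logdet_Sigma_s,
        int nPts, int xdim, int ydim, int nSuperpixels,
        bool calculate_cov, int s_std, int i_std, double prior_weight)
{
    const int threads = 256;
    const int blocks = (nPts + threads - 1) / threads;
    for (int ymod3 = 0; ymod3 < 3; ymod3++){
        for (int xmod3 = 0; xmod3 < 3; xmod3++){
            hipLaunchKernelGGL(( update_seg_subset), dim3(blocks), dim3(threads), 0, 0,
                img, seg, border, counts, log_counts, mu_i, mu_s, J_i, J_s,
                logdet_Sigma_i, logdet_Sigma_s, nPts, xdim, ydim,
                xmod3, ymod3, nSuperpixels, calculate_cov, s_std, i_std, prior_weight);
        }
    }
}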
| 3f1c15083766d8a99b52f0822e1e96be97cc907f.cu | #include "sp.h"
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
#ifndef BAD_TOPOLOGY_LABEL
#define BAD_TOPOLOGY_LABEL -2
#endif
#ifndef NUM_OF_CHANNELS
#define NUM_OF_CHANNELS 3
#endif
#ifndef USE_COUNTS
#define USE_COUNTS 1
#endif
#ifndef OUT_OF_BOUNDS_LABEL
#define OUT_OF_BOUNDS_LABEL -1
#endif
/*
* Authors:
* Oren Freifeld, [email protected]
* Yixin Li, Email: [email protected]
*/
__global__ void find_border_pixels( int* seg, bool* border, int nPts, int xdim, int ydim, const int single_border){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx>=nPts)
return;
border[idx]=0; // init
int x = idx % xdim;
int y = idx / xdim;
int C = seg[idx]; // center
int N,S,E,W; // north, south, east,west
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init
if (y>1){
N = seg[idx-xdim]; // above
}
if (x>1){
W = seg[idx-1]; // left
}
if (y<ydim-1){
S = seg[idx+xdim]; // below
}
if (x<xdim-1){
E = seg[idx+1]; // right
}
// If the nbr is different from the central pixel and is not out-of-bounds,
// then it is a border pixel.
if ((N>=0 && C!=N) || (S>=0 && C!=S) || (E>=0 && C!=E) || (W>=0 && C!=W) ){
if (single_border){
if (N>=0 && C>N) border[idx]=1;
if (S>=0 && C>S) border[idx]=1;
if (E>=0 && C>E) border[idx]=1;
if (W>=0 && C>W) border[idx]=1;
}else{
border[idx]=1;
}
}
return;
}
/*
* Update the superpixel labels for pixels
* that are on the boundary of the superpixels
* and on the (xmod3, ymod3) position of 3*3 block
*/
__global__ void update_seg_subset(
double* img, int* seg, const bool* border,
const int * counts, const double * log_counts,
const double* mu_i, const double* mu_s,
const double* J_i, const double* J_s,
const double* logdet_Sigma_i, const double* logdet_Sigma_s,
const int nPts,
const int xdim, const int ydim,
const int xmod3, const int ymod3,
const int nSuperpixels,
const bool calculate_cov,
const int s_std, const int i_std,
const double prior_weight)
{
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx>=nPts)
return;
if (border[idx]==0) return;
int x = idx % xdim;
if (x % 3 != xmod3) return;
int y = idx / xdim;
if (y % 3 != ymod3) return;
const bool x_greater_than_1 = x>1;
const bool y_greater_than_1 = y>1;
const bool x_smaller_than_xdim_minus_1 = x<xdim-1;
const bool y_smaller_than_ydim_minus_1 = y<ydim-1;
int C = seg[idx]; // center
int N,S,E,W; // north, south, east,west
N=S=W=E=OUT_OF_BOUNDS_LABEL; // init to out-of-bounds
bool nbrs[8];
double* imgC = img + idx * NUM_OF_CHANNELS;
// means
const double* mu_i_N;
const double* mu_i_S;
const double* mu_i_E;
const double* mu_i_W;
const double* mu_s_N;
const double* mu_s_S;
const double* mu_s_E;
const double* mu_s_W;
// Inv Cov
const double* J_i_N;
const double* J_i_S;
const double* J_i_E;
const double* J_i_W;
const double* J_s_N;
const double* J_s_S;
const double* J_s_E;
const double* J_s_W;
bool isNvalid = 0;
bool isSvalid = 0;
bool isEvalid = 0;
bool isWvalid = 0;
// In the implementation below, if the label of the center pixel is
// different from the labels of all its (4-conn) nbrs -- that is, it is
// a single-pixel superpixel -- then we allow it to die off despite the fact
// that this "changes the connectivity" of this superpixel.
if (x_greater_than_1){
N = seg[idx-xdim]; // the label, above
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,N);
isNvalid=ischangbale_by_nbrs(nbrs);
if (isNvalid){
mu_i_N = mu_i + N * NUM_OF_CHANNELS;
mu_s_N = mu_s + N * 2;
if (calculate_cov){
J_i_N = J_i + N * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_N = J_s + N * 4;
}
}
else{
N=BAD_TOPOLOGY_LABEL;
if (N==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (y_greater_than_1){
W = seg[idx-1]; // left
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,W);
isWvalid=ischangbale_by_nbrs(nbrs);
if (isWvalid){
mu_i_W = mu_i + W * NUM_OF_CHANNELS;
mu_s_W = mu_s + W * 2;
if (calculate_cov){
J_i_W = J_i + W * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_W = J_s + W * 4;
}
}
else{
W=BAD_TOPOLOGY_LABEL;
if (W==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (y_smaller_than_ydim_minus_1){
S = seg[idx+xdim]; // below
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,S);
isSvalid=ischangbale_by_nbrs(nbrs);
if (isSvalid){
mu_i_S = mu_i + S * NUM_OF_CHANNELS;
mu_s_S = mu_s + S * 2;
if (calculate_cov){
J_i_S = J_i + S * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_S = J_s + S * 4;
}
}
else{
S=BAD_TOPOLOGY_LABEL;
if (S==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
if (x_smaller_than_xdim_minus_1){
E = seg[idx+1]; // right
set_nbrs(idx,xdim,ydim,x_greater_than_1,y_greater_than_1,x_smaller_than_xdim_minus_1,y_smaller_than_ydim_minus_1,seg,nbrs,E);
isEvalid=ischangbale_by_nbrs(nbrs);
if (isEvalid){
mu_i_E = mu_i + E * NUM_OF_CHANNELS;
mu_s_E = mu_s + E * 2;
if (calculate_cov){
J_i_E = J_i + E * NUM_OF_CHANNELS * NUM_OF_CHANNELS;
J_s_E = J_s + E * 4;
}
}
else{
E=BAD_TOPOLOGY_LABEL;
if (E==C) return; // Bug fix, 03/12/2015, Oren Freifeld
}
}
double pt[2];
pt[0]=(double)x;
pt[1]=(double)y;
//---------------
// log-likelihood (ignoring constants)
//---------------
double resN = cal_posterior(isNvalid, N, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_N, mu_s_N, J_i_N, J_s_N, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
double resS = cal_posterior(isSvalid, S, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_S, mu_s_S, J_i_S, J_s_S, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
double resE = cal_posterior(isEvalid, E, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_E, mu_s_E, J_i_E, J_s_E, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std , false);
double resW = cal_posterior(isWvalid, W, calculate_cov, imgC, pt, log_counts, prior_weight,
mu_i_W, mu_s_W, J_i_W, J_s_W, logdet_Sigma_i,logdet_Sigma_s, i_std, s_std, false);
bool all_are_valid = (isNvalid || N==OUT_OF_BOUNDS_LABEL) &&
(isSvalid || S==OUT_OF_BOUNDS_LABEL) &&
(isEvalid || E==OUT_OF_BOUNDS_LABEL) &&
(isWvalid || W==OUT_OF_BOUNDS_LABEL);
if (!all_are_valid) return;
//double res_max = -1; // some small negative number (use when working with the likelihood, l)
double res_max = log(.000000000000000001); // (use when working with the log-likelihood, ll)
int arg_max = C; // i.e., no change
// In the tests below, the order matters:
// E.g., testing (res_max<resN && isNvalid) is wrong!
// The reason is that if N is not valid, then resN (and hence the test res_max<resN) has no meaning.
// The correct test is thus isNvalid && res_max<resN.
if (isNvalid && res_max<resN ){
res_max=resN;
arg_max=N;
}
if (isSvalid && res_max<resS ){
res_max=resS;
arg_max=S;
}
if (isEvalid && res_max<resE){
res_max=resE;
arg_max=E;
}
if (isWvalid && res_max<resW){
res_max=resW;
arg_max=W;
}
// update seg
seg[idx]=arg_max;
return;
}
|
a297e3fd79142c588fedd926af58f9772ad49c75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_4_bot [3][2];
static int dims_update_halo_kernel2_zvel_plus_4_bot_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_4_bot_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(0,4,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(0,4,0);
}
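// Editorial note (added for clarity): for each selected field, the bottom
// boundary point is filled from the interior value four cells further along
// the y axis (relative offset (0,4,0)), which is what the "plus_4_bot" suffix
// of this generated kernel appears to refer to.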
__global__ void ops_update_halo_kernel2_zvel_plus_4_bot(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[0][0] * dims_update_halo_kernel2_zvel_plus_4_bot[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[1][0] * dims_update_halo_kernel2_zvel_plus_4_bot[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_bot[0][0], dims_update_halo_kernel2_zvel_plus_4_bot[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_bot[1][0], dims_update_halo_kernel2_zvel_plus_4_bot[1][1], arg1);
update_halo_kernel2_zvel_plus_4_bot_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,48)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(48,"update_halo_kernel2_zvel_plus_4_bot");
OPS_kernels[48].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_bot_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_bot_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_bot_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_bot_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_4_bot_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_4_bot_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_4_bot_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_4_bot_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_bot, dims_update_halo_kernel2_zvel_plus_4_bot_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_bot)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[48].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_4_bot), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[48].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[48].mpi_time += t2-t1;
OPS_kernels[48].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[48].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 48;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 48;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_bot_execute;
if (OPS_diags > 1) {
ops_timing_realloc(48,"update_halo_kernel2_zvel_plus_4_bot");
}
ops_enqueue_kernel(desc);
}
#endif
| a297e3fd79142c588fedd926af58f9772ad49c75.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_plus_4_bot [3][2];
static int dims_update_halo_kernel2_zvel_plus_4_bot_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_plus_4_bot_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = zvel0(0,4,0);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = zvel1(0,4,0);
}
__global__ void ops_update_halo_kernel2_zvel_plus_4_bot(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[0][0] * dims_update_halo_kernel2_zvel_plus_4_bot[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_plus_4_bot[1][0] * dims_update_halo_kernel2_zvel_plus_4_bot[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_plus_4_bot[0][0], dims_update_halo_kernel2_zvel_plus_4_bot[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_plus_4_bot[1][0], dims_update_halo_kernel2_zvel_plus_4_bot[1][1], arg1);
update_halo_kernel2_zvel_plus_4_bot_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,48)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(48,"update_halo_kernel2_zvel_plus_4_bot");
OPS_kernels[48].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_plus_4_bot_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_plus_4_bot_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_plus_4_bot_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_plus_4_bot_h[1][1]) {
dims_update_halo_kernel2_zvel_plus_4_bot_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_plus_4_bot_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_plus_4_bot_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_plus_4_bot_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_plus_4_bot, dims_update_halo_kernel2_zvel_plus_4_bot_h, sizeof(dims_update_halo_kernel2_zvel_plus_4_bot)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[48].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_plus_4_bot<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[48].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[48].mpi_time += t2-t1;
OPS_kernels[48].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[48].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_4_bot(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 48;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 48;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_4_bot_execute;
if (OPS_diags > 1) {
ops_timing_realloc(48,"update_halo_kernel2_zvel_plus_4_bot");
}
ops_enqueue_kernel(desc);
}
#endif
|
3d184391671fc4137e0e53bad91604ed7da4ded9.hip | // !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl"
template void megdnn::cuda::conv_bias_int8::
do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<
PerChannelBiasVisitor,
IConvEpilogue<
Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<
Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>
epilogue,
const ConvParam& param, float alpha, float beta, hipStream_t stream);
| 3d184391671fc4137e0e53bad91604ed7da4ded9.cu | // generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl"
template void megdnn::cuda::conv_bias_int8::
do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<
PerChannelBiasVisitor,
IConvEpilogue<
Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
IConvEpilogue<
Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>
epilogue,
const ConvParam& param, float alpha, float beta, cudaStream_t stream);
|
aaedf5e49dd95f701706aa0935d62c129c5c9dcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void BroadcastPReluForwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void BroadcastPReluBackwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
template<typename T>
__global__ void ElemwisePReluForwardGpu(const int32_t elem_cnt, const T* x, const T* alpha, T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[i];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void ElemwisePReluBackwardGpu(const int32_t elem_cnt, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[i];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
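// Returns true when the non-unit extents of alpha form a single contiguous run
// matching the corresponding extents of x, so the PRelu broadcast can be handled
// with a simple (outer, alpha, inner) index decomposition instead of a full
// ndarray broadcast.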
bool IsAlphaShapeContiguous(const ShapeView& alpha_shape, const ShapeView& x_shape) {
if (alpha_shape.elem_cnt() == 1) { return true; }
int64_t begin_idx = -1;
for (int64_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) != 1) {
begin_idx = i;
break;
}
}
CHECK_NE(begin_idx, -1);
int64_t end_idx = -1;
for (int64_t i = alpha_shape.NumAxes(); i > 0; --i) {
if (alpha_shape.At(i - 1) != 1) {
end_idx = i;
break;
}
}
CHECK_NE(end_idx, -1);
if (alpha_shape.elem_cnt() == x_shape.Count(begin_idx + 1, end_idx + 1)) {
return true;
} else {
return false;
}
}
int32_t GetOuterSize(const ShapeView& alpha_shape, const ShapeView& x_shape) {
int32_t outer_size = x_shape.At(0);
for (int32_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) == 1) {
outer_size *= x_shape.At(i + 1);
} else {
break;
}
}
return outer_size;
}
} // namespace
template<typename T>
class GpuPReluKernel final : public user_op::OpKernel {
public:
GpuPReluKernel() = default;
~GpuPReluKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
hipLaunchKernelGGL(( BroadcastPReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), y->mut_dptr<T>());
} else {
user_op::Tensor* broadcasted_alpha = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha->mut_dptr<T>()),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
hipLaunchKernelGGL(( ElemwisePReluForwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, x->dptr<T>(), broadcasted_alpha->dptr<T>(), y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_PRELU_KERNEL(dtype) \
REGISTER_USER_KERNEL("prelu") \
.SetCreateFn<GpuPReluKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("x", 0); \
const Shape* alpha_shape = ctx->Shape4ArgNameAndIndex("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(*alpha_shape, *in_shape) \
? 0 \
: GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_GPU_PRELU_KERNEL(float)
REGISTER_GPU_PRELU_KERNEL(double)
template<typename T>
class GpuPReluGradKernel final : public user_op::OpKernel {
public:
GpuPReluGradKernel() = default;
~GpuPReluGradKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* alpha_diff = ctx->Tensor4ArgNameAndIndex("alpha_diff", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
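    // tmp_buffer is carved into equally sized, CUDA-aligned slices:
    // [ broadcasted_alpha_diff | reduce_sum_tmp_buf | broadcasted_alpha (only in
    // the non-contiguous branch) ], matching the 2x / 3x sizes requested in
    // SetInferTmpSizeFn below.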
T* broadcasted_alpha_diff = tmp_buffer->mut_dptr<T>();
T* reduce_sum_tmp_buf = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ GetCudaAlignedSize(elem_cnt * sizeof(T)));
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
hipLaunchKernelGGL(( BroadcastPReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), dy->dptr<T>(),
dx->mut_dptr<T>(), broadcasted_alpha_diff);
} else {
T* broadcasted_alpha = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ 2 * GetCudaAlignedSize(elem_cnt * sizeof(T)));
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
hipLaunchKernelGGL(( ElemwisePReluBackwardGpu<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->device_ctx()->cuda_stream(),
elem_cnt, x->dptr<T>(), broadcasted_alpha, dy->dptr<T>(), dx->mut_dptr<T>(),
broadcasted_alpha_diff);
}
NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(
ctx->device_ctx(), XpuVarNdarray<T>(left_extended_shape, alpha_diff->mut_dptr<T>()),
XpuVarNdarray<const T>(x->shape(), broadcasted_alpha_diff),
XpuVarNdarray<T>(x->shape(), reduce_sum_tmp_buf));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_PRELU_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("prelu_grad") \
.SetCreateFn<GpuPReluGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("x", 0); \
const Shape* alpha_shape = ctx->Shape4ArgNameAndIndex("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(*alpha_shape, *in_shape) \
? 2 * GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)) \
: 3 * GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_GPU_PRELU_GRAD_KERNEL(float)
REGISTER_GPU_PRELU_GRAD_KERNEL(double)
} // namespace oneflow
| aaedf5e49dd95f701706aa0935d62c129c5c9dcb.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void BroadcastPReluForwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void BroadcastPReluBackwardGpu(const int32_t elem_cnt, const int32_t alpha_size,
const int32_t inner_size, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[(i / inner_size) % alpha_size];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
template<typename T>
__global__ void ElemwisePReluForwardGpu(const int32_t elem_cnt, const T* x, const T* alpha, T* y) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T alpha_i = alpha[i];
y[i] = x_i > 0 ? x_i : x_i * alpha_i;
}
}
template<typename T>
__global__ void ElemwisePReluBackwardGpu(const int32_t elem_cnt, const T* x, const T* alpha,
const T* dy, T* dx, T* alpha_diff) {
CUDA_1D_KERNEL_LOOP(i, elem_cnt) {
const T x_i = x[i];
const T dy_i = dy[i];
const T alpha_i = alpha[i];
T dx_i = 0;
T alpha_diff_i = 0;
if (x_i > 0) {
dx_i = dy_i;
alpha_diff_i = 0;
} else {
dx_i = dy_i * alpha_i;
alpha_diff_i = dy_i * x_i;
}
dx[i] = dx_i;
alpha_diff[i] = alpha_diff_i;
}
}
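// Returns true when the non-unit extents of alpha form a single contiguous run
// matching the corresponding extents of x, so the PRelu broadcast can be handled
// with a simple (outer, alpha, inner) index decomposition instead of a full
// ndarray broadcast.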
bool IsAlphaShapeContiguous(const ShapeView& alpha_shape, const ShapeView& x_shape) {
if (alpha_shape.elem_cnt() == 1) { return true; }
int64_t begin_idx = -1;
for (int64_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) != 1) {
begin_idx = i;
break;
}
}
CHECK_NE(begin_idx, -1);
int64_t end_idx = -1;
for (int64_t i = alpha_shape.NumAxes(); i > 0; --i) {
if (alpha_shape.At(i - 1) != 1) {
end_idx = i;
break;
}
}
CHECK_NE(end_idx, -1);
if (alpha_shape.elem_cnt() == x_shape.Count(begin_idx + 1, end_idx + 1)) {
return true;
} else {
return false;
}
}
int32_t GetOuterSize(const ShapeView& alpha_shape, const ShapeView& x_shape) {
int32_t outer_size = x_shape.At(0);
for (int32_t i = 0; i < alpha_shape.NumAxes(); ++i) {
if (alpha_shape.At(i) == 1) {
outer_size *= x_shape.At(i + 1);
} else {
break;
}
}
return outer_size;
}
} // namespace
template<typename T>
class GpuPReluKernel final : public user_op::OpKernel {
public:
GpuPReluKernel() = default;
~GpuPReluKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
BroadcastPReluForwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), y->mut_dptr<T>());
} else {
user_op::Tensor* broadcasted_alpha = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha->mut_dptr<T>()),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
ElemwisePReluForwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, x->dptr<T>(), broadcasted_alpha->dptr<T>(), y->mut_dptr<T>());
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_PRELU_KERNEL(dtype) \
REGISTER_USER_KERNEL("prelu") \
.SetCreateFn<GpuPReluKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("y", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("x", 0); \
const Shape* alpha_shape = ctx->Shape4ArgNameAndIndex("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(*alpha_shape, *in_shape) \
? 0 \
: GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_GPU_PRELU_KERNEL(float)
REGISTER_GPU_PRELU_KERNEL(double)
template<typename T>
class GpuPReluGradKernel final : public user_op::OpKernel {
public:
GpuPReluGradKernel() = default;
~GpuPReluGradKernel() = default;
private:
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* alpha = ctx->Tensor4ArgNameAndIndex("alpha", 0);
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* alpha_diff = ctx->Tensor4ArgNameAndIndex("alpha_diff", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int32_t elem_cnt = x->shape().elem_cnt();
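    // tmp_buffer is carved into equally sized, CUDA-aligned slices:
    // [ broadcasted_alpha_diff | reduce_sum_tmp_buf | broadcasted_alpha (only in
    // the non-contiguous branch) ], matching the 2x / 3x sizes requested in
    // SetInferTmpSizeFn below.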
T* broadcasted_alpha_diff = tmp_buffer->mut_dptr<T>();
T* reduce_sum_tmp_buf = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ GetCudaAlignedSize(elem_cnt * sizeof(T)));
const Shape& left_extended_shape =
CreateLeftExtendedShape(ShapeView(alpha->shape()), x->shape().NumAxes());
if (IsAlphaShapeContiguous(alpha->shape(), x->shape())) {
const int32_t outer_size = GetOuterSize(alpha->shape(), x->shape());
const int32_t alpha_size = alpha->shape().elem_cnt();
const int32_t inner_size = elem_cnt / outer_size / alpha_size;
BroadcastPReluBackwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, alpha_size, inner_size, x->dptr<T>(), alpha->dptr<T>(), dy->dptr<T>(),
dx->mut_dptr<T>(), broadcasted_alpha_diff);
} else {
T* broadcasted_alpha = reinterpret_cast<T*>(tmp_buffer->mut_dptr<char>()
+ 2 * GetCudaAlignedSize(elem_cnt * sizeof(T)));
NdarrayUtil<DeviceType::kGPU, T>::BroadcastTo(
ctx->device_ctx(), XpuVarNdarray<T>(x->shape(), broadcasted_alpha),
XpuVarNdarray<const T>(left_extended_shape, alpha->dptr<T>()));
ElemwisePReluBackwardGpu<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, x->dptr<T>(), broadcasted_alpha, dy->dptr<T>(), dx->mut_dptr<T>(),
broadcasted_alpha_diff);
}
NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(
ctx->device_ctx(), XpuVarNdarray<T>(left_extended_shape, alpha_diff->mut_dptr<T>()),
XpuVarNdarray<const T>(x->shape(), broadcasted_alpha_diff),
XpuVarNdarray<T>(x->shape(), reduce_sum_tmp_buf));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_GPU_PRELU_GRAD_KERNEL(dtype) \
REGISTER_USER_KERNEL("prelu_grad") \
.SetCreateFn<GpuPReluGradKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dx", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) { \
const Shape* in_shape = ctx->Shape4ArgNameAndIndex("x", 0); \
const Shape* alpha_shape = ctx->Shape4ArgNameAndIndex("alpha", 0); \
const int64_t tmp_buffer_size = \
IsAlphaShapeContiguous(*alpha_shape, *in_shape) \
? 2 * GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)) \
: 3 * GetCudaAlignedSize(in_shape->elem_cnt() * sizeof(dtype)); \
return tmp_buffer_size; \
});
REGISTER_GPU_PRELU_GRAD_KERNEL(float)
REGISTER_GPU_PRELU_GRAD_KERNEL(double)
} // namespace oneflow
|
a3501960a0c739ad24cd6fc3bd8f26a9928ce6eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgedensereimsplit.cu normal z -> s, Tue Feb 9 16:05:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
float * A,
float * ReA,
float * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_S_MAKE( MAGMA_S_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_S_MAKE( MAGMA_S_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
    This routine takes an input matrix A in DENSE format, located on the GPU,
    and splits it into two matrices ReA and ImA containing the real and the
    imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A.
@param[out]
ReA magma_s_matrix*
                output matrix containing the real contributions.
@param[out]
ImA magma_s_matrix*
                output matrix containing the imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
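/*
    Example (an illustrative usage sketch, not part of the generated interface;
    it assumes A is a dense matrix already resident on the device and that
    queue is a valid magma_queue_t):
        magma_s_matrix ReA, ImA;
        magma_sgedensereimsplit( A, &ReA, &ImA, queue );
        // ReA and ImA now hold the real and imaginary contributions of A;
        // both are allocated inside the routine.
*/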
extern "C"
magma_int_t
magma_sgedensereimsplit(
magma_s_matrix A,
magma_s_matrix *ReA,
magma_s_matrix *ImA,
magma_queue_t queue )
{
magma_smtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_smtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( sgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
| a3501960a0c739ad24cd6fc3bd8f26a9928ce6eb.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from sparse-iter/blas/zgedensereimsplit.cu normal z -> s, Tue Feb 9 16:05:45 2016
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
sgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
float * A,
float * ReA,
float * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_S_MAKE( MAGMA_S_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_S_MAKE( MAGMA_S_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
    This routine takes an input matrix A in DENSE format, located on the GPU,
    and splits it into two matrices ReA and ImA containing the real and the
    imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_s_matrix
input matrix A.
@param[out]
ReA magma_s_matrix*
                output matrix containing the real contributions.
@param[out]
ImA magma_s_matrix*
                output matrix containing the imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sblas
********************************************************************/
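/*
    Example (an illustrative usage sketch, not part of the generated interface;
    it assumes A is a dense matrix already resident on the device and that
    queue is a valid magma_queue_t):
        magma_s_matrix ReA, ImA;
        magma_sgedensereimsplit( A, &ReA, &ImA, queue );
        // ReA and ImA now hold the real and imaginary contributions of A;
        // both are allocated inside the routine.
*/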
extern "C"
magma_int_t
magma_sgedensereimsplit(
magma_s_matrix A,
magma_s_matrix *ReA,
magma_s_matrix *ImA,
magma_queue_t queue )
{
magma_smtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_smtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
sgedensereimsplit_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
268f4de5dc6fda187298a4f3e720724c22697d00.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hipcub/hipcub.hpp"
#include <array>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(hipStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::array<sspiLaunchConfig, 1> sspiLCOptions = {
sspiLaunchConfig(DataType::kFLOAT, sortScoresPerImage_gpu<float>)};
pluginStatus_t sortScoresPerImage(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiLCOptions.size(); ++i)
{
if (lc == sspiLCOptions[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiLCOptions[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
| 268f4de5dc6fda187298a4f3e720724c22697d00.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cub/cub.cuh"
#include <array>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
cub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(cudaStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::array<sspiLaunchConfig, 1> sspiLCOptions = {
sspiLaunchConfig(DataType::kFLOAT, sortScoresPerImage_gpu<float>)};
pluginStatus_t sortScoresPerImage(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiLCOptions.size(); ++i)
{
if (lc == sspiLCOptions[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiLCOptions[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
|
16e70440cbb61a966e9e332ed9cf2011199916ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAConvolver.cuh"
#define SIGNAL_CHANNEL 0
__device__ size_t cuda_bounded_index(SignalBuffer_t buffer, size_t channel, size_t index)
{
size_t channel_size = get_channel_buffer_size(buffer, channel);
return index >= channel_size ? index % channel_size : index;
}
size_t bounded_index(size_t max, size_t channel, size_t index)
{
return index >= max ? index % max : index;
}
__global__ void cudaconvolver_kernel_set_size(SignalBuffer_t buf, size_t channel, size_t count)
{
if (get_channel_buffer_size(buf, channel) < count)
set_channel_buffer_size(buf, channel, count);
}
__global__ void cudaconvolver_kernel_output(SignalBuffer_t device_buffer, SignalBuffer_t signal, SignalBuffer_t tmp, size_t channel, size_t temp_index)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
size_t out_size = get_channel_buffer_size(tmp, channel);
if (k >= out_size)
return;
size_t signal_size = get_channel_buffer_size(signal, SIGNAL_CHANNEL);
hipComplex temp = make_cuComplex(0, 0);
hipComplex signal_sample, input_sample;
size_t index = cuda_bounded_index(tmp, channel, temp_index + k);
hipComplex temp_sample = get_signal_buffer_sample(tmp, channel, index);
for (int i = 0; i < signal_size; i++)
{
signal_sample = get_signal_buffer_sample(signal, SIGNAL_CHANNEL, i);
if (i > k)
input_sample = make_cuComplex(0,0);
else
input_sample = get_signal_buffer_sample(device_buffer, channel, k-i);
temp_sample = cuCaddf(temp_sample, cuCmulf(signal_sample, input_sample));
}
//temp_sample = get_signal_buffer_sample(signal, channel, k);
set_signal_buffer_sample(tmp, channel, index, temp_sample);
}
__global__ void cudaconvolver_kernel_copy(SignalBuffer_t device_buffer, SignalBuffer_t tmp, size_t channel, size_t temp_index)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
size_t tmp_size = get_channel_buffer_size(tmp, channel);
size_t out_size = get_channel_buffer_size(device_buffer, channel);
if (k >= tmp_size)
return;
size_t index = cuda_bounded_index(tmp, channel, temp_index + k);
hipComplex sample = get_signal_buffer_sample(tmp, channel, index);
set_signal_buffer_sample(device_buffer, channel, k, sample);
if (k < out_size)
set_signal_buffer_sample(tmp, channel, index, make_cuComplex(0,0));
}
CUDAConvolver::CUDAConvolver(AbstractSignalProcessor* previous, BitMask channels_to_process, SignalBuffer_t signal) : CUDASignalProcessor(previous, channels_to_process)
{
this->signal = signal;
}
CUDAConvolver::~CUDAConvolver()
{
delete_signal_buffer(this->signal);
cuda_deallocate_signal_buffer(&(this->tmp));
cuda_deallocate_signal_buffer(&(this->device_signal));
delete[] this->temp_indexes;
delete[] this->samples_remaining;
    // The base-class CUDASignalProcessor destructor runs automatically after this body.
}
int CUDAConvolver::init(size_t max_buffer_size, size_t channels)
{
hipError_t status;
size_t extra_size = channels * get_max_buffer_size(this->signal) - 1;
status = cuda_allocate_signal_buffer(&(this->tmp), max_buffer_size + extra_size, channels);
if (check_cuda_status(status))
return 0; // check this
status = cuda_clear_signal_buffer_deep(this->tmp);
if (check_cuda_status(status))
return 0; // check this
status = cuda_allocate_signal_buffer(&(this->device_signal), get_max_buffer_size(signal), get_channels(signal));
if (check_cuda_status(status))
return 0; // check this
transfer_buffer_host_to_device(&(this->device_signal), this->signal);
this->temp_indexes = new size_t[channels]{0};
this->samples_remaining = new size_t[channels]{0};
return CUDASignalProcessor::init(max_buffer_size, channels);
}
void CUDAConvolver::exec_kernel(SignalBuffer_t* host_buffer, SignalBuffer_t* device_buffer)
{
LOG("CUDAConvolver kernel start\n");
hipError_t status;
size_t channels = get_channels(*device_buffer);
hipStream_t* streams = new hipStream_t[channels]{ NULL };
for (size_t channel = 0; channel < channels; channel++)
{
if (!has_to_process_channel(channel))
continue;
size_t buffer_size = get_channel_buffer_size(*host_buffer, channel);
size_t signal_size = get_channel_buffer_size(signal, SIGNAL_CHANNEL);
size_t outcount = buffer_size + signal_size - 1;
size_t remaining_samples = this->samples_remaining[channel];
LOG("CUDAConvolver starting stream %lli\n", channel);
status = hipStreamCreate(streams + channel);
check_cuda_status(status, "stream create");
hipStream_t stream = streams[channel];
size_t temp_index = temp_indexes[channel];
size_t bounded_max = get_max_possible_channel_buffer_size(tmp, channel);
cudaconvolver_kernel_set_size << <1, 1, 0, stream >> > (this->tmp, channel, bounded_max);
if ((buffer_size == 0 || signal_size == 0))
{
if (remaining_samples > 0){
size_t max_size = get_max_possible_channel_buffer_size(*host_buffer, channel);
size_t to_read = max_size < remaining_samples ? max_size : remaining_samples;
size_t tmp_size = get_max_buffer_size(tmp);
tmp_size = tmp_size < to_read + temp_index ? tmp_size : to_read+temp_index;
dim3 threadsPerBlock;
dim3 blocks;
get_threads_blocks_count(to_read, threadsPerBlock, blocks);
cudaconvolver_kernel_set_size << <1, 1, 0, stream >> > (*device_buffer, channel, to_read);
cudaconvolver_kernel_copy << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->tmp, channel, temp_index);
this->temp_indexes[channel] = bounded_index(bounded_max, channel, temp_index + to_read);
this->samples_remaining[channel] = remaining_samples - to_read;
}
continue;
}
dim3 threadsPerBlock;
dim3 blocks;
get_threads_blocks_count(outcount, threadsPerBlock, blocks);
cudaconvolver_kernel_output << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->device_signal, this->tmp, channel, temp_index);
cudaconvolver_kernel_copy << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->tmp, channel, temp_index);
LOG("CUDAConvolver started stream %lli\n", channel);
this->temp_indexes[channel] = bounded_index(outcount, channel, temp_index + buffer_size);
this->samples_remaining[channel] = signal_size - 1;
}
for (size_t channel = 0; channel < channels; channel++) {
if (streams[channel] != NULL) {
LOG("CUDAConvolver waiting stream %lli\n", channel);
status = hipStreamSynchronize(streams[channel]);
check_cuda_status(status, "stream sync");
status = hipStreamDestroy(streams[channel]);
check_cuda_status(status, "stream destr");
LOG("CUDAConvolver destroyed stream %lli\n", channel);
}
}
}
CUDAConvolver* create_cuda_convolver_from_file(AbstractSignalProcessor* previous, BitMask mask, std::string filename, size_t conv_size)
{
SF_INFO info;
memset(&info, 0, sizeof(SF_INFO));
SNDFILE* file = sf_open(filename.c_str(), SFM_READ, &info);
if (info.channels != 1) {
std::cout << "only 1 channel convolution kernel allowed" << std::endl;
return NULL;
}
float* real = new float[conv_size];
float* imag = new float[conv_size] {0};
size_t actual_read = sf_read_float(file, real, conv_size);
SignalBuffer_t buffer = create_signal_buffer(conv_size, 1);
signal_buffer_from_floats(buffer, real, imag, actual_read);
sf_close(file);
delete[] real;
delete[] imag;
return new CUDAConvolver(previous, mask, buffer);
} | 16e70440cbb61a966e9e332ed9cf2011199916ad.cu | #include "CUDAConvolver.cuh"
#define SIGNAL_CHANNEL 0
__device__ size_t cuda_bounded_index(SignalBuffer_t buffer, size_t channel, size_t index)
{
size_t channel_size = get_channel_buffer_size(buffer, channel);
return index >= channel_size ? index % channel_size : index;
}
size_t bounded_index(size_t max, size_t channel, size_t index)
{
return index >= max ? index % max : index;
}
__global__ void cudaconvolver_kernel_set_size(SignalBuffer_t buf, size_t channel, size_t count)
{
if (get_channel_buffer_size(buf, channel) < count)
set_channel_buffer_size(buf, channel, count);
}
__global__ void cudaconvolver_kernel_output(SignalBuffer_t device_buffer, SignalBuffer_t signal, SignalBuffer_t tmp, size_t channel, size_t temp_index)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
size_t out_size = get_channel_buffer_size(tmp, channel);
if (k >= out_size)
return;
size_t signal_size = get_channel_buffer_size(signal, SIGNAL_CHANNEL);
cuComplex temp = make_cuComplex(0, 0);
cuComplex signal_sample, input_sample;
size_t index = cuda_bounded_index(tmp, channel, temp_index + k);
cuComplex temp_sample = get_signal_buffer_sample(tmp, channel, index);
for (int i = 0; i < signal_size; i++)
{
signal_sample = get_signal_buffer_sample(signal, SIGNAL_CHANNEL, i);
if (i > k)
input_sample = make_cuComplex(0,0);
else
input_sample = get_signal_buffer_sample(device_buffer, channel, k-i);
temp_sample = cuCaddf(temp_sample, cuCmulf(signal_sample, input_sample));
}
//temp_sample = get_signal_buffer_sample(signal, channel, k);
set_signal_buffer_sample(tmp, channel, index, temp_sample);
}
__global__ void cudaconvolver_kernel_copy(SignalBuffer_t device_buffer, SignalBuffer_t tmp, size_t channel, size_t temp_index)
{
int k = blockIdx.x * blockDim.x + threadIdx.x;
size_t tmp_size = get_channel_buffer_size(tmp, channel);
size_t out_size = get_channel_buffer_size(device_buffer, channel);
if (k >= tmp_size)
return;
size_t index = cuda_bounded_index(tmp, channel, temp_index + k);
cuComplex sample = get_signal_buffer_sample(tmp, channel, index);
set_signal_buffer_sample(device_buffer, channel, k, sample);
if (k < out_size)
set_signal_buffer_sample(tmp, channel, index, make_cuComplex(0,0));
}
CUDAConvolver::CUDAConvolver(AbstractSignalProcessor* previous, BitMask channels_to_process, SignalBuffer_t signal) : CUDASignalProcessor(previous, channels_to_process)
{
this->signal = signal;
}
CUDAConvolver::~CUDAConvolver()
{
delete_signal_buffer(this->signal);
cuda_deallocate_signal_buffer(&(this->tmp));
cuda_deallocate_signal_buffer(&(this->device_signal));
delete[] this->temp_indexes;
delete[] this->samples_remaining;
CUDASignalProcessor::~CUDASignalProcessor();
}
int CUDAConvolver::init(size_t max_buffer_size, size_t channels)
{
cudaError_t status;
size_t extra_size = channels * get_max_buffer_size(this->signal) - 1;
status = cuda_allocate_signal_buffer(&(this->tmp), max_buffer_size + extra_size, channels);
if (check_cuda_status(status))
return 0; // check this
status = cuda_clear_signal_buffer_deep(this->tmp);
if (check_cuda_status(status))
return 0; // check this
status = cuda_allocate_signal_buffer(&(this->device_signal), get_max_buffer_size(signal), get_channels(signal));
if (check_cuda_status(status))
return 0; // check this
transfer_buffer_host_to_device(&(this->device_signal), this->signal);
this->temp_indexes = new size_t[channels]{0};
this->samples_remaining = new size_t[channels]{0};
return CUDASignalProcessor::init(max_buffer_size, channels);
}
void CUDAConvolver::exec_kernel(SignalBuffer_t* host_buffer, SignalBuffer_t* device_buffer)
{
LOG("CUDAConvolver kernel start\n");
cudaError_t status;
size_t channels = get_channels(*device_buffer);
cudaStream_t* streams = new cudaStream_t[channels]{ NULL };
for (size_t channel = 0; channel < channels; channel++)
{
if (!has_to_process_channel(channel))
continue;
size_t buffer_size = get_channel_buffer_size(*host_buffer, channel);
size_t signal_size = get_channel_buffer_size(signal, SIGNAL_CHANNEL);
size_t outcount = buffer_size + signal_size - 1;
size_t remaining_samples = this->samples_remaining[channel];
LOG("CUDAConvolver starting stream %lli\n", channel);
status = cudaStreamCreate(streams + channel);
check_cuda_status(status, "stream create");
cudaStream_t stream = streams[channel];
size_t temp_index = temp_indexes[channel];
size_t bounded_max = get_max_possible_channel_buffer_size(tmp, channel);
cudaconvolver_kernel_set_size << <1, 1, 0, stream >> > (this->tmp, channel, bounded_max);
if ((buffer_size == 0 || signal_size == 0))
{
if (remaining_samples > 0){
size_t max_size = get_max_possible_channel_buffer_size(*host_buffer, channel);
size_t to_read = max_size < remaining_samples ? max_size : remaining_samples;
size_t tmp_size = get_max_buffer_size(tmp);
tmp_size = tmp_size < to_read + temp_index ? tmp_size : to_read+temp_index;
dim3 threadsPerBlock;
dim3 blocks;
get_threads_blocks_count(to_read, threadsPerBlock, blocks);
cudaconvolver_kernel_set_size << <1, 1, 0, stream >> > (*device_buffer, channel, to_read);
cudaconvolver_kernel_copy << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->tmp, channel, temp_index);
this->temp_indexes[channel] = bounded_index(bounded_max, channel, temp_index + to_read);
this->samples_remaining[channel] = remaining_samples - to_read;
}
continue;
}
dim3 threadsPerBlock;
dim3 blocks;
get_threads_blocks_count(outcount, threadsPerBlock, blocks);
cudaconvolver_kernel_output << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->device_signal, this->tmp, channel, temp_index);
cudaconvolver_kernel_copy << <blocks, threadsPerBlock, 0, stream >> > (*device_buffer, this->tmp, channel, temp_index);
LOG("CUDAConvolver started stream %lli\n", channel);
this->temp_indexes[channel] = bounded_index(outcount, channel, temp_index + buffer_size);
this->samples_remaining[channel] = signal_size - 1;
}
for (size_t channel = 0; channel < channels; channel++) {
if (streams[channel] != NULL) {
LOG("CUDAConvolver waiting stream %lli\n", channel);
status = cudaStreamSynchronize(streams[channel]);
check_cuda_status(status, "stream sync");
status = cudaStreamDestroy(streams[channel]);
check_cuda_status(status, "stream destr");
LOG("CUDAConvolver destroyed stream %lli\n", channel);
}
}
}
CUDAConvolver* create_cuda_convolver_from_file(AbstractSignalProcessor* previous, BitMask mask, std::string filename, size_t conv_size)
{
SF_INFO info;
memset(&info, 0, sizeof(SF_INFO));
SNDFILE* file = sf_open(filename.c_str(), SFM_READ, &info);
if (info.channels != 1) {
std::cout << "only 1 channel convolution kernel allowed" << std::endl;
return NULL;
}
float* real = new float[conv_size];
float* imag = new float[conv_size] {0};
size_t actual_read = sf_read_float(file, real, conv_size);
SignalBuffer_t buffer = create_signal_buffer(conv_size, 1);
signal_buffer_from_floats(buffer, real, imag, actual_read);
sf_close(file);
delete[] real;
delete[] imag;
return new CUDAConvolver(previous, mask, buffer);
} |
e201337833e29c61c0392231491388489bd776d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <thrust/device_vector.h>
#include "anderson.h"
AN2 :: ~AN2 () {
    delete[] a;
    delete[] c;
    delete[] x;
hipFree(dtp);
hipFree(drp);
hipFree(ds);
}
void AN2 :: initialize (Cell * ce, Solvent * sv) {
ngrid = ce -> ngrid;
niv = sv -> natv;
binary = 0;
hipMalloc(&dtp, ngrid * niv * 2 * sizeof(double));
hipMalloc(&drp, ngrid * niv * 2 * sizeof(double));
hipMalloc(&ds, ce -> grid[1] * ce -> grid[2] * 5 * sizeof(double));
b.x = ce -> grid[0];
g.x = ce -> grid[1];
g.y = ce -> grid[2];
s = new double[5];
a = new double[4];
c = new double[2];
x = new double[2];
irho = new double[niv];
for (int iv = 0; iv < niv; ++iv) {
irho[iv] = 1.0 / sv -> rhov[iv];
}
}
void AN2 :: calculate (double * dt, double * dtr) {
__global__ void newdt0(double *, const double * __restrict__,
double *, double *);
__global__ void newdt(double *, const double * __restrict__,
double *, double *,
double s1, double s2, double m, int niv, int biv);
void leg(double * a, double * x, double * b, int n, int nn);
if (count > 0) {
--count;
for (int iv = 0; iv < niv; ++iv) {
int biv = iv + binary * niv;
hipLaunchKernelGGL(( newdt0) , dim3(g), dim3(b) , 0, 0, dt + (iv * ngrid), dtr + (iv * ngrid),
dtp + (biv * ngrid), drp + (biv * ngrid));
}
} else {
cal_theta(dt, dtr);
a[2] = a[1];
leg(a, x, c, 2, 2);
s1 = x[0] + mp * ((binary == 0)? 0 : 1);
s2 = x[1] + mp * ((binary == 0)? 1 : 0);
for (int iv = 0; iv < niv; ++iv) {
hipLaunchKernelGGL(( newdt) , dim3(g), dim3(b) , 0, 0, dt + (iv * ngrid), dtr + (iv * ngrid),
dtp + (iv * ngrid), drp + (iv * ngrid),
s1, s2, m, niv * ngrid, binary * niv * ngrid);
}
}
binary = (binary == 0)? 1 : 0;
}
void AN2 :: cal_theta (double * dt, double * dtr) {
__global__ void theta30(double *, const double * __restrict__,
const double * __restrict__, int);
c[0] = c[1] = a[0] = a[1] = a[3] = 0.0;
for (int iv = 0; iv < niv; ++iv) {
hipLaunchKernelGGL(( theta30) , dim3(g), dim3(b), b.x * 5 * sizeof(double) , 0,
ds, dtr + (iv * ngrid), drp + (iv * ngrid), niv * ngrid);
thrust::device_ptr<double> ds_ptr(ds);
for (int i = 0; i < 5; ++i) {
s[i] = thrust::reduce(ds_ptr + i * g.x * g.y,
ds_ptr + (i + 1) * g.x * g.y);
}
c[0] += s[0] * irho[iv];
c[1] += s[1] * irho[iv];
a[0] += s[2] * irho[iv];
a[1] += s[3] * irho[iv];
a[3] += s[4] * irho[iv];
}
}
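// Solves the n-by-n linear system a * x = b by Gaussian elimination
// (forward elimination without pivoting, then back substitution);
// nn is the allocated row stride of a, and b is overwritten during elimination.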
void leg(double * a, double * x, double * b, int n, int nn) {
int i, j, k, k1;
double s, p, q, r;
double *ai, *ak;
for (k = 0, ak = a; k < n -1; ++k, ak += nn) {
k1 = k + 1;
p = ak[k];
for (j = k1; j < n; ++j)
ak[j] /= p;
r = b[k] /= p;
for (i = k1, ai = ak + nn; i < n; ++i, ai += nn) {
q = ai[k];
for (j = k1; j < n; ++j)
ai[j] -= q * ak[j];
b[i] -= q * r;
}
}
x[n - 1] = b[n - 1] / ak[n - 1];
for (k = n - 2, ak = a + nn * (n - 2); k >= 0; --k, ak -= nn) {
k1 = k + 1;
s = b[k];
for (j = k1; j < n; ++j)
s -= ak[j] * x[j];
x[k] = s;
}
}
| e201337833e29c61c0392231491388489bd776d3.cu | #include <iostream>
#include <thrust/device_vector.h>
#include "anderson.h"
AN2 :: ~AN2 () {
    delete[] a;
    delete[] c;
    delete[] x;
cudaFree(dtp);
cudaFree(drp);
cudaFree(ds);
}
void AN2 :: initialize (Cell * ce, Solvent * sv) {
ngrid = ce -> ngrid;
niv = sv -> natv;
binary = 0;
cudaMalloc(&dtp, ngrid * niv * 2 * sizeof(double));
cudaMalloc(&drp, ngrid * niv * 2 * sizeof(double));
cudaMalloc(&ds, ce -> grid[1] * ce -> grid[2] * 5 * sizeof(double));
b.x = ce -> grid[0];
g.x = ce -> grid[1];
g.y = ce -> grid[2];
s = new double[5];
a = new double[4];
c = new double[2];
x = new double[2];
irho = new double[niv];
for (int iv = 0; iv < niv; ++iv) {
irho[iv] = 1.0 / sv -> rhov[iv];
}
}
void AN2 :: calculate (double * dt, double * dtr) {
__global__ void newdt0(double *, const double * __restrict__,
double *, double *);
__global__ void newdt(double *, const double * __restrict__,
double *, double *,
double s1, double s2, double m, int niv, int biv);
void leg(double * a, double * x, double * b, int n, int nn);
if (count > 0) {
--count;
for (int iv = 0; iv < niv; ++iv) {
int biv = iv + binary * niv;
newdt0 <<< g, b >>> (dt + (iv * ngrid), dtr + (iv * ngrid),
dtp + (biv * ngrid), drp + (biv * ngrid));
}
} else {
cal_theta(dt, dtr);
a[2] = a[1];
leg(a, x, c, 2, 2);
s1 = x[0] + mp * ((binary == 0)? 0 : 1);
s2 = x[1] + mp * ((binary == 0)? 1 : 0);
for (int iv = 0; iv < niv; ++iv) {
newdt <<< g, b >>> (dt + (iv * ngrid), dtr + (iv * ngrid),
dtp + (iv * ngrid), drp + (iv * ngrid),
s1, s2, m, niv * ngrid, binary * niv * ngrid);
}
}
binary = (binary == 0)? 1 : 0;
}
void AN2 :: cal_theta (double * dt, double * dtr) {
__global__ void theta30(double *, const double * __restrict__,
const double * __restrict__, int);
c[0] = c[1] = a[0] = a[1] = a[3] = 0.0;
for (int iv = 0; iv < niv; ++iv) {
theta30 <<< g, b, b.x * 5 * sizeof(double) >>>
(ds, dtr + (iv * ngrid), drp + (iv * ngrid), niv * ngrid);
thrust::device_ptr<double> ds_ptr(ds);
for (int i = 0; i < 5; ++i) {
s[i] = thrust::reduce(ds_ptr + i * g.x * g.y,
ds_ptr + (i + 1) * g.x * g.y);
}
c[0] += s[0] * irho[iv];
c[1] += s[1] * irho[iv];
a[0] += s[2] * irho[iv];
a[1] += s[3] * irho[iv];
a[3] += s[4] * irho[iv];
}
}
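// Solves the n-by-n linear system a * x = b by Gaussian elimination
// (forward elimination without pivoting, then back substitution);
// nn is the allocated row stride of a, and b is overwritten during elimination.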
void leg(double * a, double * x, double * b, int n, int nn) {
int i, j, k, k1;
double s, p, q, r;
double *ai, *ak;
for (k = 0, ak = a; k < n -1; ++k, ak += nn) {
k1 = k + 1;
p = ak[k];
for (j = k1; j < n; ++j)
ak[j] /= p;
r = b[k] /= p;
for (i = k1, ai = ak + nn; i < n; ++i, ai += nn) {
q = ai[k];
for (j = k1; j < n; ++j)
ai[j] -= q * ak[j];
b[i] -= q * r;
}
}
x[n - 1] = b[n - 1] / ak[n - 1];
for (k = n - 2, ak = a + nn * (n - 2); k >= 0; --k, ak -= nn) {
k1 = k + 1;
s = b[k];
for (j = k1; j < n; ++j)
s -= ak[j] * x[j];
x[k] = s;
}
}
|
603e9eb81c1c17991d914a947e29e50f4773a6da.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel/ssrt.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/compaction.h"
#include "kernel/pt_common.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#define SEPARATE_SHADOWRAY_HITTEST
//#define DUMP_DEBUG_LOG
//#define ENABLE_PROGRESSIVE
__global__ void genPath(
idaten::SSRT::Path* paths,
aten::ray* rays,
int width, int height,
int sample, int maxSamples,
unsigned int frame,
const aten::CameraParameter* __restrict__ camera,
const unsigned int* sobolmatrices,
const unsigned int* random)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
if (path.isKill) {
path.isTerminate = true;
return;
}
#ifdef ENABLE_PROGRESSIVE
#if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL
auto scramble = random[idx] * 0x1fe3434f;
path.sampler.init(frame, 0, scramble, sobolmatrices);
#elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ
auto rnd = random[idx];
auto scramble = rnd * 0x1fe3434f * ((frame + 133 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM));
path.sampler.init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 0, scramble);
#endif
#else
auto scramble = (iy * height * 4 + ix * 4) * maxSamples + sample + 1 + frame;
path.sampler.init(frame, 0, scramble, sobolmatrices);
#endif
float s = (ix + path.sampler.nextSample()) / (float)(camera->width);
float t = (iy + path.sampler.nextSample()) / (float)(camera->height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
rays[idx] = camsample.r;
path.throughput = aten::vec3(1);
path.pdfb = 0.0f;
path.isTerminate = false;
path.isSingular = false;
path.samples += 1;
// Accumulate value, so do not reset.
//path.contrib = aten::vec3(0);
}
__global__ void hitTestPrimaryRayInScreenSpace(
hipSurfaceObject_t gbuffer,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
int* hitbools,
int width, int height,
const aten::vec4 camPos,
const aten::GeomParameter* __restrict__ geoms,
const aten::PrimitiveParamter* __restrict__ prims,
const aten::mat4* __restrict__ matrices,
hipTextureObject_t vtxPos)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
if (path.isTerminate) {
return;
}
// Sample data from texture.
float4 data;
surf2Dread(&data, gbuffer, ix * sizeof(float4), iy);
// NOTE
// x : objid
// y : primid
// zw : bary centroid
int objid = __float_as_int(data.x);
int primid = __float_as_int(data.y);
isects[idx].objid = objid;
isects[idx].primid = primid;
// bary centroid.
isects[idx].a = data.z;
isects[idx].b = data.w;
if (objid >= 0) {
aten::PrimitiveParamter prim;
prim.v0 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 0];
prim.v1 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 1];
isects[idx].mtrlid = prim.mtrlid;
isects[idx].meshid = prim.gemoid;
const auto* obj = &geoms[objid];
float4 p0 = tex1Dfetch<float4>(vtxPos, prim.idx[0]);
float4 p1 = tex1Dfetch<float4>(vtxPos, prim.idx[1]);
float4 p2 = tex1Dfetch<float4>(vtxPos, prim.idx[2]);
real a = data.z;
real b = data.w;
real c = 1 - a - b;
// (barycentric coordinates).
// v0.
// p = (1 - a - b)*v0 + a*v1 + b*v2
auto p = c * p0 + a * p1 + b * p2;
aten::vec4 vp(p.x, p.y, p.z, 1.0f);
if (obj->mtxid >= 0) {
auto mtxL2W = matrices[obj->mtxid * 2 + 0];
vp = mtxL2W.apply(vp);
}
isects[idx].t = (camPos - vp).length();
path.isHit = true;
hitbools[idx] = 1;
}
else {
path.isHit = false;
hitbools[idx] = 0;
}
}
inline __device__ bool intersectsDepthBuffer(float z, float minZ, float maxZ, float zThickness)
{
    // Treat the surface stored in the depth buffer as a slab of thickness zThickness when testing the ray's depth range.
z += zThickness;
return (maxZ >= z) && (minZ - zThickness <= z);
}
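// Marches a camera-space ray through the depth buffer in screen space: the ray
// is projected to pixel coordinates, stepped with a stride/jittered DDA along
// its major axis, and at every step the ray's view-space depth range is tested
// against the stored scene depth via intersectsDepthBuffer(). On success,
// hitPixel receives the pixel coordinate of the hit.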
inline __device__ bool traceScreenSpaceRay(
hipSurfaceObject_t depth,
const aten::vec3& csOrig,
const aten::vec3& csDir,
const aten::mat4& mtxV2C,
int width, int height,
float nearPlaneZ,
float stride,
float jitter,
float2& hitPixel)
{
static const float zThickness = 0.1f;
static const float maxDistance = 1000.0f;
// Clip to the near plane.
float rayLength = (csOrig.z + csDir.z * maxDistance) > -nearPlaneZ
? (-nearPlaneZ - csOrig.z) / csDir.z
: maxDistance;
aten::vec3 csEndPoint = csOrig + csDir * rayLength;
#ifdef DUMP_DEBUG_LOG
printf("rayLenght : %f = (%f - %f) / %f\n", rayLength, nearPlaneZ, csOrig.z, csDir.z);
printf("dir : %f, %f, %f\n", csDir.x, csDir.y, csDir.z);
printf("org : %f, %f, %f\n", csOrig.x, csOrig.y, csOrig.z);
printf("end : %f, %f, %f\n", csEndPoint.x, csEndPoint.y, csEndPoint.z);
#endif
// Project into homogeneous clip space.
aten::vec4 H0 = mtxV2C.apply(aten::vec4(csOrig, 1));
aten::vec4 H1 = mtxV2C.apply(aten::vec4(csEndPoint, 1));
#ifdef DUMP_DEBUG_LOG
printf("H0 : %f, %f, %f, %f\n", H0.x, H0.y, H0.z, H0.w);
printf("H1 : %f, %f, %f, %f\n", H1.x, H1.y, H1.z, H1.w);
#endif
float k0 = 1.0 / H0.w;
float k1 = 1.0 / H1.w;
// The interpolated homogeneous version of the camera-space points.
aten::vec3 Q0 = csOrig * k0;
aten::vec3 Q1 = csEndPoint * k1;
// Screen space point.
aten::vec3 P0 = H0 * k0;
aten::vec3 P1 = H1 * k1;
// [-1, 1] -> [0, 1]
P0 = P0 * 0.5f + 0.5f;
P1 = P1 * 0.5f + 0.5f;
#ifdef DUMP_DEBUG_LOG
printf("P0 : %f, %f, %f\n", P0.x, P0.y, P0.z);
printf("P1 : %f, %f, %f\n", P1.x, P1.y, P1.z);
#endif
P0.x *= width;
P0.y *= height;
P0.z = 0.0f;
P1.x *= width;
P1.y *= height;
P1.z = 0.0f;
#ifdef DUMP_DEBUG_LOG
printf("[%f, %f] -> [%f, %f]\n", P0.x, P0.y, P1.x, P1.y);
#endif
// If the line is degenerate, make it cover at least one pixel to avoid handling zero-pixel extent as a special case later.
P1 += aten::squared_length(P0 - P1) < 0.0001f
? aten::vec3(0.01f)
: aten::vec3(0.0f);
aten::vec3 delta = P1 - P0;
// Permute so that the primary iteration is in x to collapse all quadrant-specific DDA cases later.
bool permute = false;
if (abs(delta.x) < abs(delta.y))
{
permute = true;
aten::swapVal(delta.x, delta.y);
aten::swapVal(P0.x, P0.y);
aten::swapVal(P1.x, P1.y);
}
float stepDir = delta.x < 0.0f ? -1.0f : 0.0f;
stepDir = delta.x > 0.0f ? 1.0f : stepDir;
#ifdef DUMP_DEBUG_LOG
printf("delta %f, %f\n", delta.x, delta.y);
printf("stepDir %f\n", stepDir);
printf("permute %s\n", permute ? "true" : "false");
#endif
float invdx = stepDir / delta.x;
// Track the derivatives of Q and k.
aten::vec3 dQ = (Q1 - Q0) * invdx;
float dk = (k1 - k0) * invdx;
// y is slope.
// slope = (y1 - y0) / (x1 - x0)
aten::vec3 dP = aten::vec3(stepDir, delta.y * invdx, 0.0f);
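    // Because of the permute above, x is always the major axis: each DDA step
    // advances one pixel (scaled by stride) in x and delta.y * invdx pixels in y,
    // while Q.z and k (= 1/w) advance by their matching derivatives so view-space
    // depth can be recovered at any step from the interpolated Q.z and k.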
// Adjust end condition for iteration direction
float end = P1.x * stepDir;
int stepCount = 0;
float prevZMaxEstimate = -csOrig.z;
float rayZMin = prevZMaxEstimate;
float rayZMax = prevZMaxEstimate;
float sceneZMax = rayZMax + 100.0f;
dP *= stride;
dQ *= stride;
dk *= stride;
P0 += dP * jitter;
Q0 += dQ * jitter;
k0 += dk * jitter;
float4 PQk = make_float4(P0.x, P0.y, Q0.z, k0);
float4 dPQk = make_float4(dP.x, dP.y, dQ.z, dk);
aten::vec3 Q = Q0;
#ifdef DUMP_DEBUG_LOG
printf("P0 (%f, %f), dP (%f, %f)\n", P0.x, P0.y, dP.x, dP.y);
#endif
static const int maxSteps = 20;
bool isect = false;
bool breakLoop = false;
for (; (stepCount < maxSteps) && !breakLoop; ++stepCount)
{
        // The near end of this step's depth range is the far end of the previous step.
rayZMin = prevZMaxEstimate;
        // The far end of this step's depth range: evaluate the interpolated point
        // half a pixel ahead and divide Q.z by the interpolated 1/w to recover
        // view-space depth.
rayZMax = -(PQk.z + dPQk.z * 0.5f) / (PQk.w + dPQk.w * 0.5f);
        // Carry the far depth over as the near depth of the next iteration.
prevZMaxEstimate = rayZMax;
float tmpMin = rayZMin;
float tmpMax = rayZMax;
rayZMin = tmpMin > tmpMax ? tmpMax : tmpMin;
rayZMax = tmpMin > tmpMax ? tmpMin : tmpMax;
hitPixel.x = permute ? PQk.y : PQk.x;
hitPixel.y = permute ? PQk.x : PQk.y;
int ix = (int)hitPixel.x;
int iy = (int)hitPixel.y;
if (ix < 0 || ix >= width || iy < 0 || iy >= height) {
return false;
}
#ifdef DUMP_DEBUG_LOG
printf(" [%d] %d, %d\n", stepCount, ix, iy);
#endif
        // Still within the screen-space extent of the ray.
bool b0 = ((PQk.x * stepDir) <= end);
        // The scene depth fetched on the previous iteration was valid (non-zero).
bool b1 = (sceneZMax != 0.0);
        // Fetch the scene depth stored at the current pixel.
float4 data;
surf2Dread(&data, depth, ix * sizeof(float4), iy);
sceneZMax = data.x;
isect = intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax, zThickness);
breakLoop = b0 && b1 && isect;
PQk += dPQk;
}
//auto isect = intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax, zThickness);
#ifdef DUMP_DEBUG_LOG
printf("[%d]%f, %f, %f\n", stepCount, sceneZMax, rayZMin, rayZMax);
printf("(%s)%d, %d\n", isect ? "true" : "false", (int)hitPixel.x, (int)hitPixel.y);
printf("=======\n");
#endif
return isect;
}
__global__ void hitTestInScreenSpace(
hipSurfaceObject_t gbuffer,
hipSurfaceObject_t depth,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
int* hitbools,
int* notIntersectBools,
int width, int height,
float cameraNearPlaneZ,
const aten::mat4 mtxW2V,
const aten::mat4 mtxV2C,
const aten::ray* __restrict__ rays,
const aten::GeomParameter* __restrict__ geoms,
const aten::PrimitiveParamter* __restrict__ prims,
const aten::mat4* __restrict__ matrices,
hipTextureObject_t* nodes,
hipTextureObject_t vtxPos,
hipTextureObject_t vtxNml)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
//int ix = 140;
//int iy = 512 - 320;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
notIntersectBools[idx] = 0;
if (path.isTerminate) {
return;
}
aten::vec3 vsOrig = mtxW2V.apply(rays[idx].org);
aten::vec3 vsDir = normalize(mtxW2V.applyXYZ(rays[idx].dir));
auto d = dot(vsDir, aten::vec3(0, 0, 1));
if (abs(d) > 0.96f) {
notIntersectBools[idx] = 1;
return;
}
// TODO
static const float stride = 15.0f;
float c = (ix + iy) * 0.25f;
float jitter = stride > 1.0f ? fmod(c, 1.0f) : 0.0f;
float2 hitPixel = make_float2(0.0f);
bool isIntersect = traceScreenSpaceRay(
depth,
vsOrig, vsDir,
mtxV2C,
width, height,
cameraNearPlaneZ,
stride, jitter,
hitPixel);
ix = (int)hitPixel.x;
iy = (int)hitPixel.y;
isIntersect = isIntersect && (0 <= ix && ix < width && 0 <= iy && iy < height);
int objid = -1;
int primid = -1;
if (isIntersect) {
// Sample data from texture.
float4 data;
surf2Dread(&data, gbuffer, ix * sizeof(float4), iy);
// NOTE
// x : objid
// y : primid
// zw : bary centroid
objid = __float_as_int(data.x);
primid = __float_as_int(data.y);
isects[idx].objid = objid;
isects[idx].primid = primid;
// bary centroid.
isects[idx].a = data.z;
isects[idx].b = data.w;
#ifdef DUMP_DEBUG_LOG
printf("***\n");
printf("objid %d\n", objid);
printf("primid %d\n", primid);
#endif
aten::PrimitiveParamter prim;
prim.v0 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 0];
prim.v1 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 1];
isects[idx].mtrlid = prim.mtrlid;
isects[idx].meshid = prim.gemoid;
#ifdef DUMP_DEBUG_LOG
printf("mtrlid %d\n", prim.mtrlid);
printf("gemoid %d\n", prim.gemoid);
#endif
}
path.isHit = isIntersect;
hitbools[idx] = isIntersect ? 1 : 0;
notIntersectBools[idx] = isIntersect ? 0 : 1;
}
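// Launch configuration for the persistent-threads BVH fallback below.
// hitTest is launched with NUM_BLOCK blocks of WARP_SIZE x NUM_WARP_PER_BLOCK
// threads; lane 0 of each warp fetches the next WARP_SIZE work items from the
// global counter _headDev via atomicAdd until testNum items are exhausted.
// The SM/warp counts are hand-tuned constants and may need retuning per GPU.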
#define NUM_SM 64 // no. of streaming multiprocessors
#define NUM_WARP_PER_SM 64 // maximum no. of resident warps per SM
#define NUM_BLOCK_PER_SM 32 // maximum no. of resident blocks per SM
#define NUM_BLOCK (NUM_SM * NUM_BLOCK_PER_SM)
#define NUM_WARP_PER_BLOCK (NUM_WARP_PER_SM / NUM_BLOCK_PER_SM)
#define WARP_SIZE 32
__device__ unsigned int _headDev = 0;
__global__ void hitTest(
const int* __restrict__ notIntersectInScreenSpaceIds,
int testNum,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
aten::ray* rays,
int* hitbools,
int width, int height,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
const aten::LightParameter* __restrict__ lights, int lightnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
aten::mat4* matrices)
{
// warp-wise head index of tasks in a block
__shared__ volatile unsigned int headBlock[NUM_WARP_PER_BLOCK];
volatile unsigned int& headWarp = headBlock[threadIdx.y];
if (blockIdx.x == 0 && threadIdx.x == 0) {
_headDev = 0;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
do
{
// let lane 0 fetch [wh, wh + WARP_SIZE - 1] for a warp
if (threadIdx.x == 0) {
headWarp = atomicAdd(&_headDev, WARP_SIZE);
}
// task index per thread in a warp
unsigned int idx = headWarp + threadIdx.x;
if (idx >= testNum) {
return;
}
idx = notIntersectInScreenSpaceIds[idx];
int ix = idx % width;
int iy = idx / width;
idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
if (path.isTerminate) {
continue;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, rays[idx], &isect);
//isects[idx].t = isect.t;
isects[idx].objid = isect.objid;
isects[idx].mtrlid = isect.mtrlid;
isects[idx].meshid = isect.meshid;
isects[idx].primid = isect.primid;
isects[idx].a = isect.a;
isects[idx].b = isect.b;
path.isHit = isHit;
hitbools[idx] = isHit ? 1 : 0;
} while (true);
}
template <bool isFirstBounce>
__global__ void shadeMiss(
idaten::SSRT::Path* paths,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
if (!path.isTerminate && !path.isHit) {
// TODO
auto bg = aten::vec3(0);
if (isFirstBounce) {
path.isKill = true;
}
path.contrib += path.throughput * bg;
path.isTerminate = true;
}
}
template <bool isFirstBounce>
__global__ void shadeMissWithEnvmap(
hipTextureObject_t* textures,
int envmapIdx,
real envmapAvgIllum,
real envmapMultiplyer,
idaten::SSRT::Path* paths,
const aten::ray* __restrict__ rays,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
if (!path.isTerminate && !path.isHit) {
auto r = rays[idx];
auto uv = AT_NAME::envmap::convertDirectionToUV(r.dir);
auto bg = tex2D<float4>(textures[envmapIdx], uv.x, uv.y);
auto emit = aten::vec3(bg.x, bg.y, bg.z);
float misW = 1.0f;
if (isFirstBounce) {
path.isKill = true;
}
else {
auto pdfLight = AT_NAME::ImageBasedLight::samplePdf(emit, envmapAvgIllum);
misW = path.pdfb / (pdfLight + path.pdfb);
emit *= envmapMultiplyer;
}
path.contrib += path.throughput * misW * emit;
path.isTerminate = true;
}
}
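// Per-bounce surface shading for the compacted list of paths that hit
// geometry: emissive hits terminate the path with an MIS weight, one light
// is sampled for next-event estimation (the shadow ray is traced inline or,
// with SEPARATE_SHADOWRAY_HITTEST, recorded for hitShadowRay), Russian
// roulette prunes long paths, and a BSDF sample produces the next ray.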
__global__ void shade(
unsigned int frame,
hipSurfaceObject_t outSurface,
int width, int height,
idaten::SSRT::Path* paths,
const int* __restrict__ hitindices,
int hitnum,
const aten::Intersection* __restrict__ isects,
aten::ray* rays,
int bounce, int rrBounce,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
aten::MaterialParameter* mtrls,
const aten::LightParameter* __restrict__ lights, int lightnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
hipTextureObject_t vtxNml,
const aten::mat4* __restrict__ matrices,
hipTextureObject_t* textures,
const unsigned int* random,
idaten::SSRT::ShadowRay* shadowRays)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= hitnum) {
return;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.mtrls = mtrls;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.vtxNml = vtxNml;
ctxt.matrices = matrices;
ctxt.textures = textures;
}
idx = hitindices[idx];
auto& path = paths[idx];
const auto& ray = rays[idx];
#ifdef ENABLE_PROGRESSIVE
#if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL
auto scramble = random[idx] * 0x1fe3434f;
path.sampler.init(frame, 4 + bounce * 300, scramble);
#elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ
auto rnd = random[idx];
auto scramble = rnd * 0x1fe3434f * ((frame + 331 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM));
path.sampler.init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 4 + bounce * 300, scramble);
#endif
#endif
aten::hitrecord rec;
const auto& isect = isects[idx];
auto obj = &ctxt.shapes[isect.objid];
evalHitResult(&ctxt, obj, ray, &rec, &isect);
aten::MaterialParameter mtrl = ctxt.mtrls[rec.mtrlid];
bool isBackfacing = dot(rec.normal, -ray.dir) < 0.0f;
// Normal at the hit point.
// Oriented to account for the ray entering or leaving the object.
aten::vec3 orienting_normal = rec.normal;
if (mtrl.type != aten::MaterialType::Layer) {
mtrl.albedoMap = (int)(mtrl.albedoMap >= 0 ? ctxt.textures[mtrl.albedoMap] : -1);
mtrl.normalMap = (int)(mtrl.normalMap >= 0 ? ctxt.textures[mtrl.normalMap] : -1);
mtrl.roughnessMap = (int)(mtrl.roughnessMap >= 0 ? ctxt.textures[mtrl.roughnessMap] : -1);
}
// Implicit connection to light.
if (mtrl.attrib.isEmissive) {
if (!isBackfacing) {
float weight = 1.0f;
if (bounce > 0 && !path.isSingular) {
auto cosLight = dot(orienting_normal, -ray.dir);
auto dist2 = aten::squared_length(rec.p - ray.org);
if (cosLight >= 0) {
auto pdfLight = 1 / rec.area;
// Convert pdf from area to steradian.
// http://www.slideshare.net/h013/edubpt-v100
// p31 - p35
pdfLight = pdfLight * dist2 / cosLight;
weight = path.pdfb / (pdfLight + path.pdfb);
}
}
path.contrib += path.throughput * weight * mtrl.baseColor;
}
// When ray hit the light, tracing will finish.
path.isTerminate = true;
return;
}
if (!mtrl.attrib.isTranslucent && isBackfacing) {
orienting_normal = -orienting_normal;
}
// Apply normal map.
int normalMap = mtrl.normalMap;
if (mtrl.type == aten::MaterialType::Layer) {
// Apply the normal map of the top-most layer.
auto* topmtrl = &ctxt.mtrls[mtrl.layer[0]];
normalMap = (int)(topmtrl->normalMap >= 0 ? ctxt.textures[topmtrl->normalMap] : -1);
}
AT_NAME::material::applyNormalMap(normalMap, orienting_normal, orienting_normal, rec.u, rec.v);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].isActive = false;
#endif
// Explicit connection to light.
if (!mtrl.attrib.isSingular)
{
real lightSelectPdf = 1;
aten::LightSampleResult sampleres;
// TODO
// Importance sampling.
int lightidx = aten::cmpMin<int>(path.sampler.nextSample() * lightnum, lightnum - 1);
lightSelectPdf = 1.0f / lightnum;
aten::LightParameter light;
light.pos = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 0];
light.dir = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 1];
light.le = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 2];
light.v0 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 3];
light.v1 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 4];
light.v2 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 5];
//auto light = ctxt.lights[lightidx];
sampleLight(&sampleres, &ctxt, &light, rec.p, orienting_normal, &path.sampler);
const auto& posLight = sampleres.pos;
const auto& nmlLight = sampleres.nml;
real pdfLight = sampleres.pdf;
auto lightobj = sampleres.obj;
auto dirToLight = normalize(sampleres.dir);
auto distToLight = length(posLight - rec.p);
// Ray aim to the area light.
// So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light.
auto hitobj = lightobj;
aten::Intersection isectTmp;
auto shadowRayOrg = rec.p + AT_MATH_EPSILON * orienting_normal;
auto tmp = rec.p + dirToLight - shadowRayOrg;
auto shadowRayDir = normalize(tmp);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].isActive = true;
shadowRays[idx].org = shadowRayOrg;
shadowRays[idx].dir = shadowRayDir;
shadowRays[idx].targetLightId = lightidx;
shadowRays[idx].distToLight = distToLight;
#else
aten::ray shadowRay(shadowRayOrg, shadowRayDir);
real distHitObjToRayOrg = AT_MATH_INF;    // Same sentinel as in hitShadowRay.
bool isHit = intersectCloser(&ctxt, shadowRay, &isectTmp, distToLight - AT_MATH_EPSILON);
if (isHit) {
hitobj = (void*)&ctxt.shapes[isectTmp.objid];
}
isHit = AT_NAME::scene::hitLight(
isHit,
light.attrib,
lightobj,
distToLight,
distHitObjToRayOrg,
isectTmp.t,
hitobj);
if (isHit)
#endif
{
auto cosShadow = dot(orienting_normal, dirToLight);
real pdfb = samplePDF(&ctxt, &mtrl, orienting_normal, ray.dir, dirToLight, rec.u, rec.v);
auto bsdf = sampleBSDF(&ctxt, &mtrl, orienting_normal, ray.dir, dirToLight, rec.u, rec.v);
bsdf *= path.throughput;
// Get light color.
auto emit = sampleres.finalColor;
if (light.attrib.isSingular || light.attrib.isInfinite) {
if (pdfLight > real(0) && cosShadow >= 0) {
// TODO
// About the handling of the geometry term.
// For a singular light, the division by distance is already folded into finalColor.
// For an infinite light, the light is at infinity, so the distance term cancels
// out against the one in pdfLight (?), hence pdfLight carries no distance term.
auto misW = pdfLight / (pdfb + pdfLight);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].lightcontrib =
#else
path.contrib +=
#endif
(misW * bsdf * emit * cosShadow / pdfLight) / lightSelectPdf;
}
}
else {
auto cosLight = dot(nmlLight, -dirToLight);
if (cosShadow >= 0 && cosLight >= 0) {
auto dist2 = aten::squared_length(sampleres.dir);
auto G = cosShadow * cosLight / dist2;
if (pdfb > real(0) && pdfLight > real(0)) {
// Convert pdf from steradian to area.
// http://www.slideshare.net/h013/edubpt-v100
// p31 - p35
pdfb = pdfb * cosLight / dist2;
auto misW = pdfLight / (pdfb + pdfLight);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].lightcontrib =
#else
path.contrib +=
#endif
(misW * (bsdf * emit * G) / pdfLight) / lightSelectPdf;
}
}
}
}
}
real russianProb = real(1);
if (bounce > rrBounce) {
auto t = normalize(path.throughput);
auto p = aten::cmpMax(t.r, aten::cmpMax(t.g, t.b));
russianProb = path.sampler.nextSample();
if (russianProb >= p) {
//path.contrib = aten::vec3(0);
path.isTerminate = true;
}
else {
russianProb = p;
}
}
AT_NAME::MaterialSampling sampling;
sampleMaterial(
&sampling,
&ctxt,
&mtrl,
orienting_normal,
ray.dir,
rec.normal,
&path.sampler,
rec.u, rec.v);
auto nextDir = normalize(sampling.dir);
auto pdfb = sampling.pdf;
auto bsdf = sampling.bsdf;
real c = 1;
if (!mtrl.attrib.isSingular) {
// TODO
// AMD's version takes the absolute value here, but....
//c = aten::abs(dot(orienting_normal, nextDir));
c = dot(orienting_normal, nextDir);
}
if (pdfb > 0 && c > 0) {
path.throughput *= bsdf * c / pdfb;
path.throughput /= russianProb;
}
else {
path.isTerminate = true;
}
// Make next ray.
rays[idx] = aten::ray(rec.p, nextDir);
path.pdfb = pdfb;
path.isSingular = mtrl.attrib.isSingular;
}
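// Resolves the shadow rays recorded by shade when SEPARATE_SHADOWRAY_HITTEST
// is enabled: if the target light is visible from the shading point, the
// stored light contribution is added to the path.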
__global__ void hitShadowRay(
idaten::SSRT::Path* paths,
int* hitindices,
int hitnum,
const idaten::SSRT::ShadowRay* __restrict__ shadowRays,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
aten::MaterialParameter* mtrls,
const aten::LightParameter* __restrict__ lights, int lightnum,
hipTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
hipTextureObject_t vtxPos,
const aten::mat4* __restrict__ matrices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= hitnum) {
return;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.mtrls = mtrls;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
idx = hitindices[idx];
auto& shadowRay = shadowRays[idx];
if (shadowRay.isActive) {
auto light = &ctxt.lights[shadowRay.targetLightId];
auto lightobj = (light->objid >= 0 ? &ctxt.shapes[light->objid] : nullptr);
real distHitObjToRayOrg = AT_MATH_INF;
// Ray aim to the area light.
// So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light.
const aten::GeomParameter* hitobj = lightobj;
aten::Intersection isectTmp;
bool isHit = false;
isHit = intersectCloser(&ctxt, shadowRay, &isectTmp, shadowRay.distToLight - AT_MATH_EPSILON);
if (isHit) {
hitobj = &ctxt.shapes[isectTmp.objid];
}
isHit = AT_NAME::scene::hitLight(
isHit,
light->attrib,
lightobj,
shadowRay.distToLight,
distHitObjToRayOrg,
isectTmp.t,
hitobj);
if (isHit) {
paths[idx].contrib += shadowRay.lightcontrib;
}
}
}
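// Writes the per-path contribution into the output surface. With
// ENABLE_PROGRESSIVE the surface keeps a running average across frames
// (the sample count lives in the w channel); otherwise it is overwritten
// with this frame's average.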
__global__ void gather(
hipSurfaceObject_t outSurface,
const idaten::SSRT::Path* __restrict__ paths,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
const auto& path = paths[idx];
int sample = path.samples;
float4 data;
#ifdef ENABLE_PROGRESSIVE
surf2Dread(&data, outSurface, ix * sizeof(float4), iy);
// First data.w value is 0.
int n = data.w;
data = n * data + make_float4(path.contrib.x, path.contrib.y, path.contrib.z, 0) / sample;
data /= (n + 1);
data.w = n + 1;
#else
data = make_float4(path.contrib.x, path.contrib.y, path.contrib.z, 0) / sample;
data.w = sample;
#endif
surf2Dwrite(
data,
outSurface,
ix * sizeof(float4), iy,
hipBoundaryModeTrap);
}
namespace idaten {
void SSRT::prepare()
{
}
void SSRT::update(
GLuint gltex,
int width, int height,
const aten::CameraParameter& camera,
const std::vector<aten::GeomParameter>& shapes,
const std::vector<aten::MaterialParameter>& mtrls,
const std::vector<aten::LightParameter>& lights,
const std::vector<std::vector<aten::GPUBvhNode>>& nodes,
const std::vector<aten::PrimitiveParamter>& prims,
const std::vector<aten::vertex>& vtxs,
const std::vector<aten::mat4>& mtxs,
const std::vector<TextureResource>& texs,
const EnvmapResource& envmapRsc)
{
idaten::Renderer::update(
gltex,
width, height,
camera,
shapes,
mtrls,
lights,
nodes,
prims,
vtxs,
mtxs,
texs, envmapRsc);
m_hitbools.init(width * height);
m_hitidx.init(width * height);
m_notIntersectInScreenSpaceBools.init(width * height);
m_sobolMatrices.init(AT_COUNTOF(sobol::Matrices::matrices));
m_sobolMatrices.writeByNum(sobol::Matrices::matrices, m_sobolMatrices.maxNum());
auto& r = aten::getRandom();
m_random.init(width * height);
m_random.writeByNum(&r[0], width * height);
}
void SSRT::setGBuffer(
GLuint gltexGbuffer,
GLuint gltexDepth)
{
m_gbuffer.init(gltexGbuffer, idaten::CudaGLRscRegisterType::ReadOnly);
m_depth.init(gltexDepth, idaten::CudaGLRscRegisterType::ReadOnly);
}
#ifdef __AT_DEBUG__
static bool doneSetStackSize = false;
#endif
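// Render loop: for each sample, generate camera paths, then per bounce run
// the hit test (G-buffer for the primary bounce, screen-space march plus
// BVH fallback afterwards), shade misses against the environment, compact
// the surviving hits, and shade them; finally gather into the GL texture.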
void SSRT::render(
int width, int height,
int maxSamples,
int maxBounce)
{
#ifdef __AT_DEBUG__
if (!doneSetStackSize) {
size_t val = 0;
hipThreadGetLimit(&val, hipLimitStackSize);
hipThreadSetLimit(hipLimitStackSize, val * 4);
doneSetStackSize = true;
}
#endif
int bounce = 0;
m_paths.init(width * height);
m_isects.init(width * height);
m_rays.init(width * height);
#ifdef SEPARATE_SHADOWRAY_HITTEST
m_shadowRays.init(width * height);
#endif
hipMemset(m_paths.ptr(), 0, m_paths.bytes());
CudaGLResourceMapper rscmap(&m_glimg);
auto outputSurf = m_glimg.bind();
auto vtxTexPos = m_vtxparamsPos.bind();
auto vtxTexNml = m_vtxparamsNml.bind();
{
std::vector<hipTextureObject_t> tmp;
for (int i = 0; i < m_nodeparam.size(); i++) {
auto nodeTex = m_nodeparam[i].bind();
tmp.push_back(nodeTex);
}
m_nodetex.writeByNum(&tmp[0], tmp.size());
}
if (!m_texRsc.empty())
{
std::vector<hipTextureObject_t> tmp;
for (int i = 0; i < m_texRsc.size(); i++) {
auto cudaTex = m_texRsc[i].bind();
tmp.push_back(cudaTex);
}
m_tex.writeByNum(&tmp[0], tmp.size());
}
static const int rrBounce = 3;
auto time = AT_NAME::timer::getSystemTime();
for (int i = 0; i < maxSamples; i++) {
onGenPath(
width, height,
i, maxSamples,
vtxTexPos,
vtxTexNml);
bounce = 0;
while (bounce < maxBounce) {
onHitTest(
width, height,
bounce,
vtxTexPos,
vtxTexNml);
onShadeMiss(width, height, bounce);
int hitcount = 0;
idaten::Compaction::compact(
m_hitidx,
m_hitbools,
&hitcount);
//AT_PRINTF("%d\n", hitcount);
if (hitcount == 0) {
break;
}
onShade(
outputSurf,
hitcount,
width, height,
bounce, rrBounce,
vtxTexPos, vtxTexNml);
bounce++;
}
}
onGather(outputSurf, width, height, maxSamples);
checkCudaErrors(hipDeviceSynchronize());
m_frame++;
{
m_vtxparamsPos.unbind();
m_vtxparamsNml.unbind();
for (int i = 0; i < m_nodeparam.size(); i++) {
m_nodeparam[i].unbind();
}
m_nodetex.reset();
for (int i = 0; i < m_texRsc.size(); i++) {
m_texRsc[i].unbind();
}
m_tex.reset();
}
}
void SSRT::onGenPath(
int width, int height,
int sample, int maxSamples,
hipTextureObject_t texVtxPos,
hipTextureObject_t texVtxNml)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
genPath << <grid, block >> > (
m_paths.ptr(),
m_rays.ptr(),
width, height,
sample, maxSamples,
m_frame,
m_cam.ptr(),
m_sobolMatrices.ptr(),
m_random.ptr());
checkCudaKernel(genPath);
}
void SSRT::onHitTest(
int width, int height,
int bounce,
hipTextureObject_t texVtxPos,
hipTextureObject_t texVtxNml)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
if (bounce == 0) {
aten::vec4 campos = aten::vec4(m_camParam.origin, 1.0f);
CudaGLResourceMapper rscmap(&m_gbuffer);
auto gbuffer = m_gbuffer.bind();
hitTestPrimaryRayInScreenSpace << <grid, block >> > (
gbuffer,
m_paths.ptr(),
m_isects.ptr(),
m_hitbools.ptr(),
width, height,
campos,
m_shapeparam.ptr(),
m_primparams.ptr(),
m_mtxparams.ptr(),
texVtxPos);
checkCudaKernel(hitTestPrimaryRayInScreenSpace);
}
else {
#if 0
hitTest << <grid, block >> > (
m_paths.ptr(),
m_isects.ptr(),
m_rays.ptr(),
m_hitbools.ptr(),
width, height,
m_shapeparam.ptr(), m_shapeparam.num(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitTest);
#else
aten::mat4 mtxW2V;
aten::mat4 mtxV2C;
mtxW2V.lookat(
m_camParam.origin,
m_camParam.center,
m_camParam.up);
mtxV2C.perspective(
m_camParam.znear,
m_camParam.zfar,
m_camParam.vfov,
m_camParam.aspect);
CudaGLResourceMapper rscmapGbuffer(&m_gbuffer);
CudaGLResourceMapper rscmapDepth(&m_depth);
auto gbuffer = m_gbuffer.bind();
auto depth = m_depth.bind();
hitTestInScreenSpace << <grid, block >> > (
//hitTestInScreenSpace << <1, 1 >> > (
gbuffer, depth,
m_paths.ptr(),
m_isects.ptr(),
m_hitbools.ptr(),
m_notIntersectInScreenSpaceBools.ptr(),
width, height,
m_camParam.znear,
mtxW2V, mtxV2C,
m_rays.ptr(),
m_shapeparam.ptr(),
m_primparams.ptr(),
m_mtxparams.ptr(),
m_nodetex.ptr(),
texVtxPos,
texVtxNml);
checkCudaKernel(hitTestInScreenSpace);
int hitTestCount = 0;
idaten::Compaction::compact(
m_hitidx,
m_notIntersectInScreenSpaceBools,
&hitTestCount);
//AT_PRINTF("BVHTrabers %d\n", hitTestCount);
dim3 blockPerGrid((hitTestCount + 64 - 1) / 64);
dim3 threadPerBlock(64);
//hitTest << <blockPerGrid, threadPerBlock >> > (
hitTest << <NUM_BLOCK, dim3(WARP_SIZE, NUM_WARP_PER_BLOCK) >> > (
m_hitidx.ptr(), hitTestCount,
m_paths.ptr(),
m_isects.ptr(),
m_rays.ptr(),
m_hitbools.ptr(),
width, height,
m_shapeparam.ptr(), m_shapeparam.num(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitTest);
#endif
}
}
void SSRT::onShadeMiss(
int width, int height,
int bounce)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
if (m_envmapRsc.idx >= 0) {
if (bounce == 0) {
shadeMissWithEnvmap<true> << <grid, block >> > (
m_tex.ptr(),
m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer,
m_paths.ptr(),
m_rays.ptr(),
width, height);
}
else {
shadeMissWithEnvmap<false> << <grid, block >> > (
m_tex.ptr(),
m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer,
m_paths.ptr(),
m_rays.ptr(),
width, height);
}
}
else {
if (bounce == 0) {
shadeMiss<true> << <grid, block >> > (
m_paths.ptr(),
width, height);
}
else {
shadeMiss<false> << <grid, block >> > (
m_paths.ptr(),
width, height);
}
}
checkCudaKernel(shadeMiss);
}
void SSRT::onShade(
hipSurfaceObject_t outputSurf,
int hitcount,
int width, int height,
int bounce, int rrBounce,
hipTextureObject_t texVtxPos,
hipTextureObject_t texVtxNml)
{
dim3 blockPerGrid((hitcount + 64 - 1) / 64);
dim3 threadPerBlock(64);
shade << <blockPerGrid, threadPerBlock >> > (
m_frame,
outputSurf,
width, height,
m_paths.ptr(),
m_hitidx.ptr(), hitcount,
m_isects.ptr(),
m_rays.ptr(),
bounce, rrBounce,
m_shapeparam.ptr(), m_shapeparam.num(),
m_mtrlparam.ptr(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos, texVtxNml,
m_mtxparams.ptr(),
m_tex.ptr(),
m_random.ptr(),
m_shadowRays.ptr());
checkCudaKernel(shade);
#ifdef SEPARATE_SHADOWRAY_HITTEST
hitShadowRay << <blockPerGrid, threadPerBlock >> > (
//hitShadowRay << <1, 1 >> > (
m_paths.ptr(),
m_hitidx.ptr(), hitcount,
m_shadowRays.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_mtrlparam.ptr(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitShadowRay);
#endif
}
void SSRT::onGather(
hipSurfaceObject_t outputSurf,
int width, int height,
int maxSamples)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
gather << <grid, block >> > (
outputSurf,
m_paths.ptr(),
width, height);
}
}
| 603e9eb81c1c17991d914a947e29e50f4773a6da.cu | #include "kernel/ssrt.h"
#include "kernel/context.cuh"
#include "kernel/light.cuh"
#include "kernel/material.cuh"
#include "kernel/intersect.cuh"
#include "kernel/accelerator.cuh"
#include "kernel/compaction.h"
#include "kernel/pt_common.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "cuda/helper_math.h"
#include "cuda/cudautil.h"
#include "cuda/cudamemory.h"
#include "aten4idaten.h"
#define SEPARATE_SHADOWRAY_HITTEST
//#define DUMP_DEBUG_LOG
//#define ENABLE_PROGRESSIVE
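// Generates one camera ray per pixel and reseeds the per-path sampler.
// Paths flagged isKill in an earlier bounce are terminated immediately;
// contributions are accumulated across samples, so path.contrib is not reset.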
__global__ void genPath(
idaten::SSRT::Path* paths,
aten::ray* rays,
int width, int height,
int sample, int maxSamples,
unsigned int frame,
const aten::CameraParameter* __restrict__ camera,
const unsigned int* sobolmatrices,
const unsigned int* random)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
if (path.isKill) {
path.isTerminate = true;
return;
}
#ifdef ENABLE_PROGRESSIVE
#if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL
auto scramble = random[idx] * 0x1fe3434f;
path.sampler.init(frame, 0, scramble, sobolmatrices);
#elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ
auto rnd = random[idx];
auto scramble = rnd * 0x1fe3434f * ((frame + 133 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM));
path.sampler.init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 0, scramble);
#endif
#else
auto scramble = (iy * height * 4 + ix * 4) * maxSamples + sample + 1 + frame;
path.sampler.init(frame, 0, scramble, sobolmatrices);
#endif
float s = (ix + path.sampler.nextSample()) / (float)(camera->width);
float t = (iy + path.sampler.nextSample()) / (float)(camera->height);
AT_NAME::CameraSampleResult camsample;
AT_NAME::PinholeCamera::sample(&camsample, camera, s, t);
rays[idx] = camsample.r;
path.throughput = aten::vec3(1);
path.pdfb = 0.0f;
path.isTerminate = false;
path.isSingular = false;
path.samples += 1;
// Accumulate value, so do not reset.
//path.contrib = aten::vec3(0);
}
__global__ void hitTestPrimaryRayInScreenSpace(
cudaSurfaceObject_t gbuffer,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
int* hitbools,
int width, int height,
const aten::vec4 camPos,
const aten::GeomParameter* __restrict__ geoms,
const aten::PrimitiveParamter* __restrict__ prims,
const aten::mat4* __restrict__ matrices,
cudaTextureObject_t vtxPos)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
if (path.isTerminate) {
return;
}
// Sample data from texture.
float4 data;
surf2Dread(&data, gbuffer, ix * sizeof(float4), iy);
// NOTE
// x : objid
// y : primid
// zw : bary centroid
int objid = __float_as_int(data.x);
int primid = __float_as_int(data.y);
isects[idx].objid = objid;
isects[idx].primid = primid;
// bary centroid.
isects[idx].a = data.z;
isects[idx].b = data.w;
if (objid >= 0) {
aten::PrimitiveParamter prim;
prim.v0 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 0];
prim.v1 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 1];
isects[idx].mtrlid = prim.mtrlid;
isects[idx].meshid = prim.gemoid;
const auto* obj = &geoms[objid];
float4 p0 = tex1Dfetch<float4>(vtxPos, prim.idx[0]);
float4 p1 = tex1Dfetch<float4>(vtxPos, prim.idx[1]);
float4 p2 = tex1Dfetch<float4>(vtxPos, prim.idx[2]);
real a = data.z;
real b = data.w;
real c = 1 - a - b;
// Barycentric coordinates, with v0 as the base vertex:
// p = (1 - a - b)*v0 + a*v1 + b*v2
auto p = c * p0 + a * p1 + b * p2;
aten::vec4 vp(p.x, p.y, p.z, 1.0f);
if (obj->mtxid >= 0) {
auto mtxL2W = matrices[obj->mtxid * 2 + 0];
vp = mtxL2W.apply(vp);
}
isects[idx].t = (camPos - vp).length();
path.isHit = true;
hitbools[idx] = 1;
}
else {
path.isHit = false;
hitbools[idx] = 0;
}
}
inline __device__ bool intersectsDepthBuffer(float z, float minZ, float maxZ, float zThickness)
{
// If z lies within the given range (between the ray's start and end depths), treat it as a hit.
z += zThickness;
return (maxZ >= z) && (minZ - zThickness <= z);
}
inline __device__ bool traceScreenSpaceRay(
cudaSurfaceObject_t depth,
const aten::vec3& csOrig,
const aten::vec3& csDir,
const aten::mat4& mtxV2C,
int width, int height,
float nearPlaneZ,
float stride,
float jitter,
float2& hitPixel)
{
static const float zThickness = 0.1f;
static const float maxDistance = 1000.0f;
// Clip to the near plane.
float rayLength = (csOrig.z + csDir.z * maxDistance) > -nearPlaneZ
? (-nearPlaneZ - csOrig.z) / csDir.z
: maxDistance;
aten::vec3 csEndPoint = csOrig + csDir * rayLength;
#ifdef DUMP_DEBUG_LOG
printf("rayLenght : %f = (%f - %f) / %f\n", rayLength, nearPlaneZ, csOrig.z, csDir.z);
printf("dir : %f, %f, %f\n", csDir.x, csDir.y, csDir.z);
printf("org : %f, %f, %f\n", csOrig.x, csOrig.y, csOrig.z);
printf("end : %f, %f, %f\n", csEndPoint.x, csEndPoint.y, csEndPoint.z);
#endif
// Project into homogeneous clip space.
aten::vec4 H0 = mtxV2C.apply(aten::vec4(csOrig, 1));
aten::vec4 H1 = mtxV2C.apply(aten::vec4(csEndPoint, 1));
#ifdef DUMP_DEBUG_LOG
printf("H0 : %f, %f, %f, %f\n", H0.x, H0.y, H0.z, H0.w);
printf("H1 : %f, %f, %f, %f\n", H1.x, H1.y, H1.z, H1.w);
#endif
float k0 = 1.0 / H0.w;
float k1 = 1.0 / H1.w;
// The interpolated homogeneous version of the camera-space points.
aten::vec3 Q0 = csOrig * k0;
aten::vec3 Q1 = csEndPoint * k1;
// Screen space point.
aten::vec3 P0 = H0 * k0;
aten::vec3 P1 = H1 * k1;
// [-1, 1] -> [0, 1]
P0 = P0 * 0.5f + 0.5f;
P1 = P1 * 0.5f + 0.5f;
#ifdef DUMP_DEBUG_LOG
printf("P0 : %f, %f, %f\n", P0.x, P0.y, P0.z);
printf("P1 : %f, %f, %f\n", P1.x, P1.y, P1.z);
#endif
P0.x *= width;
P0.y *= height;
P0.z = 0.0f;
P1.x *= width;
P1.y *= height;
P1.z = 0.0f;
#ifdef DUMP_DEBUG_LOG
printf("[%f, %f] -> [%f, %f]\n", P0.x, P0.y, P1.x, P1.y);
#endif
// If the line is degenerate, make it cover at least one pixel to avoid handling zero-pixel extent as a special case later.
// Keep the two endpoints at least slightly apart.
P1 += aten::squared_length(P0 - P1) < 0.0001f
? aten::vec3(0.01f)
: aten::vec3(0.0f);
aten::vec3 delta = P1 - P0;
// Permute so that the primary iteration is in x to collapse all quadrant-specific DDA cases later.
bool permute = false;
if (abs(delta.x) < abs(delta.y))
{
permute = true;
aten::swapVal(delta.x, delta.y);
aten::swapVal(P0.x, P0.y);
aten::swapVal(P1.x, P1.y);
}
float stepDir = delta.x < 0.0f ? -1.0f : 0.0f;
stepDir = delta.x > 0.0f ? 1.0f : stepDir;
#ifdef DUMP_DEBUG_LOG
printf("delta %f, %f\n", delta.x, delta.y);
printf("stepDir %f\n", stepDir);
printf("permute %s\n", permute ? "true" : "false");
#endif
float invdx = stepDir / delta.x;
// Track the derivatives of Q and k.
aten::vec3 dQ = (Q1 - Q0) * invdx;
float dk = (k1 - k0) * invdx;
// y is slope.
// slope = (y1 - y0) / (x1 - x0)
aten::vec3 dP = aten::vec3(stepDir, delta.y * invdx, 0.0f);
// Adjust end condition for iteration direction
float end = P1.x * stepDir;
int stepCount = 0;
float prevZMaxEstimate = -csOrig.z;
float rayZMin = prevZMaxEstimate;
float rayZMax = prevZMaxEstimate;
float sceneZMax = rayZMax + 100.0f;
dP *= stride;
dQ *= stride;
dk *= stride;
P0 += dP * jitter;
Q0 += dQ * jitter;
k0 += dk * jitter;
float4 PQk = make_float4(P0.x, P0.y, Q0.z, k0);
float4 dPQk = make_float4(dP.x, dP.y, dQ.z, dk);
aten::vec3 Q = Q0;
#ifdef DUMP_DEBUG_LOG
printf("P0 (%f, %f), dP (%f, %f)\n", P0.x, P0.y, dP.x, dP.y);
#endif
static const int maxSteps = 20;
bool isect = false;
bool breakLoop = false;
for (; (stepCount < maxSteps) && !breakLoop; ++stepCount)
{
// The previous step's maximum Z becomes this step's minimum.
rayZMin = prevZMaxEstimate;
// Compute the maximum Z for this step, with a half-pixel margin.
// Q was divided by w, so dividing by k (= 1/w) here brings it back to view space.
rayZMax = -(PQk.z + dPQk.z * 0.5f) / (PQk.w + dPQk.w * 0.5f);
// Keep this maximum for the next iteration.
prevZMaxEstimate = rayZMax;
float tmpMin = rayZMin;
float tmpMax = rayZMax;
rayZMin = tmpMin > tmpMax ? tmpMax : tmpMin;
rayZMax = tmpMin > tmpMax ? tmpMin : tmpMax;
hitPixel.x = permute ? PQk.y : PQk.x;
hitPixel.y = permute ? PQk.x : PQk.y;
int ix = (int)hitPixel.x;
int iy = (int)hitPixel.y;
if (ix < 0 || ix >= width || iy < 0 || iy >= height) {
return false;
}
#ifdef DUMP_DEBUG_LOG
printf(" [%d] %d, %d\n", stepCount, ix, iy);
#endif
// Still short of the end point?
bool b0 = ((PQk.x * stepDir) <= end);
// Previous depth sample was not empty space?
bool b1 = (sceneZMax != 0.0);
// Fetch the scene depth at the current pixel.
float4 data;
surf2Dread(&data, depth, ix * sizeof(float4), iy);
sceneZMax = data.x;
isect = intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax, zThickness);
breakLoop = b0 && b1 && isect;
PQk += dPQk;
}
//auto isect = intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax, zThickness);
#ifdef DUMP_DEBUG_LOG
printf("[%d]%f, %f, %f\n", stepCount, sceneZMax, rayZMin, rayZMax);
printf("(%s)%d, %d\n", isect ? "true" : "false", (int)hitPixel.x, (int)hitPixel.y);
printf("=======\n");
#endif
return isect;
}
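// Screen-space hit test for secondary bounces (see traceScreenSpaceRay above);
// rays that graze the view axis or leave the screen are flagged in
// notIntersectBools for the persistent-threads BVH kernel further below.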
__global__ void hitTestInScreenSpace(
cudaSurfaceObject_t gbuffer,
cudaSurfaceObject_t depth,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
int* hitbools,
int* notIntersectBools,
int width, int height,
float cameraNearPlaneZ,
const aten::mat4 mtxW2V,
const aten::mat4 mtxV2C,
const aten::ray* __restrict__ rays,
const aten::GeomParameter* __restrict__ geoms,
const aten::PrimitiveParamter* __restrict__ prims,
const aten::mat4* __restrict__ matrices,
cudaTextureObject_t* nodes,
cudaTextureObject_t vtxPos,
cudaTextureObject_t vtxNml)
{
auto ix = blockIdx.x * blockDim.x + threadIdx.x;
auto iy = blockIdx.y * blockDim.y + threadIdx.y;
//int ix = 140;
//int iy = 512 - 320;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
notIntersectBools[idx] = 0;
if (path.isTerminate) {
return;
}
aten::vec3 vsOrig = mtxW2V.apply(rays[idx].org);
aten::vec3 vsDir = normalize(mtxW2V.applyXYZ(rays[idx].dir));
auto d = dot(vsDir, aten::vec3(0, 0, 1));
if (abs(d) > 0.96f) {
notIntersectBools[idx] = 1;
return;
}
// TODO
static const float stride = 15.0f;
float c = (ix + iy) * 0.25f;
float jitter = stride > 1.0f ? fmod(c, 1.0f) : 0.0f;
float2 hitPixel = make_float2(0.0f);
bool isIntersect = traceScreenSpaceRay(
depth,
vsOrig, vsDir,
mtxV2C,
width, height,
cameraNearPlaneZ,
stride, jitter,
hitPixel);
ix = (int)hitPixel.x;
iy = (int)hitPixel.y;
isIntersect = isIntersect && (0 <= ix && ix < width && 0 <= iy && iy < height);
int objid = -1;
int primid = -1;
if (isIntersect) {
// Sample data from texture.
float4 data;
surf2Dread(&data, gbuffer, ix * sizeof(float4), iy);
// NOTE
// x : objid
// y : primid
// zw : bary centroid
objid = __float_as_int(data.x);
primid = __float_as_int(data.y);
isects[idx].objid = objid;
isects[idx].primid = primid;
// bary centroid.
isects[idx].a = data.z;
isects[idx].b = data.w;
#ifdef DUMP_DEBUG_LOG
printf("***\n");
printf("objid %d\n", objid);
printf("primid %d\n", primid);
#endif
aten::PrimitiveParamter prim;
prim.v0 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 0];
prim.v1 = ((aten::vec4*)prims)[primid * aten::PrimitiveParamter_float4_size + 1];
isects[idx].mtrlid = prim.mtrlid;
isects[idx].meshid = prim.gemoid;
#ifdef DUMP_DEBUG_LOG
printf("mtrlid %d\n", prim.mtrlid);
printf("gemoid %d\n", prim.gemoid);
#endif
}
path.isHit = isIntersect;
hitbools[idx] = isIntersect ? 1 : 0;
notIntersectBools[idx] = isIntersect ? 0 : 1;
}
#define NUM_SM 64 // no. of streaming multiprocessors
#define NUM_WARP_PER_SM 64 // maximum no. of resident warps per SM
#define NUM_BLOCK_PER_SM 32 // maximum no. of resident blocks per SM
#define NUM_BLOCK (NUM_SM * NUM_BLOCK_PER_SM)
#define NUM_WARP_PER_BLOCK (NUM_WARP_PER_SM / NUM_BLOCK_PER_SM)
#define WARP_SIZE 32
__device__ unsigned int _headDev = 0;
__global__ void hitTest(
const int* __restrict__ notIntersectInScreenSpaceIds,
int testNum,
idaten::SSRT::Path* paths,
aten::Intersection* isects,
aten::ray* rays,
int* hitbools,
int width, int height,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
const aten::LightParameter* __restrict__ lights, int lightnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
aten::mat4* matrices)
{
// warp-wise head index of tasks in a block
__shared__ volatile unsigned int headBlock[NUM_WARP_PER_BLOCK];
volatile unsigned int& headWarp = headBlock[threadIdx.y];
if (blockIdx.x == 0 && threadIdx.x == 0) {
_headDev = 0;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
do
{
// let lane 0 fetch [wh, wh + WARP_SIZE - 1] for a warp
if (threadIdx.x == 0) {
headWarp = atomicAdd(&_headDev, WARP_SIZE);
}
// task index per thread in a warp
unsigned int idx = headWarp + threadIdx.x;
if (idx >= testNum) {
return;
}
idx = notIntersectInScreenSpaceIds[idx];
int ix = idx % width;
int iy = idx / width;
idx = getIdx(ix, iy, width);
auto& path = paths[idx];
path.isHit = false;
hitbools[idx] = 0;
if (path.isTerminate) {
continue;
}
aten::Intersection isect;
bool isHit = intersectClosest(&ctxt, rays[idx], &isect);
//isects[idx].t = isect.t;
isects[idx].objid = isect.objid;
isects[idx].mtrlid = isect.mtrlid;
isects[idx].meshid = isect.meshid;
isects[idx].primid = isect.primid;
isects[idx].a = isect.a;
isects[idx].b = isect.b;
path.isHit = isHit;
hitbools[idx] = isHit ? 1 : 0;
} while (true);
}
template <bool isFirstBounce>
__global__ void shadeMiss(
idaten::SSRT::Path* paths,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
if (!path.isTerminate && !path.isHit) {
// TODO
auto bg = aten::vec3(0);
if (isFirstBounce) {
path.isKill = true;
}
path.contrib += path.throughput * bg;
path.isTerminate = true;
}
}
template <bool isFirstBounce>
__global__ void shadeMissWithEnvmap(
cudaTextureObject_t* textures,
int envmapIdx,
real envmapAvgIllum,
real envmapMultiplyer,
idaten::SSRT::Path* paths,
const aten::ray* __restrict__ rays,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
auto& path = paths[idx];
if (!path.isTerminate && !path.isHit) {
auto r = rays[idx];
auto uv = AT_NAME::envmap::convertDirectionToUV(r.dir);
auto bg = tex2D<float4>(textures[envmapIdx], uv.x, uv.y);
auto emit = aten::vec3(bg.x, bg.y, bg.z);
float misW = 1.0f;
if (isFirstBounce) {
path.isKill = true;
}
else {
auto pdfLight = AT_NAME::ImageBasedLight::samplePdf(emit, envmapAvgIllum);
misW = path.pdfb / (pdfLight + path.pdfb);
emit *= envmapMultiplyer;
}
path.contrib += path.throughput * misW * emit;
path.isTerminate = true;
}
}
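// Surface shading for compacted hits: implicit and explicit light
// connections are combined with MIS, Russian roulette terminates long
// paths, and the sampled BSDF direction becomes the next bounce's ray.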
__global__ void shade(
unsigned int frame,
cudaSurfaceObject_t outSurface,
int width, int height,
idaten::SSRT::Path* paths,
const int* __restrict__ hitindices,
int hitnum,
const aten::Intersection* __restrict__ isects,
aten::ray* rays,
int bounce, int rrBounce,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
aten::MaterialParameter* mtrls,
const aten::LightParameter* __restrict__ lights, int lightnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
cudaTextureObject_t vtxNml,
const aten::mat4* __restrict__ matrices,
cudaTextureObject_t* textures,
const unsigned int* random,
idaten::SSRT::ShadowRay* shadowRays)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= hitnum) {
return;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.mtrls = mtrls;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.vtxNml = vtxNml;
ctxt.matrices = matrices;
ctxt.textures = textures;
}
idx = hitindices[idx];
auto& path = paths[idx];
const auto& ray = rays[idx];
#ifdef ENABLE_PROGRESSIVE
#if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL
auto scramble = random[idx] * 0x1fe3434f;
path.sampler.init(frame, 4 + bounce * 300, scramble);
#elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ
auto rnd = random[idx];
auto scramble = rnd * 0x1fe3434f * ((frame + 331 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM));
path.sampler.init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 4 + bounce * 300, scramble);
#endif
#endif
aten::hitrecord rec;
const auto& isect = isects[idx];
auto obj = &ctxt.shapes[isect.objid];
evalHitResult(&ctxt, obj, ray, &rec, &isect);
aten::MaterialParameter mtrl = ctxt.mtrls[rec.mtrlid];
bool isBackfacing = dot(rec.normal, -ray.dir) < 0.0f;
// Normal at the hit point.
// Oriented to account for the ray entering or leaving the object.
aten::vec3 orienting_normal = rec.normal;
if (mtrl.type != aten::MaterialType::Layer) {
mtrl.albedoMap = (int)(mtrl.albedoMap >= 0 ? ctxt.textures[mtrl.albedoMap] : -1);
mtrl.normalMap = (int)(mtrl.normalMap >= 0 ? ctxt.textures[mtrl.normalMap] : -1);
mtrl.roughnessMap = (int)(mtrl.roughnessMap >= 0 ? ctxt.textures[mtrl.roughnessMap] : -1);
}
// Implicit connection to light.
if (mtrl.attrib.isEmissive) {
if (!isBackfacing) {
float weight = 1.0f;
if (bounce > 0 && !path.isSingular) {
auto cosLight = dot(orienting_normal, -ray.dir);
auto dist2 = aten::squared_length(rec.p - ray.org);
if (cosLight >= 0) {
auto pdfLight = 1 / rec.area;
// Convert pdf from area to steradian.
// http://www.slideshare.net/h013/edubpt-v100
// p31 - p35
pdfLight = pdfLight * dist2 / cosLight;
weight = path.pdfb / (pdfLight + path.pdfb);
}
}
path.contrib += path.throughput * weight * mtrl.baseColor;
}
// When ray hit the light, tracing will finish.
path.isTerminate = true;
return;
}
if (!mtrl.attrib.isTranslucent && isBackfacing) {
orienting_normal = -orienting_normal;
}
// Apply normal map.
int normalMap = mtrl.normalMap;
if (mtrl.type == aten::MaterialType::Layer) {
// Apply the normal map of the top-most layer.
auto* topmtrl = &ctxt.mtrls[mtrl.layer[0]];
normalMap = (int)(topmtrl->normalMap >= 0 ? ctxt.textures[topmtrl->normalMap] : -1);
}
AT_NAME::material::applyNormalMap(normalMap, orienting_normal, orienting_normal, rec.u, rec.v);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].isActive = false;
#endif
// Explicit connection to light.
if (!mtrl.attrib.isSingular)
{
real lightSelectPdf = 1;
aten::LightSampleResult sampleres;
// TODO
// Importance sampling.
int lightidx = aten::cmpMin<int>(path.sampler.nextSample() * lightnum, lightnum - 1);
lightSelectPdf = 1.0f / lightnum;
aten::LightParameter light;
light.pos = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 0];
light.dir = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 1];
light.le = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 2];
light.v0 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 3];
light.v1 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 4];
light.v2 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 5];
//auto light = ctxt.lights[lightidx];
sampleLight(&sampleres, &ctxt, &light, rec.p, orienting_normal, &path.sampler);
const auto& posLight = sampleres.pos;
const auto& nmlLight = sampleres.nml;
real pdfLight = sampleres.pdf;
auto lightobj = sampleres.obj;
auto dirToLight = normalize(sampleres.dir);
auto distToLight = length(posLight - rec.p);
// Ray aim to the area light.
// So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light.
auto hitobj = lightobj;
aten::Intersection isectTmp;
auto shadowRayOrg = rec.p + AT_MATH_EPSILON * orienting_normal;
auto tmp = rec.p + dirToLight - shadowRayOrg;
auto shadowRayDir = normalize(tmp);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].isActive = true;
shadowRays[idx].org = shadowRayOrg;
shadowRays[idx].dir = shadowRayDir;
shadowRays[idx].targetLightId = lightidx;
shadowRays[idx].distToLight = distToLight;
#else
aten::ray shadowRay(shadowRayOrg, shadowRayDir);
real distHitObjToRayOrg = AT_MATH_INF;    // Same sentinel as in hitShadowRay.
bool isHit = intersectCloser(&ctxt, shadowRay, &isectTmp, distToLight - AT_MATH_EPSILON);
if (isHit) {
hitobj = (void*)&ctxt.shapes[isectTmp.objid];
}
isHit = AT_NAME::scene::hitLight(
isHit,
light.attrib,
lightobj,
distToLight,
distHitObjToRayOrg,
isectTmp.t,
hitobj);
if (isHit)
#endif
{
auto cosShadow = dot(orienting_normal, dirToLight);
real pdfb = samplePDF(&ctxt, &mtrl, orienting_normal, ray.dir, dirToLight, rec.u, rec.v);
auto bsdf = sampleBSDF(&ctxt, &mtrl, orienting_normal, ray.dir, dirToLight, rec.u, rec.v);
bsdf *= path.throughput;
// Get light color.
auto emit = sampleres.finalColor;
if (light.attrib.isSingular || light.attrib.isInfinite) {
if (pdfLight > real(0) && cosShadow >= 0) {
// TODO
// About the handling of the geometry term.
// For a singular light, the division by distance is already folded into finalColor.
// For an infinite light, the light is at infinity, so the distance term cancels
// out against the one in pdfLight (?), hence pdfLight carries no distance term.
auto misW = pdfLight / (pdfb + pdfLight);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].lightcontrib =
#else
path.contrib +=
#endif
(misW * bsdf * emit * cosShadow / pdfLight) / lightSelectPdf;
}
}
else {
auto cosLight = dot(nmlLight, -dirToLight);
if (cosShadow >= 0 && cosLight >= 0) {
auto dist2 = aten::squared_length(sampleres.dir);
auto G = cosShadow * cosLight / dist2;
if (pdfb > real(0) && pdfLight > real(0)) {
// Convert pdf from steradian to area.
// http://www.slideshare.net/h013/edubpt-v100
// p31 - p35
pdfb = pdfb * cosLight / dist2;
auto misW = pdfLight / (pdfb + pdfLight);
#ifdef SEPARATE_SHADOWRAY_HITTEST
shadowRays[idx].lightcontrib =
#else
path.contrib +=
#endif
(misW * (bsdf * emit * G) / pdfLight) / lightSelectPdf;
}
}
}
}
}
real russianProb = real(1);
if (bounce > rrBounce) {
auto t = normalize(path.throughput);
auto p = aten::cmpMax(t.r, aten::cmpMax(t.g, t.b));
russianProb = path.sampler.nextSample();
if (russianProb >= p) {
//path.contrib = aten::vec3(0);
path.isTerminate = true;
}
else {
russianProb = p;
}
}
AT_NAME::MaterialSampling sampling;
sampleMaterial(
&sampling,
&ctxt,
&mtrl,
orienting_normal,
ray.dir,
rec.normal,
&path.sampler,
rec.u, rec.v);
auto nextDir = normalize(sampling.dir);
auto pdfb = sampling.pdf;
auto bsdf = sampling.bsdf;
real c = 1;
if (!mtrl.attrib.isSingular) {
// TODO
// AMD's version takes the absolute value here, but....
//c = aten::abs(dot(orienting_normal, nextDir));
c = dot(orienting_normal, nextDir);
}
if (pdfb > 0 && c > 0) {
path.throughput *= bsdf * c / pdfb;
path.throughput /= russianProb;
}
else {
path.isTerminate = true;
}
// Make next ray.
rays[idx] = aten::ray(rec.p, nextDir);
path.pdfb = pdfb;
path.isSingular = mtrl.attrib.isSingular;
}
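// Deferred shadow-ray resolution: adds the light contribution recorded by
// shade whenever the sampled light turns out to be visible.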
__global__ void hitShadowRay(
idaten::SSRT::Path* paths,
int* hitindices,
int hitnum,
const idaten::SSRT::ShadowRay* __restrict__ shadowRays,
const aten::GeomParameter* __restrict__ shapes, int geomnum,
aten::MaterialParameter* mtrls,
const aten::LightParameter* __restrict__ lights, int lightnum,
cudaTextureObject_t* nodes,
const aten::PrimitiveParamter* __restrict__ prims,
cudaTextureObject_t vtxPos,
const aten::mat4* __restrict__ matrices)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= hitnum) {
return;
}
Context ctxt;
{
ctxt.geomnum = geomnum;
ctxt.shapes = shapes;
ctxt.mtrls = mtrls;
ctxt.lightnum = lightnum;
ctxt.lights = lights;
ctxt.nodes = nodes;
ctxt.prims = prims;
ctxt.vtxPos = vtxPos;
ctxt.matrices = matrices;
}
idx = hitindices[idx];
auto& shadowRay = shadowRays[idx];
if (shadowRay.isActive) {
auto light = &ctxt.lights[shadowRay.targetLightId];
auto lightobj = (light->objid >= 0 ? &ctxt.shapes[light->objid] : nullptr);
real distHitObjToRayOrg = AT_MATH_INF;
// Ray aim to the area light.
// So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light.
const aten::GeomParameter* hitobj = lightobj;
aten::Intersection isectTmp;
bool isHit = false;
isHit = intersectCloser(&ctxt, shadowRay, &isectTmp, shadowRay.distToLight - AT_MATH_EPSILON);
if (isHit) {
hitobj = &ctxt.shapes[isectTmp.objid];
}
isHit = AT_NAME::scene::hitLight(
isHit,
light->attrib,
lightobj,
shadowRay.distToLight,
distHitObjToRayOrg,
isectTmp.t,
hitobj);
if (isHit) {
paths[idx].contrib += shadowRay.lightcontrib;
}
}
}
__global__ void gather(
cudaSurfaceObject_t outSurface,
const idaten::SSRT::Path* __restrict__ paths,
int width, int height)
{
const auto ix = blockIdx.x * blockDim.x + threadIdx.x;
const auto iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix >= width || iy >= height) {
return;
}
const auto idx = getIdx(ix, iy, width);
const auto& path = paths[idx];
int sample = path.samples;
float4 data;
#ifdef ENABLE_PROGRESSIVE
surf2Dread(&data, outSurface, ix * sizeof(float4), iy);
// First data.w value is 0.
int n = data.w;
data = n * data + make_float4(path.contrib.x, path.contrib.y, path.contrib.z, 0) / sample;
data /= (n + 1);
data.w = n + 1;
#else
data = make_float4(path.contrib.x, path.contrib.y, path.contrib.z, 0) / sample;
data.w = sample;
#endif
surf2Dwrite(
data,
outSurface,
ix * sizeof(float4), iy,
cudaBoundaryModeTrap);
}
namespace idaten {
void SSRT::prepare()
{
}
void SSRT::update(
GLuint gltex,
int width, int height,
const aten::CameraParameter& camera,
const std::vector<aten::GeomParameter>& shapes,
const std::vector<aten::MaterialParameter>& mtrls,
const std::vector<aten::LightParameter>& lights,
const std::vector<std::vector<aten::GPUBvhNode>>& nodes,
const std::vector<aten::PrimitiveParamter>& prims,
const std::vector<aten::vertex>& vtxs,
const std::vector<aten::mat4>& mtxs,
const std::vector<TextureResource>& texs,
const EnvmapResource& envmapRsc)
{
idaten::Renderer::update(
gltex,
width, height,
camera,
shapes,
mtrls,
lights,
nodes,
prims,
vtxs,
mtxs,
texs, envmapRsc);
m_hitbools.init(width * height);
m_hitidx.init(width * height);
m_notIntersectInScreenSpaceBools.init(width * height);
m_sobolMatrices.init(AT_COUNTOF(sobol::Matrices::matrices));
m_sobolMatrices.writeByNum(sobol::Matrices::matrices, m_sobolMatrices.maxNum());
auto& r = aten::getRandom();
m_random.init(width * height);
m_random.writeByNum(&r[0], width * height);
}
void SSRT::setGBuffer(
GLuint gltexGbuffer,
GLuint gltexDepth)
{
m_gbuffer.init(gltexGbuffer, idaten::CudaGLRscRegisterType::ReadOnly);
m_depth.init(gltexDepth, idaten::CudaGLRscRegisterType::ReadOnly);
}
#ifdef __AT_DEBUG__
static bool doneSetStackSize = false;
#endif
void SSRT::render(
int width, int height,
int maxSamples,
int maxBounce)
{
#ifdef __AT_DEBUG__
if (!doneSetStackSize) {
size_t val = 0;
cudaThreadGetLimit(&val, cudaLimitStackSize);
cudaThreadSetLimit(cudaLimitStackSize, val * 4);
doneSetStackSize = true;
}
#endif
int bounce = 0;
m_paths.init(width * height);
m_isects.init(width * height);
m_rays.init(width * height);
#ifdef SEPARATE_SHADOWRAY_HITTEST
m_shadowRays.init(width * height);
#endif
cudaMemset(m_paths.ptr(), 0, m_paths.bytes());
CudaGLResourceMapper rscmap(&m_glimg);
auto outputSurf = m_glimg.bind();
auto vtxTexPos = m_vtxparamsPos.bind();
auto vtxTexNml = m_vtxparamsNml.bind();
{
std::vector<cudaTextureObject_t> tmp;
for (int i = 0; i < m_nodeparam.size(); i++) {
auto nodeTex = m_nodeparam[i].bind();
tmp.push_back(nodeTex);
}
m_nodetex.writeByNum(&tmp[0], tmp.size());
}
if (!m_texRsc.empty())
{
std::vector<cudaTextureObject_t> tmp;
for (int i = 0; i < m_texRsc.size(); i++) {
auto cudaTex = m_texRsc[i].bind();
tmp.push_back(cudaTex);
}
m_tex.writeByNum(&tmp[0], tmp.size());
}
static const int rrBounce = 3;
auto time = AT_NAME::timer::getSystemTime();
for (int i = 0; i < maxSamples; i++) {
onGenPath(
width, height,
i, maxSamples,
vtxTexPos,
vtxTexNml);
bounce = 0;
while (bounce < maxBounce) {
onHitTest(
width, height,
bounce,
vtxTexPos,
vtxTexNml);
onShadeMiss(width, height, bounce);
int hitcount = 0;
idaten::Compaction::compact(
m_hitidx,
m_hitbools,
&hitcount);
//AT_PRINTF("%d\n", hitcount);
if (hitcount == 0) {
break;
}
onShade(
outputSurf,
hitcount,
width, height,
bounce, rrBounce,
vtxTexPos, vtxTexNml);
bounce++;
}
}
onGather(outputSurf, width, height, maxSamples);
checkCudaErrors(cudaDeviceSynchronize());
m_frame++;
{
m_vtxparamsPos.unbind();
m_vtxparamsNml.unbind();
for (int i = 0; i < m_nodeparam.size(); i++) {
m_nodeparam[i].unbind();
}
m_nodetex.reset();
for (int i = 0; i < m_texRsc.size(); i++) {
m_texRsc[i].unbind();
}
m_tex.reset();
}
}
void SSRT::onGenPath(
int width, int height,
int sample, int maxSamples,
cudaTextureObject_t texVtxPos,
cudaTextureObject_t texVtxNml)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
genPath << <grid, block >> > (
m_paths.ptr(),
m_rays.ptr(),
width, height,
sample, maxSamples,
m_frame,
m_cam.ptr(),
m_sobolMatrices.ptr(),
m_random.ptr());
checkCudaKernel(genPath);
}
void SSRT::onHitTest(
int width, int height,
int bounce,
cudaTextureObject_t texVtxPos,
cudaTextureObject_t texVtxNml)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
if (bounce == 0) {
aten::vec4 campos = aten::vec4(m_camParam.origin, 1.0f);
CudaGLResourceMapper rscmap(&m_gbuffer);
auto gbuffer = m_gbuffer.bind();
hitTestPrimaryRayInScreenSpace << <grid, block >> > (
gbuffer,
m_paths.ptr(),
m_isects.ptr(),
m_hitbools.ptr(),
width, height,
campos,
m_shapeparam.ptr(),
m_primparams.ptr(),
m_mtxparams.ptr(),
texVtxPos);
checkCudaKernel(hitTestPrimaryRayInScreenSpace);
}
else {
#if 0
hitTest << <grid, block >> > (
m_paths.ptr(),
m_isects.ptr(),
m_rays.ptr(),
m_hitbools.ptr(),
width, height,
m_shapeparam.ptr(), m_shapeparam.num(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitTest);
#else
aten::mat4 mtxW2V;
aten::mat4 mtxV2C;
mtxW2V.lookat(
m_camParam.origin,
m_camParam.center,
m_camParam.up);
mtxV2C.perspective(
m_camParam.znear,
m_camParam.zfar,
m_camParam.vfov,
m_camParam.aspect);
CudaGLResourceMapper rscmapGbuffer(&m_gbuffer);
CudaGLResourceMapper rscmapDepth(&m_depth);
auto gbuffer = m_gbuffer.bind();
auto depth = m_depth.bind();
hitTestInScreenSpace << <grid, block >> > (
//hitTestInScreenSpace << <1, 1 >> > (
gbuffer, depth,
m_paths.ptr(),
m_isects.ptr(),
m_hitbools.ptr(),
m_notIntersectInScreenSpaceBools.ptr(),
width, height,
m_camParam.znear,
mtxW2V, mtxV2C,
m_rays.ptr(),
m_shapeparam.ptr(),
m_primparams.ptr(),
m_mtxparams.ptr(),
m_nodetex.ptr(),
texVtxPos,
texVtxNml);
checkCudaKernel(hitTestInScreenSpace);
int hitTestCount = 0;
idaten::Compaction::compact(
m_hitidx,
m_notIntersectInScreenSpaceBools,
&hitTestCount);
//AT_PRINTF("BVHTrabers %d\n", hitTestCount);
dim3 blockPerGrid((hitTestCount + 64 - 1) / 64);
dim3 threadPerBlock(64);
//hitTest << <blockPerGrid, threadPerBlock >> > (
hitTest << <NUM_BLOCK, dim3(WARP_SIZE, NUM_WARP_PER_BLOCK) >> > (
m_hitidx.ptr(), hitTestCount,
m_paths.ptr(),
m_isects.ptr(),
m_rays.ptr(),
m_hitbools.ptr(),
width, height,
m_shapeparam.ptr(), m_shapeparam.num(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitTest);
#endif
}
}
void SSRT::onShadeMiss(
int width, int height,
int bounce)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
if (m_envmapRsc.idx >= 0) {
if (bounce == 0) {
shadeMissWithEnvmap<true> << <grid, block >> > (
m_tex.ptr(),
m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer,
m_paths.ptr(),
m_rays.ptr(),
width, height);
}
else {
shadeMissWithEnvmap<false> << <grid, block >> > (
m_tex.ptr(),
m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer,
m_paths.ptr(),
m_rays.ptr(),
width, height);
}
}
else {
if (bounce == 0) {
shadeMiss<true> << <grid, block >> > (
m_paths.ptr(),
width, height);
}
else {
shadeMiss<false> << <grid, block >> > (
m_paths.ptr(),
width, height);
}
}
checkCudaKernel(shadeMiss);
}
void SSRT::onShade(
cudaSurfaceObject_t outputSurf,
int hitcount,
int width, int height,
int bounce, int rrBounce,
cudaTextureObject_t texVtxPos,
cudaTextureObject_t texVtxNml)
{
dim3 blockPerGrid((hitcount + 64 - 1) / 64);
dim3 threadPerBlock(64);
shade << <blockPerGrid, threadPerBlock >> > (
m_frame,
outputSurf,
width, height,
m_paths.ptr(),
m_hitidx.ptr(), hitcount,
m_isects.ptr(),
m_rays.ptr(),
bounce, rrBounce,
m_shapeparam.ptr(), m_shapeparam.num(),
m_mtrlparam.ptr(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos, texVtxNml,
m_mtxparams.ptr(),
m_tex.ptr(),
m_random.ptr(),
m_shadowRays.ptr());
checkCudaKernel(shade);
#ifdef SEPARATE_SHADOWRAY_HITTEST
hitShadowRay << <blockPerGrid, threadPerBlock >> > (
//hitShadowRay << <1, 1 >> > (
m_paths.ptr(),
m_hitidx.ptr(), hitcount,
m_shadowRays.ptr(),
m_shapeparam.ptr(), m_shapeparam.num(),
m_mtrlparam.ptr(),
m_lightparam.ptr(), m_lightparam.num(),
m_nodetex.ptr(),
m_primparams.ptr(),
texVtxPos,
m_mtxparams.ptr());
checkCudaKernel(hitShadowRay);
#endif
}
void SSRT::onGather(
cudaSurfaceObject_t outputSurf,
int width, int height,
int maxSamples)
{
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(
(width + block.x - 1) / block.x,
(height + block.y - 1) / block.y);
gather << <grid, block >> > (
outputSurf,
m_paths.ptr(),
width, height);
}
}
|
746b2190e32319d5be60ad8cff6c97eff47200b3.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef PARIS
#include "PoissonPeriodic3DBlockedGPU.hpp"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#define ERROR(X,...) if (X) { fprintf(stderr,"%s(%d): ",__FILE__,__LINE__); fprintf(stderr,__VA_ARGS__); fprintf(stderr,"\n"); MPI_Abort(MPI_COMM_WORLD,(X)); }
static inline __host__ __device__ double sqr(const double x) { return x*x; }
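// The constructor precomputes the frequency-space constants di_/dj_/dk_ for
// the chosen Laplacian (spectral by default, 3- or 5-point finite difference
// with PARIS_3PT / PARIS_5PT), checks that the global grid divides evenly
// into blocks, YZ slabs and X pencils across the MPI task grid, builds the
// slab/world communicators, and creates the batched FFT plans plus
// (optionally) pinned host buffers for the no-GPU-MPI path.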
PoissonPeriodic3DBlockedGPU::PoissonPeriodic3DBlockedGPU(const int n[3], const double lo[3], const double hi[3], const int m[3], const int id[3]):
#ifdef PARIS_3PT
di_(2.0*double(n[0]-1)/(hi[0]-lo[0])),
dj_(2.0*double(n[1]-1)/(hi[1]-lo[1])),
dk_(2.0*double(n[2]-1)/(hi[2]-lo[2])),
#elif defined PARIS_5PT
di_(sqr(double(n[0]-1)/(hi[0]-lo[0]))/6.0),
dj_(sqr(double(n[1]-1)/(hi[1]-lo[1]))/6.0),
dk_(sqr(double(n[2]-1)/(hi[2]-lo[2]))/6.0),
#else
di_{2.0*M_PI*double(n[0]-1)/(double(n[0])*(hi[0]-lo[0]))},
dj_{2.0*M_PI*double(n[1]-1)/(double(n[1])*(hi[1]-lo[1]))},
dk_{2.0*M_PI*double(n[2]-1)/(double(n[2])*(hi[2]-lo[2]))},
#endif
mi_(m[0]),
mj_(m[1]),
mk_(m[2]),
ni_(n[0]),
nj_(n[1]),
nk_(n[2])
{
{
int size = 0;
MPI_Comm_size(MPI_COMM_WORLD,&size);
ERROR(mi_*mj_*mk_ != size,"Task grid %dx%dx%d does not equal total tasks %d",mi_,mj_,mk_,size);
}
{
const int color = id[0];
const int key = id[2]+mk_*id[1];
MPI_Comm_split(MPI_COMM_WORLD,color,key,&commSlab_);
}
{
const int color = 1;
const int key = id[2]+mk_*(id[1]+mj_*id[0]);
MPI_Comm_split(MPI_COMM_WORLD,color,key,&commWorld_);
}
ERROR(ni_%mi_,"%d X elements are not divisible into %d X tasks",ni_,mi_);
const int niBlock = ni_/mi_;
ERROR(nj_%mj_,"%d Y elements are not divisible into %d Y tasks",nj_,mj_);
const int njBlock = nj_/mj_;
ERROR(nk_%mk_,"%d Z elements are not divisible into %d Z tasks",nk_,mk_);
const int nkBlock = nk_/mk_;
const int nBlock = niBlock*njBlock*nkBlock;
const int nh = nk_/2+1;
const int mjk = mj_*mk_;
ERROR(niBlock%mjk,"%d X layers per XYZ block not divisible into %dx%d=%d YZ slabs",niBlock,mj_,mk_,mjk);
const int niSlab = niBlock/mjk;
const int nSlab = niSlab*nj_*2*nh;
const int mijk = mi_*mj_*mk_;
const int njk = nj_*nk_;
const int njh = nj_*nh;
ERROR(njh%mijk,"%dx(%d/2+1)=%d X pencils not divisible into %d tasks",nj_,nk_,njh,mijk);
const int njhPencil = njh/mijk;
const int nPencil = ni_*2*njhPencil;
const int nMax = std::max({nBlock,nSlab,nPencil});
bytes_ = sizeof(double)*nMax;
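// Batched FFT plans: 2-D real<->complex transforms over the YZ planes of each slab, and a 1-D complex transform along X for each pencil.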
{
int njnk[2] = {nj_,nk_};
int njnh[2] = {nj_,nh};
CHECK(hipfftPlanMany(&dz2d_,2,njnk,njnk,1,njk,njnh,1,njh,HIPFFT_D2Z,niSlab));
CHECK(hipfftPlanMany(&zd2d_,2,njnk,njnh,1,njh,njnk,1,njk,HIPFFT_Z2D,niSlab));
CHECK(hipfftPlanMany(&zz1d_,1,&ni_,&ni_,1,ni_,&ni_,1,ni_,HIPFFT_Z2Z,njhPencil));
}
#ifdef PARIS_NO_GPU_MPI
CHECK(hipHostMalloc(&ha_,bytes_+bytes_,hipHostMallocDefault));
assert(ha_);
hb_ = ha_+nMax;
#endif
}
PoissonPeriodic3DBlockedGPU::~PoissonPeriodic3DBlockedGPU()
{
#ifdef PARIS_NO_GPU_MPI
CHECK(hipHostFree(ha_));
ha_ = hb_ = nullptr;
#endif
CHECK(hipfftDestroy(zz1d_));
CHECK(hipfftDestroy(zd2d_));
CHECK(hipfftDestroy(dz2d_));
MPI_Comm_free(&commWorld_);
MPI_Comm_free(&commSlab_);
}
void PoissonPeriodic3DBlockedGPU::solve(const long bytes, double *const da, double *const db)
{
// Make local copies for lambda kernels
const double di = di_;
const double dj = dj_;
const double dk = dk_;
const int mj = mj_;
const int mk = mk_;
const int ni = ni_;
const int nj = nj_;
hipfftDoubleComplex *const ca = reinterpret_cast<hipfftDoubleComplex *>(da);
hipfftDoubleComplex *const cb = reinterpret_cast<hipfftDoubleComplex *>(db);
const int nh = nk_/2+1;
const int njh = nj*nh;
const int niBlock = (ni+mi_-1)/mi_;
const int njBlock = (nj+mj-1)/mj;
const int nkBlock = (nk_+mk-1)/mk;
const int mjk = mj*mk;
const int niSlab = (niBlock+mjk-1)/mjk;
const int nBlockSlab = niSlab*njBlock*nkBlock;
ERROR(bytes < bytes_,"Vector bytes %ld less than %ld local elements = %ld bytes",bytes,bytes_/sizeof(double),bytes_);
// Copy blocks to slabs
#ifdef PARIS_NO_GPU_MPI
CHECK(hipMemcpy(ha_,da,bytes_,hipMemcpyDeviceToHost));
MPI_Alltoall(ha_,nBlockSlab,MPI_DOUBLE,hb_,nBlockSlab,MPI_DOUBLE,commSlab_);
CHECK(hipMemcpyAsync(db,hb_,bytes_,hipMemcpyHostToDevice,0));
#else
CHECK(hipDeviceSynchronize());
MPI_Alltoall(da,nBlockSlab,MPI_DOUBLE,db,nBlockSlab,MPI_DOUBLE,commSlab_);
#endif
gpuFor(
niSlab,mj,njBlock,mk,nkBlock,
GPU_LAMBDA(const int i, const int p, const int j, const int q, const int k) {
const int ia = k+nkBlock*(q+mk*(j+njBlock*(p+mj*i)));
const int ib = k+nkBlock*(j+njBlock*(i+niSlab*(q+mk*p)));
da[ia] = db[ib];
});
// da -> cb
CHECK(hipfftExecD2Z(dz2d_,da,cb));
// Copy slabs to pencils
gpuFor(
njh,niSlab,
GPU_LAMBDA(const int jk, const int i) {
const int ia = i+niSlab*jk;
const int ib = jk+njh*i;
ca[ia].x = cb[ib].x;
ca[ia].y = cb[ib].y;
});
const int m = mi_*mj*mk;
const int njhPencil = (njh+m-1)/m;
const int nSlabPencil = 2*njhPencil*niSlab;
#ifdef PARIS_NO_GPU_MPI
CHECK(hipMemcpy(ha_,da,bytes_,hipMemcpyDeviceToHost));
MPI_Alltoall(ha_,nSlabPencil,MPI_DOUBLE,hb_,nSlabPencil,MPI_DOUBLE,MPI_COMM_WORLD);
CHECK(hipMemcpyAsync(db,hb_,bytes_,hipMemcpyHostToDevice,0));
#else
CHECK(hipDeviceSynchronize());
MPI_Alltoall(da,nSlabPencil,MPI_DOUBLE,db,nSlabPencil,MPI_DOUBLE,MPI_COMM_WORLD);
#endif
gpuFor(
njhPencil,m,niSlab,
GPU_LAMBDA(const int jk, const int pq, const int i) {
const int ia = i+niSlab*(pq+m*jk);
const int ib = i+niSlab*(jk+njhPencil*pq);
ca[ia].x = cb[ib].x;
ca[ia].y = cb[ib].y;
});
// ca -> cb
CHECK(hipfftExecZ2Z(zz1d_,ca,cb,HIPFFT_FORWARD));
// Solve Poisson equation
{
#ifdef PARIS_3PT
const double si = M_PI/double(ni_);
const double sj = M_PI/double(nj_);
const double sk = M_PI/double(nk_);
#elif defined PARIS_5PT
const double si = 2.0*M_PI/double(ni_);
const double sj = 2.0*M_PI/double(nj_);
const double sk = 2.0*M_PI/double(nk_);
#endif
int rank = MPI_PROC_NULL;
MPI_Comm_rank(commWorld_,&rank);
const int jkLo = rank*njhPencil;
const int jkHi = ::min(jkLo+njhPencil,njh);
const int djk = jkHi-jkLo;
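// Apply the inverse Laplacian in Fourier space: scale each mode by -1/(ii+jj+kk) (squared wavenumbers, or the 3PT/5PT stencil eigenvalues) and pin the k=0 mean mode to zero.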
gpuFor(
djk,ni,
GPU_LAMBDA(int jk, const int i) {
const int ijk = i+ni*jk;
if ((ijk == 0) && (jkLo == 0)) {
cb[0].x = cb[0].y = 0;
} else {
#ifdef PARIS_3PT
const double ii = sqr(sin(double(min(i,ni-i))*si)*di);
#elif defined PARIS_5PT
const double ci = cos(double(min(i,ni-i))*si);
const double ii = di*(2.0*ci*ci-16.0*ci+14.0);
#else
const double ii = sqr(double(min(i,ni-i))*di);
#endif
jk += jkLo;
const int j = jk/nh;
#ifdef PARIS_3PT
const double jj = sqr(sin(double(min(j,nj-j))*sj)*dj);
#elif defined PARIS_5PT
const double cj = cos(double(min(j,nj-j))*sj);
const double jj = dj*(2.0*cj*cj-16.0*cj+14.0);
#else
const double jj = sqr(double(min(j,nj-j))*dj);
#endif
const int k = jk-j*nh;
#ifdef PARIS_3PT
const double kk = sqr(sin(double(k)*sk)*dk);
#elif defined PARIS_5PT
const double ck = cos(double(k)*sk);
const double kk = dk*(2.0*ck*ck-16.0*ck+14.0);
#else
const double kk = sqr(double(k)*dk);
#endif
const double d = -1.0/(ii+jj+kk);
cb[ijk].x *= d;
cb[ijk].y *= d;
}
});
}
// cb -> ca
CHECK(hipfftExecZ2Z(zz1d_,cb,ca,HIPFFT_BACKWARD));
// Copy pencils to slabs
gpuFor(
m,njhPencil,niSlab,
GPU_LAMBDA(const int pq, const int jk, const int i) {
const int ia = i+niSlab*(pq+m*jk);
const int ib = i+niSlab*(jk+njhPencil*pq);
cb[ib].x = ca[ia].x;
cb[ib].y = ca[ia].y;
});
#ifdef PARIS_NO_GPU_MPI
CHECK(hipMemcpy(hb_,db,bytes_,hipMemcpyDeviceToHost));
MPI_Alltoall(hb_,nSlabPencil,MPI_DOUBLE,ha_,nSlabPencil,MPI_DOUBLE,commWorld_);
CHECK(hipMemcpyAsync(da,ha_,bytes_,hipMemcpyHostToDevice,0));
#else
CHECK(hipDeviceSynchronize());
MPI_Alltoall(db,nSlabPencil,MPI_DOUBLE,da,nSlabPencil,MPI_DOUBLE,commWorld_);
#endif
gpuFor(
niSlab,njh,
GPU_LAMBDA(const int i, const int jk) {
const int ia = i+jk*niSlab;
const int ib = jk+njh*i;
cb[ib].x = ca[ia].x;
cb[ib].y = ca[ia].y;
});
// cb -> da
CHECK(hipfftExecZ2D(zd2d_,cb,da));
// Copy slabs to blocks
const double divN = 1.0/(long(ni)*long(nj)*long(nk_));
gpuFor(
mj,mk,niSlab,njBlock,nkBlock,
GPU_LAMBDA(const int p, const int q, const int i, const int j, const int k) {
const int ia = k+nkBlock*(q+mk*(j+njBlock*(p+mj*i)));
const int ib = k+nkBlock*(j+njBlock*(i+niSlab*(q+mk*p)));
db[ib] = divN*da[ia];
});
#ifdef PARIS_NO_GPU_MPI
CHECK(hipMemcpy(hb_,db,bytes_,hipMemcpyDeviceToHost));
MPI_Alltoall(hb_,nBlockSlab,MPI_DOUBLE,ha_,nBlockSlab,MPI_DOUBLE,commSlab_);
CHECK(hipMemcpyAsync(da,ha_,bytes_,hipMemcpyHostToDevice,0));
#else
CHECK(hipDeviceSynchronize());
MPI_Alltoall(db,nBlockSlab,MPI_DOUBLE,da,nBlockSlab,MPI_DOUBLE,commSlab_);
#endif
}
#endif
| 746b2190e32319d5be60ad8cff6c97eff47200b3.cu | #ifdef PARIS
#include "PoissonPeriodic3DBlockedGPU.hpp"
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#define ERROR(X,...) if (X) { fprintf(stderr,"%s(%d): ",__FILE__,__LINE__); fprintf(stderr,__VA_ARGS__); fprintf(stderr,"\n"); MPI_Abort(MPI_COMM_WORLD,(X)); }
static inline __host__ __device__ double sqr(const double x) { return x*x; }
PoissonPeriodic3DBlockedGPU::PoissonPeriodic3DBlockedGPU(const int n[3], const double lo[3], const double hi[3], const int m[3], const int id[3]):
#ifdef PARIS_3PT
di_(2.0*double(n[0]-1)/(hi[0]-lo[0])),
dj_(2.0*double(n[1]-1)/(hi[1]-lo[1])),
dk_(2.0*double(n[2]-1)/(hi[2]-lo[2])),
#elif defined PARIS_5PT
di_(sqr(double(n[0]-1)/(hi[0]-lo[0]))/6.0),
dj_(sqr(double(n[1]-1)/(hi[1]-lo[1]))/6.0),
dk_(sqr(double(n[2]-1)/(hi[2]-lo[2]))/6.0),
#else
di_{2.0*M_PI*double(n[0]-1)/(double(n[0])*(hi[0]-lo[0]))},
dj_{2.0*M_PI*double(n[1]-1)/(double(n[1])*(hi[1]-lo[1]))},
dk_{2.0*M_PI*double(n[2]-1)/(double(n[2])*(hi[2]-lo[2]))},
#endif
mi_(m[0]),
mj_(m[1]),
mk_(m[2]),
ni_(n[0]),
nj_(n[1]),
nk_(n[2])
{
{
int size = 0;
MPI_Comm_size(MPI_COMM_WORLD,&size);
ERROR(mi_*mj_*mk_ != size,"Task grid %dx%dx%d does not equal total tasks %d",mi_,mj_,mk_,size);
}
{
const int color = id[0];
const int key = id[2]+mk_*id[1];
MPI_Comm_split(MPI_COMM_WORLD,color,key,&commSlab_);
}
{
const int color = 1;
const int key = id[2]+mk_*(id[1]+mj_*id[0]);
MPI_Comm_split(MPI_COMM_WORLD,color,key,&commWorld_);
}
ERROR(ni_%mi_,"%d X elements are not divisible into %d X tasks",ni_,mi_);
const int niBlock = ni_/mi_;
ERROR(nj_%mj_,"%d Y elements are not divisible into %d Y tasks",nj_,mj_);
const int njBlock = nj_/mj_;
ERROR(nk_%mk_,"%d Z elements are not divisible into %d Z tasks",nk_,mk_);
const int nkBlock = nk_/mk_;
const int nBlock = niBlock*njBlock*nkBlock;
const int nh = nk_/2+1;
const int mjk = mj_*mk_;
ERROR(niBlock%mjk,"%d X layers per XYZ block not divisible into %dx%d=%d YZ slabs",niBlock,mj_,mk_,mjk);
const int niSlab = niBlock/mjk;
const int nSlab = niSlab*nj_*2*nh;
const int mijk = mi_*mj_*mk_;
const int njk = nj_*nk_;
const int njh = nj_*nh;
ERROR(njh%mijk,"%dx(%d/2+1)=%d X pencils not divisible into %d tasks",nj_,nk_,njh,mijk);
const int njhPencil = njh/mijk;
const int nPencil = ni_*2*njhPencil;
const int nMax = std::max({nBlock,nSlab,nPencil});
bytes_ = sizeof(double)*nMax;
{
int njnk[2] = {nj_,nk_};
int njnh[2] = {nj_,nh};
CHECK(cufftPlanMany(&dz2d_,2,njnk,njnk,1,njk,njnh,1,njh,CUFFT_D2Z,niSlab));
CHECK(cufftPlanMany(&zd2d_,2,njnk,njnh,1,njh,njnk,1,njk,CUFFT_Z2D,niSlab));
CHECK(cufftPlanMany(&zz1d_,1,&ni_,&ni_,1,ni_,&ni_,1,ni_,CUFFT_Z2Z,njhPencil));
}
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaHostAlloc(&ha_,bytes_+bytes_,cudaHostAllocDefault));
assert(ha_);
hb_ = ha_+nMax;
#endif
}
PoissonPeriodic3DBlockedGPU::~PoissonPeriodic3DBlockedGPU()
{
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaFreeHost(ha_));
ha_ = hb_ = nullptr;
#endif
CHECK(cufftDestroy(zz1d_));
CHECK(cufftDestroy(zd2d_));
CHECK(cufftDestroy(dz2d_));
MPI_Comm_free(&commWorld_);
MPI_Comm_free(&commSlab_);
}
void PoissonPeriodic3DBlockedGPU::solve(const long bytes, double *const da, double *const db)
{
// Make local copies for lambda kernels
const double di = di_;
const double dj = dj_;
const double dk = dk_;
const int mj = mj_;
const int mk = mk_;
const int ni = ni_;
const int nj = nj_;
cufftDoubleComplex *const ca = reinterpret_cast<cufftDoubleComplex *>(da);
cufftDoubleComplex *const cb = reinterpret_cast<cufftDoubleComplex *>(db);
const int nh = nk_/2+1;
const int njh = nj*nh;
const int niBlock = (ni+mi_-1)/mi_;
const int njBlock = (nj+mj-1)/mj;
const int nkBlock = (nk_+mk-1)/mk;
const int mjk = mj*mk;
const int niSlab = (niBlock+mjk-1)/mjk;
const int nBlockSlab = niSlab*njBlock*nkBlock;
ERROR(bytes < bytes_,"Vector bytes %ld less than %ld local elements = %ld bytes",bytes,bytes_/sizeof(double),bytes_);
// Copy blocks to slabs
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaMemcpy(ha_,da,bytes_,cudaMemcpyDeviceToHost));
MPI_Alltoall(ha_,nBlockSlab,MPI_DOUBLE,hb_,nBlockSlab,MPI_DOUBLE,commSlab_);
CHECK(cudaMemcpyAsync(db,hb_,bytes_,cudaMemcpyHostToDevice,0));
#else
CHECK(cudaDeviceSynchronize());
MPI_Alltoall(da,nBlockSlab,MPI_DOUBLE,db,nBlockSlab,MPI_DOUBLE,commSlab_);
#endif
gpuFor(
niSlab,mj,njBlock,mk,nkBlock,
GPU_LAMBDA(const int i, const int p, const int j, const int q, const int k) {
const int ia = k+nkBlock*(q+mk*(j+njBlock*(p+mj*i)));
const int ib = k+nkBlock*(j+njBlock*(i+niSlab*(q+mk*p)));
da[ia] = db[ib];
});
// da -> cb
CHECK(cufftExecD2Z(dz2d_,da,cb));
// Copy slabs to pencils
gpuFor(
njh,niSlab,
GPU_LAMBDA(const int jk, const int i) {
const int ia = i+niSlab*jk;
const int ib = jk+njh*i;
ca[ia].x = cb[ib].x;
ca[ia].y = cb[ib].y;
});
const int m = mi_*mj*mk;
const int njhPencil = (njh+m-1)/m;
const int nSlabPencil = 2*njhPencil*niSlab;
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaMemcpy(ha_,da,bytes_,cudaMemcpyDeviceToHost));
MPI_Alltoall(ha_,nSlabPencil,MPI_DOUBLE,hb_,nSlabPencil,MPI_DOUBLE,MPI_COMM_WORLD);
CHECK(cudaMemcpyAsync(db,hb_,bytes_,cudaMemcpyHostToDevice,0));
#else
CHECK(cudaDeviceSynchronize());
MPI_Alltoall(da,nSlabPencil,MPI_DOUBLE,db,nSlabPencil,MPI_DOUBLE,MPI_COMM_WORLD);
#endif
gpuFor(
njhPencil,m,niSlab,
GPU_LAMBDA(const int jk, const int pq, const int i) {
const int ia = i+niSlab*(pq+m*jk);
const int ib = i+niSlab*(jk+njhPencil*pq);
ca[ia].x = cb[ib].x;
ca[ia].y = cb[ib].y;
});
// ca -> cb
CHECK(cufftExecZ2Z(zz1d_,ca,cb,CUFFT_FORWARD));
// Solve Poisson equation
{
#ifdef PARIS_3PT
const double si = M_PI/double(ni_);
const double sj = M_PI/double(nj_);
const double sk = M_PI/double(nk_);
#elif defined PARIS_5PT
const double si = 2.0*M_PI/double(ni_);
const double sj = 2.0*M_PI/double(nj_);
const double sk = 2.0*M_PI/double(nk_);
#endif
int rank = MPI_PROC_NULL;
MPI_Comm_rank(commWorld_,&rank);
const int jkLo = rank*njhPencil;
const int jkHi = std::min(jkLo+njhPencil,njh);
const int djk = jkHi-jkLo;
gpuFor(
djk,ni,
GPU_LAMBDA(int jk, const int i) {
const int ijk = i+ni*jk;
if ((ijk == 0) && (jkLo == 0)) {
cb[0].x = cb[0].y = 0;
} else {
#ifdef PARIS_3PT
const double ii = sqr(sin(double(min(i,ni-i))*si)*di);
#elif defined PARIS_5PT
const double ci = cos(double(min(i,ni-i))*si);
const double ii = di*(2.0*ci*ci-16.0*ci+14.0);
#else
const double ii = sqr(double(min(i,ni-i))*di);
#endif
jk += jkLo;
const int j = jk/nh;
#ifdef PARIS_3PT
const double jj = sqr(sin(double(min(j,nj-j))*sj)*dj);
#elif defined PARIS_5PT
const double cj = cos(double(min(j,nj-j))*sj);
const double jj = dj*(2.0*cj*cj-16.0*cj+14.0);
#else
const double jj = sqr(double(min(j,nj-j))*dj);
#endif
const int k = jk-j*nh;
#ifdef PARIS_3PT
const double kk = sqr(sin(double(k)*sk)*dk);
#elif defined PARIS_5PT
const double ck = cos(double(k)*sk);
const double kk = dk*(2.0*ck*ck-16.0*ck+14.0);
#else
const double kk = sqr(double(k)*dk);
#endif
const double d = -1.0/(ii+jj+kk);
cb[ijk].x *= d;
cb[ijk].y *= d;
}
});
}
// cb -> ca
CHECK(cufftExecZ2Z(zz1d_,cb,ca,CUFFT_INVERSE));
// Copy pencils to slabs
gpuFor(
m,njhPencil,niSlab,
GPU_LAMBDA(const int pq, const int jk, const int i) {
const int ia = i+niSlab*(pq+m*jk);
const int ib = i+niSlab*(jk+njhPencil*pq);
cb[ib].x = ca[ia].x;
cb[ib].y = ca[ia].y;
});
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaMemcpy(hb_,db,bytes_,cudaMemcpyDeviceToHost));
MPI_Alltoall(hb_,nSlabPencil,MPI_DOUBLE,ha_,nSlabPencil,MPI_DOUBLE,commWorld_);
CHECK(cudaMemcpyAsync(da,ha_,bytes_,cudaMemcpyHostToDevice,0));
#else
CHECK(cudaDeviceSynchronize());
MPI_Alltoall(db,nSlabPencil,MPI_DOUBLE,da,nSlabPencil,MPI_DOUBLE,commWorld_);
#endif
gpuFor(
niSlab,njh,
GPU_LAMBDA(const int i, const int jk) {
const int ia = i+jk*niSlab;
const int ib = jk+njh*i;
cb[ib].x = ca[ia].x;
cb[ib].y = ca[ia].y;
});
// cb -> da
CHECK(cufftExecZ2D(zd2d_,cb,da));
// Copy slabs to blocks
const double divN = 1.0/(long(ni)*long(nj)*long(nk_));
gpuFor(
mj,mk,niSlab,njBlock,nkBlock,
GPU_LAMBDA(const int p, const int q, const int i, const int j, const int k) {
const int ia = k+nkBlock*(q+mk*(j+njBlock*(p+mj*i)));
const int ib = k+nkBlock*(j+njBlock*(i+niSlab*(q+mk*p)));
db[ib] = divN*da[ia];
});
#ifdef PARIS_NO_GPU_MPI
CHECK(cudaMemcpy(hb_,db,bytes_,cudaMemcpyDeviceToHost));
MPI_Alltoall(hb_,nBlockSlab,MPI_DOUBLE,ha_,nBlockSlab,MPI_DOUBLE,commSlab_);
CHECK(cudaMemcpyAsync(da,ha_,bytes_,cudaMemcpyHostToDevice,0));
#else
CHECK(cudaDeviceSynchronize());
MPI_Alltoall(db,nBlockSlab,MPI_DOUBLE,da,nBlockSlab,MPI_DOUBLE,commSlab_);
#endif
}
#endif
|
58e1ab9adee8ee5bcecf4888d95a8788b73f0195.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
// Max Threads per block in GeForce 210
#define TxB 512
//Kernel operador not
__global__
void operator_not_kernel(const unsigned char* const inputImage,
unsigned char* const outputImage,
int numRows, int numCols)
{
// The uchar4 components map to RGBA as:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
// Note: the alpha channel is ignored
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < numRows*numCols){
// Invert the value: if it is 0 write 255, if it is 255 write 0
outputImage[i] = ( inputImage[i] == 0 ) ? 255 : 0;
}
}
void operator_not_image(unsigned char* const d_inputImage,
unsigned char* const d_outputImage,
size_t numRows, size_t numCols)
{
// Since the relative position of the pixels does not matter in
// this algorithm, the strategy for assigning threads to blocks
// and grids is simply to cover every pixel with threads along the X axis
long long int total_px = numRows * numCols; // total pixels
long int grids_n = ceil(total_px / (double)TxB); // number of grids
const dim3 blockSize(TxB, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
hipLaunchKernelGGL(( operator_not_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImage, d_outputImage, numRows, numCols);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
} | 58e1ab9adee8ee5bcecf4888d95a8788b73f0195.cu | #include "utils.h"
#include <stdio.h>
#include <math.h> /* ceil */
// Max Threads per block in GeForce 210
#define TxB 512
//Kernel operador not
__global__
void operator_not_kernel(const unsigned char* const inputImage,
unsigned char* const outputImage,
int numRows, int numCols)
{
// The uchar4 components map to RGBA as:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
// Note: the alpha channel is ignored
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i < numRows*numCols){
// Invert the value: if it is 0 write 255, if it is 255 write 0
outputImage[i] = ( inputImage[i] == 0 ) ? 255 : 0;
}
}
void operator_not_image(unsigned char* const d_inputImage,
unsigned char* const d_outputImage,
size_t numRows, size_t numCols)
{
// Since the relative position of the pixels does not matter in
// this algorithm, the strategy for assigning threads to blocks
// and grids is simply to cover every pixel with threads along the X axis
long long int total_px = numRows * numCols; // total pixels
long int grids_n = ceil(total_px / (double)TxB); // number of grids
const dim3 blockSize(TxB, 1, 1);
const dim3 gridSize(grids_n, 1, 1);
operator_not_kernel<<<gridSize, blockSize>>>(d_inputImage, d_outputImage, numRows, numCols);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
} |
e0317cba5a8553f8493901f1386e18b846045582.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "roadCrossingsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
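// Thread-block shapes and square matrix sizes swept by the benchmark below.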
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int rows = XSIZE;
int segs = 1;
int *adjacency = NULL;
hipMalloc(&adjacency, XSIZE*YSIZE);
int *cross = NULL;
hipMalloc(&cross, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((roadCrossingsKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, rows, segs, adjacency, cross);
hipDeviceSynchronize();
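// Untimed warm-up launches before the measured runs.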
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((roadCrossingsKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, rows, segs, adjacency, cross);
}
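// Time 1000 kernel launches and print the elapsed microseconds together with the block and matrix configuration.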
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((roadCrossingsKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, rows, segs, adjacency, cross);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e0317cba5a8553f8493901f1386e18b846045582.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "roadCrossingsKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int rows = XSIZE;
int segs = 1;
int *adjacency = NULL;
cudaMalloc(&adjacency, XSIZE*YSIZE);
int *cross = NULL;
cudaMalloc(&cross, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
roadCrossingsKernel<<<gridBlock,threadBlock>>>(rows,segs,adjacency,cross);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
roadCrossingsKernel<<<gridBlock,threadBlock>>>(rows,segs,adjacency,cross);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
roadCrossingsKernel<<<gridBlock,threadBlock>>>(rows,segs,adjacency,cross);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
conv_cudnn_op.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
// HIP MIOPEN ONLY SUPPORT NCHW format
auto compute_format = DataLayout::kNCHW;
#else
// Tensor Cores, introduced with Volta GPUs, support faster conv ops
// with FP16 in NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
#endif
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
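// cuDNN/MIOpen pad symmetrically, so asymmetric padding is handled below by copying the input into an explicitly padded tensor.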
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input,
&transformed_filter_channel,
&transformed_output,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
#ifdef PADDLE_WITH_HIP
// MIOPEN need to set groups in cdesc in miopen_desc.h
args.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), groups);
#else
args.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn());
#endif
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
#ifdef PADDLE_WITH_HIP
// MIOPEN do not set groups in wdesc after set groups in cdesc
groups = 1;
#endif
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
#ifdef PADDLE_WITH_HIP
miopenConvFwdAlgorithm_t algo{};
using search = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search::GetWorkspaceSize(args);
algo = search::Find<T>(args, exhaustive_search, false, workspace_size, ctx);
#else
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#endif
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
// when groups > 1, SearchAlgorithm find algo is CUDNN_CONVOLUTION_\
// FWD_ALGO_WINOGRAD_NONFUSED, but this kind of algorithm is unstable
// in forward computation, so change the algorithm to CUDNN_CONVOLUTION_\
// FWD_ALGO_IMPLICIT_GEMM manually.
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
// VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args.idesc.desc(), input_data,
args.wdesc.desc(), filter_data, args.cdesc.desc(), algo,
&beta, args.odesc.desc(), output_data, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
#endif
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
// HIP MIOPEN ONLY SUPPORT NCHW format
auto compute_format = DataLayout::kNCHW;
#else
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
#endif
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
// NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy
// the data of input_grad to transformed_input_grad_channel.
if (ctx.Attr<bool>("use_addto")) {
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
// cuDNN only supports padding the same amount on every dimension.
// So we create a new padded input tensor.
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
#ifdef PADDLE_WITH_HIP
miopenConvBwdDataAlgorithm_t data_algo =
static_cast<miopenConvBwdDataAlgorithm_t>(0);
miopenConvBwdWeightsAlgorithm_t filter_algo =
static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif
size_t workspace_size = 0;
int iwo_groups = groups;
int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
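// Pick the backward-data and backward-filter algorithms; workspace_size grows to the larger of the two requirements.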
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size =
::max(workspace_size, search1::GetWorkspaceSize(args1));
data_algo = search1::Find<T>(args1, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, ctx);
workspace_size =
::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
#endif
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size =
::max(workspace_size, search2::GetWorkspaceSize(args2));
filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
#endif
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto");
if (input_grad) {
// When beta is 0, it is unnecessary to reset input_grad.
// When beta is 1, the output cannot be reset since the addto strategy is used.
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardData(
handle, &alpha, args1.odesc.desc(), output_grad_data,
args1.wdesc.desc(), filter_data, args1.cdesc.desc(),
data_algo, &beta, args1.idesc.desc(),
transformed_input_grad_data, cudnn_workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
// filter_grad does not use inplace addto.
ScalingParamType<T> beta_filter = 0.0f;
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardWeights(
handle, &alpha, args2.odesc.desc(), output_grad_data,
args2.idesc.desc(), input_data, args2.cdesc.desc(),
filter_algo, &beta, args2.wdesc.desc(), filter_grad_data,
cudnn_workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta_filter, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
#endif
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = ::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{
&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common,
dilations, dtype};
ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args4{
&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common,
dilations, dtype};
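    // Added summary of the four convolution argument sets (not in the
    // original source):
    //   args1: forward conv(ddX, W)            -> contributes to ddO
    //   args2: forward conv(X, ddW)            -> contributes to ddO
    //   args3: backward-filter over (ddX, dO)  -> produces dW
    //   args4: backward-data over (ddW, dO)    -> produces dX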
#ifdef PADDLE_WITH_HIP
miopenConvFwdAlgorithm_t fwd_algo1 =
static_cast<miopenConvFwdAlgorithm_t>(0);
miopenConvFwdAlgorithm_t fwd_algo2 =
static_cast<miopenConvFwdAlgorithm_t>(0);
miopenConvBwdDataAlgorithm_t data_algo =
static_cast<miopenConvBwdDataAlgorithm_t>(0);
miopenConvBwdWeightsAlgorithm_t filter_algo =
static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
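    // Added note: in the cuDNN path the second forward convolution (args2)
    // passes alpha as its output-scaling (beta) parameter, so conv(I, ddW) is
    // accumulated onto the conv(ddI, W) result already stored in
    // transformed_ddO_channel; the MIOpen path passes beta = 0 instead, since
    // MIOpen only supports beta == 0 (see the comment further below).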
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false,
workspace_size, ctx);
#else
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search2 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false,
workspace_size, ctx);
#else
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = ::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size =
::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = ::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search4 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size =
::max(workspace_size, search4::GetWorkspaceSize(args4));
data_algo = search4::Find<T>(args4, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size =
::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
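    // Added note: the per-group pointer offsets below step through one
    // group's slice of a contiguous NCHW tensor; e.g. with groups = 2 and
    // i_c = 8, each group covers 4 input channels, so group_offset_in skips
    // 4 * i_d * i_h * i_w elements between consecutive groups.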
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args1.idesc.desc(), ddx,
args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1,
&beta, args1.odesc.desc(), transformed_ddy_channel,
workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
        // MIOPEN only supports beta == 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(),
ddw, args2.cdesc.desc(), fwd_algo2, &beta,
args2.odesc.desc(), transformed_ddy_channel,
workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardWeights(
handle, &alpha, args3.odesc.desc(), transformed_dy_channel,
args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo,
&beta, args3.wdesc.desc(), dw, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardData(
handle, &alpha, args4.odesc.desc(), transformed_dy_channel,
args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo,
&beta, args4.idesc.desc(), transformed_dx, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
#else
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
#endif
| conv_cudnn_op.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/memory.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/operators/conv_op.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(cudnn_deterministic);
DECLARE_uint64(conv_workspace_size_limit);
DECLARE_bool(cudnn_exhaustive_search);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static inline bool IsVoltaOrLater(const platform::CUDADeviceContext& dev_ctx) {
return dev_ctx.GetComputeCapability() >= 70;
}
template <typename T>
class CUDNNConvOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
const Tensor* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string padding_algorithm =
ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
    // HIP MIOPEN only supports the NCHW format
auto compute_format = DataLayout::kNCHW;
#else
    // Tensor Cores, introduced with Volta GPUs, support faster conv ops
    // with FP16 in NHWC data format.
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
// We will only do data format conversion from NHWC to NCHW.
// cudnn will convert NCHW to NHWC automatically on Tensor Core.
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
#endif
VLOG(3) << "Compute ConvOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// ------------ transformed tensor -----------
Tensor transformed_input_channel(input->type());
Tensor transformed_output(output->type());
Tensor transformed_filter_channel(filter->type());
T* output_data = nullptr;
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, output,
&transformed_output);
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output.ShareDataWith(*output);
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
} else {
transformed_filter_channel.ShareDataWith(*filter);
}
output_data = transformed_output.data<T>();
// update padding and dilation
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2,
0);
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
// ------------------- cudnn descriptors ---------------------
ConvArgs args{&transformed_input,
&transformed_filter_channel,
&transformed_output,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_format = GetCudnnTensorFormat(layout);
args.handle = handle;
#ifdef PADDLE_WITH_HIP
    // MIOPEN needs the group count to be set in cdesc; see miopen_desc.h
args.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), groups);
#else
args.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn());
#endif
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
// cudnn 7 can support groups, no need to do it manually
// FIXME(typhoonzero): find a better way to disable groups
// rather than setting it to 1.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnSetConvolutionGroupCount(args.cdesc.desc(),
groups));
groups = 1;
#endif
#ifdef PADDLE_WITH_HIP
    // MIOPEN does not set groups in wdesc once groups are set in cdesc
groups = 1;
#endif
args.idesc.set(transformed_input, layout_format);
args.wdesc.set(transformed_filter_channel, layout_format, groups);
args.odesc.set(transformed_output, layout_format);
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNHWC, &o_n, &o_c, &o_d,
&o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size = 0; // final workspace to allocate.
// ------------------- cudnn conv algorithm ---------------------
#ifdef PADDLE_WITH_HIP
miopenConvFwdAlgorithm_t algo{};
using search = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search::GetWorkspaceSize(args);
algo = search::Find<T>(args, exhaustive_search, false, workspace_size, ctx);
#else
cudnnConvolutionFwdAlgo_t algo{};
using search = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
algo = search::Find<T>(args, exhaustive_search, false, ctx);
workspace_size = search::GetWorkspaceSize(args, algo);
#endif
#if defined(PADDLE_WITH_CUDA) && CUDNN_VERSION_MIN(7, 0, 1)
    // When groups > 1, SearchAlgorithm tends to pick
    // CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, but that algorithm is
    // unstable in the forward computation, so the algorithm is switched to
    // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM manually.
if (ctx.Attr<int>("groups") > 1) {
algo = static_cast<cudnnConvolutionFwdAlgo_t>(0);
}
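    // Added note: value 0 in cudnnConvolutionFwdAlgo_t is
    // CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, which is what the cast above
    // selects.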
#endif
// ------------------- cudnn conv forward ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
// VLOG(4) << "Conv: use_addto = " << ctx.Attr<bool>("use_addto");
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args.idesc.desc(), input_data,
args.wdesc.desc(), filter_data, args.cdesc.desc(), algo,
&beta, args.odesc.desc(), output_data, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args.idesc.desc(),
input_data + i * group_offset_in, args.wdesc.desc(),
filter_data + i * group_offset_filter, args.cdesc.desc(),
algo, workspace_ptr, workspace_size, &beta,
args.odesc.desc(), output_data + i * group_offset_out));
},
workspace_size);
}
#endif
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_output, output);
}
}
};
template <typename T>
class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
}
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
}
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
    // HIP MIOPEN only supports the NCHW format
auto compute_format = DataLayout::kNCHW;
#else
const bool compute_in_nhwc =
dtype == CUDNN_DATA_HALF && IsVoltaOrLater(dev_ctx);
auto compute_format =
compute_in_nhwc && channel_last ? DataLayout::kNHWC : DataLayout::kNCHW;
#endif
VLOG(3) << "Compute ConvGradOp with cuDNN:"
<< " data_format=" << data_format << " compute_format="
<< (compute_format == DataLayout::kNHWC ? "NHWC" : "NCHW");
// transform Tensor
Tensor transformed_input_channel(input->type());
Tensor transformed_output_grad_channel(output_grad->type());
Tensor transformed_input_grad_channel(input->type());
Tensor transformed_filter_channel(filter->type());
Tensor transformed_filter_grad_channel(filter->type());
if (channel_last && compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input, output_grad, input_grad and tensor from "
"NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input, &transformed_input_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, output_grad, &transformed_output_grad_channel);
if (input_grad) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
// NOTE(zhiqiu): If inplace_addto strategy is enabled, we need to copy
// the data of input_grad to transformed_input_grad_channel.
if (ctx.Attr<bool>("use_addto")) {
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, input_grad, &transformed_input_grad_channel);
}
}
} else {
transformed_input_channel.ShareDataWith(*input);
transformed_output_grad_channel.ShareDataWith(*output_grad);
if (input_grad) {
transformed_input_grad_channel.ShareDataWith(*input_grad);
}
}
if (compute_format == DataLayout::kNHWC) {
VLOG(3) << "Transform filter and filter_grad tensor from NCHW to NHWC.";
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
TransToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter, &transformed_filter_channel);
if (filter_grad) {
ResizeToChannelLast<platform::CUDADeviceContext, T>(
ctx, filter_grad, &transformed_filter_grad_channel);
}
} else {
transformed_filter_channel.ShareDataWith(*filter);
if (filter_grad) {
transformed_filter_grad_channel.ShareDataWith(*filter_grad);
}
}
// update paddings
auto in_dims = transformed_input_channel.dims();
auto filter_dims = transformed_filter_channel.dims();
framework::DDim in_data_dims;
framework::DDim filter_data_dims;
if (compute_format == DataLayout::kNCHW) {
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
} else {
in_data_dims = framework::slice_ddim(in_dims, 1, in_dims.size() - 1);
filter_data_dims =
framework::slice_ddim(filter_dims, 1, filter_dims.size() - 1);
}
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
    // cuDNN only supports padding by the same amount on both sides of each
    // spatial dimension. So we create a new padded input tensor.
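    // Hypothetical example (added): if the H paddings are {1, 2}, then
    // padding_common = 1 and padding_diff = 1, so transformed_input gets one
    // extra row at the bottom via input_pad and cuDNN is called with a
    // symmetric padding of 1.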
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_input(input->type());
Tensor transformed_input_grad(input->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(transformed_input_channel.dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_input_channel.dims()[0];
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[1] = transformed_input_channel.dims()[1];
} else {
new_input_shape_vec[data_dim + 1] =
transformed_input_channel.dims()[data_dim + 1];
}
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
if (compute_format == DataLayout::kNCHW) {
new_input_shape_vec[i + 2] =
transformed_input_channel.dims()[i + 2] + padding_diff[i];
} else {
new_input_shape_vec[i + 1] =
transformed_input_channel.dims()[i + 1] + padding_diff[i];
}
if (compute_format == DataLayout::kNCHW) {
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
} else {
input_pad[2 * i + 2] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 2 + 1] = paddings[2 * i + 1] - padding_common[i];
}
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
transformed_input_grad.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (input_grad) {
transformed_input_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = transformed_input_channel.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_input_channel, pad_value,
&transformed_input);
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_input.ShareDataWith(transformed_input_channel);
if (input_grad) {
transformed_input_grad.ShareDataWith(transformed_input_grad_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = transformed_input.data<T>();
const T* output_grad_data = transformed_output_grad_channel.data<T>();
const T* filter_data = transformed_filter_channel.data<T>();
T* filter_grad_data = nullptr;
T* input_grad_data = nullptr;
T* transformed_input_grad_data = nullptr;
ConvArgs args1{&transformed_input_grad,
&transformed_filter_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{&transformed_input,
&transformed_filter_grad_channel,
&transformed_output_grad_channel,
strides,
padding_common,
dilations,
dtype};
auto handle = dev_ctx.cudnn_handle();
DataLayout layout = compute_format == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
if (transformed_input.dims().size() == 5) {
layout = compute_format == DataLayout::kNHWC ? DataLayout::kNDHWC
: DataLayout::kNCDHW;
}
auto layout_tensor = GetCudnnTensorFormat(layout);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
int i_n, i_c, i_d, i_h, i_w;
int o_n, o_c, o_d, o_h, o_w;
if (compute_format == DataLayout::kNHWC) {
GetNCDHW(transformed_input.dims(), DataLayout::kNHWC, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNHWC, &o_n,
&o_c, &o_d, &o_h, &o_w);
} else {
GetNCDHW(transformed_input.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d,
&i_h, &i_w);
GetNCDHW(transformed_output_grad_channel.dims(), DataLayout::kNCHW, &o_n,
&o_c, &o_d, &o_h, &o_w);
}
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = transformed_filter_channel.numel() / groups;
// ------------------- cudnn backward algorithm ---------------------
#ifdef PADDLE_WITH_HIP
miopenConvBwdDataAlgorithm_t data_algo =
static_cast<miopenConvBwdDataAlgorithm_t>(0);
miopenConvBwdWeightsAlgorithm_t filter_algo =
static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif
size_t workspace_size = 0;
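    // Added note: a single workspace buffer is sized to the maximum of the
    // data- and filter-gradient algorithm requirements computed below and is
    // reused for both backward calls.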
int iwo_groups = groups;
int c_groups = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_groups = 1;
c_groups = groups;
groups = 1;
#endif
if (input_grad) {
// ------------------- cudnn descriptors ---------------------
input_grad_data = input_grad->data<T>();
transformed_input_grad_data = transformed_input_grad.data<T>();
args1.handle = handle;
args1.idesc.set(transformed_input_grad, layout_tensor);
args1.wdesc.set(transformed_filter_channel, layout_tensor, iwo_groups);
args1.odesc.set(transformed_output_grad_channel, layout_tensor);
args1.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
using search1 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1));
data_algo = search1::Find<T>(args1, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search1 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search1::Find<T>(args1, exhaustive_search, deterministic, ctx);
workspace_size =
std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo));
#endif
}
if (filter_grad) {
// ------------------- cudnn descriptors ---------------------
filter_grad_data = transformed_filter_grad_channel.data<T>();
args2.handle = handle;
args2.idesc.set(transformed_input, layout_tensor);
args2.wdesc.set(transformed_filter_grad_channel, layout_tensor,
iwo_groups);
args2.odesc.set(transformed_output_grad_channel, layout_tensor);
args2.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_groups);
#ifdef PADDLE_WITH_HIP
using search2 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
filter_algo = search2::Find<T>(args2, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search2 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search2::Find<T>(args2, exhaustive_search, deterministic, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, filter_algo));
#endif
}
// ------------------- cudnn conv backward data ---------------------
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f : 0.0f;
VLOG(4) << "Conv_grad: use_addto = " << ctx.Attr<bool>("use_addto");
if (input_grad) {
// When beta is 0, it is unnecessary to reset input_grad.
      // When beta is 1, the output cannot be reset since the addto strategy
      // is used.
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardData(
handle, &alpha, args1.odesc.desc(), output_grad_data,
args1.wdesc.desc(), filter_data, args1.cdesc.desc(),
data_algo, &beta, args1.idesc.desc(),
transformed_input_grad_data, cudnn_workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args1.wdesc.desc(),
filter_data + i * group_offset_filter, args1.odesc.desc(),
output_grad_data + i * group_offset_out,
args1.cdesc.desc(), data_algo, cudnn_workspace_ptr,
workspace_size, &beta, args1.idesc.desc(),
transformed_input_grad_data + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
std::vector<int> starts(transformed_input_channel.dims().size(), 0);
std::vector<int> axes(transformed_input_channel.dims().size(), 0);
for (size_t i = 0; i < transformed_input_channel.dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
transformed_input_grad_channel.mutable_data(ctx.GetPlace());
if (transformed_input_channel.dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_input_grad, &transformed_input_grad_channel,
starts, axes);
}
}
if (channel_last && compute_format == DataLayout::kNCHW) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_input_grad_channel, input_grad);
}
}
    // filter_grad does not use inplace addto.
ScalingParamType<T> beta_filter = 0.0f;
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
// Because beta is zero, it is unnecessary to reset filter_grad.
#ifdef PADDLE_WITH_HIP
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardWeights(
handle, &alpha, args2.odesc.desc(), output_grad_data,
args2.idesc.desc(), input_data, args2.cdesc.desc(),
filter_algo, &beta, args2.wdesc.desc(), filter_grad_data,
cudnn_workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
workspace_handle.RunFunc(
[&](void* cudnn_workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args2.idesc.desc(),
input_data + i * group_offset_in, args2.odesc.desc(),
output_grad_data + i * group_offset_out,
args2.cdesc.desc(), filter_algo, cudnn_workspace_ptr,
workspace_size, &beta_filter, args2.wdesc.desc(),
filter_grad_data + i * group_offset_filter));
},
workspace_size);
}
#endif
if (compute_format == DataLayout::kNHWC) {
TransToChannelFirst<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_filter_grad_channel, filter_grad);
}
}
}
};
/*
* Inputs: I, W, dO, ddI, ddW
* Outputs: ddO, dW, dI
* ddo = conv(ddI, W) + conv(I, ddW)
* dW = conv_bp_filter(ddI, dO)
* dI = conv_bp_data(ddW, dO)
*/
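/*
 * Added clarification: convolution is bilinear in (I, W), so perturbing the
 * inputs by (ddI, ddW) perturbs the output by conv(ddI, W) + conv(I, ddW);
 * back-propagating dO through each perturbed term gives the dW and dI
 * expressions above, each of which maps onto an existing cuDNN primitive.
 */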
template <typename T>
class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
auto X = ctx.Input<Tensor>("Input");
auto W = ctx.Input<Tensor>("Filter");
auto dO = ctx.Input<Tensor>("DOutput");
auto ddX = ctx.Input<Tensor>("DDInput");
auto ddW = ctx.Input<Tensor>("DDFilter");
auto ddO = ctx.Output<Tensor>("DDOutput");
auto dW = ctx.Output<Tensor>("DFilter");
auto dX = ctx.Output<Tensor>("DInput");
if (ddO) {
ddO->mutable_data<T>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, T> set_zero;
set_zero(dev_ctx, ddO, static_cast<T>(0));
}
if (dW) {
dW->mutable_data<T>(ctx.GetPlace());
}
if (dX) {
dX->mutable_data<T>(ctx.GetPlace());
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
const std::vector<int>& strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
bool exhaustive_search =
FLAGS_cudnn_exhaustive_search || ctx.Attr<bool>("exhaustive_search");
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic, false,
platform::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const std::string data_format = ctx.Attr<std::string>("data_format");
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
Tensor transformed_X_channel(X->type());
Tensor transformed_dO_channel(dO->type());
Tensor transformed_ddX_channel(X->type());
Tensor transformed_ddO_channel(dO->type());
Tensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, X, &transformed_X_channel);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<platform::CUDADeviceContext, T>(
ctx, dX, &transformed_dX_channel);
transformed_dX_channel.mutable_data<T>(ctx.GetPlace());
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
framework::DDim in_data_dims =
framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
Tensor transformed_X(X->type());
Tensor transformed_ddX(X->type());
Tensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
transformed_X =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
if (ddX) {
transformed_ddX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
if (dX) {
transformed_dX =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, transformed_ddX_channel, pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = platform::CudnnDataType<T>::type;
auto handle = dev_ctx.cudnn_handle();
ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args2{
&transformed_X, ddW, &transformed_ddO_channel, strides, padding_common,
dilations, dtype};
ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype};
ConvArgs args4{
&transformed_dX, ddW, &transformed_dO_channel, strides, padding_common,
dilations, dtype};
#ifdef PADDLE_WITH_HIP
miopenConvFwdAlgorithm_t fwd_algo1 =
static_cast<miopenConvFwdAlgorithm_t>(0);
miopenConvFwdAlgorithm_t fwd_algo2 =
static_cast<miopenConvFwdAlgorithm_t>(0);
miopenConvBwdDataAlgorithm_t data_algo =
static_cast<miopenConvBwdDataAlgorithm_t>(0);
miopenConvBwdWeightsAlgorithm_t filter_algo =
static_cast<miopenConvBwdWeightsAlgorithm_t>(0);
#else
cudnnConvolutionFwdAlgo_t fwd_algo1 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionFwdAlgo_t fwd_algo2 =
static_cast<cudnnConvolutionFwdAlgo_t>(0);
cudnnConvolutionBwdDataAlgo_t data_algo =
static_cast<cudnnConvolutionBwdDataAlgo_t>(0);
cudnnConvolutionBwdFilterAlgo_t filter_algo =
static_cast<cudnnConvolutionBwdFilterAlgo_t>(0);
#endif
auto layout = GetCudnnTensorFormat(DataLayout::kNCHW);
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search1 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false,
workspace_size, ctx);
#else
using search1 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_algo1);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search2 = SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false,
workspace_size, ctx);
#else
using search2 = SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_algo2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(workspace_size,
search2::GetWorkspaceSize(args2, fwd_algo2));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search3 = SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size =
std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_algo = search3::Find<T>(args3, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search3 = SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_algo =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(workspace_size,
search3::GetWorkspaceSize(args3, filter_algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype, padding_common, strides, dilations,
platform::AllowTF32Cudnn(), c_group);
#ifdef PADDLE_WITH_HIP
using search4 = SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4));
data_algo = search4::Find<T>(args4, exhaustive_search, deterministic,
workspace_size, ctx);
#else
using search4 = SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_algo =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size =
std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h,
&i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(), DataLayout::kNCHW, &o_n, &o_c, &o_d,
&o_h, &o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
ScalingParamType<T> alpha = 1.0f;
ScalingParamType<T> beta = 0.0f;
    // NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = dev_ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args1.idesc.desc(), ddx,
args1.wdesc.desc(), w, args1.cdesc.desc(), fwd_algo1,
&beta, args1.odesc.desc(), transformed_ddy_channel,
workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args1.idesc.desc(),
ddx + i * group_offset_in, args1.wdesc.desc(),
w + i * group_offset_filter, args1.cdesc.desc(),
fwd_algo1, workspace_ptr, workspace_size, &beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
        // MIOPEN only supports beta == 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionForward(
handle, &alpha, args2.idesc.desc(), x, args2.wdesc.desc(),
ddw, args2.cdesc.desc(), fwd_algo2, &beta,
args2.odesc.desc(), transformed_ddy_channel,
workspace_ptr, workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, args2.idesc.desc(),
x + i * group_offset_in, args2.wdesc.desc(),
ddw + i * group_offset_filter, args2.cdesc.desc(),
fwd_algo2, workspace_ptr, workspace_size, &alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardWeights(
handle, &alpha, args3.odesc.desc(), transformed_dy_channel,
args3.idesc.desc(), ddx, args3.cdesc.desc(), filter_algo,
&beta, args3.wdesc.desc(), dw, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, args3.idesc.desc(),
ddx + i * group_offset_in, args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(), filter_algo, workspace_ptr,
workspace_size, &beta, args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::miopenConvolutionBackwardData(
handle, &alpha, args4.odesc.desc(), transformed_dy_channel,
args4.wdesc.desc(), ddw, args4.cdesc.desc(), data_algo,
&beta, args4.idesc.desc(), transformed_dx, workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, args4.wdesc.desc(),
ddw + i * group_offset_filter, args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(), data_algo, workspace_ptr,
workspace_size, &beta, args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_dX_channel, dX);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
#else
REGISTER_OP_KERNEL(conv2d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv2d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>,
paddle::operators::CUDNNConvGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(
conv2d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(
depthwise_conv2d_grad_grad,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvOpKernel<float>,
paddle::operators::CUDNNConvOpKernel<double>,
paddle::operators::CUDNNConvOpKernel<plat::float16>);
REGISTER_OP_KERNEL(conv3d_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvGradOpKernel<float>,
paddle::operators::CUDNNConvGradOpKernel<double>);
REGISTER_OP_KERNEL(
conv3d_grad_grad, CUDNN, plat::CUDAPlace,
paddle::operators::CUDNNConvDoubleGradOpKernel<float>,
paddle::operators::CUDNNConvDoubleGradOpKernel<double>,
paddle::operators::CUDNNConvDoubleGradOpKernel<plat::float16>);
#endif
|
681c2b1cf0b3ae4bcaed4c2b92eb6a95d43d4af0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
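// element-wise add: each thread handles index threadIdx.x of the passed half-vectors,
// so every launch must use a single block; the size argument is unused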
__global__ void vecadd( int * v0, int * v1, std::size_t size )
{
auto tid = threadIdx.x;
v0[ tid ] += v1[ tid ];
}
int main()
{
hipError_t err;
std::size_t const size = 100;
std::size_t const sizeb = size * sizeof( int );
std::vector< int > v0( size );
std::vector< int > v1( size );
/*
int * v0_h = nullptr;
int * v1_h = nullptr;
*/
for( std::size_t i = 0 ; i < size ; ++i )
{
v0[ i ] = v1[ i ] = i;
}
int * v0_d = nullptr;
int * v1_d = nullptr;
hipHostRegister( v0.data(), sizeb, hipHostRegisterDefault );
hipHostRegister( v1.data(), sizeb, hipHostRegisterDefault );
/*
err = hipHostMalloc( &v0_h, sizeb );
if( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc( &v1_h, sizeb);
if( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
*/
/*
for( std::size_t i = 0 ; i < size ; ++i )
{
v0_h[ i ] = 5;
v1_h[ i ] = 5;
}
*/
err = hipMalloc( &v0_d, sizeb );
if( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMalloc( &v1_d, sizeb );
if( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
hipStream_t streams[ 2 ];
for( std::size_t i = 0 ; i < 2 ; ++i )
{
hipStreamCreate( &streams[ i ] );
}
// copy both input halves to the device asynchronously on this stream
for( std::size_t i = 0 ; i < 2 ; ++i )
{
err = hipMemcpyAsync( v0_d + i*size/2, v0.data() + i*size/2, sizeb/2, hipMemcpyHostToDevice, streams[ i ] );
if( err != hipSuccess ) { std::cerr << "Error 3" << std::endl; }
err = hipMemcpyAsync( v1_d + i*size/2, v1.data() + i*size/2, sizeb/2, hipMemcpyHostToDevice, streams[ i ] );
if( err != hipSuccess ) { std::cerr << "Error 3" << std::endl; }
}
for( std::size_t i = 0 ; i < 2 ; ++i )
{
hipLaunchKernelGGL(( vecadd), dim3(1), dim3(50), 0, streams[ i ] , v0_d + i*size/2, v1_d + i*size/2, size/2 );
err = hipGetLastError();
if( err != hipSuccess ) { std::cerr << "Error 3.5" << std::endl; }
}
for( std::size_t i = 0 ; i < 2 ; ++i )
{
err = hipMemcpyAsync( v0.data() + i*size/2, v0_d + i*size/2, sizeb/2, hipMemcpyDeviceToHost, streams[ i ] );
if( err != hipSuccess ) { std::cerr << "Error 4" << std::endl; }
}
hipDeviceSynchronize( );
for( std::size_t i = 0 ; i < 2 ; ++i )
{
hipStreamDestroy( streams[ i ] );
}
for( auto x: v0 )
{
std::cout << x << std::endl;
}
return 0;
} | 681c2b1cf0b3ae4bcaed4c2b92eb6a95d43d4af0.cu | #include <iostream>
#include <vector>
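// element-wise add: each thread handles index threadIdx.x of the passed half-vectors,
// so every launch must use a single block; the size argument is unused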
__global__ void vecadd( int * v0, int * v1, std::size_t size )
{
auto tid = threadIdx.x;
v0[ tid ] += v1[ tid ];
}
int main()
{
cudaError_t err;
std::size_t const size = 100;
std::size_t const sizeb = size * sizeof( int );
std::vector< int > v0( size );
std::vector< int > v1( size );
/*
int * v0_h = nullptr;
int * v1_h = nullptr;
*/
for( std::size_t i = 0 ; i < size ; ++i )
{
v0[ i ] = v1[ i ] = i;
}
int * v0_d = nullptr;
int * v1_d = nullptr;
cudaHostRegister( v0.data(), sizeb, cudaHostRegisterDefault );
cudaHostRegister( v1.data(), sizeb, cudaHostRegisterDefault );
/*
err = cudaMallocHost( &v0_h, sizeb );
if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost( &v1_h, sizeb);
if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
*/
/*
for( std::size_t i = 0 ; i < size ; ++i )
{
v0_h[ i ] = 5;
v1_h[ i ] = 5;
}
*/
err = cudaMalloc( &v0_d, sizeb );
if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMalloc( &v1_d, sizeb );
if( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
cudaStream_t streams[ 2 ];
for( std::size_t i = 0 ; i < 2 ; ++i )
{
cudaStreamCreate( &streams[ i ] );
}
// copy both input halves to the device asynchronously on this stream
for( std::size_t i = 0 ; i < 2 ; ++i )
{
err = cudaMemcpyAsync( v0_d + i*size/2, v0.data() + i*size/2, sizeb/2, cudaMemcpyHostToDevice, streams[ i ] );
if( err != cudaSuccess ) { std::cerr << "Error 3" << std::endl; }
err = cudaMemcpyAsync( v1_d + i*size/2, v1.data() + i*size/2, sizeb/2, cudaMemcpyHostToDevice, streams[ i ] );
if( err != cudaSuccess ) { std::cerr << "Error 3" << std::endl; }
}
for( std::size_t i = 0 ; i < 2 ; ++i )
{
vecadd<<< 1, 50, 0, streams[ i ] >>>( v0_d + i*size/2, v1_d + i*size/2, size/2 );
err = cudaGetLastError();
if( err != cudaSuccess ) { std::cerr << "Error 3.5" << std::endl; }
}
for( std::size_t i = 0 ; i < 2 ; ++i )
{
err = cudaMemcpyAsync( v0.data() + i*size/2, v0_d + i*size/2, sizeb/2, cudaMemcpyDeviceToHost, streams[ i ] );
if( err != cudaSuccess ) { std::cerr << "Error 4" << std::endl; }
}
cudaDeviceSynchronize( );
for( std::size_t i = 0 ; i < 2 ; ++i )
{
cudaStreamDestroy( streams[ i ] );
}
for( auto x: v0 )
{
std::cout << x << std::endl;
}
return 0;
} |
254272ea2d8a6e088e899e8488c09e460213c3da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void GpuUpdateLossScaling(
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FusedFillIf(T** outs, const size_t xs_size,
const int64_t* starts, const T value,
const bool* has_inf) {
if (!(*has_inf)) return;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
// copy starts array from global memory to shared memory
extern __shared__ int64_t s_starts[];
for (int i = threadIdx.x; i <= xs_size; i += blockDim.x) {
s_starts[i] = starts[i];
}
__syncthreads();
const int64_t total_num = s_starts[xs_size];
int out_index = 0;
for (int64_t id = tid; id < total_num; id += blockDim.x * gridDim.x) {
// get the "out" index of "id"
// For example:
// id = 15, starts = [0, 10, 10, 20, 30]
// because 10 <= id < 20 ==>
// the element with this id lies in the 3rd tensor (note that the 2nd tensor's size is 0)
int next_out_index = out_index;
while (id >= s_starts[next_out_index]) next_out_index++;
out_index = next_out_index - 1;
// get data pointer and index
T* out_data = outs[out_index];
int64_t idx = id - s_starts[out_index];
// set value
out_data[idx] = value;
}
}
template <typename T>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
hipLaunchKernelGGL(( GpuUpdateLossScaling<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
size_t xs_size = xs.size();
const auto& cpu_place = platform::CPUPlace();
// alloc each tensor's start index and copy to device
auto h_in_starts_mem =
memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t));
int64_t* h_starts = reinterpret_cast<int64_t*>(h_in_starts_mem->ptr());
auto d_in_starts_mem =
memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t));
int64_t* d_starts = reinterpret_cast<int64_t*>(d_in_starts_mem->ptr());
// the start index value of each tensor is
// the sum of the previous tensors' sizes. For example:
// outs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30]
h_starts[0] = 0;
for (int i = 0; i < xs_size; i++) {
h_starts[i + 1] = h_starts[i] + outs[i]->numel();
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t),
dev_ctx.stream());
// copy each tensor of "outs" data address array to device
auto h_out_addrs_mem = memory::Alloc(cpu_place, xs_size * sizeof(T*));
T** h_out_addrs = reinterpret_cast<T**>(h_out_addrs_mem->ptr());
auto d_out_addrs_mem = memory::Alloc(dev_ctx, xs_size * sizeof(T*));
T** d_out_addrs = reinterpret_cast<T**>(d_out_addrs_mem->ptr());
for (size_t i = 0; i < xs_size; ++i) {
h_out_addrs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace());
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_out_addrs, cpu_place, h_out_addrs, xs_size * sizeof(T*),
dev_ctx.stream());
// launch cuda kernel
int64_t total_num = h_starts[xs_size];
int64_t threads_per_block = std::min(static_cast<int64_t>(1024), total_num);
int64_t elements_per_block =
threads_per_block * 50; // each thread deal with 50 data
int64_t blocks_per_grid =
(total_num + elements_per_block - 1) / elements_per_block;
hipLaunchKernelGGL(( FusedFillIf<T>), dim3(blocks_per_grid), dim3(threads_per_block),
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream(),
d_out_addrs, xs_size, d_starts, static_cast<T>(0), found_inf_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
| 254272ea2d8a6e088e899e8488c09e460213c3da.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void GpuUpdateLossScaling(
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FusedFillIf(T** outs, const size_t xs_size,
const int64_t* starts, const T value,
const bool* has_inf) {
if (!(*has_inf)) return;
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
// copy starts array from global memory to shared memory
extern __shared__ int64_t s_starts[];
for (int i = threadIdx.x; i <= xs_size; i += blockDim.x) {
s_starts[i] = starts[i];
}
__syncthreads();
const int64_t total_num = s_starts[xs_size];
int out_index = 0;
for (int64_t id = tid; id < total_num; id += blockDim.x * gridDim.x) {
// get the "out" index of "id"
// For example:
// id = 15, starts = [0, 10, 10, 20, 30]
// because 10 <= id < 20 ==>
// the element with this id lies in the 3rd tensor (note that the 2nd tensor's size is 0)
int next_out_index = out_index;
while (id >= s_starts[next_out_index]) next_out_index++;
out_index = next_out_index - 1;
// get data pointer and index
T* out_data = outs[out_index];
int64_t idx = id - s_starts[out_index];
// set value
out_data[idx] = value;
}
}
template <typename T>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
GpuUpdateLossScaling<T><<<1, 1, 0, dev_ctx.stream()>>>(
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
size_t xs_size = xs.size();
const auto& cpu_place = platform::CPUPlace();
// alloc each tensor's start index and copy to device
auto h_in_starts_mem =
memory::Alloc(cpu_place, (xs_size + 1) * sizeof(int64_t));
int64_t* h_starts = reinterpret_cast<int64_t*>(h_in_starts_mem->ptr());
auto d_in_starts_mem =
memory::Alloc(dev_ctx, (xs_size + 1) * sizeof(int64_t));
int64_t* d_starts = reinterpret_cast<int64_t*>(d_in_starts_mem->ptr());
// the start index value of each tensor is
// the sum of the previous tensors' sizes. For example:
// outs = [10, 0, 10, 10] ==> starts = [0, 10, 10, 20, 30]
h_starts[0] = 0;
for (int i = 0; i < xs_size; i++) {
h_starts[i + 1] = h_starts[i] + outs[i]->numel();
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_starts, cpu_place, h_starts, (xs_size + 1) * sizeof(int64_t),
dev_ctx.stream());
// copy each tensor of "outs" data address array to device
auto h_out_addrs_mem = memory::Alloc(cpu_place, xs_size * sizeof(T*));
T** h_out_addrs = reinterpret_cast<T**>(h_out_addrs_mem->ptr());
auto d_out_addrs_mem = memory::Alloc(dev_ctx, xs_size * sizeof(T*));
T** d_out_addrs = reinterpret_cast<T**>(d_out_addrs_mem->ptr());
for (size_t i = 0; i < xs_size; ++i) {
h_out_addrs[i] = outs[i]->mutable_data<T>(dev_ctx.GetPlace());
}
memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
d_out_addrs, cpu_place, h_out_addrs, xs_size * sizeof(T*),
dev_ctx.stream());
// launch cuda kernel
int64_t total_num = h_starts[xs_size];
int64_t threads_per_block = std::min(static_cast<int64_t>(1024), total_num);
int64_t elements_per_block =
threads_per_block * 50; // each thread deal with 50 data
int64_t blocks_per_grid =
(total_num + elements_per_block - 1) / elements_per_block;
FusedFillIf<T><<<blocks_per_grid, threads_per_block,
(xs_size + 1) * sizeof(int64_t), dev_ctx.stream()>>>(
d_out_addrs, xs_size, d_starts, static_cast<T>(0), found_inf_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
|
5f82aefa0a06b0e935db75ba7319de8a3481e489.hip | // !!! This is a file automatically generated by hipify!!!
#include "KinLimitBWPdf.hh"
EXEC_TARGET fptype getMomentum (fptype mass, fptype pimass, fptype d0mass) {
if (mass <= 0) return 0;
double lambda = mass*mass - pimass*pimass - d0mass*d0mass;
lambda *= lambda;
lambda -= 4*pimass*pimass*d0mass*d0mass;
if (lambda <= 0) return 0;
return SQRT(0.5*lambda/mass);
}
EXEC_TARGET fptype bwFactor (fptype momentum) {
// 2.56 = 1.6^2, comes from radius for spin-1 particle
return 1/SQRT(1.0 + 2.56 * momentum*momentum);
}
EXEC_TARGET fptype device_KinLimitBW (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype mean = p[indices[1]];
fptype width = p[indices[2]];
fptype d0mass = functorConstants[indices[3]+0];
fptype pimass = functorConstants[indices[3]+1];
mean += d0mass;
x += d0mass;
fptype pUsingRealMass = getMomentum(mean, pimass, d0mass);
if (0 >= pUsingRealMass) return 0;
mean *= mean;
fptype pUsingX = getMomentum(x, pimass, d0mass);
fptype phspfactor = pow(pUsingX / pUsingRealMass, 3) * pow(bwFactor(pUsingX) / bwFactor(pUsingRealMass), 2);
fptype phspMassSq = pow(mean - x*x, 2);
fptype phspGammaSq = pow(width*phspfactor, 2);
fptype ret = (phspfactor * mean*width*width)/(phspMassSq + mean*phspGammaSq);
#ifdef CUDAPRINT
/*
if (((0 == THREADIDX) && (0 == BLOCKIDX) && (callnumber < 10)) || (isnan(ret)))
cuPrintf("KinLimitBW %f %f %f %f %f %f %f %f %f %f\n",
p[indices[1]],
width,
x - d0mass,
pUsingX,
pUsingRealMass,
bwFactor(pUsingRealMass),
phspfactor,
phspMassSq,
phspGammaSq,
ret);
*/
#endif
// if (gpuDebug & 1) printf("[%i, %i] KinLimitBW: %f %f %f %f %f\n", BLOCKIDX, THREADIDX, x, mean, width, d0mass, pimass, ret);
return ret;
}
MEM_DEVICE device_function_ptr ptr_to_KinLimitBW = device_KinLimitBW;
__host__ KinLimitBWPdf::KinLimitBWPdf (std::string n, Variable* _x, Variable* mean, Variable* width)
: GooPdf(_x, n)
{
registerParameter(mean);
registerParameter(width);
std::vector<unsigned int> pindices;
pindices.push_back(mean->getIndex());
pindices.push_back(width->getIndex());
pindices.push_back(registerConstants(2));
setMasses(1.8645, 0.13957);
GET_FUNCTION_ADDR(ptr_to_KinLimitBW);
initialise(pindices);
}
__host__ void KinLimitBWPdf::setMasses (fptype bigM, fptype smallM) {
fptype constants[2];
constants[0] = bigM;
constants[1] = smallM;
MEMCPY_TO_SYMBOL(functorConstants, constants, 2*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice);
}
| 5f82aefa0a06b0e935db75ba7319de8a3481e489.cu | #include "KinLimitBWPdf.hh"
EXEC_TARGET fptype getMomentum (fptype mass, fptype pimass, fptype d0mass) {
if (mass <= 0) return 0;
double lambda = mass*mass - pimass*pimass - d0mass*d0mass;
lambda *= lambda;
lambda -= 4*pimass*pimass*d0mass*d0mass;
if (lambda <= 0) return 0;
return SQRT(0.5*lambda/mass);
}
EXEC_TARGET fptype bwFactor (fptype momentum) {
// 2.56 = 1.6^2, comes from radius for spin-1 particle
return 1/SQRT(1.0 + 2.56 * momentum*momentum);
}
EXEC_TARGET fptype device_KinLimitBW (fptype* evt, fptype* p, unsigned int* indices) {
fptype x = evt[indices[2 + indices[0]]];
fptype mean = p[indices[1]];
fptype width = p[indices[2]];
fptype d0mass = functorConstants[indices[3]+0];
fptype pimass = functorConstants[indices[3]+1];
mean += d0mass;
x += d0mass;
fptype pUsingRealMass = getMomentum(mean, pimass, d0mass);
if (0 >= pUsingRealMass) return 0;
mean *= mean;
fptype pUsingX = getMomentum(x, pimass, d0mass);
fptype phspfactor = pow(pUsingX / pUsingRealMass, 3) * pow(bwFactor(pUsingX) / bwFactor(pUsingRealMass), 2);
fptype phspMassSq = pow(mean - x*x, 2);
fptype phspGammaSq = pow(width*phspfactor, 2);
fptype ret = (phspfactor * mean*width*width)/(phspMassSq + mean*phspGammaSq);
#ifdef CUDAPRINT
/*
if (((0 == THREADIDX) && (0 == BLOCKIDX) && (callnumber < 10)) || (isnan(ret)))
cuPrintf("KinLimitBW %f %f %f %f %f %f %f %f %f %f\n",
p[indices[1]],
width,
x - d0mass,
pUsingX,
pUsingRealMass,
bwFactor(pUsingRealMass),
phspfactor,
phspMassSq,
phspGammaSq,
ret);
*/
#endif
// if (gpuDebug & 1) printf("[%i, %i] KinLimitBW: %f %f %f %f %f\n", BLOCKIDX, THREADIDX, x, mean, width, d0mass, pimass, ret);
return ret;
}
MEM_DEVICE device_function_ptr ptr_to_KinLimitBW = device_KinLimitBW;
__host__ KinLimitBWPdf::KinLimitBWPdf (std::string n, Variable* _x, Variable* mean, Variable* width)
: GooPdf(_x, n)
{
registerParameter(mean);
registerParameter(width);
std::vector<unsigned int> pindices;
pindices.push_back(mean->getIndex());
pindices.push_back(width->getIndex());
pindices.push_back(registerConstants(2));
setMasses(1.8645, 0.13957);
GET_FUNCTION_ADDR(ptr_to_KinLimitBW);
initialise(pindices);
}
__host__ void KinLimitBWPdf::setMasses (fptype bigM, fptype smallM) {
fptype constants[2];
constants[0] = bigM;
constants[1] = smallM;
MEMCPY_TO_SYMBOL(functorConstants, constants, 2*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice);
}
|
91be6a3f5748189ed851128ea8846ff75d4488cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
return 0;
} | 91be6a3f5748189ed851128ea8846ff75d4488cd.cu | #include <stdio.h>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
cuda_hello<<<1,1>>>();
cudaDeviceSynchronize();
return 0;
} |
62431a779b21bec8d78e0896371abc5fabcc947e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : main.cu
Author : imtsuki
Version : 0.1.0
Copyright : imtsuki <[email protected]>
Description : Flexible Job Shop Scheduling Problem
============================================================================
*/
#include <iostream>
#include <fstream>
#include <numeric>
#include <cstdlib>
#include <climits>
#include <stdexcept>
#include <ctime>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
static void CheckCudaErrorAux(const char *, unsigned, const char *,
hipError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
const int MAX_OPERATIONS_PER_STEP = 5;
const int MAX_STEPS_PER_JOB = 20;
const int MAX_JOBS = 20;
const int MAX_MACHINES = 20;
int POPULATION_SIZE = 2000;
int INDIVIDUAL_LEN = 20; // TODO
const int SIZE_PARENT_POOL = 7;
int TOTALTHREADS = 2048;
int BLOCKSIZE = 1024;
int total_jobs, total_machines, max_operations;
struct Operation {
int id_machine;
int processing_time;
};
struct Step {
int len;
Operation candidates[MAX_OPERATIONS_PER_STEP];
};
struct Job {
int len;
Step steps[MAX_STEPS_PER_JOB];
};
Job input_data[MAX_JOBS];
struct Gene {
int id_job;
int id_step;
// Make sure update them both.
int id_machine;
int id_operation;
};
std::ostream &operator<<(std::ostream &os, const Gene &gene) {
os << "[" << gene.id_job << ", " << gene.id_step << ", "
<< gene.id_operation << "]";
return os;
}
void parse_input(const char *path) {
auto input = std::ifstream();
input.exceptions(std::ifstream::failbit);
input.open(path);
input >> total_jobs >> total_machines >> max_operations;
if (total_jobs > MAX_JOBS) {
throw std::runtime_error("Too many jobs");
}
if (total_machines > MAX_MACHINES) {
throw std::runtime_error("Too many machines");
}
INDIVIDUAL_LEN = 0;
for (int id_job = 0; id_job < total_jobs; id_job++) {
int number_steps;
input >> number_steps;
if (number_steps > MAX_STEPS_PER_JOB) {
throw std::runtime_error("Too many steps");
}
input_data[id_job].len = number_steps;
for (int id_step = 0; id_step < number_steps; id_step++) {
int number_operations;
input >> number_operations;
if (number_operations > MAX_OPERATIONS_PER_STEP) {
throw std::runtime_error("Too many operations");
}
input_data[id_job].steps[id_step].len = number_operations;
for (int id_operation = 0; id_operation < number_operations;
id_operation++) {
int id_machine;
int processing_time;
input >> id_machine >> processing_time;
input_data[id_job].steps[id_step].candidates[id_operation].id_machine =
id_machine - 1;
input_data[id_job].steps[id_step].candidates[id_operation].processing_time =
processing_time;
}
INDIVIDUAL_LEN++;
}
}
}
__global__ void init_rand_kernel(hiprandState_t *states, int seed) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, idx, 0, &states[idx]);
}
__global__ void fill_rand_kernel(int *numbers, int len, int max_value,
hiprandState_t *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
numbers[idx] = hiprand(&states[idx]) % max_value;
}
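// build one random, precedence-feasible chromosome per individual: repeatedly draw a
// random job and append its next unscheduled step until the sequence is full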
__global__ void init_population_kernel(Gene *population, int population_size,
int individual_len, Job *jobs, int total_jobs,
hiprandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int next_step[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
int cursor = 0;
Gene *me = population + i * individual_len;
memset(next_step, 0, sizeof(next_step));
while (cursor < individual_len) {
int id_job = hiprand(&rand_states[i]) % total_jobs;
if (next_step[id_job] < jobs[id_job].len) {
me[cursor].id_job = id_job;
me[cursor].id_step = next_step[id_job];
next_step[id_job]++;
cursor++;
}
}
}
}
}
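// tournament selection: for every slot, keep the index of the best-scoring (lowest
// makespan) candidate among SIZE_PARENT_POOL random picks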
__global__ void pick_parents_kernel(int *parents, int *parent_candidates,
int *scores, int population_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
int best_score = INT_MAX;
int best_index = -1;
for (int j = 0; j < SIZE_PARENT_POOL; j++) {
int k = parent_candidates[i * SIZE_PARENT_POOL + j];
if (scores[k] < best_score) {
best_score = scores[k];
best_index = k;
}
}
parents[i] = best_index;
}
}
}
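// assignment crossover: the child keeps parent_a's operation sequence but takes the
// machine/operation choices from parent_b, matched per (job, step) via a reverse index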
__device__ void assignment_crossover(Gene *child, Gene *parent_a,
Gene *parent_b, int individual_len, Job *jobs) {
int reverse_index[MAX_JOBS][MAX_STEPS_PER_JOB];
for (int s = 0; s < individual_len; s++) {
int id_job = parent_b[s].id_job;
int id_step = parent_b[s].id_step;
reverse_index[id_job][id_step] = s;
}
for (int s = 0; s < individual_len; s++) {
int id_job = parent_a[s].id_job;
int id_step = parent_a[s].id_step;
int i = reverse_index[id_job][id_step];
child[s] = parent_a[s];
child[s].id_operation = parent_b[i].id_operation;
child[s].id_machine = parent_b[i].id_machine;
}
}
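// sequencing crossover: copy parent_b up to a random crossover point, then complete the
// sequence in parent_a's order, skipping steps that are already scheduled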
__device__ void sequencing_crossover(Gene *child, Gene *parent_a,
Gene *parent_b, int individual_len, Job *jobs,
hiprandState_t *rand_state) {
int crossover_point = hiprand(rand_state) % individual_len;
int last_step[MAX_JOBS];
for (int i = 0; i < MAX_JOBS; i++) {
last_step[i] = -1;
}
for (int s = 0; s < crossover_point; s++) {
int id_job = parent_b[s].id_job;
int id_step = parent_b[s].id_step;
child[s] = parent_b[s];
last_step[id_job] = id_step;
}
int cursor = crossover_point;
for (int s = 0; s < individual_len; s++) {
int id_job = parent_a[s].id_job;
if (last_step[id_job] < parent_a[s].id_step) {
child[cursor] = parent_a[s];
cursor++;
}
}
}
__device__ void assignment_mutation(Gene *individual, int individual_len,
Job *jobs, hiprandState_t *rand_state) {
int count = 5;
while (count--) {
int mutation_point = hiprand(rand_state) % individual_len;
int id_job = individual[mutation_point].id_job;
int id_step = individual[mutation_point].id_step;
int len = jobs[id_job].steps[id_step].len;
int id_operation = hiprand(rand_state) % len;
individual[mutation_point].id_operation = id_operation;
individual[mutation_point].id_machine =
jobs[id_job].steps[id_step].candidates[id_operation].id_machine;
}
}
__device__ void swapping_mutation(Gene *individual, int individual_len,
Job *jobs, hiprandState_t *rand_state) {
int count = 5;
while (count--) {
int mutation_point = hiprand(rand_state) % (individual_len - 1);
if (individual[mutation_point].id_job
!= individual[mutation_point + 1].id_job) {
thrust::swap(individual[mutation_point],
individual[mutation_point + 1]);
}
}
}
__global__ void stage_1_breed_kernel(int *parents, Gene *population,
Gene *new_population, int population_size, int individual_len,
Job *jobs, hiprandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
if (i < population_size * 8 / 10) {
sequencing_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
for (int s = 0; s < individual_len; s++) {
new_population[i * individual_len + s] =
population[parents[i] * individual_len + s];
}
swapping_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
}
}
}
}
__global__ void stage_2_breed_kernel(int *parents, Gene *population,
Gene *new_population, int population_size, int individual_len,
Job *jobs, hiprandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
if (i < population_size * 4 / 10) {
assignment_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs);
} else if (i < population_size * 8 / 10) {
sequencing_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
for (int s = 0; s < individual_len; s++) {
new_population[i * individual_len + s] =
population[parents[i] * individual_len + s];
}
if (i < population_size * 9 / 10) {
assignment_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
swapping_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
}
}
}
}
}
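// stage-1 decoder: for each gene, greedily pick the candidate machine with the earliest
// finish time, write the choice back into the chromosome, and score by the makespan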
__global__ void stage_1_evaluate_kernel(int *scores, Gene *population,
int population_size, int individual_len, Job *jobs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int value;
int machines[MAX_MACHINES];
int last_step_id_machine[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
value = 0;
memset(machines, 0, sizeof(machines));
Gene *me = population + i * individual_len;
for (int s = 0; s < individual_len; s++) {
int id_job = me[s].id_job;
int id_step = me[s].id_step;
int len = jobs[id_job].steps[id_step].len;
int best_end_time = INT_MAX;
int best_id_operation = -1;
int best_id_machine = -1;
// Greedy search to find best operation in this step
for (int id_operation = 0; id_operation < len; id_operation++) {
int processing_time =
jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
int id_machine =
jobs[id_job].steps[id_step].candidates[id_operation].id_machine;
int machine_end_time = machines[id_machine];
if (id_step > 0) {
int previous_id_machine = last_step_id_machine[id_job];
if (machine_end_time < machines[previous_id_machine]) {
machine_end_time = machines[previous_id_machine];
}
}
machine_end_time += processing_time;
if (machine_end_time < best_end_time) {
best_end_time = machine_end_time;
best_id_operation = id_operation;
best_id_machine = id_machine;
}
}
me[s].id_operation = best_id_operation;
me[s].id_machine = best_id_machine;
machines[best_id_machine] = best_end_time;
last_step_id_machine[id_job] = best_id_machine;
if (best_end_time > value) {
value = best_end_time;
}
}
scores[i] = value;
}
}
}
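// stage-2 decoder: compute the makespan using the machine assignments already stored in
// the chromosome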
__global__ void stage_2_evaluate_kernel(int *scores, Gene *population,
int population_size, int individual_len, Job *jobs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int value;
int machines[MAX_MACHINES];
int last_step_id_machine[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
value = 0;
memset(machines, 0, sizeof(machines));
Gene *me = population + i * individual_len;
for (int s = 0; s < individual_len; s++) {
int id_job = me[s].id_job;
int id_step = me[s].id_step;
int id_machine = me[s].id_machine;
int id_operation = me[s].id_operation;
int processing_time =
jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
int previous_id_machine = last_step_id_machine[id_job];
machines[id_machine] =
(id_step > 0
&& machines[id_machine]
< machines[previous_id_machine]) ?
machines[previous_id_machine] :
machines[id_machine];
machines[id_machine] += processing_time;
value = machines[id_machine] > value ?
machines[id_machine] : value;
last_step_id_machine[id_job] = id_machine;
}
scores[i] = value;
}
}
}
int main(int argc, const char *argv[]) {
hipDeviceProp_t prop;
CUDA_CHECK_RETURN(hipGetDeviceProperties(&prop, 0));
std::cout << "GPU device: " << prop.name << std::endl;
std::cout << "Number of SM: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared memory per block: " << prop.sharedMemPerBlock / 1024.0
<< " KB" << std::endl;
std::cout << "Max Threads per block: " << prop.maxThreadsPerBlock
<< std::endl;
std::cout << "Max Threads per SM: " << prop.maxThreadsPerMultiProcessor
<< std::endl;
const char *path = "./data/mk01.fjs";
if (argc >= 2) {
path = argv[1];
}
parse_input(path);
std::cout << "total_jobs: " << total_jobs << "\n";
std::cout << "total_machines: " << total_machines << "\n";
std::cout << "INDIVIDUAL_LEN: " << INDIVIDUAL_LEN << "\n";
std::cout << "input data:\n";
for (int id_job = 0; id_job < total_jobs; id_job++) {
std::cout << "[Job " << id_job << "] ";
for (int id_step = 0; id_step < input_data[id_job].len; id_step++) {
std::cout << id_step << ": ";
for (int id_operation = 0;
id_operation < input_data[id_job].steps[id_step].len;
id_operation++) {
std::cout << "("
<< input_data[id_job].steps[id_step].candidates[id_operation].id_machine
<< ", "
<< input_data[id_job].steps[id_step].candidates[id_operation].processing_time
<< ") ";
}
}
std::cout << "\n";
}
Job *jobs;
CUDA_CHECK_RETURN(hipMalloc((void ** )&jobs, MAX_JOBS * sizeof(Job)));
CUDA_CHECK_RETURN(
hipMemcpy(jobs, input_data, MAX_JOBS * sizeof(Job),
hipMemcpyHostToDevice));
thrust::device_vector<Gene> population(POPULATION_SIZE * INDIVIDUAL_LEN);
thrust::device_vector<int> scores(POPULATION_SIZE);
thrust::device_vector<Gene> new_population(
POPULATION_SIZE * INDIVIDUAL_LEN);
Gene *pop_ptr = thrust::raw_pointer_cast(&population[0]);
Gene *new_pop_ptr = thrust::raw_pointer_cast(&new_population[0]);
int *scores_ptr = thrust::raw_pointer_cast(&scores[0]);
hiprandState_t *parent_candidates_states;
CUDA_CHECK_RETURN(
hipMalloc((void ** )&parent_candidates_states,
POPULATION_SIZE * SIZE_PARENT_POOL
* sizeof(hiprandState_t)));
hiprandState_t *population_states;
CUDA_CHECK_RETURN(
hipMalloc((void ** )&population_states,
POPULATION_SIZE * sizeof(hiprandState_t)));
// Parent candidate indexes
int *parent_candidates;
CUDA_CHECK_RETURN(
hipMalloc((void ** )&parent_candidates,
POPULATION_SIZE * SIZE_PARENT_POOL * sizeof(int)));
// Picked parent indexes
int *parents;
CUDA_CHECK_RETURN(
hipMalloc((void ** )&parents, POPULATION_SIZE * sizeof(int)));
hipLaunchKernelGGL(( init_rand_kernel), dim3(POPULATION_SIZE), dim3(1), 0, 0, population_states, time(0));
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( init_rand_kernel), dim3(POPULATION_SIZE * SIZE_PARENT_POOL), dim3(1), 0, 0,
parent_candidates_states, time(0));
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( init_population_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, pop_ptr,
POPULATION_SIZE, INDIVIDUAL_LEN, jobs, total_jobs,
population_states);
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( stage_1_evaluate_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, scores_ptr, pop_ptr,
POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(hipPeekAtLastError());
int stage_1 = 3000;
while (stage_1--) {
hipLaunchKernelGGL(( fill_rand_kernel), dim3(POPULATION_SIZE * SIZE_PARENT_POOL), dim3(1), 0, 0,
parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
POPULATION_SIZE, parent_candidates_states);
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( pick_parents_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, parents,
parent_candidates, scores_ptr, POPULATION_SIZE);
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( stage_1_breed_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, parents, pop_ptr,
new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
population_states);
CUDA_CHECK_RETURN(hipPeekAtLastError());
thrust::copy(thrust::device, new_population.begin(),
new_population.end(), population.begin());
hipLaunchKernelGGL(( stage_1_evaluate_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, scores_ptr,
pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(hipPeekAtLastError());
if (stage_1 % 100 == 0) {
int min_score = *thrust::min_element(scores.begin(), scores.end());
std::cout << "stage_1: " << stage_1 << " score: " << min_score
<< std::endl;
}
}
int stage_2 = 2000;
while (stage_2--) {
hipLaunchKernelGGL(( fill_rand_kernel), dim3(POPULATION_SIZE * SIZE_PARENT_POOL), dim3(1), 0, 0,
parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
POPULATION_SIZE, parent_candidates_states);
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( pick_parents_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, parents,
parent_candidates, scores_ptr, POPULATION_SIZE);
CUDA_CHECK_RETURN(hipPeekAtLastError());
hipLaunchKernelGGL(( stage_2_breed_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, parents, pop_ptr,
new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
population_states);
CUDA_CHECK_RETURN(hipPeekAtLastError());
thrust::copy(thrust::device, new_population.begin(),
new_population.end(), population.begin());
hipLaunchKernelGGL(( stage_2_evaluate_kernel), dim3(TOTALTHREADS), dim3(BLOCKSIZE), 0, 0, scores_ptr,
pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(hipPeekAtLastError());
if (stage_2 % 100 == 0) {
int min_score = *thrust::min_element(scores.begin(), scores.end());
std::cout << "stage_2: " << stage_2 << " score: " << min_score
<< std::endl;
}
}
auto min_iter = thrust::min_element(scores.begin(), scores.end());
int index = min_iter - scores.begin();
std::cout << "Done" << std::endl;
std::cout << "Best solution score: " << scores[index] << std::endl;
for (int i = 0; i < INDIVIDUAL_LEN; i++) {
std::cout << population[index * INDIVIDUAL_LEN + i] << " ";
}
std::cout << std::endl;
CUDA_CHECK_RETURN(hipFree(parent_candidates_states));
CUDA_CHECK_RETURN(hipFree(population_states));
CUDA_CHECK_RETURN(hipFree(parent_candidates));
CUDA_CHECK_RETURN(hipFree(parents));
CUDA_CHECK_RETURN(hipFree(jobs));
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux(const char *file, unsigned line,
const char *statement, hipError_t err) {
if (err == hipSuccess)
return;
std::cerr << statement << " returned " << hipGetErrorString(err) << "("
<< err << ") at " << file << ":" << line << std::endl;
exit(1);
}
| 62431a779b21bec8d78e0896371abc5fabcc947e.cu | /*
============================================================================
Name : main.cu
Author : imtsuki
Version : 0.1.0
Copyright : imtsuki <[email protected]>
Description : Flexible Job Shop Scheduling Problem
============================================================================
*/
#include <iostream>
#include <fstream>
#include <numeric>
#include <cstdlib>
#include <climits>
#include <stdexcept>
#include <ctime>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/extrema.h>
#include <curand.h>
#include <curand_kernel.h>
static void CheckCudaErrorAux(const char *, unsigned, const char *,
cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
const int MAX_OPERATIONS_PER_STEP = 5;
const int MAX_STEPS_PER_JOB = 20;
const int MAX_JOBS = 20;
const int MAX_MACHINES = 20;
int POPULATION_SIZE = 2000;
int INDIVIDUAL_LEN = 20; // TODO
const int SIZE_PARENT_POOL = 7;
int TOTALTHREADS = 2048;
int BLOCKSIZE = 1024;
int total_jobs, total_machines, max_operations;
struct Operation {
int id_machine;
int processing_time;
};
struct Step {
int len;
Operation candidates[MAX_OPERATIONS_PER_STEP];
};
struct Job {
int len;
Step steps[MAX_STEPS_PER_JOB];
};
Job input_data[MAX_JOBS];
struct Gene {
int id_job;
int id_step;
// Make sure update them both.
int id_machine;
int id_operation;
};
std::ostream &operator<<(std::ostream &os, const Gene &gene) {
os << "[" << gene.id_job << ", " << gene.id_step << ", "
<< gene.id_operation << "]";
return os;
}
void parse_input(const char *path) {
auto input = std::ifstream();
input.exceptions(std::ifstream::failbit);
input.open(path);
input >> total_jobs >> total_machines >> max_operations;
if (total_jobs > MAX_JOBS) {
throw std::runtime_error("Too many jobs");
}
if (total_machines > MAX_MACHINES) {
throw std::runtime_error("Too many machines");
}
INDIVIDUAL_LEN = 0;
for (int id_job = 0; id_job < total_jobs; id_job++) {
int number_steps;
input >> number_steps;
if (number_steps > MAX_STEPS_PER_JOB) {
throw std::runtime_error("Too many steps");
}
input_data[id_job].len = number_steps;
for (int id_step = 0; id_step < number_steps; id_step++) {
int number_operations;
input >> number_operations;
if (number_operations > MAX_OPERATIONS_PER_STEP) {
throw std::runtime_error("Too many operations");
}
input_data[id_job].steps[id_step].len = number_operations;
for (int id_operation = 0; id_operation < number_operations;
id_operation++) {
int id_machine;
int processing_time;
input >> id_machine >> processing_time;
input_data[id_job].steps[id_step].candidates[id_operation].id_machine =
id_machine - 1;
input_data[id_job].steps[id_step].candidates[id_operation].processing_time =
processing_time;
}
INDIVIDUAL_LEN++;
}
}
}
__global__ void init_rand_kernel(curandState_t *states, int seed) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, idx, 0, &states[idx]);
}
__global__ void fill_rand_kernel(int *numbers, int len, int max_value,
curandState_t *states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
numbers[idx] = curand(&states[idx]) % max_value;
}
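// build one random, precedence-feasible chromosome per individual: repeatedly draw a
// random job and append its next unscheduled step until the sequence is full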
__global__ void init_population_kernel(Gene *population, int population_size,
int individual_len, Job *jobs, int total_jobs,
curandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int next_step[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
int cursor = 0;
Gene *me = population + i * individual_len;
memset(next_step, 0, sizeof(next_step));
while (cursor < individual_len) {
int id_job = curand(&rand_states[i]) % total_jobs;
if (next_step[id_job] < jobs[id_job].len) {
me[cursor].id_job = id_job;
me[cursor].id_step = next_step[id_job];
next_step[id_job]++;
cursor++;
}
}
}
}
}
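// tournament selection: for every slot, keep the index of the best-scoring (lowest
// makespan) candidate among SIZE_PARENT_POOL random picks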
__global__ void pick_parents_kernel(int *parents, int *parent_candidates,
int *scores, int population_size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
int best_score = INT_MAX;
int best_index = -1;
for (int j = 0; j < SIZE_PARENT_POOL; j++) {
int k = parent_candidates[i * SIZE_PARENT_POOL + j];
if (scores[k] < best_score) {
best_score = scores[k];
best_index = k;
}
}
parents[i] = best_index;
}
}
}
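// assignment crossover: the child keeps parent_a's operation sequence but takes the
// machine/operation choices from parent_b, matched per (job, step) via a reverse index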
__device__ void assignment_crossover(Gene *child, Gene *parent_a,
Gene *parent_b, int individual_len, Job *jobs) {
int reverse_index[MAX_JOBS][MAX_STEPS_PER_JOB];
for (int s = 0; s < individual_len; s++) {
int id_job = parent_b[s].id_job;
int id_step = parent_b[s].id_step;
reverse_index[id_job][id_step] = s;
}
for (int s = 0; s < individual_len; s++) {
int id_job = parent_a[s].id_job;
int id_step = parent_a[s].id_step;
int i = reverse_index[id_job][id_step];
child[s] = parent_a[s];
child[s].id_operation = parent_b[i].id_operation;
child[s].id_machine = parent_b[i].id_machine;
}
}
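// sequencing crossover: copy parent_b up to a random crossover point, then complete the
// sequence in parent_a's order, skipping steps that are already scheduled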
__device__ void sequencing_crossover(Gene *child, Gene *parent_a,
Gene *parent_b, int individual_len, Job *jobs,
curandState_t *rand_state) {
int crossover_point = curand(rand_state) % individual_len;
int last_step[MAX_JOBS];
for (int i = 0; i < MAX_JOBS; i++) {
last_step[i] = -1;
}
for (int s = 0; s < crossover_point; s++) {
int id_job = parent_b[s].id_job;
int id_step = parent_b[s].id_step;
child[s] = parent_b[s];
last_step[id_job] = id_step;
}
int cursor = crossover_point;
for (int s = 0; s < individual_len; s++) {
int id_job = parent_a[s].id_job;
if (last_step[id_job] < parent_a[s].id_step) {
child[cursor] = parent_a[s];
cursor++;
}
}
}
__device__ void assignment_mutation(Gene *individual, int individual_len,
Job *jobs, curandState_t *rand_state) {
int count = 5;
while (count--) {
int mutation_point = curand(rand_state) % individual_len;
int id_job = individual[mutation_point].id_job;
int id_step = individual[mutation_point].id_step;
int len = jobs[id_job].steps[id_step].len;
int id_operation = curand(rand_state) % len;
individual[mutation_point].id_operation = id_operation;
individual[mutation_point].id_machine =
jobs[id_job].steps[id_step].candidates[id_operation].id_machine;
}
}
__device__ void swapping_mutation(Gene *individual, int individual_len,
Job *jobs, curandState_t *rand_state) {
int count = 5;
while (count--) {
int mutation_point = curand(rand_state) % (individual_len - 1);
if (individual[mutation_point].id_job
!= individual[mutation_point + 1].id_job) {
thrust::swap(individual[mutation_point],
individual[mutation_point + 1]);
}
}
}
__global__ void stage_1_breed_kernel(int *parents, Gene *population,
Gene *new_population, int population_size, int individual_len,
Job *jobs, curandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
if (i < population_size * 8 / 10) {
sequencing_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
for (int s = 0; s < individual_len; s++) {
new_population[i * individual_len + s] =
population[parents[i] * individual_len + s];
}
swapping_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
}
}
}
}
__global__ void stage_2_breed_kernel(int *parents, Gene *population,
Gene *new_population, int population_size, int individual_len,
Job *jobs, curandState_t *rand_states) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
if (i < population_size * 4 / 10) {
assignment_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs);
} else if (i < population_size * 8 / 10) {
sequencing_crossover(&new_population[i * individual_len],
&population[parents[i] * individual_len],
&population[parents[i + 1] * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
for (int s = 0; s < individual_len; s++) {
new_population[i * individual_len + s] =
population[parents[i] * individual_len + s];
}
if (i < population_size * 9 / 10) {
assignment_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
} else {
swapping_mutation(&new_population[i * individual_len],
individual_len, jobs, &rand_states[i]);
}
}
}
}
}
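// stage-1 decoder: for each gene, greedily pick the candidate machine with the earliest
// finish time, write the choice back into the chromosome, and score by the makespan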
__global__ void stage_1_evaluate_kernel(int *scores, Gene *population,
int population_size, int individual_len, Job *jobs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int value;
int machines[MAX_MACHINES];
int last_step_id_machine[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
value = 0;
memset(machines, 0, sizeof(machines));
Gene *me = population + i * individual_len;
for (int s = 0; s < individual_len; s++) {
int id_job = me[s].id_job;
int id_step = me[s].id_step;
int len = jobs[id_job].steps[id_step].len;
int best_end_time = INT_MAX;
int best_id_operation = -1;
int best_id_machine = -1;
// Greedy search to find best operation in this step
for (int id_operation = 0; id_operation < len; id_operation++) {
int processing_time =
jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
int id_machine =
jobs[id_job].steps[id_step].candidates[id_operation].id_machine;
int machine_end_time = machines[id_machine];
if (id_step > 0) {
int previous_id_machine = last_step_id_machine[id_job];
if (machine_end_time < machines[previous_id_machine]) {
machine_end_time = machines[previous_id_machine];
}
}
machine_end_time += processing_time;
if (machine_end_time < best_end_time) {
best_end_time = machine_end_time;
best_id_operation = id_operation;
best_id_machine = id_machine;
}
}
me[s].id_operation = best_id_operation;
me[s].id_machine = best_id_machine;
machines[best_id_machine] = best_end_time;
last_step_id_machine[id_job] = best_id_machine;
if (best_end_time > value) {
value = best_end_time;
}
}
scores[i] = value;
}
}
}
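// stage-2 decoder: compute the makespan using the machine assignments already stored in
// the chromosome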
__global__ void stage_2_evaluate_kernel(int *scores, Gene *population,
int population_size, int individual_len, Job *jobs) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int value;
int machines[MAX_MACHINES];
int last_step_id_machine[MAX_JOBS];
if (index < population_size) {
for (int i = index; i < population_size; i += stride) {
value = 0;
memset(machines, 0, sizeof(machines));
Gene *me = population + i * individual_len;
for (int s = 0; s < individual_len; s++) {
int id_job = me[s].id_job;
int id_step = me[s].id_step;
int id_machine = me[s].id_machine;
int id_operation = me[s].id_operation;
int processing_time =
jobs[id_job].steps[id_step].candidates[id_operation].processing_time;
int previous_id_machine = last_step_id_machine[id_job];
machines[id_machine] =
(id_step > 0
&& machines[id_machine]
< machines[previous_id_machine]) ?
machines[previous_id_machine] :
machines[id_machine];
machines[id_machine] += processing_time;
value = machines[id_machine] > value ?
machines[id_machine] : value;
last_step_id_machine[id_job] = id_machine;
}
scores[i] = value;
}
}
}
int main(int argc, const char *argv[]) {
cudaDeviceProp prop;
CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, 0));
std::cout << "GPU device: " << prop.name << std::endl;
std::cout << "Number of SM: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared memory per block: " << prop.sharedMemPerBlock / 1024.0
<< " KB" << std::endl;
std::cout << "Max Threads per block: " << prop.maxThreadsPerBlock
<< std::endl;
std::cout << "Max Threads per SM: " << prop.maxThreadsPerMultiProcessor
<< std::endl;
const char *path = "./data/mk01.fjs";
if (argc >= 2) {
path = argv[1];
}
parse_input(path);
std::cout << "total_jobs: " << total_jobs << "\n";
std::cout << "total_machines: " << total_machines << "\n";
std::cout << "INDIVIDUAL_LEN: " << INDIVIDUAL_LEN << "\n";
std::cout << "input data:\n";
for (int id_job = 0; id_job < total_jobs; id_job++) {
std::cout << "[Job " << id_job << "] ";
for (int id_step = 0; id_step < input_data[id_job].len; id_step++) {
std::cout << id_step << ": ";
for (int id_operation = 0;
id_operation < input_data[id_job].steps[id_step].len;
id_operation++) {
std::cout << "("
<< input_data[id_job].steps[id_step].candidates[id_operation].id_machine
<< ", "
<< input_data[id_job].steps[id_step].candidates[id_operation].processing_time
<< ") ";
}
}
std::cout << "\n";
}
Job *jobs;
CUDA_CHECK_RETURN(cudaMalloc((void ** )&jobs, MAX_JOBS * sizeof(Job)));
CUDA_CHECK_RETURN(
cudaMemcpy(jobs, input_data, MAX_JOBS * sizeof(Job),
cudaMemcpyHostToDevice));
thrust::device_vector<Gene> population(POPULATION_SIZE * INDIVIDUAL_LEN);
thrust::device_vector<int> scores(POPULATION_SIZE);
thrust::device_vector<Gene> new_population(
POPULATION_SIZE * INDIVIDUAL_LEN);
Gene *pop_ptr = thrust::raw_pointer_cast(&population[0]);
Gene *new_pop_ptr = thrust::raw_pointer_cast(&new_population[0]);
int *scores_ptr = thrust::raw_pointer_cast(&scores[0]);
curandState_t *parent_candidates_states;
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&parent_candidates_states,
POPULATION_SIZE * SIZE_PARENT_POOL
* sizeof(curandState_t)));
curandState_t *population_states;
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&population_states,
POPULATION_SIZE * sizeof(curandState_t)));
// Parent candidate indexes
int *parent_candidates;
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&parent_candidates,
POPULATION_SIZE * SIZE_PARENT_POOL * sizeof(int)));
// Picked parent indexes
int *parents;
CUDA_CHECK_RETURN(
cudaMalloc((void ** )&parents, POPULATION_SIZE * sizeof(int)));
init_rand_kernel<<<POPULATION_SIZE, 1>>>(population_states, time(0));
CUDA_CHECK_RETURN(cudaPeekAtLastError());
init_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
parent_candidates_states, time(0));
CUDA_CHECK_RETURN(cudaPeekAtLastError());
init_population_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(pop_ptr,
POPULATION_SIZE, INDIVIDUAL_LEN, jobs, total_jobs,
population_states);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
stage_1_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr, pop_ptr,
POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
int stage_1 = 3000;
while (stage_1--) {
fill_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
POPULATION_SIZE, parent_candidates_states);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
pick_parents_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents,
parent_candidates, scores_ptr, POPULATION_SIZE);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
stage_1_breed_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents, pop_ptr,
new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
population_states);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
thrust::copy(thrust::device, new_population.begin(),
new_population.end(), population.begin());
stage_1_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr,
pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
if (stage_1 % 100 == 0) {
int min_score = *thrust::min_element(scores.begin(), scores.end());
std::cout << "stage_1: " << stage_1 << " score: " << min_score
<< std::endl;
}
}
int stage_2 = 2000;
while (stage_2--) {
fill_rand_kernel<<<POPULATION_SIZE * SIZE_PARENT_POOL, 1>>>(
parent_candidates, POPULATION_SIZE * SIZE_PARENT_POOL,
POPULATION_SIZE, parent_candidates_states);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
pick_parents_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents,
parent_candidates, scores_ptr, POPULATION_SIZE);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
stage_2_breed_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(parents, pop_ptr,
new_pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs,
population_states);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
thrust::copy(thrust::device, new_population.begin(),
new_population.end(), population.begin());
stage_2_evaluate_kernel<<<TOTALTHREADS, BLOCKSIZE>>>(scores_ptr,
pop_ptr, POPULATION_SIZE, INDIVIDUAL_LEN, jobs);
CUDA_CHECK_RETURN(cudaPeekAtLastError());
if (stage_2 % 100 == 0) {
int min_score = *thrust::min_element(scores.begin(), scores.end());
std::cout << "stage_2: " << stage_2 << " score: " << min_score
<< std::endl;
}
}
auto min_iter = thrust::min_element(scores.begin(), scores.end());
int index = min_iter - scores.begin();
std::cout << "Done" << std::endl;
std::cout << "Best solution score: " << scores[index] << std::endl;
for (int i = 0; i < INDIVIDUAL_LEN; i++) {
std::cout << population[index * INDIVIDUAL_LEN + i] << " ";
}
std::cout << std::endl;
CUDA_CHECK_RETURN(cudaFree(parent_candidates_states));
CUDA_CHECK_RETURN(cudaFree(population_states));
CUDA_CHECK_RETURN(cudaFree(parent_candidates));
CUDA_CHECK_RETURN(cudaFree(parents));
CUDA_CHECK_RETURN(cudaFree(jobs));
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux(const char *file, unsigned line,
const char *statement, cudaError_t err) {
if (err == cudaSuccess)
return;
std::cerr << statement << " returned " << cudaGetErrorString(err) << "("
<< err << ") at " << file << ":" << line << std::endl;
exit(1);
}
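/*
 * Note (assumption, definition not visible in this excerpt): CUDA_CHECK_RETURN, used
 * throughout main() above, is conventionally a thin wrapper over CheckCudaErrorAux that
 * records the call site and the stringified statement, e.g.
 *
 *   #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__, __LINE__, #value, (value))
 *
 * The real definition lives earlier in this file and may differ in detail.
 */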
|
443264531c9d2097b1ad7d8c26c9184bc50b2756.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel() {
printf(" GPU block : %d / %d GPU thread : %d / %d\n",
blockIdx.x, gridDim.x, threadIdx.x, blockDim.x);
}
int main(int argc, char **argv) {
char hostname[256];
int mpisize, mpirank, gpusize, gpurank, len;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
MPI_Get_processor_name(hostname, &len);
hipGetDeviceCount(&gpusize);
hipSetDevice(mpirank % gpusize);
hipGetDevice(&gpurank);
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("Hostname : %s\n", hostname);
printf("MPI rank : %d / %d GPU device : %d / %d\n",
mpirank, mpisize, gpurank, gpusize);
hipLaunchKernelGGL(( GPU_Kernel), dim3(4),dim3(4), 0, 0, );
hipDeviceSynchronize();
}
}
MPI_Finalize();
}
| 443264531c9d2097b1ad7d8c26c9184bc50b2756.cu | #include <mpi.h>
#include <cstdio>
__global__ void GPU_Kernel() {
printf(" GPU block : %d / %d GPU thread : %d / %d\n",
blockIdx.x, gridDim.x, threadIdx.x, blockDim.x);
}
int main(int argc, char **argv) {
char hostname[256];
int mpisize, mpirank, gpusize, gpurank, len;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &mpisize);
MPI_Comm_rank(MPI_COMM_WORLD, &mpirank);
MPI_Get_processor_name(hostname, &len);
cudaGetDeviceCount(&gpusize);
cudaSetDevice(mpirank % gpusize);
cudaGetDevice(&gpurank);
for (int irank=0; irank<mpisize; irank++) {
MPI_Barrier(MPI_COMM_WORLD);
if (mpirank == irank) {
printf("Hostname : %s\n", hostname);
printf("MPI rank : %d / %d GPU device : %d / %d\n",
mpirank, mpisize, gpurank, gpusize);
GPU_Kernel<<<4,4>>>();
cudaDeviceSynchronize();
}
}
MPI_Finalize();
}
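/*
 * Usage sketch (toolchain assumptions, not taken from this file): the program is
 * typically compiled with nvcc driving the MPI C++ wrapper as the host compiler and
 * launched with one rank per process, each rank binding to device (rank % gpusize)
 * as done in main() above. Exact flags depend on the local MPI installation, e.g.
 *
 *   nvcc -ccbin mpicxx this_file.cu -o mpi_gpu_hello
 *   mpirun -np 4 ./mpi_gpu_hello
 */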
|
c96bae11ebb662cf809272a84ce4ea33c145afee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <math.h>
#define kx 3
#define ky 3
#define nx 14
#define ny 14
#define ni 512
#define nn 512
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void random_ints(int* a, int N)
{
int i;
for (i = 0; i < N; i++)
{
a[i] = rand();
}
}
void zeros(int* a, int N)
{
int i;
for (i = 0; i < N; i++)
{
a[i] = 0;
}
}
// CURRENT MEMORY PERFORMANCE = 14.63 GB/s
// perform a single application (matrix-vector multiply) of 1 weights matrix to a full single input feature map
// this means that the batch size is 1(?)
// the dimensions of the weights matrix are (kx, ky)
// the dimensions of all input and output feature maps are (nx, ny)
// the number of input feature maps is ni
// the number of output feature maps is nn
// the input and output feature maps are thus represented as 3D arrays (logically)
// the corresponding weights matrices are thus represented as a 4D array (logically)
// this is what is done in a 3D convolution layer
// this method utilizes a scratchpad memory for better thread block performance
__global__
void matrix_vector_mult(int *inp, int *outp, int *kern)
{
// scratchpad memory used for shared variables
// NOTE: can now hold both entire feature map and entire weights matrix in shared memory
__shared__ int temp_inp[nx * ny]; // input matrix
__shared__ int temp_kern[kx * ky]; // kernel matrix
// only 1 thread in the block needs to populate the shared weights (temp_kern); each thread loads its own temp_inp element below
if (threadIdx.x == 0) {
int hold = kx * ky;
int k_start = blockIdx.x * kx * ky;
for (int j = 0; j < hold; j++) { // populate temp_kern
int t = k_start + j;
temp_kern[j] = kern[t];
}
}
int i_index = ((blockIdx.x / nn) * nx * ny) + threadIdx.x; // 1 input feature map per nn output feature maps
int n_index = ((blockIdx.x % nn) * nx * ny) + threadIdx.x; // rotate through output feature maps constantly
temp_inp[threadIdx.x] = inp[i_index]; // piecemeal load in the input feature map
__syncthreads(); // sync all threads to this point - input feature map loaded
int out = 0;
int l_start = threadIdx.x - ky/2 - (ny * (kx/2));
for (int i=0; i<kx; i++) {
for (int j=0; j<ky; j++) {
int curr = l_start + (ny*i) + j;
int k_index = (i*ky) + j;
if ((curr >= 0) && (curr <= (nx*ny-1))) { // check against barriers of input feature map
out += temp_inp[curr] * temp_kern[k_index];
}
}
}
atomicAdd(&outp[n_index], out); // accumulate atomically: the ni blocks sharing this output map would otherwise race on +=
}
int main(void)
{
// declare host + device pointers
int *inp, *outp, *kern;
int *d_inp, *d_outp, *d_kern;
// compute array sizes
int i_size = ni*nx*ny;
int o_size = nn*nx*ny;
int k_size = nn*ni*kx*ky;
// allocate space for each array on the device
gpuErrchk( hipMalloc(&d_inp, i_size*sizeof(int)) );
gpuErrchk( hipMalloc(&d_outp, o_size*sizeof(int)) );
gpuErrchk( hipMalloc(&d_kern, k_size*sizeof(int)) );
// allocate space and populate each array on the host
inp = (int*)malloc(i_size*sizeof(int));
outp = (int*)malloc(o_size*sizeof(int));
kern = (int*)malloc(k_size*sizeof(int));
random_ints(inp, i_size);
zeros(outp, o_size);
random_ints(kern, k_size);
// copy populated host arrays to corresponding device arrays
gpuErrchk( hipMemcpy(d_inp, inp, i_size*sizeof(int), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_outp, outp, o_size*sizeof(int), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_kern, kern, k_size*sizeof(int), hipMemcpyHostToDevice) );
// launch all threads on device
// # blocks = # of distinct weights matrices
// # threads / block = # of elements in a single input/output feature map
hipLaunchKernelGGL(( matrix_vector_mult), dim3(ni*nn), dim3(nx*ny), 0, 0, d_inp, d_outp, d_kern);
// determine if run succeeded
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// copy output array back to host
gpuErrchk( hipMemcpy(outp, d_outp, o_size*sizeof(int), hipMemcpyDeviceToHost) );
// free all memory
free(inp); free(outp); free(kern);
gpuErrchk( hipFree(d_inp) ); gpuErrchk( hipFree(d_outp) ); gpuErrchk( hipFree(d_kern) );
return 0;
} | c96bae11ebb662cf809272a84ce4ea33c145afee.cu | #include <iostream>
#include <stdio.h>
#include <math.h>
#define kx 3
#define ky 3
#define nx 14
#define ny 14
#define ni 512
#define nn 512
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void random_ints(int* a, int N)
{
int i;
for (i = 0; i < N; i++)
{
a[i] = rand();
}
}
void zeros(int* a, int N)
{
int i;
for (i = 0; i < N; i++)
{
a[i] = 0;
}
}
// CURRENT MEMORY PERFORMANCE = 14.63 GB/s
// perform a single application (matrix-vector multiply) of 1 weights matrix to a full single input feature map
// this means that the batch size is 1(?)
// the dimensions of the weights matrix are (kx, ky)
// the dimensions of all input and output feature maps are (nx, ny)
// the number of input feature maps is ni
// the number of output feature maps is nn
// the input and output feature maps are thus represented as 3D arrays (logically)
// the corresponding weights matrices are thus represented as a 4D array (logically)
// this is what is done in a 3D convolution layer
// this method utilizes a scratchpad memory for better thread block performance
__global__
void matrix_vector_mult(int *inp, int *outp, int *kern)
{
// scratchpad memory used for shared variables
// NOTE: can now hold both entire feature map and entire weights matrix in shared memory
__shared__ int temp_inp[nx * ny]; // input matrix
__shared__ int temp_kern[kx * ky]; // kernel matrix
// only 1 thread in the block needs to populate the shared weights (temp_kern); each thread loads its own temp_inp element below
if (threadIdx.x == 0) {
int hold = kx * ky;
int k_start = blockIdx.x * kx * ky;
for (int j = 0; j < hold; j++) { // populate temp_kern
int t = k_start + j;
temp_kern[j] = kern[t];
}
}
int i_index = ((blockIdx.x / nn) * nx * ny) + threadIdx.x; // 1 input feature map per nn output feature maps
int n_index = ((blockIdx.x % nn) * nx * ny) + threadIdx.x; // rotate through output feature maps constantly
temp_inp[threadIdx.x] = inp[i_index]; // piecemeal load in the input feature map
__syncthreads(); // sync all threads to this point - input feature map loaded
int out = 0;
int l_start = threadIdx.x - ky/2 - (ny * (kx/2));
for (int i=0; i<kx; i++) {
for (int j=0; j<ky; j++) {
int curr = l_start + (ny*i) + j;
int k_index = (i*ky) + j;
if ((curr >= 0) && (curr <= (nx*ny-1))) { // check against barriers of input feature map
out += temp_inp[curr] * temp_kern[k_index];
}
}
}
atomicAdd(&outp[n_index], out); // accumulate atomically: the ni blocks sharing this output map would otherwise race on +=
}
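// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original project): a sequential CPU
// reference of the same accumulation, useful for validating outp after the
// kernel runs. It deliberately mirrors the kernel's linear bounds check, so
// taps near a row edge wrap into the neighbouring row exactly as on the GPU.
// ---------------------------------------------------------------------------
void matrix_vector_mult_reference(const int *inp, int *outp, const int *kern)
{
    for (int b = 0; b < ni * nn; ++b) {                 // one pass per GPU block
        const int *k = kern + b * kx * ky;              // weights matrix used by that block
        const int *in = inp + (b / nn) * nx * ny;       // its input feature map
        int *out_map = outp + (b % nn) * nx * ny;       // its output feature map
        for (int t = 0; t < nx * ny; ++t) {             // one pass per GPU thread
            int acc = 0;
            int l_start = t - ky / 2 - (ny * (kx / 2));
            for (int i = 0; i < kx; ++i) {
                for (int j = 0; j < ky; ++j) {
                    int curr = l_start + (ny * i) + j;
                    if (curr >= 0 && curr <= (nx * ny - 1)) {   // same check as the kernel
                        acc += in[curr] * k[(i * ky) + j];
                    }
                }
            }
            out_map[t] += acc;
        }
    }
}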
int main(void)
{
// declare host + device pointers
int *inp, *outp, *kern;
int *d_inp, *d_outp, *d_kern;
// compute array sizes
int i_size = ni*nx*ny;
int o_size = nn*nx*ny;
int k_size = nn*ni*kx*ky;
// allocate space for each array on the device
gpuErrchk( cudaMalloc(&d_inp, i_size*sizeof(int)) );
gpuErrchk( cudaMalloc(&d_outp, o_size*sizeof(int)) );
gpuErrchk( cudaMalloc(&d_kern, k_size*sizeof(int)) );
// allocate space and populate each array on the host
inp = (int*)malloc(i_size*sizeof(int));
outp = (int*)malloc(o_size*sizeof(int));
kern = (int*)malloc(k_size*sizeof(int));
random_ints(inp, i_size);
zeros(outp, o_size);
random_ints(kern, k_size);
// copy populated host arrays to corresponding device arrays
gpuErrchk( cudaMemcpy(d_inp, inp, i_size*sizeof(int), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_outp, outp, o_size*sizeof(int), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_kern, kern, k_size*sizeof(int), cudaMemcpyHostToDevice) );
// launch all threads on device
// # blocks = # of distinct weights matrices
// # threads / block = # of elements in a single input/output feature map
matrix_vector_mult<<<ni*nn, nx*ny>>>(d_inp, d_outp, d_kern);
// determine if run succeeded
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// copy output array back to host
gpuErrchk( cudaMemcpy(outp, d_outp, o_size*sizeof(int), cudaMemcpyDeviceToHost) );
// free all memory
free(inp); free(outp); free(kern);
gpuErrchk( cudaFree(d_inp) ); gpuErrchk( cudaFree(d_outp) ); gpuErrchk( cudaFree(d_kern) );
return 0;
} |
b217b73c7ea5f15b0a69c60831fdec951e18fdeb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray as input and returns a
* gpuArray output for 2D Radon solution, e.g. B=mexFunction(A).
*
* by Syed Alam Abbas, 5/25/2015
*/
#include <arrayfire.h>
#include <af/util.h>
#include "hip/hip_runtime.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "math.h"
using namespace af;
//static const cdouble i_cdouble = { 0, 1 };
//static const array i = constant(i_cdouble, 1, 1, c64);/* imaginary unit */
static int isFirstRun_Uniform = 1; // Flag to check if this is the first run of Compute2DColumnwise_FrFTUniform
static int isFirstRun_Variable = 1; // Flag to check if this is the first run of Compute2DComplementaryLines_FrFTVariableScales
static array PreMultiplicationFactor, PostMultiplicationFactor, Seq_En, BetaFactor;
enum SUPPORTED_PLATFORMS
{
CUDA, OPENCL, CPU
};
/* Taking exp of complex numbers*/
array cexp(const array &in)
{
if (!in.iscomplex()) return exp(in);
return exp(real(in))*complex(cos(imag(in)), sin(imag(in)));
}
/* Multiplication of 2 complex numbers requires 4 real multiplications */
void SplitMultiplyComplex(array& A_Complex, array& B_Complex, array& realRealPart, array& realImagPart, array& imagRealPart, array& imagImagPart)
{
// Consider multiplication of complex numbers A_Complex = (a+ib); B_Complex = (c+id)
//array A_Complex, B_Complex;
//array realRealPart; // ac
//array realImagPart; // ad
//array imagRealPart; // bc
//array imagImagPart; // bd
realRealPart = real(A_Complex)*real(B_Complex);
realImagPart = real(A_Complex)*imag(B_Complex);
imagRealPart = imag(A_Complex)*real(B_Complex);
imagImagPart = imag(A_Complex)*imag(B_Complex);
}
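/*
 * For reference, the four partial products above recombine as
 *   (a+ib)(c+id) = (ac - bd) + i(ad + bc)
 *   (a+ib)(c-id) = (ac + bd) + i(bc - ad)
 * which is exactly how tempSeq_X and tempSeqConj_X are assembled later in
 * Compute2DComplementaryLines_FrFTVariableScales.
 */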
/* Uniform FrFT for each column in Image*/
array Compute2DColumnwise_FrFTUniform(array & Image2D, array& ColumnScales_1D, int& d_NoOfElements_, int& d_NoOfScales)
{
/*-----------------------------------Preparing Padded & Tiled Imag2D --------------------------------------------*/
array Zeros = constant(0, d_NoOfElements_, d_NoOfElements_, f64); // Generates on the device
array Zero_Padded_Image2D = join(0, Image2D, Zeros);
array Image2D_Tiled = tile(Zero_Padded_Image2D, 1, 1, d_NoOfScales);
int N = d_NoOfElements_ - 1;
if (isFirstRun_Uniform == 1)
{
/*-------------------------------------------Creating Index Cubes and Sequences----------------------------------------------------*/
array leftSideIndexes = array(seq(0, N, 1)).as(f64);
array rightSideIndexesOnes = -1 * array(seq(1, d_NoOfElements_, 1)).as(f64);
array rightSideIndexesZeros = constant(0, d_NoOfElements_, 1, f64);
array rightSideIndexesN_2 = constant(N / 2, d_NoOfElements_, 1, f64);
array indexedElementsEn = join(0, leftSideIndexes, flip(rightSideIndexesOnes, 0));
array indexedElementsPre = join(0, leftSideIndexes, rightSideIndexesN_2); /* This is for Keeping pre and post multiplication factor upper half only*/
array indexedElementsPost = join(0, leftSideIndexes, rightSideIndexesZeros);
array indexedElements_Tiled_En = tile(pow(indexedElementsEn, 2), 1, d_NoOfElements_, d_NoOfScales);
array indexedElements_Tiled_PreMulti = tile(indexedElementsPre - N / 2, 1, d_NoOfElements_, d_NoOfScales);
array indexedElements_Tiled_PostMulti = tile(indexedElementsPost, 1, d_NoOfElements_, d_NoOfScales);
/*--------------------------Creating FrFT scale cubes------------------------------------*/
array ColumnScales_1D_Mods = moddims(ColumnScales_1D, 1, 1, d_NoOfScales);
array ColumnScales_1D_Tiled_depth = tile(ColumnScales_1D_Mods, 2 * d_NoOfElements_, d_NoOfElements_, 1);
array ColumnScales_1D_FullTiled = moddims(ColumnScales_1D_Tiled_depth, 2 * d_NoOfElements_, d_NoOfElements_, d_NoOfScales);
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
//array imaginaryUnit_Tiled = tile(i, 2 * d_NoOfElements_, d_NoOfElements_, d_NoOfScales);
Seq_En = cexp( complex(0, - af::Pi * indexedElements_Tiled_En * ColumnScales_1D_FullTiled / d_NoOfElements_)); /* E(n) as defined in the paper*/
array Ones = constant(1, d_NoOfElements_, d_NoOfElements_, f64);
array subtractValues = tile(join(0, Zeros, Ones), 1, 1, d_NoOfScales); /* This is for Keeping pre and post multiplication factor upper half only*/
PreMultiplicationFactor = cexp( complex (0, af::Pi * indexedElements_Tiled_PreMulti * ColumnScales_1D_FullTiled * N / d_NoOfElements_)) - subtractValues;
PostMultiplicationFactor = cexp( complex ( 0, af::Pi * indexedElements_Tiled_PostMulti * ColumnScales_1D_FullTiled * N / d_NoOfElements_)) - subtractValues;
isFirstRun_Uniform = 0;
//af::deviceGC();
}
/*--------------------Preprocessing Cubes-----------------------*/
array Image2D_Tiled_PreMulti = Image2D_Tiled * PreMultiplicationFactor;
array Image2D_Tiled_PreMulti_SeqEn = Image2D_Tiled_PreMulti * Seq_En;
/*-------------------Computing Convolution--------------------*/
array firstFFT_X = fft(Image2D_Tiled_PreMulti_SeqEn);
array secondFFT_X = fft(conjg(Seq_En));
array interim_FrFT_X = ifft(firstFFT_X * secondFFT_X);
/*-------------------Postprocessing-----------------------------*/
array FrFT_Image_X = interim_FrFT_X * Seq_En * PostMultiplicationFactor;
/*--------------------Grab only the top half elements drop overlapping------------------*/
array FrFT_Image_X_Cube = FrFT_Image_X.rows(0, N);
return FrFT_Image_X_Cube;
}
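/*
 * Aside (standard Bluestein/chirp-z reasoning, added as an aid and not quoted from the
 * paper): since 2nk = n^2 + k^2 - (k-n)^2, the scaled DFT kernel exp(-i*pi*alpha*2nk/M)
 * factors into E(n) * E(k) * conj(E(k-n)) with E(m) = exp(-i*pi*alpha*m^2/M). That is why
 * the routine above pre-multiplies by Seq_En, convolves with conj(Seq_En) through the
 * three FFTs, and post-multiplies by Seq_En again (here M = d_NoOfElements_).
 */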
/* Variable FrFT for each column in Image*/
void Compute2DComplementaryLines_FrFTVariableScales(array & OneD_FrFT, array& ColumnScales_1D, array& final2DFrFTImage, array& final2DFrFTConjImage, int d_NoOfElements, int d_NoOfScales)
{
int N = d_NoOfElements - 1;
if (isFirstRun_Variable == 1)
{
array lineSpacing = array(seq(-N / 2, N / 2)).as(f64);
array lineSpacing_tiled_Y = tile(lineSpacing, 1, d_NoOfElements);
array lineSpacing_Square = lineSpacing_tiled_Y * lineSpacing_tiled_Y.T();
array lineSpacing_Square_TiledLevel = tile(lineSpacing_Square, 1, 1, d_NoOfScales);
//af_print(beta_Levels);
array beta_Mods = moddims(ColumnScales_1D, 1, 1, d_NoOfScales);
array beta_Tiled_depth = tile(beta_Mods, d_NoOfElements, d_NoOfElements, 1);
//af_print(beta_Tiled_depth);
array beta_Tiled = moddims(beta_Tiled_depth, d_NoOfElements, d_NoOfElements, d_NoOfScales);
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
BetaFactor = cexp( complex(0, -2 * af::Pi * lineSpacing_Square_TiledLevel * beta_Tiled / d_NoOfElements));
isFirstRun_Variable = 0;
af::deviceGC();
}
// Consider multiplication of complex numbers A = (a+ib); B = (c+id)
array realRealPart; // ac
array realImagPart; // ad
array imagRealPart; // bc
array imagImagPart; // bd
SplitMultiplyComplex(OneD_FrFT, BetaFactor, realRealPart, realImagPart, imagRealPart, imagImagPart);
array tempSeq_X = sum(complex(realRealPart - imagImagPart, realImagPart + imagRealPart)); // sum(flip(FrFT_Image_X_Cube, 0) *BetaFactor);
array tempSeqConj_X = sum(complex(realRealPart + imagImagPart, imagRealPart - realImagPart));// sum(flip(FrFT_Image_X_Cube, 0) * conjg(BetaFactor));
final2DFrFTImage = moddims(tempSeq_X, d_NoOfElements, d_NoOfScales).T();
final2DFrFTConjImage = moddims(tempSeqConj_X, d_NoOfElements, d_NoOfScales).T();
}
/*
* High level Host code
* Computes the FrFT centered using the definition given in the paper,
"An exact and fast computation of Discrete Fourier Transform for polar grid and spherical grid"
by Syed Alam Abbas, 5/25/2015
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
try {
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mexPrintf("Executing custom mex for computing 2D DFT on a Polar Grid using ArrayFire GPU accelerated library latest!");
// Validate the input
if (nrhs < 5 || nlhs < 2) {
mexErrMsgTxt("Expected 5 inputs and 2 outputs.");
}
/*Input Variables*/
const double* d_Image;
int d_NoOfAngles;
int d_NoOfLevels;
int d_NoOfElements;
mxGPUArray const * MxInputImage;
mxArray* MxInputImageCPU;
int PLATFORM = (size_t)mxGetScalar(prhs[4]); // Given as an input PLATFORM
switch (PLATFORM) // The settings change for input
{
case CUDA:
case OPENCL:
MxInputImage = mxGPUCreateFromMxArray(prhs[0]); // GPU
/* extract a pointer to the input data which is a real image on the device.*/
d_Image = (double const *)(mxGPUGetDataReadOnly(MxInputImage)); // GPU
break;
case CPU:
MxInputImageCPU = mxDuplicateArray(prhs[0]);
d_Image = mxGetPr(MxInputImageCPU);
break;
default:
break;
}
/* Collect the input data from MATLAB MexArray RHS */
d_NoOfAngles = (size_t)mxGetScalar(prhs[1]); /*Check it, this should always be even*/
d_NoOfLevels = (size_t)mxGetScalar(prhs[2]);
d_NoOfElements = (size_t)mxGetScalar(prhs[3]); /*Check it, this should always be odd*/
int N = d_NoOfElements - 1; /* it is always even as described in the paper*/
/*********************Creating Array Fire object************************************/
array Image(d_NoOfElements, d_NoOfElements, d_Image);
/*--------------------------Creating Alpha levels------------------------------------*/
array alpha_Levels = cos(af::Pi / ((double) d_NoOfAngles) * array(seq(1, d_NoOfLevels)).as(f64));
/*--------------------------Creating Beta levels---------------------------------------*/
array beta_Levels = sin(Pi / ((double)d_NoOfAngles) * array(seq(1, d_NoOfLevels)).as(f64));
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
array lineSpacing = array(seq(-N / 2, N / 2)).as(f64);
array lineSpacing_tiled_Y = tile(lineSpacing, 1, d_NoOfElements);
array lineSpacing_Square = lineSpacing_tiled_Y * lineSpacing_tiled_Y.T();
array ZeroNinty_Factor = cexp( complex( 0, -2 * af::Pi * lineSpacing_Square * 1 / d_NoOfElements));
/*-------------------- First dimension uniform FrFT for each Image per level-----------------------*/
array FrFT_Image_X_Cube = Compute2DColumnwise_FrFTUniform(Image.T(), alpha_Levels, d_NoOfElements, d_NoOfLevels);
switch (PLATFORM)
{
case CUDA:
af::deviceGC();
default:
break;
}
array FrFT_Image_Y_Cube = Compute2DColumnwise_FrFTUniform((Image), alpha_Levels, d_NoOfElements, d_NoOfLevels);
FrFT_Image_X_Cube = FrFT_Image_X_Cube.T(); // Now it needs operation to the other dimension
FrFT_Image_Y_Cube = FrFT_Image_Y_Cube.T();
/*--------------------Finally all computations for the Polar Grid-----------*/
// Computing for all of the grid except two special indexes
array levelSeq = array(seq(0, d_NoOfLevels - 1)).as(f64);
array finalIndexSeq1_X = 1 + levelSeq;
array finalIndexSeq2_X = d_NoOfAngles - finalIndexSeq1_X;
array finalIndexSeq3_Y = d_NoOfAngles / 2 - finalIndexSeq1_X;
array finalIndexSeq4_Y = d_NoOfAngles / 2 + finalIndexSeq1_X;
array finalSeq_X, finalSeqConj_X;
Compute2DComplementaryLines_FrFTVariableScales((FrFT_Image_X_Cube), beta_Levels, finalSeq_X, finalSeqConj_X, d_NoOfElements, d_NoOfLevels);
finalSeqConj_X = flip(finalSeqConj_X, 1); // Special operation
array finalSeq_Y, finalSeqConj_Y;
Compute2DComplementaryLines_FrFTVariableScales(FrFT_Image_Y_Cube, beta_Levels, finalSeq_Y, finalSeqConj_Y, d_NoOfElements, d_NoOfLevels);
// Removing just 2 redundant computations for 45 degree case
if (0 == remainder(d_NoOfAngles, 4))
{
finalIndexSeq3_Y = finalIndexSeq3_Y.rows(0, d_NoOfLevels - 2); // Removing just the last rows from 4 structures
finalSeq_Y = finalSeq_Y.rows(0, d_NoOfLevels - 2);
finalIndexSeq4_Y = finalIndexSeq4_Y.rows(0, d_NoOfLevels - 2);
finalSeqConj_Y = finalSeqConj_Y.rows(0, d_NoOfLevels - 2);
}
// Computing separately for two special indexes
double zeroIndex = 0;
double nintyIndex = d_NoOfAngles / 2;
double values[] = { zeroIndex, nintyIndex };
array SpecialTwoIndexes(2, 1, values);
array ZeroLineFrFT_Image_X_Cube = FrFT_Image_Y_Cube.slice(zeroIndex).col(N / 2);
array NintyLineFrFT_Image_Y_Cube = FrFT_Image_X_Cube.slice(zeroIndex).col(N / 2);
array DFTZeroLine = sum(tile(ZeroLineFrFT_Image_X_Cube, 1, d_NoOfElements) *ZeroNinty_Factor);
array DFTNinetyLine = sum(tile((NintyLineFrFT_Image_Y_Cube), 1, d_NoOfElements) *ZeroNinty_Factor);
array SpecialTwoLines = join(0, DFTZeroLine, DFTNinetyLine);
array UnsortedIndexes = join(0, join(0, join(0, join(0, finalIndexSeq1_X, finalIndexSeq2_X), finalIndexSeq3_Y), finalIndexSeq4_Y), SpecialTwoIndexes);
array tiledUnsortedIndexes = tile(UnsortedIndexes, 1, d_NoOfElements);
array UnsortedPolarGrid = join(0, join(0, join(0, join(0, finalSeq_X, finalSeqConj_X), finalSeq_Y), finalSeqConj_Y), SpecialTwoLines);
array FinalPolarGridReal;// = constant(0, d_NoOfElements, d_NoOfAngles, c64);
array Output_Keys_Sorted;
sort(Output_Keys_Sorted, FinalPolarGridReal, tiledUnsortedIndexes, real(UnsortedPolarGrid));
array FinalPolarGridImag;// = constant(0, d_NoOfElements, d_NoOfAngles, c64);
array Output_Keys_Sorted2;
sort(Output_Keys_Sorted2, FinalPolarGridImag, tiledUnsortedIndexes, imag(UnsortedPolarGrid));
mexPrintf("\nSuccessfully completed the computations of 2D DFT on a full Polar Grid %d-by-%d!", d_NoOfAngles,d_NoOfElements);
double* d_FinalPolarGridReal; // Device pointer obtained from ArrayFire computations
double* d_FinalPolarGridImag; // Device pointer obtained from ArrayFire computations
double* PolarGridReal_OUTPUT; // MATLAB output pointer to be copied to the solution
double* PolarGridImag_OUTPUT; // MATLAB output pointer to be copied to the solution
mwSize dims[] = { d_NoOfAngles, d_NoOfElements };
switch (PLATFORM) // The settings change for input
{
case CUDA:
case OPENCL:
// Final processed double pointers
d_FinalPolarGridReal = FinalPolarGridReal.device<double>();
d_FinalPolarGridImag = FinalPolarGridImag.device<double>();
/*output variables*/
mxGPUArray* mxOutputRealPolarGridImage;
mxGPUArray* mxOutputImagPolarGridImage;
/* Create a GPUArray to hold the result and get its underlying pointer. */
mxOutputRealPolarGridImage = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(MxInputImage),
dims,
mxGPUGetClassID(MxInputImage),
mxGPUGetComplexity(MxInputImage),
MX_GPU_DO_NOT_INITIALIZE);
PolarGridReal_OUTPUT = (double *)(mxGPUGetData(mxOutputRealPolarGridImage));
mxOutputImagPolarGridImage = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(MxInputImage),
dims,
mxGPUGetClassID(MxInputImage),
mxGPUGetComplexity(MxInputImage),
MX_GPU_DO_NOT_INITIALIZE);
PolarGridImag_OUTPUT = (double *)(mxGPUGetData(mxOutputImagPolarGridImage));
/* Copy processed Values from array object to MxArrayRealData */
hipMemcpy(PolarGridReal_OUTPUT, d_FinalPolarGridReal, d_NoOfAngles*d_NoOfElements* sizeof(double), hipMemcpyDeviceToDevice);
hipMemcpy(PolarGridImag_OUTPUT, d_FinalPolarGridImag, d_NoOfAngles*d_NoOfElements* sizeof(double), hipMemcpyDeviceToDevice);
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(mxOutputRealPolarGridImage);
plhs[1] = mxGPUCreateMxArrayOnGPU(mxOutputImagPolarGridImage);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(MxInputImage);
break;
case CPU:
// Final processed double pointers
d_FinalPolarGridReal = FinalPolarGridReal.host<double>(); // Source
d_FinalPolarGridImag = FinalPolarGridImag.host<double>();
mxArray* mxOutputRealPolarGridImageCPU;
mxArray* mxOutputImagPolarGridImageCPU;
mxOutputRealPolarGridImageCPU = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL);
mxOutputImagPolarGridImageCPU = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL);
PolarGridReal_OUTPUT = mxGetPr(mxOutputRealPolarGridImageCPU);
PolarGridImag_OUTPUT = mxGetPr(mxOutputImagPolarGridImageCPU);
memcpy(PolarGridReal_OUTPUT, d_FinalPolarGridReal, d_NoOfAngles*d_NoOfElements* sizeof(double));
memcpy(PolarGridImag_OUTPUT, d_FinalPolarGridImag, d_NoOfAngles*d_NoOfElements* sizeof(double));
plhs[0] = mxOutputRealPolarGridImageCPU;
plhs[1] = mxOutputImagPolarGridImageCPU;
break;
default:
break;
}
mexPrintf("\nFinished processing custom CUDA mex with ArrayFire for computing 2D DFT on Polar Grid, Status = Success\n");
}
catch (af::exception &ex) {
mexPrintf("%s\n", ex.what());
}
}
| b217b73c7ea5f15b0a69c60831fdec951e18fdeb.cu | /*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray as input and returns a
* gpuArray output for 2D Radon solution, e.g. B=mexFunction(A).
*
* by Syed Alam Abbas, 5/25/2015
*/
#include <arrayfire.h>
#include <af/util.h>
#include "cuda_runtime.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include "math.h"
using namespace af;
//static const cdouble i_cdouble = { 0, 1 };
//static const array i = constant(i_cdouble, 1, 1, c64);/* imaginary unit */
static int isFirstRun_Uniform = 1; // Flag to check if this is the first run of Compute2DColumnwise_FrFTUniform
static int isFirstRun_Variable = 1; // Flag to check if this is the first run of Compute2DComplementaryLines_FrFTVariableScales
static array PreMultiplicationFactor, PostMultiplicationFactor, Seq_En, BetaFactor;
enum SUPPORTED_PLATFORMS
{
CUDA, OPENCL, CPU
};
/* Taking exp of complex numbers*/
array cexp(const array &in)
{
if (!in.iscomplex()) return exp(in);
return exp(real(in))*complex(cos(imag(in)), sin(imag(in)));
}
/* Multiplication of 2 complex numbers requires 4 real multiplications */
void SplitMultiplyComplex(array& A_Complex, array& B_Complex, array& realRealPart, array& realImagPart, array& imagRealPart, array& imagImagPart)
{
// Consider multiplication of complex numbers A_Complex = (a+ib); B_Complex = (c+id)
//array A_Complex, B_Complex;
//array realRealPart; // ac
//array realImagPart; // ad
//array imagRealPart; // bc
//array imagImagPart; // bd
realRealPart = real(A_Complex)*real(B_Complex);
realImagPart = real(A_Complex)*imag(B_Complex);
imagRealPart = imag(A_Complex)*real(B_Complex);
imagImagPart = imag(A_Complex)*imag(B_Complex);
}
/* Uniform FrFT for each column in Image*/
array Compute2DColumnwise_FrFTUniform(array & Image2D, array& ColumnScales_1D, int& d_NoOfElements_, int& d_NoOfScales)
{
/*-----------------------------------Preparing Padded & Tiled Imag2D --------------------------------------------*/
array Zeros = constant(0, d_NoOfElements_, d_NoOfElements_, f64); // Generates on the device
array Zero_Padded_Image2D = join(0, Image2D, Zeros);
array Image2D_Tiled = tile(Zero_Padded_Image2D, 1, 1, d_NoOfScales);
int N = d_NoOfElements_ - 1;
if (isFirstRun_Uniform == 1)
{
/*-------------------------------------------Creating Index Cubes and Sequences----------------------------------------------------*/
array leftSideIndexes = array(seq(0, N, 1)).as(f64);
array rightSideIndexesOnes = -1 * array(seq(1, d_NoOfElements_, 1)).as(f64);
array rightSideIndexesZeros = constant(0, d_NoOfElements_, 1, f64);
array rightSideIndexesN_2 = constant(N / 2, d_NoOfElements_, 1, f64);
array indexedElementsEn = join(0, leftSideIndexes, flip(rightSideIndexesOnes, 0));
array indexedElementsPre = join(0, leftSideIndexes, rightSideIndexesN_2); /* This is for Keeping pre and post multiplication factor upper half only*/
array indexedElementsPost = join(0, leftSideIndexes, rightSideIndexesZeros);
array indexedElements_Tiled_En = tile(pow(indexedElementsEn, 2), 1, d_NoOfElements_, d_NoOfScales);
array indexedElements_Tiled_PreMulti = tile(indexedElementsPre - N / 2, 1, d_NoOfElements_, d_NoOfScales);
array indexedElements_Tiled_PostMulti = tile(indexedElementsPost, 1, d_NoOfElements_, d_NoOfScales);
/*--------------------------Creating FrFT scale cubes------------------------------------*/
array ColumnScales_1D_Mods = moddims(ColumnScales_1D, 1, 1, d_NoOfScales);
array ColumnScales_1D_Tiled_depth = tile(ColumnScales_1D_Mods, 2 * d_NoOfElements_, d_NoOfElements_, 1);
array ColumnScales_1D_FullTiled = moddims(ColumnScales_1D_Tiled_depth, 2 * d_NoOfElements_, d_NoOfElements_, d_NoOfScales);
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
//array imaginaryUnit_Tiled = tile(i, 2 * d_NoOfElements_, d_NoOfElements_, d_NoOfScales);
Seq_En = cexp( complex(0, - af::Pi * indexedElements_Tiled_En * ColumnScales_1D_FullTiled / d_NoOfElements_)); /* E(n) as defined in the paper*/
array Ones = constant(1, d_NoOfElements_, d_NoOfElements_, f64);
array subtractValues = tile(join(0, Zeros, Ones), 1, 1, d_NoOfScales); /* This is for Keeping pre and post multiplication factor upper half only*/
PreMultiplicationFactor = cexp( complex (0, af::Pi * indexedElements_Tiled_PreMulti * ColumnScales_1D_FullTiled * N / d_NoOfElements_)) - subtractValues;
PostMultiplicationFactor = cexp( complex ( 0, af::Pi * indexedElements_Tiled_PostMulti * ColumnScales_1D_FullTiled * N / d_NoOfElements_)) - subtractValues;
isFirstRun_Uniform = 0;
//af::deviceGC();
}
/*--------------------Preprocessing Cubes-----------------------*/
array Image2D_Tiled_PreMulti = Image2D_Tiled * PreMultiplicationFactor;
array Image2D_Tiled_PreMulti_SeqEn = Image2D_Tiled_PreMulti * Seq_En;
/*-------------------Computing Convolution--------------------*/
array firstFFT_X = fft(Image2D_Tiled_PreMulti_SeqEn);
array secondFFT_X = fft(conjg(Seq_En));
array interim_FrFT_X = ifft(firstFFT_X * secondFFT_X);
/*-------------------Postprocessing-----------------------------*/
array FrFT_Image_X = interim_FrFT_X * Seq_En * PostMultiplicationFactor;
/*--------------------Grab only the top half elements drop overlapping------------------*/
array FrFT_Image_X_Cube = FrFT_Image_X.rows(0, N);
return FrFT_Image_X_Cube;
}
/* Variable FrFT for each column in Image*/
void Compute2DComplementaryLines_FrFTVariableScales(array & OneD_FrFT, array& ColumnScales_1D, array& final2DFrFTImage, array& final2DFrFTConjImage, int d_NoOfElements, int d_NoOfScales)
{
int N = d_NoOfElements - 1;
if (isFirstRun_Variable == 1)
{
array lineSpacing = array(seq(-N / 2, N / 2)).as(f64);
array lineSpacing_tiled_Y = tile(lineSpacing, 1, d_NoOfElements);
array lineSpacing_Square = lineSpacing_tiled_Y * lineSpacing_tiled_Y.T();
array lineSpacing_Square_TiledLevel = tile(lineSpacing_Square, 1, 1, d_NoOfScales);
//af_print(beta_Levels);
array beta_Mods = moddims(ColumnScales_1D, 1, 1, d_NoOfScales);
array beta_Tiled_depth = tile(beta_Mods, d_NoOfElements, d_NoOfElements, 1);
//af_print(beta_Tiled_depth);
array beta_Tiled = moddims(beta_Tiled_depth, d_NoOfElements, d_NoOfElements, d_NoOfScales);
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
BetaFactor = cexp( complex(0, -2 * af::Pi * lineSpacing_Square_TiledLevel * beta_Tiled / d_NoOfElements));
isFirstRun_Variable = 0;
af::deviceGC();
}
// Consider multiplication of complex numbers A = (a+ib); B = (c+id)
array realRealPart; // ac
array realImagPart; // ad
array imagRealPart; // bc
array imagImagPart; // bd
SplitMultiplyComplex(OneD_FrFT, BetaFactor, realRealPart, realImagPart, imagRealPart, imagImagPart);
array tempSeq_X = sum(complex(realRealPart - imagImagPart, realImagPart + imagRealPart)); // sum(flip(FrFT_Image_X_Cube, 0) *BetaFactor);
array tempSeqConj_X = sum(complex(realRealPart + imagImagPart, imagRealPart - realImagPart));// sum(flip(FrFT_Image_X_Cube, 0) * conjg(BetaFactor));
final2DFrFTImage = moddims(tempSeq_X, d_NoOfElements, d_NoOfScales).T();
final2DFrFTConjImage = moddims(tempSeqConj_X, d_NoOfElements, d_NoOfScales).T();
}
/*
* High level Host code
* Computes the FrFT centered using the definition given in the paper,
"An exact and fast computation of Discrete Fourier Transform for polar grid and spherical grid"
by Syed Alam Abbas, 5/25/2015
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
try {
/* Initialize the MathWorks GPU API. */
mxInitGPU();
mexPrintf("Executing custom mex for computing 2D DFT on a Polar Grid using ArrayFire GPU accelerated library latest!");
// Validate the input
if (nrhs < 5 || nlhs < 2) {
mexErrMsgTxt("Expected 5 inputs and 2 outputs.");
}
/*Input Variables*/
const double* d_Image;
int d_NoOfAngles;
int d_NoOfLevels;
int d_NoOfElements;
mxGPUArray const * MxInputImage;
mxArray* MxInputImageCPU;
int PLATFORM = (size_t)mxGetScalar(prhs[4]); // Given as an input PLATFORM
switch (PLATFORM) // The settings change for input
{
case CUDA:
case OPENCL:
MxInputImage = mxGPUCreateFromMxArray(prhs[0]); // GPU
/* extract a pointer to the input data which is a real image on the device.*/
d_Image = (double const *)(mxGPUGetDataReadOnly(MxInputImage)); // GPU
break;
case CPU:
MxInputImageCPU = mxDuplicateArray(prhs[0]);
d_Image = mxGetPr(MxInputImageCPU);
break;
default:
break;
}
/* Collect the input data from MATLAB MexArray RHS */
d_NoOfAngles = (size_t)mxGetScalar(prhs[1]); /*Check it, this should always be even*/
d_NoOfLevels = (size_t)mxGetScalar(prhs[2]);
d_NoOfElements = (size_t)mxGetScalar(prhs[3]); /*Check it, this should always be odd*/
int N = d_NoOfElements - 1; /* it is always even as described in the paper*/
/*********************Creating Array Fire object************************************/
array Image(d_NoOfElements, d_NoOfElements, d_Image);
/*--------------------------Creating Alpha levels------------------------------------*/
array alpha_Levels = cos(af::Pi / ((double) d_NoOfAngles) * array(seq(1, d_NoOfLevels)).as(f64));
/*--------------------------Creating Beta levels---------------------------------------*/
array beta_Levels = sin(Pi / ((double)d_NoOfAngles) * array(seq(1, d_NoOfLevels)).as(f64));
/*-------------------Precomputing the Essential Sequence Cubes :: All complex values here --------------------*/
array lineSpacing = array(seq(-N / 2, N / 2)).as(f64);
array lineSpacing_tiled_Y = tile(lineSpacing, 1, d_NoOfElements);
array lineSpacing_Square = lineSpacing_tiled_Y * lineSpacing_tiled_Y.T();
array ZeroNinty_Factor = cexp( complex( 0, -2 * af::Pi * lineSpacing_Square * 1 / d_NoOfElements));
/*-------------------- First dimension uniform FrFT for each Image per level-----------------------*/
array FrFT_Image_X_Cube = Compute2DColumnwise_FrFTUniform(Image.T(), alpha_Levels, d_NoOfElements, d_NoOfLevels);
switch (PLATFORM)
{
case CUDA:
af::deviceGC();
default:
break;
}
array FrFT_Image_Y_Cube = Compute2DColumnwise_FrFTUniform((Image), alpha_Levels, d_NoOfElements, d_NoOfLevels);
FrFT_Image_X_Cube = FrFT_Image_X_Cube.T(); // Now it needs operation to the other dimension
FrFT_Image_Y_Cube = FrFT_Image_Y_Cube.T();
/*--------------------Finally all computations for the Polar Grid-----------*/
// Computing for all of the grid except two special indexes
array levelSeq = array(seq(0, d_NoOfLevels - 1)).as(f64);
array finalIndexSeq1_X = 1 + levelSeq;
array finalIndexSeq2_X = d_NoOfAngles - finalIndexSeq1_X;
array finalIndexSeq3_Y = d_NoOfAngles / 2 - finalIndexSeq1_X;
array finalIndexSeq4_Y = d_NoOfAngles / 2 + finalIndexSeq1_X;
array finalSeq_X, finalSeqConj_X;
Compute2DComplementaryLines_FrFTVariableScales((FrFT_Image_X_Cube), beta_Levels, finalSeq_X, finalSeqConj_X, d_NoOfElements, d_NoOfLevels);
finalSeqConj_X = flip(finalSeqConj_X, 1); // Special operation
array finalSeq_Y, finalSeqConj_Y;
Compute2DComplementaryLines_FrFTVariableScales(FrFT_Image_Y_Cube, beta_Levels, finalSeq_Y, finalSeqConj_Y, d_NoOfElements, d_NoOfLevels);
// Removing just 2 redundant computations for 45 degree case
if (0 == remainder(d_NoOfAngles, 4))
{
finalIndexSeq3_Y = finalIndexSeq3_Y.rows(0, d_NoOfLevels - 2); // Removing just the last rows from 4 structures
finalSeq_Y = finalSeq_Y.rows(0, d_NoOfLevels - 2);
finalIndexSeq4_Y = finalIndexSeq4_Y.rows(0, d_NoOfLevels - 2);
finalSeqConj_Y = finalSeqConj_Y.rows(0, d_NoOfLevels - 2);
}
// Computing separately for two special indexes
double zeroIndex = 0;
double nintyIndex = d_NoOfAngles / 2;
double values[] = { zeroIndex, nintyIndex };
array SpecialTwoIndexes(2, 1, values);
array ZeroLineFrFT_Image_X_Cube = FrFT_Image_Y_Cube.slice(zeroIndex).col(N / 2);
array NintyLineFrFT_Image_Y_Cube = FrFT_Image_X_Cube.slice(zeroIndex).col(N / 2);
array DFTZeroLine = sum(tile(ZeroLineFrFT_Image_X_Cube, 1, d_NoOfElements) *ZeroNinty_Factor);
array DFTNinetyLine = sum(tile((NintyLineFrFT_Image_Y_Cube), 1, d_NoOfElements) *ZeroNinty_Factor);
array SpecialTwoLines = join(0, DFTZeroLine, DFTNinetyLine);
array UnsortedIndexes = join(0, join(0, join(0, join(0, finalIndexSeq1_X, finalIndexSeq2_X), finalIndexSeq3_Y), finalIndexSeq4_Y), SpecialTwoIndexes);
array tiledUnsortedIndexes = tile(UnsortedIndexes, 1, d_NoOfElements);
array UnsortedPolarGrid = join(0, join(0, join(0, join(0, finalSeq_X, finalSeqConj_X), finalSeq_Y), finalSeqConj_Y), SpecialTwoLines);
array FinalPolarGridReal;// = constant(0, d_NoOfElements, d_NoOfAngles, c64);
array Output_Keys_Sorted;
sort(Output_Keys_Sorted, FinalPolarGridReal, tiledUnsortedIndexes, real(UnsortedPolarGrid));
array FinalPolarGridImag;// = constant(0, d_NoOfElements, d_NoOfAngles, c64);
array Output_Keys_Sorted2;
sort(Output_Keys_Sorted2, FinalPolarGridImag, tiledUnsortedIndexes, imag(UnsortedPolarGrid));
mexPrintf("\nSuccessfully completed the computations of 2D DFT on a full Polar Grid %d-by-%d!", d_NoOfAngles,d_NoOfElements);
double* d_FinalPolarGridReal; // Device pointer obtained from ArrayFire computations
double* d_FinalPolarGridImag; // Device pointer obtained from ArrayFire computations
double* PolarGridReal_OUTPUT; // MATLAB output pointer to be copied to the solution
double* PolarGridImag_OUTPUT; // MATLAB output pointer to be copied to the solution
mwSize dims[] = { d_NoOfAngles, d_NoOfElements };
switch (PLATFORM) // The settings change for input
{
case CUDA:
case OPENCL:
// Final processed double pointers
d_FinalPolarGridReal = FinalPolarGridReal.device<double>();
d_FinalPolarGridImag = FinalPolarGridImag.device<double>();
/*output variables*/
mxGPUArray* mxOutputRealPolarGridImage;
mxGPUArray* mxOutputImagPolarGridImage;
/* Create a GPUArray to hold the result and get its underlying pointer. */
mxOutputRealPolarGridImage = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(MxInputImage),
dims,
mxGPUGetClassID(MxInputImage),
mxGPUGetComplexity(MxInputImage),
MX_GPU_DO_NOT_INITIALIZE);
PolarGridReal_OUTPUT = (double *)(mxGPUGetData(mxOutputRealPolarGridImage));
mxOutputImagPolarGridImage = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(MxInputImage),
dims,
mxGPUGetClassID(MxInputImage),
mxGPUGetComplexity(MxInputImage),
MX_GPU_DO_NOT_INITIALIZE);
PolarGridImag_OUTPUT = (double *)(mxGPUGetData(mxOutputImagPolarGridImage));
/* Copy processed Values from array object to MxArrayRealData */
cudaMemcpy(PolarGridReal_OUTPUT, d_FinalPolarGridReal, d_NoOfAngles*d_NoOfElements* sizeof(double), cudaMemcpyDeviceToDevice);
cudaMemcpy(PolarGridImag_OUTPUT, d_FinalPolarGridImag, d_NoOfAngles*d_NoOfElements* sizeof(double), cudaMemcpyDeviceToDevice);
/* Wrap the result up as a MATLAB gpuArray for return. */
plhs[0] = mxGPUCreateMxArrayOnGPU(mxOutputRealPolarGridImage);
plhs[1] = mxGPUCreateMxArrayOnGPU(mxOutputImagPolarGridImage);
/*
* The mxGPUArray pointers are host-side structures that refer to device
* data. These must be destroyed before leaving the MEX function.
*/
mxGPUDestroyGPUArray(MxInputImage);
break;
case CPU:
// Final processed double pointers
d_FinalPolarGridReal = FinalPolarGridReal.host<double>(); // Source
d_FinalPolarGridImag = FinalPolarGridImag.host<double>();
mxArray* mxOutputRealPolarGridImageCPU;
mxArray* mxOutputImagPolarGridImageCPU;
mxOutputRealPolarGridImageCPU = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL);
mxOutputImagPolarGridImageCPU = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL);
PolarGridReal_OUTPUT = mxGetPr(mxOutputRealPolarGridImageCPU);
PolarGridImag_OUTPUT = mxGetPr(mxOutputImagPolarGridImageCPU);
memcpy(PolarGridReal_OUTPUT, d_FinalPolarGridReal, d_NoOfAngles*d_NoOfElements* sizeof(double));
memcpy(PolarGridImag_OUTPUT, d_FinalPolarGridImag, d_NoOfAngles*d_NoOfElements* sizeof(double));
plhs[0] = mxOutputRealPolarGridImageCPU;
plhs[1] = mxOutputImagPolarGridImageCPU;
break;
default:
break;
}
mexPrintf("\nFinished processing custom CUDA mex with ArrayFire for computing 2D DFT on Polar Grid, Status = Success\n");
}
catch (af::exception &ex) {
mexPrintf("%s\n", ex.what());
}
}
|
9b4d02ddec01f648e170deeebf311c53b244e667.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void DrawObstacles(uchar4 *ptr, int* indices, int size) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
while (thread_id < size) {
int index = indices[thread_id];
ptr[index].x = 0;
ptr[index].y = 0;
ptr[index].z = 0;
ptr[index].w = 255;
thread_id += blockDim.x * gridDim.x;
}
} | 9b4d02ddec01f648e170deeebf311c53b244e667.cu | #include "includes.h"
__global__ void DrawObstacles(uchar4 *ptr, int* indices, int size) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
while (thread_id < size) {
int index = indices[thread_id];
ptr[index].x = 0;
ptr[index].y = 0;
ptr[index].z = 0;
ptr[index].w = 255;
thread_id += blockDim.x * gridDim.x;
}
} |
2f8467bc004f8978acca07d65d126f0fb961f7af.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
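// (The threadblock tile follows from the factors above: 2x4 elements/thread times
// 4x8 threads/warp times 1x1 warps/block gives the 8x32 M-by-N tile, with K fixed
// at 8. The same arithmetic applies to every configuration in this file.)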
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
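//
// Usage sketch (assumes the standard Google Benchmark command line; the binary
// name below is illustrative): the 64x64x8 configuration above, which is built
// at every CUASR_BENCH_LEVEL, can be run in isolation with a filter regex:
//   ./cuasr_bench \
//     --benchmark_filter='minimum_plus_dsrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2$'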
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
hipDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
| 2f8467bc004f8978acca07d65d126f0fb961f7af.cu | /***************************************************************************************************
* Copyright (c) 2020, Vijay Thakkar ([email protected]).
**************************************************************************************************/
//////////////////////////////////////////////////////////////////////
// THIS BENCHMARK FILE IS GENERATED AUTOMATICALLY : DO NOT MODIFY //
//////////////////////////////////////////////////////////////////////
#include "benchmark/benchmark.h"
#include "cuasr/gemm/device/default_srgemm_configuration.h"
#include "cuasr/gemm/device/srgemm.h"
#include "cuasr/functional.h"
#include "harness.h"
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x32x1_2x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x32x1_4x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x64x1_4x8_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x32x1_8x4_4x8_1x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x32x8_8x16x1_2x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 8 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<8, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_8x64x8_8x32x1_2x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_16x16x1_4x2_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_16x32x1_4x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 16 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x8_16x64x1_4x8_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 1 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_32x16x1_4x4_8x4_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 1 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_32x32x1_8x4_4x8_1x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x32x1_4x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 1
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x32x1_8x4_4x8_2x1)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x32x8_8x16x1_2x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 16 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x8_8x32x1_2x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x16x1_4x2_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x32x1_4x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 8
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x64x1_4x8_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_32x16x1_4x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 0)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x32x1_8x4_4x8_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 8 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<64, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_64x16x1_8x4_8x4_2x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x64x16_8x16x1_2x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 16 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<16, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_16x128x16_8x32x1_2x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_16x8x1_2x2_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x8_16x16x1_4x2_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 2 x 4
// Threadblock: 32 x 128 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x8_16x32x1_4x4_4x8_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 2 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 1)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_32x16x1_4x4_8x4_2x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 32 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x32x8_8x16x1_2x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x8_16x16x1_4x2_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 2
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x32x1_4x4_4x8_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 4
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 2
// Threadblock: 128 x 32 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x8_32x16x1_4x4_8x4_4x2)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 64 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 64, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 16, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x64x16_8x16x1_2x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 4
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 32 x 128 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<32, 128, 16>;
using WarpShape = cutlass::gemm::GemmShape<8, 32, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_32x128x16_8x32x1_2x4_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 2 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 64 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<16, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x32x16_16x8x1_2x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 4 x 8
// Warps / Block: 4 x 4
// Threadblock: 64 x 64 x 8
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<64, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 16, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_64x64x8_16x16x1_4x2_4x8_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
////////////////////////////////////////////////////////////////////////////////
// Elements / Thread: 4 x 2
// Threads / Warp: 8 x 4
// Warps / Block: 4 x 4
// Threadblock: 128 x 32 x 16
#if defined(CUASR_BENCH_LEVEL) and (CUASR_BENCH_LEVEL >= 2)
static void BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4(benchmark::State &state) {
const auto N = static_cast<int>(state.range(0));
using precision = double;
using OpClass = cutlass::arch::OpClassSimt;
using SmArch = cutlass::arch::Sm50;
using ThreadblockShape = cutlass::gemm::GemmShape<128, 32, 16>;
using WarpShape = cutlass::gemm::GemmShape<32, 8, 16>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using Config = typename cuasr::gemm::device::DefaultSemiRingConfiguration< //
precision, precision, precision, precision, OpClass, //
cuasr::minimum<precision>, cuasr::plus<precision>, SmArch>;
using AddOp = Config::AdditionOp;
using MultOp = Config::MultiplicationOp;
using EpilogueOutputOp = Config::EpilogueOutputOp;
using Srgemm = cuasr::gemm::device::Srgemm< //
AddOp, MultOp, //
precision, cutlass::layout::ColumnMajor, //
precision, cutlass::layout::RowMajor, //
precision, cutlass::layout::ColumnMajor, //
precision, OpClass, SmArch, //
ThreadblockShape, WarpShape, InstructionShape, EpilogueOutputOp, //
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;
// setup bench harness
cuasr::bench::device::BenchHarness<Srgemm> bench({ N, N, N });
// benchmark loop
for (auto _ : state) {
benchmark::DoNotOptimize(bench.run());
cudaDeviceSynchronize();
}
double flops_per_itr = 2.0 * N * N * N;
state.counters["Flop/s"]
= benchmark::Counter(flops_per_itr, benchmark::Counter::kIsIterationInvariantRate);
}
BENCHMARK(BM_SM50_device_minimum_plus_dsrgemm_nt_n_128x32x16_32x8x1_4x2_8x4_4x4)
->RangeMultiplier(2)->Range(256, 4096);
#endif
|
0461c2baadfa285411a3d19df465a8df2e85aae2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//#include "hip/hip_runtime.h"
// for using cublas
#include <rocblas.h>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <complex>
#include <assert.h>
#include <algorithm>
#include <hip/hip_complex.h>
#include "util_type.h"
#include "util.cuh"
#include "util_func.h"
#include "memory_ops.h"
inline __device__ double __shfl_down_double(double var, unsigned int srcLane, int width = 32) {
int2 a = *reinterpret_cast<int2*>(&var);
    a.x = __shfl_down_sync(0xffffffff, a.x, srcLane, width);
    a.y = __shfl_down_sync(0xffffffff, a.y, srcLane, width);
return *reinterpret_cast<double*>(&a);
}
inline __device__ int warpReduceSum(int val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xffffffff, val, offset);
// val += __shfl_down(val, offset);
return val;
}
// __device__ int __popcll ( unsigned long long int x )
inline __device__ int popcount64(ITYPE b) {
return __popcll(b);
/*
b -= (b >> 1) & 0x5555555555555555ULL;
b = ((b >> 2) & 0x3333333333333333ULL) + (b & 0x3333333333333333ULL);
b = ((b >> 4) + b) & 0x0F0F0F0F0F0F0F0FULL;
return (b * 0x0101010101010101ULL) >> 56;
*/
}
//__device__ int __popc ( unsigned int x )
inline __device__ int popcount32(unsigned int b) {
return __popc(b);
/*
unsigned int w = b >> 32;
unsigned int v = b;
v -= (v >> 1) & 0x55555555;
w -= (w >> 1) & 0x55555555;
v = ((v >> 2) & 0x33333333) + (v & 0x33333333);
w = ((w >> 2) & 0x33333333) + (w & 0x33333333);
v = ((v >> 4) + v + (w >> 4) + w) & 0x0F0F0F0F;
return (v * 0x01010101) >> 24;
*/
}
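// Grid-stride sum reduction: each thread accumulates a partial sum, the
// partial sums are combined within each warp, and lane 0 of every warp
// adds its warp's total to *out with a single atomicAdd.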
__global__ void deviceReduceWarpAtomicKernel(int *in, int* out, ITYPE N) {
int sum = int(0);
for (ITYPE i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (warpSize - 1)) == 0)
atomicAdd(out, sum);
}
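// Set the state vector to the computational-basis state |comp_basis>:
// every amplitude is zeroed and a single 1.0 is written at index comp_basis.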
__global__ void set_computational_basis_gpu(ITYPE comp_basis, GTYPE* state, ITYPE dim){
ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < dim) {
state[idx] = make_cuDoubleComplex(0.0, 0.0);
}
if(idx==comp_basis) state[comp_basis] = make_cuDoubleComplex(1.0, 0.0);
}
__host__ void set_computational_basis_host(ITYPE comp_basis, void* state, ITYPE dim){
hipError_t cudaStatus;
GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
unsigned int block = dim <= 1024 ? dim : 1024;
unsigned int grid = dim / block;
set_computational_basis_gpu << <grid, block >> >(comp_basis, state_gpu, dim);
checkCudaErrors(hipDeviceSynchronize(), __FILE__, __LINE__);
cudaStatus = hipGetLastError();
checkCudaErrors(cudaStatus, __FILE__, __LINE__);
state = reinterpret_cast<void*>(state_gpu);
}
// copy state_gpu to state_gpu_copy
void copy_quantum_state_host(void* state_gpu_copy, const void* state_gpu, ITYPE dim){
const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu);
GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
checkCudaErrors(hipMemcpy(psi_gpu_copy, psi_gpu, dim * sizeof(GTYPE), hipMemcpyDeviceToDevice));
state_gpu = reinterpret_cast<const void*>(psi_gpu);
state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// copy state_gpu to psi_cpu_copy
void get_quantum_state_host(void* state_gpu, void* psi_cpu_copy, ITYPE dim){
GTYPE* psi_gpu = reinterpret_cast<GTYPE*>(state_gpu);
psi_cpu_copy = reinterpret_cast<CPPCTYPE*>(psi_cpu_copy);
checkCudaErrors(hipMemcpy(psi_cpu_copy, psi_gpu, dim * sizeof(CPPCTYPE), hipMemcpyDeviceToHost));
state_gpu = reinterpret_cast<void*>(psi_gpu);
}
void print_quantum_state_host(void* state, ITYPE dim){
GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
CPPCTYPE* state_cpu=(CPPCTYPE*)malloc(sizeof(CPPCTYPE)*dim);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(state_cpu, state_gpu, dim * sizeof(CPPCTYPE), hipMemcpyDeviceToHost));
for(int i=0;i<dim;++i){
std::cout << i << " : " << state_cpu[i].real() << "+i" << state_cpu[i].imag() << '\n';
}
std::cout << '\n';
free(state_cpu);
state = reinterpret_cast<void*>(state);
}
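// Insert a 0 bit into basis_index at bit position qubit_index, shifting the
// higher-order bits up by one (e.g. basis 0b101 with qubit_index 1 -> 0b1001).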
ITYPE insert_zero_to_basis_index_gsim(ITYPE basis_index, unsigned int qubit_index){
ITYPE temp_basis = (basis_index >> qubit_index) << (qubit_index+1);
return temp_basis + (basis_index & ( (1ULL<<qubit_index) -1));
}
void get_Pauli_masks_partial_list_gsim(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
(*bit_flip_mask)=0;
(*phase_flip_mask)=0;
(*global_phase_90rot_count)=0;
(*pivot_qubit_index)=0;
for(UINT cursor=0;cursor < target_qubit_index_count; ++cursor){
UINT target_qubit_index = target_qubit_index_list[cursor];
switch(Pauli_operator_type_list[cursor]){
case 0: // I
break;
case 1: // X
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*pivot_qubit_index) = target_qubit_index;
break;
case 2: // Y
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
(*global_phase_90rot_count) ++;
(*pivot_qubit_index) = target_qubit_index;
break;
case 3: // Z
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
break;
default:
fprintf(stderr,"Invalid Pauli operator ID called");
assert(0);
}
}
}
void get_Pauli_masks_whole_list_gsim(const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
(*bit_flip_mask)=0;
(*phase_flip_mask)=0;
(*global_phase_90rot_count)=0;
(*pivot_qubit_index)=0;
for(UINT target_qubit_index=0; target_qubit_index < target_qubit_index_count; ++target_qubit_index){
switch(Pauli_operator_type_list[target_qubit_index]){
case 0: // I
break;
case 1: // X
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*pivot_qubit_index) = target_qubit_index;
break;
case 2: // Y
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
(*global_phase_90rot_count) ++;
(*pivot_qubit_index) = target_qubit_index;
break;
case 3: // Z
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
break;
default:
fprintf(stderr,"Invalid Pauli operator ID called");
assert(0);
}
}
}
ITYPE* create_matrix_mask_list_gsim(const UINT* qubit_index_list, UINT qubit_index_count){
const ITYPE matrix_dim = 1ULL << qubit_index_count;
ITYPE* mask_list = (ITYPE*) calloc((size_t)matrix_dim, sizeof(ITYPE));
ITYPE cursor = 0;
for(cursor=0;cursor < matrix_dim; ++cursor){
for(UINT bit_cursor = 0; bit_cursor < qubit_index_count;++bit_cursor){
if ((cursor >> bit_cursor) & 1) {
UINT bit_index = qubit_index_list[bit_cursor];
mask_list[cursor] ^= (1ULL << bit_index);
}
}
}
return mask_list;
}
ITYPE create_control_mask_gsim(const UINT* qubit_index_list, const UINT* value_list, UINT size) {
ITYPE mask = 0;
for (UINT cursor = 0; cursor < size; ++cursor) {
mask ^= (1ULL << qubit_index_list[cursor]) * value_list[cursor];
}
return mask;
}
UINT* create_sorted_ui_list_gsim(const UINT* array, size_t size){
UINT* new_array = (UINT*)calloc(size,sizeof(UINT));
memcpy(new_array, array, size*sizeof(UINT));
std::sort(new_array, new_array+size);
return new_array;
}
UINT* create_sorted_ui_list_value_gsim(const UINT* array, size_t size, UINT value){
UINT* new_array = (UINT*)calloc(size+1, sizeof(UINT));
memcpy(new_array,array,size*sizeof(UINT));
new_array[size]=value;
std::sort(new_array, new_array+size+1);
return new_array;
}
UINT* create_sorted_ui_list_list_gsim(const UINT* array1, size_t size1, const UINT* array2, size_t size2){
UINT* new_array = (UINT*)calloc(size1+size2, sizeof(UINT));
memcpy(new_array,array1,size1*sizeof(UINT));
memcpy(new_array+size1,array2,size2*sizeof(UINT));
std::sort(new_array, new_array+size1+size2);
return new_array;
}
// C=alpha*A*B+beta*C
// in this wrapper, we assume beta is always zero!
int cublas_zgemm_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_B, CPPCTYPE beta, CPPCTYPE *h_C){
ITYPE n2 = n*n;
hipblasStatus_t status;
hipblasHandle_t handle;
GTYPE *d_A;// = make_cuDoubleComplex(0.0,0.0);
GTYPE *d_B;// = make_cuDoubleComplex(0,0);
GTYPE *d_C;// = make_cuDoubleComplex(0,0);
GTYPE d_alpha=make_cuDoubleComplex(alpha.real(), alpha.imag());
GTYPE d_beta=make_cuDoubleComplex(beta.real(), beta.imag());
// int dev = 0; //findCudaDevice(argc, (const char **)argv);
/* Initialize CUBLAS */
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (hipMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_B), n2 * sizeof(d_B[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_C), n2 * sizeof(d_C[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
//status = hipblasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = hipblasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
//status = hipblasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
status = hipblasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
//status = hipblasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
status = hipblasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
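    // Both operands are passed as HIPBLAS_OP_T, presumably because the host
    // matrices are row-major C arrays while the BLAS interface assumes
    // column-major storage.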
status = hipblasZgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, n, n, n, &d_alpha, d_A,
n, d_B, n, &d_beta, d_C, n);
//status=hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
CPPCTYPE* tmp_h_C = reinterpret_cast<CPPCTYPE *>(malloc(n2 * sizeof(h_C[0])));
if (tmp_h_C == 0) {
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = hipblasGetMatrix(n, n, sizeof(GTYPE), d_C, n, tmp_h_C, n);
memcpy(h_C, tmp_h_C, sizeof(h_C[0])*n2);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_A) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_B) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (hipFree(d_C) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// C=alpha*A*x+beta*y
// in this wrapper, we assume beta is always zero!
int cublas_zgemv_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_x, CPPCTYPE beta, CPPCTYPE *h_y){
ITYPE n2 = n*n;
hipblasStatus_t status;
hipblasHandle_t handle;
GTYPE *d_A;
GTYPE *d_x;
GTYPE *d_y;
GTYPE d_alpha=make_cuDoubleComplex(alpha.real(), alpha.imag());
GTYPE d_beta=make_cuDoubleComplex(beta.real(), beta.imag());
// int dev = 0; //findCudaDevice(argc, (const char **)argv);
/* Initialize CUBLAS */
printf("simpleCUBLAS test running..\n");
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (hipMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_x), n * sizeof(d_x[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate x)\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
//status = hipblasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = hipblasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n, sizeof(h_x[0]), h_x, 1, d_x, 1);
//status = hipblasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write x)\n");
return EXIT_FAILURE;
}
status = hipblasSetVector(n, sizeof(h_y[0]), h_y, 1, d_y, 1);
//status = hipblasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_A, n,
d_x, 1, &d_beta, d_y, 1);
/*
hipblasStatus_t hipblasZgemv(hipblasHandle_t handle, hipblasOperation_t trans,
int m, int n,
const hipDoubleComplex *alpha,
const hipDoubleComplex *A, int lda,
const hipDoubleComplex *x, int incx,
const hipDoubleComplex *beta,
hipDoubleComplex *y, int incy)
*/
//status=hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
CPPCTYPE* tmp_h_y = reinterpret_cast<CPPCTYPE *>(malloc(n * sizeof(h_y[0])));
if (tmp_h_y == 0) {
fprintf(stderr, "!!!! host memory allocation error (y)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = hipblasGetVector(n, sizeof(GTYPE), d_y, 1, tmp_h_y, 1);
/*
hipblasStatus_t hipblasGetVector(int n, int elemSize, const void *x, int incx, void *y, int incy)
*/
memcpy(h_y, tmp_h_y, sizeof(h_y[0])*n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_A) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_x) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (x)\n");
return EXIT_FAILURE;
}
if (hipFree(d_y) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// we assume the state has already been allocated on the device
int cublas_zgemv_wrapper(ITYPE n, const CPPCTYPE *h_matrix, GTYPE *d_state){
ITYPE n2 = n*n;
hipblasStatus_t status;
hipblasHandle_t handle;
GTYPE *d_matrix;
    GTYPE *d_y; // this will hold the resulting state vector.
GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
// int dev = 0;
/* Initialize CUBLAS */
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (hipMalloc(reinterpret_cast<void **>(&d_matrix), n2 * sizeof(d_matrix[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
// hipMemset(&d_y, 0, sizeof(d_y[0])*n);
/* Initialize the device matrices with the host matrices */
status = hipblasSetMatrix(n, n, sizeof(h_matrix[0]), h_matrix, n, d_matrix, n);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
d_state, 1, &d_beta, d_y, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
hipMemcpy(d_state, d_y, n * sizeof(GTYPE), hipMemcpyDeviceToDevice);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_matrix) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (hipFree(d_y) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// we assume the state and matrix have already been allocated on the device
int cublas_zgemv_wrapper(ITYPE n, const GTYPE *d_matrix, GTYPE *d_state){
// ITYPE n2 = n*n;
hipblasStatus_t status;
hipblasHandle_t handle;
    GTYPE *d_y; // this will hold the resulting state vector.
GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
// int dev = 0;
/* Initialize CUBLAS */
status = hipblasCreate(&handle);
if (status != HIPBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
if (hipMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != hipSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
// hipMemset(&d_y, 0, sizeof(d_y[0])*n);
/* Performs operation using cublas */
status = hipblasZgemv(handle, HIPBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
d_state, 1, &d_beta, d_y, 1);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
hipMemcpy(d_state, d_y, n * sizeof(GTYPE), hipMemcpyDeviceToDevice);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (hipFree(d_y) != hipSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = hipblasDestroy(handle);
if (status != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
| 0461c2baadfa285411a3d19df465a8df2e85aae2.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include "cuda.h"
// for using cublas
#include <cublas_v2.h>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <complex>
#include <assert.h>
#include <algorithm>
#include <cuComplex.h>
#include "util_type.h"
#include "util.cuh"
#include "util_func.h"
#include "memory_ops.h"
inline __device__ double __shfl_down_double(double var, unsigned int srcLane, int width = 32) {
int2 a = *reinterpret_cast<int2*>(&var);
    a.x = __shfl_down_sync(0xffffffff, a.x, srcLane, width);
    a.y = __shfl_down_sync(0xffffffff, a.y, srcLane, width);
return *reinterpret_cast<double*>(&a);
}
inline __device__ int warpReduceSum(int val) {
for (int offset = warpSize / 2; offset > 0; offset /= 2)
val += __shfl_down_sync(0xffffffff, val, offset);
// val += __shfl_down(val, offset);
return val;
}
// __device__ int __popcll ( unsigned long long int x )
inline __device__ int popcount64(ITYPE b) {
return __popcll(b);
/*
b -= (b >> 1) & 0x5555555555555555ULL;
b = ((b >> 2) & 0x3333333333333333ULL) + (b & 0x3333333333333333ULL);
b = ((b >> 4) + b) & 0x0F0F0F0F0F0F0F0FULL;
return (b * 0x0101010101010101ULL) >> 56;
*/
}
//__device__ int __popc ( unsigned int x )
inline __device__ int popcount32(unsigned int b) {
return __popc(b);
/*
unsigned int w = b >> 32;
unsigned int v = b;
v -= (v >> 1) & 0x55555555;
w -= (w >> 1) & 0x55555555;
v = ((v >> 2) & 0x33333333) + (v & 0x33333333);
w = ((w >> 2) & 0x33333333) + (w & 0x33333333);
v = ((v >> 4) + v + (w >> 4) + w) & 0x0F0F0F0F;
return (v * 0x01010101) >> 24;
*/
}
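// Grid-stride sum reduction: each thread accumulates a partial sum, the
// partial sums are combined within each warp, and lane 0 of every warp
// adds its warp's total to *out with a single atomicAdd.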
__global__ void deviceReduceWarpAtomicKernel(int *in, int* out, ITYPE N) {
int sum = int(0);
for (ITYPE i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x) {
sum += in[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (warpSize - 1)) == 0)
atomicAdd(out, sum);
}
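// Set the state vector to the computational-basis state |comp_basis>:
// every amplitude is zeroed and a single 1.0 is written at index comp_basis.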
__global__ void set_computational_basis_gpu(ITYPE comp_basis, GTYPE* state, ITYPE dim){
ITYPE idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < dim) {
state[idx] = make_cuDoubleComplex(0.0, 0.0);
}
if(idx==comp_basis) state[comp_basis] = make_cuDoubleComplex(1.0, 0.0);
}
__host__ void set_computational_basis_host(ITYPE comp_basis, void* state, ITYPE dim){
cudaError cudaStatus;
GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
unsigned int block = dim <= 1024 ? dim : 1024;
unsigned int grid = dim / block;
set_computational_basis_gpu << <grid, block >> >(comp_basis, state_gpu, dim);
checkCudaErrors(cudaDeviceSynchronize(), __FILE__, __LINE__);
cudaStatus = cudaGetLastError();
checkCudaErrors(cudaStatus, __FILE__, __LINE__);
state = reinterpret_cast<void*>(state_gpu);
}
// copy state_gpu to state_gpu_copy
void copy_quantum_state_host(void* state_gpu_copy, const void* state_gpu, ITYPE dim){
const GTYPE* psi_gpu = reinterpret_cast<const GTYPE*>(state_gpu);
GTYPE* psi_gpu_copy = reinterpret_cast<GTYPE*>(state_gpu_copy);
checkCudaErrors(cudaMemcpy(psi_gpu_copy, psi_gpu, dim * sizeof(GTYPE), cudaMemcpyDeviceToDevice));
state_gpu = reinterpret_cast<const void*>(psi_gpu);
state_gpu_copy = reinterpret_cast<void*>(psi_gpu_copy);
}
// copy state_gpu to psi_cpu_copy
void get_quantum_state_host(void* state_gpu, void* psi_cpu_copy, ITYPE dim){
GTYPE* psi_gpu = reinterpret_cast<GTYPE*>(state_gpu);
psi_cpu_copy = reinterpret_cast<CPPCTYPE*>(psi_cpu_copy);
checkCudaErrors(cudaMemcpy(psi_cpu_copy, psi_gpu, dim * sizeof(CPPCTYPE), cudaMemcpyDeviceToHost));
state_gpu = reinterpret_cast<void*>(psi_gpu);
}
void print_quantum_state_host(void* state, ITYPE dim){
GTYPE* state_gpu = reinterpret_cast<GTYPE*>(state);
CPPCTYPE* state_cpu=(CPPCTYPE*)malloc(sizeof(CPPCTYPE)*dim);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(state_cpu, state_gpu, dim * sizeof(CPPCTYPE), cudaMemcpyDeviceToHost));
for(int i=0;i<dim;++i){
std::cout << i << " : " << state_cpu[i].real() << "+i" << state_cpu[i].imag() << '\n';
}
std::cout << '\n';
free(state_cpu);
state = reinterpret_cast<void*>(state);
}
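// Insert a 0 bit into basis_index at bit position qubit_index, shifting the
// higher-order bits up by one (e.g. basis 0b101 with qubit_index 1 -> 0b1001).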
ITYPE insert_zero_to_basis_index_gsim(ITYPE basis_index, unsigned int qubit_index){
ITYPE temp_basis = (basis_index >> qubit_index) << (qubit_index+1);
return temp_basis + (basis_index & ( (1ULL<<qubit_index) -1));
}
void get_Pauli_masks_partial_list_gsim(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
(*bit_flip_mask)=0;
(*phase_flip_mask)=0;
(*global_phase_90rot_count)=0;
(*pivot_qubit_index)=0;
for(UINT cursor=0;cursor < target_qubit_index_count; ++cursor){
UINT target_qubit_index = target_qubit_index_list[cursor];
switch(Pauli_operator_type_list[cursor]){
case 0: // I
break;
case 1: // X
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*pivot_qubit_index) = target_qubit_index;
break;
case 2: // Y
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
(*global_phase_90rot_count) ++;
(*pivot_qubit_index) = target_qubit_index;
break;
case 3: // Z
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
break;
default:
fprintf(stderr,"Invalid Pauli operator ID called");
assert(0);
}
}
}
void get_Pauli_masks_whole_list_gsim(const UINT* Pauli_operator_type_list, UINT target_qubit_index_count,
ITYPE* bit_flip_mask, ITYPE* phase_flip_mask, UINT* global_phase_90rot_count, UINT* pivot_qubit_index){
(*bit_flip_mask)=0;
(*phase_flip_mask)=0;
(*global_phase_90rot_count)=0;
(*pivot_qubit_index)=0;
for(UINT target_qubit_index=0; target_qubit_index < target_qubit_index_count; ++target_qubit_index){
switch(Pauli_operator_type_list[target_qubit_index]){
case 0: // I
break;
case 1: // X
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*pivot_qubit_index) = target_qubit_index;
break;
case 2: // Y
(*bit_flip_mask) ^= 1ULL << target_qubit_index;
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
(*global_phase_90rot_count) ++;
(*pivot_qubit_index) = target_qubit_index;
break;
case 3: // Z
(*phase_flip_mask) ^= 1ULL << target_qubit_index;
break;
default:
fprintf(stderr,"Invalid Pauli operator ID called");
assert(0);
}
}
}
ITYPE* create_matrix_mask_list_gsim(const UINT* qubit_index_list, UINT qubit_index_count){
const ITYPE matrix_dim = 1ULL << qubit_index_count;
ITYPE* mask_list = (ITYPE*) calloc((size_t)matrix_dim, sizeof(ITYPE));
ITYPE cursor = 0;
for(cursor=0;cursor < matrix_dim; ++cursor){
for(UINT bit_cursor = 0; bit_cursor < qubit_index_count;++bit_cursor){
if ((cursor >> bit_cursor) & 1) {
UINT bit_index = qubit_index_list[bit_cursor];
mask_list[cursor] ^= (1ULL << bit_index);
}
}
}
return mask_list;
}
ITYPE create_control_mask_gsim(const UINT* qubit_index_list, const UINT* value_list, UINT size) {
ITYPE mask = 0;
for (UINT cursor = 0; cursor < size; ++cursor) {
mask ^= (1ULL << qubit_index_list[cursor]) * value_list[cursor];
}
return mask;
}
UINT* create_sorted_ui_list_gsim(const UINT* array, size_t size){
UINT* new_array = (UINT*)calloc(size,sizeof(UINT));
memcpy(new_array, array, size*sizeof(UINT));
std::sort(new_array, new_array+size);
return new_array;
}
UINT* create_sorted_ui_list_value_gsim(const UINT* array, size_t size, UINT value){
UINT* new_array = (UINT*)calloc(size+1, sizeof(UINT));
memcpy(new_array,array,size*sizeof(UINT));
new_array[size]=value;
std::sort(new_array, new_array+size+1);
return new_array;
}
UINT* create_sorted_ui_list_list_gsim(const UINT* array1, size_t size1, const UINT* array2, size_t size2){
UINT* new_array = (UINT*)calloc(size1+size2, sizeof(UINT));
memcpy(new_array,array1,size1*sizeof(UINT));
memcpy(new_array+size1,array2,size2*sizeof(UINT));
std::sort(new_array, new_array+size1+size2);
return new_array;
}
// C=alpha*A*B+beta*C
// in this wrapper, we assume beta is always zero!
int cublas_zgemm_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_B, CPPCTYPE beta, CPPCTYPE *h_C){
ITYPE n2 = n*n;
cublasStatus_t status;
cublasHandle_t handle;
GTYPE *d_A;// = make_cuDoubleComplex(0.0,0.0);
GTYPE *d_B;// = make_cuDoubleComplex(0,0);
GTYPE *d_C;// = make_cuDoubleComplex(0,0);
GTYPE d_alpha=make_cuDoubleComplex(alpha.real(), alpha.imag());
GTYPE d_beta=make_cuDoubleComplex(beta.real(), beta.imag());
// int dev = 0; //findCudaDevice(argc, (const char **)argv);
/* Initialize CUBLAS */
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (cudaMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_B), n2 * sizeof(d_B[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate B)\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_C), n2 * sizeof(d_C[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate C)\n");
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
//status = cublasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = cublasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
//status = cublasSetVector(n2, sizeof(h_B[0]), h_B, 1, d_B, 1);
status = cublasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write B)\n");
return EXIT_FAILURE;
}
//status = cublasSetVector(n2, sizeof(h_C[0]), h_C, 1, d_C, 1);
status = cublasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
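    // Both operands are passed as CUBLAS_OP_T, presumably because the host
    // matrices are row-major C arrays while cuBLAS assumes column-major
    // storage.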
status = cublasZgemm(handle, CUBLAS_OP_T, CUBLAS_OP_T, n, n, n, &d_alpha, d_A,
n, d_B, n, &d_beta, d_C, n);
//status=cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
CPPCTYPE* tmp_h_C = reinterpret_cast<CPPCTYPE *>(malloc(n2 * sizeof(h_C[0])));
if (tmp_h_C == 0) {
fprintf(stderr, "!!!! host memory allocation error (C)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = cublasGetMatrix(n, n, sizeof(GTYPE), d_C, n, tmp_h_C, n);
memcpy(h_C, tmp_h_C, sizeof(h_C[0])*n2);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_A) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_B) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (B)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_C) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (C)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// C=alpha*A*x+beta*y
// in this wrapper, we assume beta is always zero!
int cublas_zgemv_wrapper(ITYPE n, CPPCTYPE alpha, const CPPCTYPE *h_A, const CPPCTYPE *h_x, CPPCTYPE beta, CPPCTYPE *h_y){
ITYPE n2 = n*n;
cublasStatus_t status;
cublasHandle_t handle;
GTYPE *d_A;
GTYPE *d_x;
GTYPE *d_y;
GTYPE d_alpha=make_cuDoubleComplex(alpha.real(), alpha.imag());
GTYPE d_beta=make_cuDoubleComplex(beta.real(), beta.imag());
// int dev = 0; //findCudaDevice(argc, (const char **)argv);
/* Initialize CUBLAS */
printf("simpleCUBLAS test running..\n");
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (cudaMalloc(reinterpret_cast<void **>(&d_A), n2 * sizeof(d_A[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_x), n * sizeof(d_x[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate x)\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
/* Initialize the device matrices with the host matrices */
//status = cublasSetVector(n2, sizeof(h_A[0]), h_A, 1, d_A, 1);
status = cublasSetMatrix(n, n, sizeof(h_A[0]), h_A, n, d_A, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n, sizeof(h_x[0]), h_x, 1, d_x, 1);
//status = cublasSetMatrix(n, n, sizeof(h_B[0]), h_B, n, d_B, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write x)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(n, sizeof(h_y[0]), h_y, 1, d_y, 1);
//status = cublasSetMatrix(n, n, sizeof(h_C[0]), h_C, n, d_C, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write C)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_A, n,
d_x, 1, &d_beta, d_y, 1);
/*
cublasStatus_t cublasZgemv(cublasHandle_t handle, cublasOperation_t trans,
int m, int n,
const cuDoubleComplex *alpha,
const cuDoubleComplex *A, int lda,
const cuDoubleComplex *x, int incx,
const cuDoubleComplex *beta,
cuDoubleComplex *y, int incy)
*/
//status=cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N, &alpha, d_A, N, d_B, N, &beta, d_C, N);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
/* Allocate host memory for reading back the result from device memory */
CPPCTYPE* tmp_h_y = reinterpret_cast<CPPCTYPE *>(malloc(n * sizeof(h_y[0])));
if (tmp_h_y == 0) {
fprintf(stderr, "!!!! host memory allocation error (y)\n");
return EXIT_FAILURE;
}
/* Read the result back */
status = cublasGetVector(n, sizeof(GTYPE), d_y, 1, tmp_h_y, 1);
/*
cublasStatus_t cublasGetVector(int n, int elemSize, const void *x, int incx, void *y, int incy)
*/
memcpy(h_y, tmp_h_y, sizeof(h_y[0])*n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_A) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_x) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (x)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_y) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// we assume the state has already been allocated on the device
int cublas_zgemv_wrapper(ITYPE n, const CPPCTYPE *h_matrix, GTYPE *d_state){
ITYPE n2 = n*n;
cublasStatus_t status;
cublasHandle_t handle;
GTYPE *d_matrix;
    GTYPE *d_y; // this will hold the resulting state vector.
GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
// int dev = 0;
/* Initialize CUBLAS */
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
/* Allocate device memory for the matrices */
if (cudaMalloc(reinterpret_cast<void **>(&d_matrix), n2 * sizeof(d_matrix[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
// cudaMemset(&d_y, 0, sizeof(d_y[0])*n);
/* Initialize the device matrices with the host matrices */
status = cublasSetMatrix(n, n, sizeof(h_matrix[0]), h_matrix, n, d_matrix, n);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (write A)\n");
return EXIT_FAILURE;
}
/* Performs operation using cublas */
status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
d_state, 1, &d_beta, d_y, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
cudaMemcpy(d_state, d_y, n * sizeof(GTYPE), cudaMemcpyDeviceToDevice);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_matrix) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (A)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_y) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
// we assume the state and matrix have already been allocated on the device
int cublas_zgemv_wrapper(ITYPE n, const GTYPE *d_matrix, GTYPE *d_state){
// ITYPE n2 = n*n;
cublasStatus_t status;
cublasHandle_t handle;
    GTYPE *d_y; // this will hold the resulting state vector.
GTYPE d_alpha = make_cuDoubleComplex(1.0, 0.0);
GTYPE d_beta = make_cuDoubleComplex(0.0, 0.0);
// int dev = 0;
/* Initialize CUBLAS */
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS){
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
if (cudaMalloc(reinterpret_cast<void **>(&d_y), n * sizeof(d_y[0])) != cudaSuccess) {
fprintf(stderr, "!!!! device memory allocation error (allocate y)\n");
return EXIT_FAILURE;
}
// cudaMemset(&d_y, 0, sizeof(d_y[0])*n);
/* Performs operation using cublas */
status = cublasZgemv(handle, CUBLAS_OP_T, n, n, &d_alpha, d_matrix, n,
d_state, 1, &d_beta, d_y, 1);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! kernel execution error.\n");
return EXIT_FAILURE;
}
cudaMemcpy(d_state, d_y, n * sizeof(GTYPE), cudaMemcpyDeviceToDevice);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! device access error (read C)\n");
return EXIT_FAILURE;
}
if (cudaFree(d_y) != cudaSuccess) {
fprintf(stderr, "!!!! memory free error (y)\n");
return EXIT_FAILURE;
}
/* Shutdown */
status = cublasDestroy(handle);
if (status != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "!!!! shutdown error (A)\n");
return EXIT_FAILURE;
}
return 0;
}
|
23f5fdda3252ac06ae045a8895df8a3f50961a90.hip | // !!! This is a file automatically generated by hipify!!!
/// LSU EE 7722 GPU Microarchitecture
//
/// Homework 1 - Spring 2015
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2015/hw01.pdf
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <new>
#include <hip/hip_runtime.h>
#include <assert.h>
#include "util.h"
#define N 16
#define M 16
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
struct App
{
// Number of input and output vectors, that is, the size of the
// input and output arrays.
//
int num_vecs;
Elt_Type matrix[M][N];
// Host pointers to the input and output arrays, and to a CPU-computed
// output array used for checking results.
//
Elt_Type *h_in, *h_out, *h_out_check;
//
// Note: h_in points to an array holding num_vecs N-element vectors,
// and so the total size of h_in is num_vects * N elements.
// GPU pointers to the input and output arrays.
//
Elt_Type *d_in, *d_out;
// GPU pointers to the input and output arrays, cast to float4s.
//
float4 *d_in_f4, *d_out_f4;
//
// The compiler can emit more efficient load and store instructions
// to float4 elements than to four consecutive floats.
//
// Note: These "_f4" pointers only work when Elt_Type is a float.
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
typedef void (*KPtr)(Elt_Type *dout, const Elt_Type *din);
extern "C" __global__ void
mxv_g_only(Elt_Type* __restrict__ dout, const Elt_Type* __restrict__ din)
{
// No local memory.
//
// In the inner loop use global memory accesses to access the input
// vector elements. Hope that the compiler recognizes the repeated
// accesses and so keeps each input vector element in a register
// rather than reading global memory M times per input element.
//
// The compiler will avoid the repeated reads if it is convinced
// that the input and output arrays don't overlap. For the NVIDIA
// compiler (CUDA 7.0) that seems to require declaring the array
// pointers with the __restrict__ attributes as kernel arguments.
//
// Note that dout and d_app.d_out hold the same address, as do din
// and d_app.d_in.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
// Operate on vector number h.
for ( int r=0; r<M; r++ )
{
Elt_Type elt = 0;
for ( int c=0; c<N; c++ ) elt += d_app.matrix[r][c] * din[ h * N + c ];
dout[ h * M + r ] = elt;
}
}
extern "C" __global__ void
mxv_i_lbuf()
{
// Local memory for input vector.
//
// Use local memory to buffer entire input vector, and write
// each output vector element as soon as it is computed.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
{
Elt_Type vin[N];
for ( int c=0; c<N; c++ ) vin[c] = d_app.d_in[ h * N + c ];
for ( int r=0; r<M; r++ )
{
Elt_Type elt = 0;
for ( int c=0; c<N; c++ ) elt += d_app.matrix[r][c] * vin[c];
d_app.d_out[ h * M + r ] = elt;
}
}
}
extern "C" __global__ void
mxv_o_lbuf()
{
// Local memory for output vector.
//
// Use local memory to buffer entire output vector. Right after each
// input vector element is read use it to partially compute each
// of the M output vector elements.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
{
// Operate on vector number h.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c++ )
{
const Elt_Type vin = d_app.d_in[ h * N + c ];
for ( int r=0; r<M; r++ ) vout[r] += d_app.matrix[r][c] * vin;
}
for ( int r=0; r<M; r++ ) d_app.d_out[ h * M + r ] = vout[ r ];
}
}
extern "C" __global__ void
mxv_o_per_thd()
{
// Assign one vector to M threads, each thread computes one element.
//
// This arrangement avoids the need for any local memory buffering,
  // and results in efficient global memory writes. Global memory reads
// are still inefficient.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid / M; // First vector number computed by this thread.
const int r = tid % M; // Vector element computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads / M;
for ( int h=start; h<stop; h += inc )
{
// Operate on vector number h, compute output vector element r.
Elt_Type vout = 0;
for ( int c=0; c<N; c++ )
vout += d_app.matrix[r][c] * d_app.d_in[ h * N + c ];
d_app.d_out[ h * M + r ] = vout;
}
}
extern "C" __global__ void
mxv_sh()
{
// Local memory for output vector, use shared memory to redistribute
// input- and output-vector elements so that global memory reads and
// writes are fully utilized.
// Chunk Size: Number of vector elements to redistribute at a time.
const int CS = 8;
const int num_threads = blockDim.x * gridDim.x;
// The vector number operated on by threadIdx.x 0 in the first iteration.
//
const int bl_start = blockIdx.x * blockDim.x;
const int stop = d_app.num_vecs;
const int inc = num_threads;
// Used to compute the vector element number to read or write from
// global memory.
//
const int thd_x_offset = threadIdx.x % CS;
// Used to compute the vector number to read or write from global memory.
//
const int thd_x_idx_st = threadIdx.x / CS;
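  //
  // Example (with CS = 8 as above and, say, blockDim.x = 128): threadIdx.x = 13
  // gives thd_x_offset = 5 and thd_x_idx_st = 1, so during redistribution this
  // thread transfers element offset 5 of vectors 1, 17, 33, ... (relative to hb).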
const int64_t BLOCK_SIZE = blockDim.x;
const int64_t MAX_BLOCK_SIZE = 1024;
// Shared memory used to redistribute vector elements.
//
__shared__ Elt_Type vxfer[MAX_BLOCK_SIZE][CS + 1];
for ( int hb = bl_start; hb<stop; hb += inc )
{
// Compute matrix-vector product for vector number hb + threadIdx.x.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c += CS )
{
// Read and redistribute input vector elements c, c+1,
// ... c+CS-1, then use those to compute part of the
// matrix-vector product.
// Read input vector elements sequentially and place them in
// shared memory.
//
// The entire g loop reads CS elements of each thread's
// input vector. Each iteration of the g loop reads CS
// elements from BLOCK_SIZE / CS vectors.
//
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
vxfer[idx][thd_x_offset] =
d_app.d_in[ hb * N + idx * N + c + thd_x_offset ];
}
// Copy the portion of the input vector just read to local
// memory (the vin array). We expect that the compiler will
// use registers for all values of vin.
//
__syncthreads();
Elt_Type vin[CS];
for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[threadIdx.x][cc];
// Perform the matrix-vector multiplication for the parts of
// the input vector just read.
//
for ( int r=0; r<M; r++ )
for ( int cc=0; cc<CS; cc++ )
if ( c+cc < N ) vout[r] += d_app.matrix[r][c+cc] * vin[cc];
}
// Use shared memory to redistribute the output vector elements to
// threads so that the write to global memory will be efficient.
//
for ( int r=0; r<M; r += CS )
{
__syncthreads();
for ( int rr=0; rr<CS; rr++ ) vxfer[threadIdx.x][rr] = vout[r+rr];
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
              // The if statement is needed if M is not a multiple of CS.
if ( thd_x_offset + r < M )
d_app.d_out[ hb * M + idx * M + r + thd_x_offset ] =
vxfer[idx][thd_x_offset];
}
}
}
}
extern "C" __global__ void
mxv_sh_ochunk()
{
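  // Note: the body below is currently identical to mxv_sh above, presumably
  // left as a starting point to be modified for the assignment.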
// Local memory for output vector, use shared memory to redistribute
// input- and output-vector elements so that global memory reads and
// writes are fully utilized.
// Chunk Size: Number of vector elements to redistribute at a time.
const int CS = 8;
const int num_threads = blockDim.x * gridDim.x;
// The vector number operated on by threadIdx.x 0 in the first iteration.
//
const int bl_start = blockIdx.x * blockDim.x;
const int stop = d_app.num_vecs;
const int inc = num_threads;
// Used to compute the vector element number to read or write from
// global memory.
//
const int thd_x_offset = threadIdx.x % CS;
// Used to compute the vector number to read or write from global memory.
//
const int thd_x_idx_st = threadIdx.x / CS;
const int64_t BLOCK_SIZE = blockDim.x;
const int64_t MAX_BLOCK_SIZE = 1024;
// Shared memory used to redistribute vector elements.
//
__shared__ Elt_Type vxfer[MAX_BLOCK_SIZE][CS + 1];
for ( int hb = bl_start; hb<stop; hb += inc )
{
// Compute matrix-vector product for vector number hb + threadIdx.x.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c += CS )
{
// Read and redistribute input vector elements c, c+1,
// ... c+CS-1, then use those to compute part of the
// matrix-vector product.
// Read input vector elements sequentially and place them in
// shared memory.
//
// The entire g loop reads CS elements of each thread's
// input vector. Each iteration of the g loop reads CS
// elements from BLOCK_SIZE / CS vectors.
//
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
vxfer[idx][thd_x_offset] =
d_app.d_in[ hb * N + idx * N + c + thd_x_offset ];
}
// Copy the portion of the input vector just read to local
// memory (the vin array). We expect that the compiler will
// use registers for all values of vin.
//
__syncthreads();
Elt_Type vin[CS];
for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[threadIdx.x][cc];
// Perform the matrix-vector multiplication for the parts of
// the input vector just read.
//
for ( int r=0; r<M; r++ )
for ( int cc=0; cc<CS; cc++ )
if ( c+cc < N ) vout[r] += d_app.matrix[r][c+cc] * vin[cc];
}
// Use shared memory to redistribute the output vector elements to
// threads so that the write to global memory will be efficient.
//
for ( int r=0; r<M; r += CS )
{
__syncthreads();
for ( int rr=0; rr<CS; rr++ ) vxfer[threadIdx.x][rr] = vout[r+rr];
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
              // The if statement is needed if M is not a multiple of CS.
if ( thd_x_offset + r < M )
d_app.d_out[ hb * M + idx * M + r + thd_x_offset ] =
vxfer[idx][thd_x_offset];
}
}
}
}
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
print_gpu_info();
// Choose GPU 0 because it's usually the better choice.
//
int dev = 0;
CE(hipSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
info.GET_INFO(mxv_g_only);
info.GET_INFO(mxv_i_lbuf);
info.GET_INFO(mxv_o_lbuf);
info.GET_INFO(mxv_o_per_thd);
info.GET_INFO(mxv_sh);
info.GET_INFO(mxv_sh_ochunk);
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
const bool debug = false;
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
// Examine argument 1, block count, default is number of MPs.
//
const int arg1_int =
argc < 2 ? info.cuda_prop.multiProcessorCount : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? info.cuda_prop.multiProcessorCount : abs(arg1_int);
// Examine argument 2, number of threads per block.
//
const int thd_per_block_arg = argc < 3 ? 1024 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
app.num_vecs = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) );
if ( num_threads <= 0 || app.num_vecs <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
const int in_size_elts = app.num_vecs * N;
const int in_size_bytes = in_size_elts * sizeof( app.h_in[0] );
const int out_size_elts = app.num_vecs * M;
const int out_size_bytes = out_size_elts * sizeof( app.h_out[0] );
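// Padding past the end of the input and output arrays so that whole-block
// reads and writes in the chunked kernels can safely overrun the last vector.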
const int overrun_size_elts = thd_per_block_goal * max(N,M);
const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_in = new Elt_Type[ in_size_elts ];
app.h_out = new Elt_Type[ out_size_elts ];
app.h_out_check = new Elt_Type[ out_size_elts ];
// Allocate storage for GPU copy of data.
//
CE( hipMalloc( &app.d_in, in_size_bytes + overrun_size_bytes ) );
app.d_in_f4 = (float4*) app.d_in;
CE( hipMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) );
app.d_out_f4 = (float4*) app.d_out;
printf("Matrix size: %d x %d. Vectors: %d. %d blocks of %d thds.\n",
N, M, app.num_vecs, num_blocks, thd_per_block_goal);
// Initialize input array.
//
for ( int i=0; i<app.num_vecs; i++ )
for ( int c=0; c<N; c++ )
app.h_in[ i * N + c ] = debug ? Elt_Type(c) : drand48();
// Initialize transformation matrix.
//
for ( int r=0; r<M; r++ )
for ( int c=0; c<N; c++ )
app.matrix[r][c] = debug ? r == c : drand48();
// Compute correct answer.
//
for ( int i=0; i<app.num_vecs; i++ )
for ( int r=0; r<M; r++ )
{
app.h_out_check[ i * M + r ] = 0;
for ( int c=0; c<N; c++ )
app.h_out_check[ i * M + r ] +=
app.h_in[ i * N + c ] * app.matrix[r][c];
}
const int64_t num_ops = int64_t(M) * N * app.num_vecs; // Multiply-adds.
// Amount of data in and out of GPU chip.
const int amt_data_bytes = in_size_bytes + out_size_bytes;
double elapsed_time_s = 86400; // Reassigned to minimum run time.
{
// Prepare events used for timing.
//
hipEvent_t gpu_start_ce, gpu_stop_ce;
CE(hipEventCreate(&gpu_start_ce));
CE(hipEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( hipMemcpy
( app.d_in, app.h_in, in_size_bytes, hipMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( hipMemcpyToSymbol
( d_app, &app, sizeof(app), 0, hipMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("Launching with %d blocks of up to %d threads. \n",
num_blocks, thd_per_block_goal);
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
hipFuncAttributes& cfa = info.ki[kernel].cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? wp_limit : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
// Zero the output array.
//
CE(hipMemset(app.d_out,0,out_size_bytes));
// Measure execution time starting "now", which is after the data
// has been sent to the GPU.
//
CE(hipEventRecord(gpu_start_ce,0));
// Launch Kernel
//
hipLaunchKernelGGL(KPtr(info.ki[kernel].func_ptr), dim3(num_blocks), dim3(thd_per_block), 0, 0,
app.d_out, app.d_in);
// Stop measuring execution time now, which is before the data is
// returned from the GPU.
//
CE(hipEventRecord(gpu_stop_ce,0));
CE(hipEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(hipEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s = cuda_time_ms * 0.001;
const double thpt_compute_gflops =
num_ops / this_elapsed_time_s * 1e-9;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const char* const stars = "********************************************************************************";
const int stars_len = 80;
const double comp_frac =
4e9 * thpt_compute_gflops / info.chip_sp_flops;
const int max_st_len = 52;
if ( wp_cnt == wp_start )
printf("Kernel %s:\n", info.ki[kernel].name);
printf("%2d wp %6.0f s %5.0f GF %5.0f GB/s %s\n",
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
&stars[stars_len-int(comp_frac*max_st_len)]
);
} else {
printf("K %-15s %2d wp %11.3f s %8.3f GFLOPS %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps);
}
elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s);
// Copy output array from GPU to CPU.
//
CE( hipMemcpy
( app.h_out, app.d_out, out_size_bytes, hipMemcpyDeviceToHost) );
int err_count = 0;
for ( int i=0; i<app.num_vecs; i++ )
for ( int r=0; r<M; r++ )
{
const int idx = i * M + r;
if ( fabs( app.h_out_check[idx] - app.h_out[idx] ) > 1e-5 )
{
err_count++;
if ( err_count < 5 )
printf("Error at vec %d elt %d: %.7f != %.7f (correct)\n",
i, r, app.h_out[idx], app.h_out_check[idx] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
}
}
}
}
| 23f5fdda3252ac06ae045a8895df8a3f50961a90.cu | /// LSU EE 7722 GPU Microarchitecture
//
/// Homework 1 - Spring 2015
//
// Assignment: http://www.ece.lsu.edu/koppel/gp/2015/hw01.pdf
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <new>
#include <cuda_runtime.h>
#include <assert.h>
#include "util.h"
#define N 16
#define M 16
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
struct App
{
// Number of input and output vectors, that is, the size of the
// input and output arrays.
//
int num_vecs;
Elt_Type matrix[M][N];
// Host pointers to the input and output arrays, and to a CPU-computed
// output array used for checking results.
//
Elt_Type *h_in, *h_out, *h_out_check;
//
// Note: h_in points to an array holding num_vecs N-element vectors,
// and so the total size of h_in is num_vects * N elements.
// GPU pointers to the input and output arrays.
//
Elt_Type *d_in, *d_out;
// GPU pointers to the input and output arrays, cast to float4s.
//
float4 *d_in_f4, *d_out_f4;
//
// The compiler can emit more efficient load and store instructions
// to float4 elements than to four consecutive floats.
//
// Note: These "_f4" pointers only work when Elt_Type is a float.
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
typedef void (*KPtr)(Elt_Type *dout, const Elt_Type *din);
extern "C" __global__ void
mxv_g_only(Elt_Type* __restrict__ dout, const Elt_Type* __restrict__ din)
{
// No local memory.
//
// In the inner loop use global memory accesses to access the input
// vector elements. Hope that the compiler recognizes the repeated
// accesses and so keeps each input vector element in a register
// rather than reading global memory M times per input element.
//
// The compiler will avoid the repeated reads if it is convinced
// that the input and output arrays don't overlap. For the NVIDIA
// compiler (CUDA 7.0) that seems to require declaring the array
// pointers with the __restrict__ attributes as kernel arguments.
//
// Note that dout and d_app.d_out hold the same address, as do din
// and d_app.d_in.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
// Operate on vector number h.
for ( int r=0; r<M; r++ )
{
Elt_Type elt = 0;
for ( int c=0; c<N; c++ ) elt += d_app.matrix[r][c] * din[ h * N + c ];
dout[ h * M + r ] = elt;
}
}
extern "C" __global__ void
mxv_i_lbuf()
{
// Local memory for input vector.
//
// Use local memory to buffer entire input vector, and write
// each output vector element as soon as it is computed.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
{
Elt_Type vin[N];
for ( int c=0; c<N; c++ ) vin[c] = d_app.d_in[ h * N + c ];
for ( int r=0; r<M; r++ )
{
Elt_Type elt = 0;
for ( int c=0; c<N; c++ ) elt += d_app.matrix[r][c] * vin[c];
d_app.d_out[ h * M + r ] = elt;
}
}
}
extern "C" __global__ void
mxv_o_lbuf()
{
// Local memory for output vector.
//
// Use local memory to buffer entire output vector. Right after each
// input vector element is read use it to partially compute each
// of the M output vector elements.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid; // First vector number computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads;
for ( int h=start; h<stop; h += inc )
{
// Operate on vector number h.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c++ )
{
const Elt_Type vin = d_app.d_in[ h * N + c ];
for ( int r=0; r<M; r++ ) vout[r] += d_app.matrix[r][c] * vin;
}
for ( int r=0; r<M; r++ ) d_app.d_out[ h * M + r ] = vout[ r ];
}
}
extern "C" __global__ void
mxv_o_per_thd()
{
// Assign one vector to M threads, each thread computes one element.
//
// This arrangement avoids the need for any local memory buffering,
// results in efficient global memory writes. Global memory reads
// are still inefficient.
const int tid = threadIdx.x + blockIdx.x * blockDim.x;
const int num_threads = blockDim.x * gridDim.x;
const int start = tid / M; // First vector number computed by this thread.
const int r = tid % M; // Vector element computed by this thread.
const int stop = d_app.num_vecs;
const int inc = num_threads / M;
for ( int h=start; h<stop; h += inc )
{
// Operate on vector number h, compute output vector element r.
Elt_Type vout = 0;
for ( int c=0; c<N; c++ )
vout += d_app.matrix[r][c] * d_app.d_in[ h * N + c ];
d_app.d_out[ h * M + r ] = vout;
}
}
extern "C" __global__ void
mxv_sh()
{
// Local memory for output vector, use shared memory to redistribute
// input- and output-vector elements so that global memory reads and
// writes are fully utilized.
// Chunk Size: Number of vector elements to redistribute at a time.
const int CS = 8;
const int num_threads = blockDim.x * gridDim.x;
// The vector number operated on by threadIdx.x 0 in the first iteration.
//
const int bl_start = blockIdx.x * blockDim.x;
const int stop = d_app.num_vecs;
const int inc = num_threads;
// Used to compute the vector element number to read or write from
// global memory.
//
const int thd_x_offset = threadIdx.x % CS;
// Used to compute the vector number to read or write from global memory.
//
const int thd_x_idx_st = threadIdx.x / CS;
const int64_t BLOCK_SIZE = blockDim.x;
const int64_t MAX_BLOCK_SIZE = 1024;
// Shared memory used to redistribute vector elements.
//
__shared__ Elt_Type vxfer[MAX_BLOCK_SIZE][CS + 1];
for ( int hb = bl_start; hb<stop; hb += inc )
{
// Compute matrix-vector product for vector number hb + threadIdx.x.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c += CS )
{
// Read and redistribute input vector elements c, c+1,
// ... c+CS-1, then use those to compute part of the
// matrix-vector product.
// Read input vector elements sequentially and place them in
// shared memory.
//
// The entire g loop reads CS elements of each thread's
// input vector. Each iteration of the g loop reads CS
// elements from BLOCK_SIZE / CS vectors.
//
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
vxfer[idx][thd_x_offset] =
d_app.d_in[ hb * N + idx * N + c + thd_x_offset ];
}
// Copy the portion of the input vector just read to local
// memory (the vin array). We expect that the compiler will
// use registers for all values of vin.
//
__syncthreads();
Elt_Type vin[CS];
for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[threadIdx.x][cc];
// Perform the matrix-vector multiplication for the parts of
// the input vector just read.
//
for ( int r=0; r<M; r++ )
for ( int cc=0; cc<CS; cc++ )
if ( c+cc < N ) vout[r] += d_app.matrix[r][c+cc] * vin[cc];
}
// Use shared memory to redistribute the output vector elements to
// threads so that the write to global memory will be efficient.
//
for ( int r=0; r<M; r += CS )
{
__syncthreads();
for ( int rr=0; rr<CS; rr++ ) vxfer[threadIdx.x][rr] = vout[r+rr];
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
// The if statement is needed if M is not a multiple of CS.
if ( thd_x_offset + r < M )
d_app.d_out[ hb * M + idx * M + r + thd_x_offset ] =
vxfer[idx][thd_x_offset];
}
}
}
}
extern "C" __global__ void
mxv_sh_ochunk()
{
// Local memory for output vector, use shared memory to redistribute
// input- and output-vector elements so that global memory reads and
// writes are fully utilized.
// Chunk Size: Number of vector elements to redistribute at a time.
const int CS = 8;
const int num_threads = blockDim.x * gridDim.x;
// The vector number operated on by threadIdx.x 0 in the first iteration.
//
const int bl_start = blockIdx.x * blockDim.x;
const int stop = d_app.num_vecs;
const int inc = num_threads;
// Used to compute the vector element number to read or write from
// global memory.
//
const int thd_x_offset = threadIdx.x % CS;
// Used to compute the vector number to read or write from global memory.
//
const int thd_x_idx_st = threadIdx.x / CS;
const int64_t BLOCK_SIZE = blockDim.x;
const int64_t MAX_BLOCK_SIZE = 1024;
// Shared memory used to redistribute vector elements.
//
__shared__ Elt_Type vxfer[MAX_BLOCK_SIZE][CS + 1];
for ( int hb = bl_start; hb<stop; hb += inc )
{
// Compute matrix-vector product for vector number hb + threadIdx.x.
Elt_Type vout[M];
for ( int r=0; r<M; r++ ) vout[r] = 0;
for ( int c=0; c<N; c += CS )
{
// Read and redistribute input vector elements c, c+1,
// ... c+CS-1, then use those to compute part of the
// matrix-vector product.
// Read input vector elements sequentially and place them in
// shared memory.
//
// The entire g loop reads CS elements of each thread's
// input vector. Each iteration of the g loop reads CS
// elements from BLOCK_SIZE / CS vectors.
//
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
vxfer[idx][thd_x_offset] =
d_app.d_in[ hb * N + idx * N + c + thd_x_offset ];
}
// Copy the portion of the input vector just read to local
// memory (the vin array). We expect that the compiler will
// use registers for all values of vin.
//
__syncthreads();
Elt_Type vin[CS];
for ( int cc=0; cc<CS; cc++ ) vin[cc] = vxfer[threadIdx.x][cc];
// Perform the matrix-vector multiplication for the parts of
// the input vector just read.
//
for ( int r=0; r<M; r++ )
for ( int cc=0; cc<CS; cc++ )
if ( c+cc < N ) vout[r] += d_app.matrix[r][c+cc] * vin[cc];
}
// Use shared memory to redistribute the output vector elements to
// threads so that the write to global memory will be efficient.
//
for ( int r=0; r<M; r += CS )
{
__syncthreads();
for ( int rr=0; rr<CS; rr++ ) vxfer[threadIdx.x][rr] = vout[r+rr];
__syncthreads();
for ( int g=0; g<CS; g++ )
{
const int idx = g * BLOCK_SIZE / CS + thd_x_idx_st;
// The if statement is needed if M is not a multiple of CS.
if ( thd_x_offset + r < M )
d_app.d_out[ hb * M + idx * M + r + thd_x_offset ] =
vxfer[idx][thd_x_offset];
}
}
}
}
GPU_Info
print_gpu_and_kernel_info()
{
GPU_Info info;
print_gpu_info();
// Choose GPU 0 because it's usually the better choice.
//
int dev = 0;
CE(cudaSetDevice(dev));
printf("Using GPU %d\n",dev);
info.get_gpu_info(dev);
info.GET_INFO(mxv_g_only);
info.GET_INFO(mxv_i_lbuf);
info.GET_INFO(mxv_o_lbuf);
info.GET_INFO(mxv_o_per_thd);
info.GET_INFO(mxv_sh);
info.GET_INFO(mxv_sh_ochunk);
// Print information about kernel.
//
printf("\nCUDA Kernel Resource Usage:\n");
for ( int i=0; i<info.num_kernels; i++ )
{
printf("For %s:\n", info.ki[i].name);
printf(" %6zd shared, %zd const, %zd loc, %d regs; "
"%d max threads per block.\n",
info.ki[i].cfa.sharedSizeBytes,
info.ki[i].cfa.constSizeBytes,
info.ki[i].cfa.localSizeBytes,
info.ki[i].cfa.numRegs,
info.ki[i].cfa.maxThreadsPerBlock);
}
return info;
}
int
main(int argc, char **argv)
{
const bool debug = false;
// Get info about GPU and each kernel.
//
GPU_Info info = print_gpu_and_kernel_info();
// Examine argument 1, block count, default is number of MPs.
//
const int arg1_int =
argc < 2 ? info.cuda_prop.multiProcessorCount : atoi(argv[1]);
const int num_blocks =
arg1_int == 0 ? info.cuda_prop.multiProcessorCount : abs(arg1_int);
// Examine argument 2, number of threads per block.
//
const int thd_per_block_arg = argc < 3 ? 1024 : atoi(argv[2]);
const int thd_per_block_goal =
thd_per_block_arg == 0 ? 1024 : thd_per_block_arg;
const int num_threads = num_blocks * thd_per_block_goal;
const bool vary_warps = thd_per_block_arg == 0;
// Examine argument 3, size of array in MiB. Fractional values okay.
//
app.num_vecs = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) );
if ( num_threads <= 0 || app.num_vecs <= 0 )
{
printf("Usage: %s [ NUM_CUDA_BLOCKS ] [THD_PER_BLOCK] "
"[DATA_SIZE_MiB]\n",
argv[0]);
exit(1);
}
const int in_size_elts = app.num_vecs * N;
const int in_size_bytes = in_size_elts * sizeof( app.h_in[0] );
const int out_size_elts = app.num_vecs * M;
const int out_size_bytes = out_size_elts * sizeof( app.h_out[0] );
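// Padding past the end of the input and output arrays so that whole-block
// reads and writes in the chunked kernels can safely overrun the last vector.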
const int overrun_size_elts = thd_per_block_goal * max(N,M);
const int overrun_size_bytes = overrun_size_elts * sizeof( app.h_out[0] );
// Allocate storage for CPU copy of data.
//
app.h_in = new Elt_Type[ in_size_elts ];
app.h_out = new Elt_Type[ out_size_elts ];
app.h_out_check = new Elt_Type[ out_size_elts ];
// Allocate storage for GPU copy of data.
//
CE( cudaMalloc( &app.d_in, in_size_bytes + overrun_size_bytes ) );
app.d_in_f4 = (float4*) app.d_in;
CE( cudaMalloc( &app.d_out, out_size_bytes + overrun_size_bytes ) );
app.d_out_f4 = (float4*) app.d_out;
printf("Matrix size: %d x %d. Vectors: %d. %d blocks of %d thds.\n",
N, M, app.num_vecs, num_blocks, thd_per_block_goal);
// Initialize input array.
//
for ( int i=0; i<app.num_vecs; i++ )
for ( int c=0; c<N; c++ )
app.h_in[ i * N + c ] = debug ? Elt_Type(c) : drand48();
// Initialize transformation matrix.
//
for ( int r=0; r<M; r++ )
for ( int c=0; c<N; c++ )
app.matrix[r][c] = debug ? r == c : drand48();
// Compute correct answer.
//
for ( int i=0; i<app.num_vecs; i++ )
for ( int r=0; r<M; r++ )
{
app.h_out_check[ i * M + r ] = 0;
for ( int c=0; c<N; c++ )
app.h_out_check[ i * M + r ] +=
app.h_in[ i * N + c ] * app.matrix[r][c];
}
const int64_t num_ops = int64_t(M) * N * app.num_vecs; // Multiply-adds.
// Amount of data in and out of GPU chip.
const int amt_data_bytes = in_size_bytes + out_size_bytes;
double elapsed_time_s = 86400; // Reassigned to minimum run time.
{
// Prepare events used for timing.
//
cudaEvent_t gpu_start_ce, gpu_stop_ce;
CE(cudaEventCreate(&gpu_start_ce));
CE(cudaEventCreate(&gpu_stop_ce));
// Copy input array from CPU to GPU.
//
CE( cudaMemcpy
( app.d_in, app.h_in, in_size_bytes, cudaMemcpyHostToDevice ) );
// Copy App structure to GPU.
//
CE( cudaMemcpyToSymbol
( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) );
// Launch kernel multiple times and keep track of the best time.
printf("Launching with %d blocks of up to %d threads. \n",
num_blocks, thd_per_block_goal);
for ( int kernel = 0; kernel < info.num_kernels; kernel++ )
{
cudaFuncAttributes& cfa = info.ki[kernel].cfa;
const int wp_limit = cfa.maxThreadsPerBlock >> 5;
const int thd_limit = wp_limit << 5;
const int thd_per_block_no_vary = min(thd_per_block_goal,thd_limit);
const int wp_start = 4;
const int wp_stop = vary_warps ? wp_limit : wp_start;
const int wp_inc = 4;
for ( int wp_cnt = wp_start; wp_cnt <= wp_stop; wp_cnt += wp_inc )
{
const int thd_per_block =
vary_warps ? wp_cnt << 5 : thd_per_block_no_vary;
// Zero the output array.
//
CE(cudaMemset(app.d_out,0,out_size_bytes));
// Measure execution time starting "now", which is after the data
// has been sent to the GPU.
//
CE(cudaEventRecord(gpu_start_ce,0));
// Launch Kernel
//
KPtr(info.ki[kernel].func_ptr)<<<num_blocks,thd_per_block>>>
(app.d_out,app.d_in);
// Stop measuring execution time now, which is before the data is
// returned from the GPU.
//
CE(cudaEventRecord(gpu_stop_ce,0));
CE(cudaEventSynchronize(gpu_stop_ce));
float cuda_time_ms = -1.1;
CE(cudaEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
const double this_elapsed_time_s = cuda_time_ms * 0.001;
const double thpt_compute_gflops =
num_ops / this_elapsed_time_s * 1e-9;
const double thpt_data_gbps =
amt_data_bytes / this_elapsed_time_s * 1e-9;
if ( vary_warps )
{
const char* const stars = "********************************************************************************";
const int stars_len = 80;
const double comp_frac =
4e9 * thpt_compute_gflops / info.chip_sp_flops;
const int max_st_len = 52;
if ( wp_cnt == wp_start )
printf("Kernel %s:\n", info.ki[kernel].name);
printf("%2d wp %6.0f µs %5.0f GF %5.0f GB/s %s\n",
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps,
&stars[stars_len-int(comp_frac*max_st_len)]
);
} else {
printf("K %-15s %2d wp %11.3f µs %8.3f GFLOPS %8.3f GB/s\n",
info.ki[kernel].name,
(thd_per_block + 31 ) >> 5,
this_elapsed_time_s * 1e6,
thpt_compute_gflops, thpt_data_gbps);
}
elapsed_time_s = min(this_elapsed_time_s,elapsed_time_s);
// Copy output array from GPU to CPU.
//
CE( cudaMemcpy
( app.h_out, app.d_out, out_size_bytes, cudaMemcpyDeviceToHost) );
int err_count = 0;
for ( int i=0; i<app.num_vecs; i++ )
for ( int r=0; r<M; r++ )
{
const int idx = i * M + r;
if ( fabs( app.h_out_check[idx] - app.h_out[idx] ) > 1e-5 )
{
err_count++;
if ( err_count < 5 )
printf("Error at vec %d elt %d: %.7f != %.7f (correct)\n",
i, r, app.h_out[idx], app.h_out_check[idx] );
}
}
if ( err_count )
printf("Total errors %d\n", err_count);
}
}
}
}
|
fcf314a5b80dcd437c8cd0686631f284ce29fbc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addRows(double *matrix, int *d_i){
int i=*d_i;
int n=blockDim.x+i;
int id= n*(blockIdx.x+i+1) + threadIdx.x+i;
__shared__ double multiplier;
if(threadIdx.x==0){
multiplier=matrix[n*(blockIdx.x+1+i)+i]/matrix[n*i+i];
}
__syncthreads();
matrix[id]-=matrix[n*i+threadIdx.x+i]*multiplier;
} | fcf314a5b80dcd437c8cd0686631f284ce29fbc9.cu | #include "includes.h"
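// One forward-elimination step of Gaussian elimination: each block updates a
// row below pivot row i, and each thread updates one column at or beyond i,
// subtracting the pivot row scaled by matrix[row][i] / matrix[i][i].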
__global__ void addRows(double *matrix, int *d_i){
int i=*d_i;
int n=blockDim.x+i;
int id= n*(blockIdx.x+i+1) + threadIdx.x+i;
__shared__ double multiplier;
if(threadIdx.x==0){
multiplier=matrix[n*(blockIdx.x+1+i)+i]/matrix[n*i+i];
}
__syncthreads();
matrix[id]-=matrix[n*i+threadIdx.x+i]*multiplier;
} |
5fcecc9debfc4def27c74d89e88e34a1b7404364.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "activation_func.h"
#include "backward.h"
#include "dropout.h"
extern const int nThreads;
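// Element-wise gradient of the binary cross-entropy cost with respect to the
// last-layer activations: dA = -Y/A + (1 - Y)/(1 - A).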
__global__
void PartialCostPartialAL(const int n, const int *Y,
const float *A, float *dA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
dA[i] = -static_cast<float>(Y[i]) / A[i] +
(1.0f - static_cast<float>(Y[i])) / (1.0f - A[i]);
}
void Backward(const int n_layers, const int *layer_dims, const int batch_size,
const float *X, const int *Y, const float *W, const float *B,
const float *Z, const float *A, float *dW, float *dB, float *dZ,
float *dA, const int *W_index, const int *B_index, const int *Z_index,
const float *oneVec, const float *layer_drop, const float *D,
const float lambda) {
int m, n, k, lda, ldb, ldc, n_elements;
float alpha, beta;
int W_curLoc, B_curLoc, Z_curLoc, Z_nextLoc;
hipblasHandle_t handle;
hipblasCreate(&handle);
int l = n_layers;
Z_curLoc = Z_index[l-1];
n_elements = Z_index[l] - Z_index[l-1];
int nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dA of the last layer
hipLaunchKernelGGL(( PartialCostPartialAL), dim3(nBlocks), dim3(nThreads), 0, 0, n_elements, Y,
A+Z_curLoc, dA+Z_curLoc);
// Backpropagation from the last layer L to the second layer
for (l = n_layers; 1 < l; --l) {
W_curLoc = W_index[l-1];
B_curLoc = B_index[l-1];
Z_curLoc = Z_index[l-1];
Z_nextLoc = Z_index[l-2];
n_elements = Z_index[l] - Z_index[l-1];
nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dZ
if (l == n_layers)
hipLaunchKernelGGL(( SigmoidBackward), dim3(nBlocks), dim3(nThreads), 0, 0, n_elements, dA+Z_curLoc,
A+Z_curLoc, dZ+Z_curLoc);
else
hipLaunchKernelGGL(( ReluBackward), dim3(nBlocks), dim3(nThreads), 0, 0, n_elements, dA+Z_curLoc,
Z+Z_curLoc, dZ+Z_curLoc);
// Compute dB
m = layer_dims[l];
n = batch_size;
lda = m;
alpha = 1.0f / batch_size;
beta = 0.0f;
hipblasSgemv(handle, HIPBLAS_OP_N, m, n, &alpha, dZ+Z_curLoc, lda,
oneVec, 1, &beta, dB+B_curLoc, 1);
// Compute dW
m = layer_dims[l];
n = layer_dims[l-1];
k = batch_size;
lda = m;
ldb = n;
ldc = m;
if (lambda == 0.0f) {
// dW without L2 regularization
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, A+Z_nextLoc, ldb, &beta, dW+W_curLoc, ldc);
} else {
// dW with L2 regularization
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, A+Z_nextLoc, ldb, &beta, dW+W_curLoc, ldc);
alpha = 1.0f;
beta = lambda / batch_size;
lda = m;
ldb = m;
ldc = lda;
hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, &alpha,
dW+W_curLoc, lda, &beta, W+W_curLoc, ldb, dW+W_curLoc, ldc);
}
// Compute dA
m = layer_dims[l-1];
n = batch_size;
k = layer_dims[l];
lda = k;
ldb = k;
ldc = m;
alpha = 1.0f;
beta = 0.0f;
hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, k, &alpha,
W+W_curLoc, lda, dZ+Z_curLoc, ldb, &beta, dA+Z_nextLoc, ldc);
if (D != nullptr && layer_drop[l-1] < 1.0f) {
// Modify dA if dropout is applied
float keep_prob = layer_drop[l-1];
nBlocks = (m*n + nThreads - 1) / nThreads;
hipLaunchKernelGGL(( ApplyDropout), dim3(nBlocks), dim3(nThreads), 0, 0, m*n, D+Z_nextLoc, dA+Z_nextLoc, keep_prob);
}
}
// Backpropagation for the first layer
l = 1;
W_curLoc = W_index[l-1];
B_curLoc = B_index[l-1];
Z_curLoc = Z_index[l-1];
n_elements = Z_index[l] - Z_index[l-1];
nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dZ
hipLaunchKernelGGL(( ReluBackward), dim3(nBlocks), dim3(nThreads), 0, 0, n_elements, dA+Z_curLoc,
A+Z_curLoc, dZ+Z_curLoc);
// Compute dB
m = layer_dims[l];
n = batch_size;
lda = m;
alpha = 1.0f / batch_size;
beta = 0.0f;
hipblasSgemv(handle, HIPBLAS_OP_N, m, n, &alpha, dZ+Z_curLoc, lda,
oneVec, 1, &beta, dB+B_curLoc, 1);
// Compute dW. Use X instead of A
m = layer_dims[l];
n = layer_dims[l-1];
k = batch_size;
lda = m;
ldb = n;
ldc = m;
if (lambda == 0.0f) {
// dW without L2 regularization
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, X, ldb, &beta, dW+W_curLoc, ldc);
} else {
// dW with L2 regularization
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, X, ldb, &beta, dW+W_curLoc, ldc);
alpha = 1.0f;
beta = lambda / batch_size;
lda = m;
ldb = m;
ldc = lda;
hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, &alpha,
dW+W_curLoc, lda, &beta, W+W_curLoc, ldb, dW+W_curLoc, ldc);
}
hipblasDestroy(handle);
}
| 5fcecc9debfc4def27c74d89e88e34a1b7404364.cu | #include "cublas_v2.h"
#include "activation_func.h"
#include "backward.h"
#include "dropout.h"
extern const int nThreads;
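// Element-wise gradient of the binary cross-entropy cost with respect to the
// last-layer activations: dA = -Y/A + (1 - Y)/(1 - A).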
__global__
void PartialCostPartialAL(const int n, const int *Y,
const float *A, float *dA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
dA[i] = -static_cast<float>(Y[i]) / A[i] +
(1.0f - static_cast<float>(Y[i])) / (1.0f - A[i]);
}
void Backward(const int n_layers, const int *layer_dims, const int batch_size,
const float *X, const int *Y, const float *W, const float *B,
const float *Z, const float *A, float *dW, float *dB, float *dZ,
float *dA, const int *W_index, const int *B_index, const int *Z_index,
const float *oneVec, const float *layer_drop, const float *D,
const float lambda) {
int m, n, k, lda, ldb, ldc, n_elements;
float alpha, beta;
int W_curLoc, B_curLoc, Z_curLoc, Z_nextLoc;
cublasHandle_t handle;
cublasCreate(&handle);
int l = n_layers;
Z_curLoc = Z_index[l-1];
n_elements = Z_index[l] - Z_index[l-1];
int nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dA of the last layer
PartialCostPartialAL<<<nBlocks, nThreads>>>(n_elements, Y,
A+Z_curLoc, dA+Z_curLoc);
// Backpropagation from the last layer L to the second layer
for (l = n_layers; 1 < l; --l) {
W_curLoc = W_index[l-1];
B_curLoc = B_index[l-1];
Z_curLoc = Z_index[l-1];
Z_nextLoc = Z_index[l-2];
n_elements = Z_index[l] - Z_index[l-1];
nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dZ
if (l == n_layers)
SigmoidBackward<<<nBlocks, nThreads>>>(n_elements, dA+Z_curLoc,
A+Z_curLoc, dZ+Z_curLoc);
else
ReluBackward<<<nBlocks, nThreads>>>(n_elements, dA+Z_curLoc,
Z+Z_curLoc, dZ+Z_curLoc);
// Compute dB
m = layer_dims[l];
n = batch_size;
lda = m;
alpha = 1.0f / batch_size;
beta = 0.0f;
cublasSgemv(handle, CUBLAS_OP_N, m, n, &alpha, dZ+Z_curLoc, lda,
oneVec, 1, &beta, dB+B_curLoc, 1);
// Compute dW
m = layer_dims[l];
n = layer_dims[l-1];
k = batch_size;
lda = m;
ldb = n;
ldc = m;
if (lambda == 0.0f) {
// dW without L2 regularization
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, A+Z_nextLoc, ldb, &beta, dW+W_curLoc, ldc);
} else {
// dW with L2 regularization
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, A+Z_nextLoc, ldb, &beta, dW+W_curLoc, ldc);
alpha = 1.0f;
beta = lambda / batch_size;
lda = m;
ldb = m;
ldc = lda;
cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, &alpha,
dW+W_curLoc, lda, &beta, W+W_curLoc, ldb, dW+W_curLoc, ldc);
}
// Compute dA
m = layer_dims[l-1];
n = batch_size;
k = layer_dims[l];
lda = k;
ldb = k;
ldc = m;
alpha = 1.0f;
beta = 0.0f;
cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k, &alpha,
W+W_curLoc, lda, dZ+Z_curLoc, ldb, &beta, dA+Z_nextLoc, ldc);
if (D != nullptr && layer_drop[l-1] < 1.0f) {
// Modify dA if dropout is applied
float keep_prob = layer_drop[l-1];
nBlocks = (m*n + nThreads - 1) / nThreads;
ApplyDropout<<<nBlocks, nThreads>>>(m*n, D+Z_nextLoc, dA+Z_nextLoc, keep_prob);
}
}
// Backpropagation for the first layer
l = 1;
W_curLoc = W_index[l-1];
B_curLoc = B_index[l-1];
Z_curLoc = Z_index[l-1];
n_elements = Z_index[l] - Z_index[l-1];
nBlocks = (n_elements + nThreads - 1) / nThreads;
// Compute dZ
ReluBackward<<<nBlocks, nThreads>>>(n_elements, dA+Z_curLoc,
A+Z_curLoc, dZ+Z_curLoc);
// Compute dB
m = layer_dims[l];
n = batch_size;
lda = m;
alpha = 1.0f / batch_size;
beta = 0.0f;
cublasSgemv(handle, CUBLAS_OP_N, m, n, &alpha, dZ+Z_curLoc, lda,
oneVec, 1, &beta, dB+B_curLoc, 1);
// Compute dW. Use X instead of A
m = layer_dims[l];
n = layer_dims[l-1];
k = batch_size;
lda = m;
ldb = n;
ldc = m;
if (lambda == 0.0f) {
// dW without L2 regularization
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, X, ldb, &beta, dW+W_curLoc, ldc);
} else {
// dW with L2 regularization
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, m, n, k, &alpha,
dZ+Z_curLoc, lda, X, ldb, &beta, dW+W_curLoc, ldc);
alpha = 1.0f;
beta = lambda / batch_size;
lda = m;
ldb = m;
ldc = lda;
cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, &alpha,
dW+W_curLoc, lda, &beta, W+W_curLoc, ldb, dW+W_curLoc, ldc);
}
cublasDestroy(handle);
}
|
e29774f6fe0d4942ff5dfa301510173fb01bf529.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpuReductionSum.h"
using namespace std;
using namespace std::chrono;
#define DIM 1024
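// Block-level tree reduction: each block loads up to DIM elements into shared
// memory, halves the number of active threads each step, and thread 0 writes
// the block's partial sum to vecOut[blockIdx.x].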
__global__ void reductionDouble(double* vect, double* vecOut, int size)
{
__shared__ double block[DIM];
unsigned int globalIndex = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int i = threadIdx.x;
if (globalIndex < size)
block[i] = vect[globalIndex];
else
block[i] = 0;
__syncthreads();
for (unsigned int j = blockDim.x / 2; j > 0; j >>= 1)
{
if (i < j)
block[i] += block[i + j];
__syncthreads();
}
if (i == 0)
vecOut[blockIdx.x] = block[0];
}
void sumGPUDouble(double* vector, double* vectorOutput, int vec_size)
{
int numInputElements = vec_size;
int numOutputElements;
int threadsPerBlock = DIM;
double* dev_vec;
double* dev_vecOut;
hipSetDevice(0);
hipMalloc((double**)&dev_vec, vec_size * sizeof(double));
hipMalloc((double**)&dev_vecOut, vec_size * sizeof(double));
hipMemcpy(dev_vec, vector, vec_size * sizeof(double), hipMemcpyHostToDevice);
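// Reduce repeatedly, shrinking the element count by a factor of DIM per pass
// and ping-ponging between dev_vec and dev_vecOut until one sum remains.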
do
{
numOutputElements = numInputElements / (threadsPerBlock);
if (numInputElements % (threadsPerBlock))
numOutputElements++;
reductionDouble << < numOutputElements, threadsPerBlock >> > (dev_vec, dev_vecOut, numInputElements);
numInputElements = numOutputElements;
if (numOutputElements > 1)
reductionDouble << < numOutputElements, threadsPerBlock >> > (dev_vecOut, dev_vec, numInputElements);
} while (numOutputElements > 1);
hipDeviceSynchronize();
hipMemcpy(vector, dev_vec, vec_size * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(vectorOutput, dev_vecOut, vec_size * sizeof(double), hipMemcpyDeviceToHost);
hipFree(dev_vec);
hipFree(dev_vecOut);
}
| e29774f6fe0d4942ff5dfa301510173fb01bf529.cu | #include "gpuReductionSum.h"
using namespace std;
using namespace std::chrono;
#define DIM 1024
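// Block-level tree reduction: each block loads up to DIM elements into shared
// memory, halves the number of active threads each step, and thread 0 writes
// the block's partial sum to vecOut[blockIdx.x].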
__global__ void reductionDouble(double* vect, double* vecOut, int size)
{
__shared__ double block[DIM];
unsigned int globalIndex = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int i = threadIdx.x;
if (globalIndex < size)
block[i] = vect[globalIndex];
else
block[i] = 0;
__syncthreads();
for (unsigned int j = blockDim.x / 2; j > 0; j >>= 1)
{
if (i < j)
block[i] += block[i + j];
__syncthreads();
}
if (i == 0)
vecOut[blockIdx.x] = block[0];
}
void sumGPUDouble(double* vector, double* vectorOutput, int vec_size)
{
int numInputElements = vec_size;
int numOutputElements;
int threadsPerBlock = DIM;
double* dev_vec;
double* dev_vecOut;
cudaSetDevice(0);
cudaMalloc((double**)&dev_vec, vec_size * sizeof(double));
cudaMalloc((double**)&dev_vecOut, vec_size * sizeof(double));
cudaMemcpy(dev_vec, vector, vec_size * sizeof(double), cudaMemcpyHostToDevice);
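// Reduce repeatedly, shrinking the element count by a factor of DIM per pass
// and ping-ponging between dev_vec and dev_vecOut until one sum remains.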
do
{
numOutputElements = numInputElements / (threadsPerBlock);
if (numInputElements % (threadsPerBlock))
numOutputElements++;
reductionDouble << < numOutputElements, threadsPerBlock >> > (dev_vec, dev_vecOut, numInputElements);
numInputElements = numOutputElements;
if (numOutputElements > 1)
reductionDouble << < numOutputElements, threadsPerBlock >> > (dev_vecOut, dev_vec, numInputElements);
} while (numOutputElements > 1);
cudaDeviceSynchronize();
cudaMemcpy(vector, dev_vec, vec_size * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(vectorOutput, dev_vecOut, vec_size * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(dev_vec);
cudaFree(dev_vecOut);
}
|
c35f11558ed2d84675be78dcdaf3746d66fd0d0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// extern THCState *state;
THCState *state = at::globalContext().lazyInitCUDA();
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
} | c35f11558ed2d84675be78dcdaf3746d66fd0d0f.cu | #include <vector>
#include "cuda/dcn_v2_im2col_cuda.h"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// extern THCState *state;
THCState *state = at::globalContext().lazyInitCUDA();
// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
// [batch gemm]
// https://github.com/pytorch/pytorch/blob/master/aten/src/THC/generic/THCTensorMathBlas.cu
__global__ void createBatchGemmBuffer(const float **input_b, float **output_b,
float **columns_b, const float **ones_b,
const float **weight_b, const float **bias_b,
float *input, float *output,
float *columns, float *ones,
float *weight, float *bias,
const int input_stride, const int output_stride,
const int columns_stride, const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
input_b[idx] = input + idx * input_stride;
output_b[idx] = output + idx * output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
bias_b[idx] = bias;
}
}
at::Tensor
dcn_v2_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_h,
const int pad_w,
const int dilation_h,
const int dilation_w,
const int deformable_group)
{
using scalar_t = float;
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({batch, height_out, width_out}, input.options());
auto columns = at::empty({batch, channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
// prepare for batch-wise computing, which is significantly faster than instance-wise computing
// when batch size is large.
// launch batch threads
int matrices_size = batch * sizeof(float *);
auto input_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto output_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto columns_b = static_cast<float **>(THCudaMalloc(state, matrices_size));
auto ones_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto weight_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
auto bias_b = static_cast<const float **>(THCudaMalloc(state, matrices_size));
const int block = 128;
const int grid = (batch + block - 1) / block;
createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
input_b, output_b,
columns_b, ones_b,
weight_b, bias_b,
input.data<scalar_t>(),
output.data<scalar_t>(),
columns.data<scalar_t>(),
ones.data<scalar_t>(),
weight.data<scalar_t>(),
bias.data<scalar_t>(),
channels * width * height,
channels_out * width_out * height_out,
channels * kernel_h * kernel_w * height_out * width_out,
height_out * width_out,
batch);
long m_ = channels_out;
long n_ = height_out * width_out;
long k_ = 1;
THCudaBlas_SgemmBatched(state,
't',
'n',
n_,
m_,
k_,
1.0f,
ones_b, k_,
bias_b, k_,
0.0f,
output_b, n_,
batch);
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input.data<scalar_t>(),
offset.data<scalar_t>(),
mask.data<scalar_t>(),
batch, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
long m = channels_out;
long n = height_out * width_out;
long k = channels * kernel_h * kernel_w;
THCudaBlas_SgemmBatched(state,
'n',
'n',
n,
m,
k,
1.0f,
(const float **)columns_b, n,
weight_b, k,
1.0f,
output_b, n,
batch);
THCudaFree(state, input_b);
THCudaFree(state, output_b);
THCudaFree(state, columns_b);
THCudaFree(state, ones_b);
THCudaFree(state, weight_b);
THCudaFree(state, bias_b);
return output;
}
__global__ void createBatchGemmBufferBackward(
float **grad_output_b,
float **columns_b,
float **ones_b,
float **weight_b,
float **grad_weight_b,
float **grad_bias_b,
float *grad_output,
float *columns,
float *ones,
float *weight,
float *grad_weight,
float *grad_bias,
const int grad_output_stride,
const int columns_stride,
const int ones_stride,
const int num_batches)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_batches)
{
grad_output_b[idx] = grad_output + idx * grad_output_stride;
columns_b[idx] = columns + idx * columns_stride;
ones_b[idx] = ones + idx * ones_stride;
// share weights and bias within a Mini-Batch
weight_b[idx] = weight;
grad_weight_b[idx] = grad_weight;
grad_bias_b[idx] = grad_bias;
}
}
std::vector<at::Tensor> dcn_v2_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &mask,
const at::Tensor &grad_output,
int kernel_h, int kernel_w,
int stride_h, int stride_w,
int pad_h, int pad_w,
int dilation_h, int dilation_w,
int deformable_group)
{
THArgCheck(input.is_contiguous(), 1, "input tensor has to be contiguous");
THArgCheck(weight.is_contiguous(), 2, "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
AT_ASSERTM(kernel_h_ == kernel_h && kernel_w_ == kernel_w,
"Input shape and kernel shape wont match: (%d x %d vs %d x %d).", kernel_h_, kernel_w, kernel_h_, kernel_w_);
AT_ASSERTM(channels == channels_kernel,
"Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel);
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto ones = at::ones({height_out, width_out}, input.options());
auto columns = at::empty({channels * kernel_h * kernel_w, 1 * height_out * width_out}, input.options());
auto output = at::empty({batch, channels_out, height_out, width_out}, input.options());
auto grad_input = at::zeros_like(input);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto grad_offset = at::zeros_like(offset);
auto grad_mask = at::zeros_like(mask);
using scalar_t = float;
for (int b = 0; b < batch; b++)
{
auto input_n = input.select(0, b);
auto offset_n = offset.select(0, b);
auto mask_n = mask.select(0, b);
auto grad_output_n = grad_output.select(0, b);
auto grad_input_n = grad_input.select(0, b);
auto grad_offset_n = grad_offset.select(0, b);
auto grad_mask_n = grad_mask.select(0, b);
long m = channels * kernel_h * kernel_w;
long n = height_out * width_out;
long k = channels_out;
THCudaBlas_Sgemm(state, 'n', 't', n, m, k, 1.0f,
grad_output_n.data<scalar_t>(), n,
weight.data<scalar_t>(), m, 0.0f,
columns.data<scalar_t>(), n);
// gradient w.r.t. input coordinate data
modulated_deformable_col2im_coord_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_offset_n.data<scalar_t>(),
grad_mask_n.data<scalar_t>());
// gradient w.r.t. input data
modulated_deformable_col2im_cuda(THCState_getCurrentStream(state),
columns.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
grad_input_n.data<scalar_t>());
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
modulated_deformable_im2col_cuda(THCState_getCurrentStream(state),
input_n.data<scalar_t>(),
offset_n.data<scalar_t>(),
mask_n.data<scalar_t>(),
1, channels, height, width,
height_out, width_out, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
long m_ = channels_out;
long n_ = channels * kernel_h * kernel_w;
long k_ = height_out * width_out;
THCudaBlas_Sgemm(state, 't', 'n', n_, m_, k_, 1.0f,
columns.data<scalar_t>(), k_,
grad_output_n.data<scalar_t>(), k_, 1.0f,
grad_weight.data<scalar_t>(), n_);
// gradient w.r.t. bias
// long m_ = channels_out;
// long k__ = height_out * width_out;
THCudaBlas_Sgemv(state,
't',
k_, m_, 1.0f,
grad_output_n.data<scalar_t>(), k_,
ones.data<scalar_t>(), 1, 1.0f,
grad_bias.data<scalar_t>(), 1);
}
return {
grad_input, grad_offset, grad_mask, grad_weight, grad_bias
};
} |
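// Self-contained illustrative sketch (hypothetical names, not part of either file around
// it): the height_out / width_out values used by the backward pass above come from the
// standard convolution output-size formula, reproduced here as a small host helper.
#include <cstdio>

static inline int conv_output_size(int in_size, int pad, int dilation, int kernel, int stride) {
    // mirrors: (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1
    return (in_size + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
    // e.g. a 64x64 feature map with a 3x3 kernel, pad 1, stride 1, dilation 1 stays 64x64
    printf("height_out = %d\n", conv_output_size(64, 1, 1, 3, 1));
    // stride 2 roughly halves it: 32
    printf("height_out = %d\n", conv_output_size(64, 1, 1, 3, 2));
    return 0;
}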
7366fdc494fc331466dd77a8d7874ecd3e35d098.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void onehot_kernel(const float *input, float *output, size_t last_dim, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
float offset = (float)(ind % last_dim);
float writein = 0;
if (offset == input[ind / last_dim]) {
writein = 1;
} else {
writein = 0;
}
output[ind] = writein;
}
int DLGpuOneHot(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL) {
assert (input->ndim == output->ndim - 1);
size_t insize = 1;
for (int i = 0; i < input->ndim; ++i) {
insize *= input->shape[i];
}
size_t last_dim = output->shape[input->ndim];
size_t size = insize * last_dim;
const float *input_data = (const float*)input->data;
float *output_data = (float*)output->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
hipLaunchKernelGGL(( onehot_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle, input_data, output_data, last_dim, size);
else
hipLaunchKernelGGL(( onehot_kernel), dim3(blocks), dim3(threads), 0, 0, input_data, output_data, last_dim, size);
return 0;
} | 7366fdc494fc331466dd77a8d7874ecd3e35d098.cu | #include "gpu_runtime.h"
__global__ void onehot_kernel(const float *input, float *output, size_t last_dim, size_t size) {
size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= size) return;
float offset = (float)(ind % last_dim);
float writein = 0;
if (offset == input[ind / last_dim]) {
writein = 1;
} else {
writein = 0;
}
output[ind] = writein;
}
int DLGpuOneHot(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL) {
assert (input->ndim == output->ndim - 1);
size_t insize = 1;
for (int i = 0; i < input->ndim; ++i) {
insize *= input->shape[i];
}
size_t last_dim = output->shape[input->ndim];
size_t size = insize * last_dim;
const float *input_data = (const float*)input->data;
float *output_data = (float*)output->data;
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle)
onehot_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(input_data, output_data, last_dim, size);
else
onehot_kernel<<<blocks, threads>>>(input_data, output_data, last_dim, size);
return 0;
} |
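// Self-contained illustrative sketch (plain CUDA, hypothetical names, separate from the
// file pair above): the one-hot kernel maps a flat output index to
// (sample = ind / last_dim, class slot = ind % last_dim) and writes 1 only where the slot
// equals that sample's label. The same idea without the DLArrayHandle wrapper:
#include <cstdio>
#include <cuda_runtime.h>

__global__ void onehot_plain(const float *labels, float *out, size_t last_dim, size_t size) {
    size_t ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (ind >= size) return;
    // same index arithmetic as the kernel above
    out[ind] = ((size_t)labels[ind / last_dim] == ind % last_dim) ? 1.0f : 0.0f;
}

int main() {
    const size_t n = 4, num_classes = 3;
    float h_labels[n] = {0, 2, 1, 2};
    float h_out[n * num_classes];
    float *d_labels, *d_out;
    cudaMalloc(&d_labels, n * sizeof(float));
    cudaMalloc(&d_out, n * num_classes * sizeof(float));
    cudaMemcpy(d_labels, h_labels, n * sizeof(float), cudaMemcpyHostToDevice);
    onehot_plain<<<1, 256>>>(d_labels, d_out, num_classes, n * num_classes);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    // expected rows: 1 0 0 / 0 0 1 / 0 1 0 / 0 0 1
    for (size_t i = 0; i < n; ++i) {
        for (size_t c = 0; c < num_classes; ++c) printf("%.0f ", h_out[i * num_classes + c]);
        printf("\n");
    }
    cudaFree(d_labels);
    cudaFree(d_out);
    return 0;
}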
17bd19d5546664d9f68ef235ed91be2b61ae7d21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
/*! DropBlock2d <T = float32, Device = CUDA> */
template <typename T>
__global__ void _DropBlock2d_NCHW(
const int count,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const uint32_t thresh,
const uint32_t* seed,
int* mask) {
CUDA_1D_KERNEL_LOOP(idx, count) {
if (seed[idx] < thresh) {
const int x = idx % seed_w;
const int y = (idx / seed_w) % seed_h;
const int c = (idx / seed_w / seed_h) % C;
const int n = (idx / seed_w / seed_h) / C;
const int nc = (n * C + c) * H;
for (int i = 0; i < block_size; ++i) {
const int nch = (nc + y + i) * W;
for (int j = 0; j < block_size; ++j)
atomicAnd(&mask[nch + x + j], 0);
}
}
}
}
template <typename T>
__global__ void _DropBlock2d_NHWC(
const int count,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const uint32_t thresh,
const uint32_t* seed,
int* mask) {
CUDA_1D_KERNEL_LOOP(idx, count) {
if (seed[idx] < thresh) {
const int x = idx % seed_w;
const int y = (idx / seed_w) % seed_h;
const int c = (idx / seed_w / seed_h) % C;
const int n = (idx / seed_w / seed_h) / C;
for (int i = 0; i < block_size; ++i) {
const int nh = (n * H + y + i) * W;
for (int j = 0; j < block_size; ++j)
atomicAnd(&mask[(nh + x + j) * C + c], 0);
}
}
}
}
template <> void DropBlock2d<CUDAContext>(
const int N,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const float gamma,
const string& data_format,
uint32_t* seed,
int* mask,
CUDAContext* ctx) {
const int count = N * C * seed_h * seed_w;
math::RandomUniform<uint32_t, CUDAContext>(
count, 0.f, float(UINT_MAX), seed, ctx);
auto thresh = static_cast<uint32_t>(UINT_MAX * gamma);
if (data_format == "NCHW") {
_DropBlock2d_NCHW<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, C, H, W, seed_h, seed_w,
block_size, thresh, seed, mask);
} else if(data_format == "NHWC") {
_DropBlock2d_NHWC<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, C, H, W, seed_h, seed_w,
block_size, thresh, seed, mask);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | 17bd19d5546664d9f68ef235ed91be2b61ae7d21.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
#include "utils/math_functions.h"
namespace dragon {
namespace kernel {
/*! DropBlock2d <T = float32, Device = CUDA> */
template <typename T>
__global__ void _DropBlock2d_NCHW(
const int count,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const uint32_t thresh,
const uint32_t* seed,
int* mask) {
CUDA_1D_KERNEL_LOOP(idx, count) {
if (seed[idx] < thresh) {
const int x = idx % seed_w;
const int y = (idx / seed_w) % seed_h;
const int c = (idx / seed_w / seed_h) % C;
const int n = (idx / seed_w / seed_h) / C;
const int nc = (n * C + c) * H;
for (int i = 0; i < block_size; ++i) {
const int nch = (nc + y + i) * W;
for (int j = 0; j < block_size; ++j)
atomicAnd(&mask[nch + x + j], 0);
}
}
}
}
template <typename T>
__global__ void _DropBlock2d_NHWC(
const int count,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const uint32_t thresh,
const uint32_t* seed,
int* mask) {
CUDA_1D_KERNEL_LOOP(idx, count) {
if (seed[idx] < thresh) {
const int x = idx % seed_w;
const int y = (idx / seed_w) % seed_h;
const int c = (idx / seed_w / seed_h) % C;
const int n = (idx / seed_w / seed_h) / C;
for (int i = 0; i < block_size; ++i) {
const int nh = (n * H + y + i) * W;
for (int j = 0; j < block_size; ++j)
atomicAnd(&mask[(nh + x + j) * C + c], 0);
}
}
}
}
template <> void DropBlock2d<CUDAContext>(
const int N,
const int C,
const int H,
const int W,
const int seed_h,
const int seed_w,
const int block_size,
const float gamma,
const string& data_format,
uint32_t* seed,
int* mask,
CUDAContext* ctx) {
const int count = N * C * seed_h * seed_w;
math::RandomUniform<uint32_t, CUDAContext>(
count, 0.f, float(UINT_MAX), seed, ctx);
auto thresh = static_cast<uint32_t>(UINT_MAX * gamma);
if (data_format == "NCHW") {
_DropBlock2d_NCHW<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, C, H, W, seed_h, seed_w,
block_size, thresh, seed, mask);
} else if(data_format == "NHWC") {
_DropBlock2d_NHWC<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, C, H, W, seed_h, seed_w,
block_size, thresh, seed, mask);
} else LOG(FATAL) << "Unknown data format: " << data_format;
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
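// Self-contained illustrative sketch (plain C++, hypothetical names, separate from the
// file pair above): what the NCHW kernel does, written as a single-threaded reference.
// Every seed position whose uniform draw is below thresh clears a block_size x block_size
// window of the mask; thresh = UINT_MAX * gamma, so each seed position fires with
// probability roughly gamma.
#include <cstdint>
#include <cstdio>
#include <vector>

void drop_block2d_nchw_reference(int N, int C, int H, int W,
                                 int seed_h, int seed_w, int block_size, uint32_t thresh,
                                 const std::vector<uint32_t> &seed,
                                 std::vector<int> &mask /* N*C*H*W, preset to 1 */) {
    const int count = N * C * seed_h * seed_w;
    for (int idx = 0; idx < count; ++idx) {
        if (seed[idx] >= thresh) continue;
        const int x = idx % seed_w;
        const int y = (idx / seed_w) % seed_h;
        const int c = (idx / seed_w / seed_h) % C;
        const int n = (idx / seed_w / seed_h) / C;
        for (int i = 0; i < block_size; ++i)
            for (int j = 0; j < block_size; ++j)
                mask[((n * C + c) * H + y + i) * W + x + j] = 0;
    }
}

int main() {
    // one 6x6 map, 3x3 blocks, seed grid 4x4 (= H - block_size + 1), one forced drop
    const int N = 1, C = 1, H = 6, W = 6, seed_h = 4, seed_w = 4, block_size = 3;
    std::vector<uint32_t> seed(N * C * seed_h * seed_w, UINT32_MAX);
    seed[5] = 0;  // seed position (y=1, x=1) fires
    std::vector<int> mask(N * C * H * W, 1);
    drop_block2d_nchw_reference(N, C, H, W, seed_h, seed_w, block_size, 1u, seed, mask);
    // prints a 3x3 hole of zeros starting at row 1, col 1
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) printf("%d ", mask[y * W + x]);
        printf("\n");
    }
    return 0;
}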
b915f100ce4d2ab350d15894d11d370a6ca65605.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
void naiveCPU(float *src, float *dst, int M, int N)
{
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
dst[i*N+j] = src[j*N+i];
}
}
}
/* naive GPU */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
int i = blockIdx.y * blockDim.y + threadIdx.y; // row
int j = blockIdx.x * blockDim.x + threadIdx.x; // col
int index_in = i * cols + j;
int index_out = j * rows + i;
_b[index_out] = _a[index_in];
}
/* shared memory GPU */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* shared memory, loop unrolled (tile padded to avoid bank conflicts) */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads();
x = blockIdx.y * P + threadIdx.x;
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* shared memory without bank conflict */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
hipMalloc(&d_matrix, matSize);
hipMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host */
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
hipEvent_t tStart, tEnd;
float duration;
hipEventCreate(&tStart);
hipEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd); // waits for record to terminate
hipEventElapsedTime(&duration, tStart, tEnd);
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
hipLaunchKernelGGL(( matrixTranspose), dim3(gridDim),dim3(blockDim), 0, 0, d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
hipLaunchKernelGGL(( matrixTransposeShared), dim3(gridDim),dim3(blockDim), 0, 0, d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
hipLaunchKernelGGL(( matrixTransposeSharedwBC), dim3(gridDim),dim3(blockDim), 0, 0, d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose */
dim3 blockDimUnroll(P, 8, 1);
hipMemcpy(d_matrix, h_matrix, matSize, hipMemcpyHostToDevice);
hipMemset(d_transpose, 0, matSize);
hipEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
hipLaunchKernelGGL(( matrixTransposeUnrolled), dim3(gridDim),dim3(blockDimUnroll), 0, 0, d_matrix, d_transpose, N, M);
}
hipEventRecord(tEnd, 0);
hipEventSynchronize(tEnd);
hipEventElapsedTime(&duration, tStart, tEnd);
hipMemcpy(h_transpose, d_transpose, matSize, hipMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
}
| b915f100ce4d2ab350d15894d11d370a6ca65605.cu | /* matrix transpose program */
#include <stdio.h>
const int P = 32;
/* naive CPU */
void naiveCPU(float *src, float *dst, int M, int N)
{
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
dst[i*N+j] = src[j*N+i];
}
}
}
/* naive GPU */
__global__ void matrixTranspose(float *_a, float *_b, int cols,int rows)
{
int i = blockIdx.y * blockDim.y + threadIdx.y; // row
int j = blockIdx.x * blockDim.x + threadIdx.x; // col
int index_in = i * cols + j;
int index_out = j * rows + i;
_b[index_out] = _a[index_in];
}
/* shared memory GPU */
__global__ void matrixTransposeShared(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
/* shared memory, loop unrolled (tile padded to avoid bank conflicts) */
__global__ void matrixTransposeUnrolled(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int x = blockIdx.x * P + threadIdx.x;
int y = blockIdx.y * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < rows && y+k < cols)
mat[threadIdx.y+k][threadIdx.x] = _a[(y+k)*rows + x];
}
__syncthreads();
x = blockIdx.y * P + threadIdx.x;
y = blockIdx.x * P + threadIdx.y;
#pragma unroll
for (int k = 0; k < P; k += 8) {
if (x < cols && y+k < rows)
_b[(y+k)*cols + x] = mat[threadIdx.x][threadIdx.y+k];
}
}
/* loop unrolled */
__global__ void matrixTransposeSharedwBC(float *_a, float *_b, int cols, int rows)
{
__shared__ float mat[P][P+1];
int bx = blockIdx.x * blockDim.x;
int by = blockIdx.y * blockDim.y;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i < rows && j < cols)
mat[threadIdx.x][threadIdx.y] = _a[i*cols + j];
__syncthreads();
if (tj < cols && ti < rows)
_b[ti*rows+tj] = mat[threadIdx.y][threadIdx.x];
}
int main(int argc, char **argv)
{
/* N*M matrix, parallelism is P */
const int N = 1024;
const int M = 1024;
const int matSize = N * M * sizeof(float);
dim3 gridDim(N/P, M/P, 1);
dim3 blockDim(P , P, 1);
/* configuration of GPU */
printf("===================\n");
printf("Matrix: %d * %d\n", N, M);
printf("Grid: %d * %d * %d\n", gridDim.x, gridDim.y, gridDim.z);
printf("Block: %d * %d * %d\n", blockDim.x, blockDim.y, blockDim.z);
printf("===================\n");
/* allocate memory for matrix in host */
float *h_matrix = (float *) malloc(matSize);
float *h_transpose = (float *) malloc(matSize);
/* allocate memory for matrix in device */
float *d_matrix, *d_transpose;
cudaMalloc(&d_matrix, matSize);
cudaMalloc(&d_transpose, matSize);
/* randomly generate a matrix in host */
for (int i = 0; i < N; ++i) {
for (int j = 0; j < M; ++j) {
h_matrix[i*N+j] = (float)rand() / (float)(RAND_MAX) * 100.0;
}
}
/* utility for recording start and finish time */
cudaEvent_t tStart, tEnd;
float duration;
cudaEventCreate(&tStart);
cudaEventCreate(&tEnd);
const int nIterations = 100;
/* 1. naive CPU transpose */
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
naiveCPU(h_matrix, h_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd); // waits for record to terminate
cudaEventElapsedTime(&duration, tStart, tEnd);
printf("\nNaive CPU: %f\n", duration / nIterations);
/* 2. naive GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTranspose<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nNaive GPU: %f\n", duration / nIterations);
/* 3. shared memory GPU transpose */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeShared<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nShared GPU: %f\n", duration / nIterations);
/* 4. shared memory GPU transpose without bank conflict */
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeSharedwBC<<<gridDim,blockDim>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nSharedwBC GPU: %f\n", duration / nIterations);
duration = 0;
/* 5. unrolled GPU transpose */
dim3 blockDimUnroll(P, 8, 1);
cudaMemcpy(d_matrix, h_matrix, matSize, cudaMemcpyHostToDevice);
cudaMemset(d_transpose, 0, matSize);
cudaEventRecord(tStart, 0);
for (int i = 0; i < nIterations; ++i) {
matrixTransposeUnrolled<<<gridDim,blockDimUnroll>>>(d_matrix, d_transpose, N, M);
}
cudaEventRecord(tEnd, 0);
cudaEventSynchronize(tEnd);
cudaEventElapsedTime(&duration, tStart, tEnd);
cudaMemcpy(h_transpose, d_transpose, matSize, cudaMemcpyDeviceToHost);
printf("\nUnrolled GPU: %f\n", duration / nIterations);
return 0;
}
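// Illustrative add-on (hypothetical names, not part of the benchmark above): the program
// reports kernel times but never validates the GPU result or converts time to bandwidth.
// Two small host helpers that could be called at the end of main() above:
#include <cmath>
#include <cstdio>

// true if dst really is the transpose of the N x N matrix src
bool verifyTranspose(const float *src, const float *dst, int N) {
    for (int i = 0; i < N; ++i)
        for (int j = 0; j < N; ++j)
            if (fabsf(dst[j * N + i] - src[i * N + j]) > 1e-6f) return false;
    return true;
}

// effective bandwidth in GB/s: every element is read once and written once per transpose
double effectiveBandwidthGBs(int N, int M, float msPerIteration) {
    double bytes = 2.0 * (double)N * (double)M * sizeof(float);
    return bytes / (msPerIteration * 1e-3) / 1e9;
}

// e.g.:  printf("correct: %d, %.1f GB/s\n",
//               verifyTranspose(h_matrix, h_transpose, N),
//               effectiveBandwidthGBs(N, M, duration / nIterations));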
|
50dfc40fcd48032e401e7b479f99d100efc75b32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
} | 50dfc40fcd48032e401e7b479f99d100efc75b32.cu | #include "includes.h"
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] / b[i];
}
} |
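// Self-contained illustrative sketch (plain CUDA, hypothetical names, separate from the
// file pair above): kDivide is a grid-stride loop, so any launch configuration covers all
// numEls elements -- each thread advances by blockDim.x * gridDim.x. The kernel is
// repeated here only so the snippet compiles on its own.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kDivideExample(const float *a, const float *b, float *dest, unsigned int numEls) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < numEls; i += numThreads) {
        dest[i] = a[i] / b[i];
    }
}

int main() {
    const unsigned int n = 1u << 20;
    float *a, *b, *dest;
    cudaMallocManaged(&a, n * sizeof(float));
    cudaMallocManaged(&b, n * sizeof(float));
    cudaMallocManaged(&dest, n * sizeof(float));
    for (unsigned int i = 0; i < n; ++i) { a[i] = (float)i; b[i] = 2.0f; }
    // a deliberately small, fixed grid still processes every element thanks to the stride loop
    kDivideExample<<<64, 256>>>(a, b, dest, n);
    cudaDeviceSynchronize();
    printf("dest[12345] = %f\n", dest[12345]);  // expect 6172.5
    cudaFree(a); cudaFree(b); cudaFree(dest);
    return 0;
}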
cc14e415cd2455e554d11ca9cde6bdd8fa061d86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/loss/w_gd_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/solver.hpp"
namespace caffe {
template <typename Dtype>
static int qcompare(const void * a, const void * b)
{
return *(Dtype*)a - *(Dtype*)b;
}
template <typename Dtype>
static int compare(Dtype a, Dtype b)
{
return a < b;
}
template <typename Dtype>
static __global__ void Dloss_forward_kernel(int ncount, const Dtype *in, const Dtype *mask, Dtype *count, Dtype *loss_g, Dtype *loss_d)
{
CUDA_KERNEL_LOOP(i, ncount)
{
if (mask[i] > Dtype(0.5))
{
loss_g[i] = -in[i];
count[i] = 1;
}
else
{
loss_g[i] = 0;
count[i] = 0;
}
if (mask[i+ncount] > Dtype(0.5))
{
loss_d[i] = -in[i+ncount];
count[i+ncount] = 1;
}
else
{
loss_d[i] = 0;
count[i+ncount] = 0;
}
}
}
template <typename Dtype>
static __global__ void Gloss_forward_kernel(int ncount, const Dtype *in, Dtype *loss_g)
{
CUDA_KERNEL_LOOP(i, ncount)
{
loss_g[i] = -in[i];
}
}
template <typename Dtype>
static __global__ void Dloss_backward_kernel(int ncount, const Dtype* mask, Dtype *count, Dtype *diff_in)
{
CUDA_KERNEL_LOOP(i, ncount)
{
if (mask[i] > Dtype(0.5))
{
diff_in[i] = 1;
count[i] = 1;
}
else
{
diff_in[i] = 0;
count[i] = 0;
}
if (mask[i+ncount] > Dtype(0.5))
{
diff_in[i+ncount] = -1;
count[i+ncount] = 1;
}
else
{
diff_in[i+ncount] = 0;
count[i+ncount] = 0;
}
}
}
template <typename Dtype>
static __global__ void Gloss_backward_kernel(int ncount, Dtype *diff_in)
{
CUDA_KERNEL_LOOP(i, ncount)
{
diff_in[i] = -1;
}
}
template <typename Dtype>
void WGdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
if (Caffe::gan_type() == "train_dnet")
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CHECK_EQ(bottom.size(),1);
CHECK_EQ(num%2,0);
CHECK_EQ(channels,1);
caffe_gpu_set(mask_.count(),Dtype(1),mask_.mutable_gpu_data());
//caffe_gpu_rng_uniform(mask_.count(),Dtype(0),Dtype(1), mask_.mutable_gpu_data());
#if 0
caffe_copy(bottom[0]->count(),bottom[0]->gpu_data(),mask_.mutable_gpu_data());
Dtype count;
std::sort(mask_.mutable_cpu_data(), mask_.mutable_cpu_data() + mask_.count()/2,compare<Dtype>);
std::sort(mask_.mutable_cpu_data()+ mask_.count()/2,mask_.mutable_cpu_data() + mask_.count() ,compare<Dtype>);
int index = mask_.count()/2 * 0.5;
Dtype threshold_fake = mask_.cpu_data()[index];
Dtype threshold_true = mask_.cpu_data()[mask_.count()/2 + index];
//LOG(INFO)<<"-threshold_fake = "<<-threshold_fake;
//LOG(INFO)<<"-threshold_true = "<<-threshold_true;
for (int i=0;i<bottom[0]->count()/2;i++)
{
if (-bottom[0]->cpu_data()[i] < -threshold_fake)
mask_.mutable_cpu_data()[i] = 0.6;
else
mask_.mutable_cpu_data()[i] = 0.4;
if (-bottom[0]->cpu_data()[i+bottom[0]->count()/2] > -threshold_true)
mask_.mutable_cpu_data()[i+bottom[0]->count()/2] = 0.6;
else
mask_.mutable_cpu_data()[i+bottom[0]->count()/2] = 0.4;
}
#endif
hipLaunchKernelGGL(( Dloss_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*height*width, bottom[0]->gpu_data(),mask_.gpu_data(),count_.mutable_gpu_data(),loss_g_.mutable_gpu_data(),loss_d_.mutable_gpu_data());
caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_g = top[0]->cpu_data()[0];
Dtype count_g;
caffe_gpu_asum(count_.count()/2,count_.gpu_data(),&count_g);
caffe_gpu_sum(loss_d_.count(),loss_d_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_d = top[0]->cpu_data()[0];
Dtype count_d;
caffe_gpu_asum(count_.count()/2,count_.gpu_data()+count_.count()/2,&count_d);
top[0]->mutable_cpu_data()[0] = loss_d/max(count_d,Dtype(1))- loss_g/max(count_g,Dtype(1));
}
else
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CHECK_EQ(bottom.size(),1);
CHECK_EQ(channels,1);
hipLaunchKernelGGL(( Gloss_forward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num*height*width,bottom[0]->gpu_data(),loss_g_.mutable_gpu_data());
caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_g = top[0]->cpu_data()[0];
top[0]->mutable_cpu_data()[0] = loss_g / Dtype(num*channels*height*width);
}
if (Solver<Dtype>::iter() % 100 == 0 && Caffe::gan_type() == "train_dnet")
LOG(INFO)<<"d-loss "<<top[0]->cpu_data()[0];
}
template <typename Dtype>
void WGdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
if (Caffe::second_pass() == false)
{
if (Caffe::gan_type() == "train_dnet")
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
hipLaunchKernelGGL(( Dloss_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num/2*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num/2*height*width, mask_.gpu_data(),count_.mutable_gpu_data(),bottom[0]->mutable_gpu_diff());
Dtype count_g;
caffe_gpu_asum(count_.count()/2,count_.gpu_data(),&count_g);
Dtype loss_weights_ = top[0]->cpu_diff()[0] / max(count_g,Dtype(1));
caffe_gpu_scal(bottom[0]->count()/2,loss_weights_,bottom[0]->mutable_gpu_diff());
Dtype count_d;
      caffe_gpu_asum(count_.count()/2,count_.gpu_data()+count_.count()/2,&count_d);
loss_weights_ = top[0]->cpu_diff()[0] / max(count_d,Dtype(1));
caffe_gpu_scal(bottom[0]->count()/2,loss_weights_,bottom[0]->mutable_gpu_diff()+bottom[0]->count()/2);
}
else
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
hipLaunchKernelGGL(( Gloss_backward_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num*height*width,bottom[0]->mutable_gpu_diff());
Dtype loss_weights_ = top[0]->cpu_diff()[0] / (num*1*height*width);
caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff());
}
}
else
{
caffe_gpu_set(bottom[0]->count(),Dtype(0),bottom[0]->mutable_gpu_diff());
}
}
template <typename Dtype>
void WGdLossLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(WGdLossLayer);
} // namespace caffe
| cc14e415cd2455e554d11ca9cde6bdd8fa061d86.cu |
#include <vector>
#include "caffe/layers/loss/w_gd_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/solver.hpp"
namespace caffe {
template <typename Dtype>
static int qcompare(const void * a, const void * b)
{
return *(Dtype*)a - *(Dtype*)b;
}
template <typename Dtype>
static int compare(Dtype a, Dtype b)
{
return a < b;
}
template <typename Dtype>
static __global__ void Dloss_forward_kernel(int ncount, const Dtype *in, const Dtype *mask, Dtype *count, Dtype *loss_g, Dtype *loss_d)
{
CUDA_KERNEL_LOOP(i, ncount)
{
if (mask[i] > Dtype(0.5))
{
loss_g[i] = -in[i];
count[i] = 1;
}
else
{
loss_g[i] = 0;
count[i] = 0;
}
if (mask[i+ncount] > Dtype(0.5))
{
loss_d[i] = -in[i+ncount];
count[i+ncount] = 1;
}
else
{
loss_d[i] = 0;
count[i+ncount] = 0;
}
}
}
template <typename Dtype>
static __global__ void Gloss_forward_kernel(int ncount, const Dtype *in, Dtype *loss_g)
{
CUDA_KERNEL_LOOP(i, ncount)
{
loss_g[i] = -in[i];
}
}
template <typename Dtype>
static __global__ void Dloss_backward_kernel(int ncount, const Dtype* mask, Dtype *count, Dtype *diff_in)
{
CUDA_KERNEL_LOOP(i, ncount)
{
if (mask[i] > Dtype(0.5))
{
diff_in[i] = 1;
count[i] = 1;
}
else
{
diff_in[i] = 0;
count[i] = 0;
}
if (mask[i+ncount] > Dtype(0.5))
{
diff_in[i+ncount] = -1;
count[i+ncount] = 1;
}
else
{
diff_in[i+ncount] = 0;
count[i+ncount] = 0;
}
}
}
template <typename Dtype>
static __global__ void Gloss_backward_kernel(int ncount, Dtype *diff_in)
{
CUDA_KERNEL_LOOP(i, ncount)
{
diff_in[i] = -1;
}
}
template <typename Dtype>
void WGdLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
if (Caffe::gan_type() == "train_dnet")
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CHECK_EQ(bottom.size(),1);
CHECK_EQ(num%2,0);
CHECK_EQ(channels,1);
caffe_gpu_set(mask_.count(),Dtype(1),mask_.mutable_gpu_data());
//caffe_gpu_rng_uniform(mask_.count(),Dtype(0),Dtype(1), mask_.mutable_gpu_data());
#if 0
caffe_copy(bottom[0]->count(),bottom[0]->gpu_data(),mask_.mutable_gpu_data());
Dtype count;
std::sort(mask_.mutable_cpu_data(), mask_.mutable_cpu_data() + mask_.count()/2,compare<Dtype>);
std::sort(mask_.mutable_cpu_data()+ mask_.count()/2,mask_.mutable_cpu_data() + mask_.count() ,compare<Dtype>);
int index = mask_.count()/2 * 0.5;
Dtype threshold_fake = mask_.cpu_data()[index];
Dtype threshold_true = mask_.cpu_data()[mask_.count()/2 + index];
//LOG(INFO)<<"-threshold_fake = "<<-threshold_fake;
//LOG(INFO)<<"-threshold_true = "<<-threshold_true;
for (int i=0;i<bottom[0]->count()/2;i++)
{
if (-bottom[0]->cpu_data()[i] < -threshold_fake)
mask_.mutable_cpu_data()[i] = 0.6;
else
mask_.mutable_cpu_data()[i] = 0.4;
if (-bottom[0]->cpu_data()[i+bottom[0]->count()/2] > -threshold_true)
mask_.mutable_cpu_data()[i+bottom[0]->count()/2] = 0.6;
else
mask_.mutable_cpu_data()[i+bottom[0]->count()/2] = 0.4;
}
#endif
Dloss_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*height*width, bottom[0]->gpu_data(),mask_.gpu_data(),count_.mutable_gpu_data(),loss_g_.mutable_gpu_data(),loss_d_.mutable_gpu_data());
caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_g = top[0]->cpu_data()[0];
Dtype count_g;
caffe_gpu_asum(count_.count()/2,count_.gpu_data(),&count_g);
caffe_gpu_sum(loss_d_.count(),loss_d_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_d = top[0]->cpu_data()[0];
Dtype count_d;
caffe_gpu_asum(count_.count()/2,count_.gpu_data()+count_.count()/2,&count_d);
top[0]->mutable_cpu_data()[0] = loss_d/max(count_d,Dtype(1))- loss_g/max(count_g,Dtype(1));
}
else
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
CHECK_EQ(bottom.size(),1);
CHECK_EQ(channels,1);
Gloss_forward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num*height*width,bottom[0]->gpu_data(),loss_g_.mutable_gpu_data());
caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data(),top[0]->mutable_gpu_data());
Dtype loss_g = top[0]->cpu_data()[0];
top[0]->mutable_cpu_data()[0] = loss_g / Dtype(num*channels*height*width);
}
if (Solver<Dtype>::iter() % 100 == 0 && Caffe::gan_type() == "train_dnet")
LOG(INFO)<<"d-loss "<<top[0]->cpu_data()[0];
}
template <typename Dtype>
void WGdLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
if (Caffe::second_pass() == false)
{
if (Caffe::gan_type() == "train_dnet")
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
Dloss_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num/2*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num/2*height*width, mask_.gpu_data(),count_.mutable_gpu_data(),bottom[0]->mutable_gpu_diff());
Dtype count_g;
caffe_gpu_asum(count_.count()/2,count_.gpu_data(),&count_g);
Dtype loss_weights_ = top[0]->cpu_diff()[0] / max(count_g,Dtype(1));
caffe_gpu_scal(bottom[0]->count()/2,loss_weights_,bottom[0]->mutable_gpu_diff());
Dtype count_d;
      caffe_gpu_asum(count_.count()/2,count_.gpu_data()+count_.count()/2,&count_d);
loss_weights_ = top[0]->cpu_diff()[0] / max(count_d,Dtype(1));
caffe_gpu_scal(bottom[0]->count()/2,loss_weights_,bottom[0]->mutable_gpu_diff()+bottom[0]->count()/2);
}
else
{
int num = bottom[0]->num();
int channels = bottom[0]->channels();
int height = bottom[0]->height();
int width = bottom[0]->width();
Gloss_backward_kernel<Dtype><<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>>
(num*height*width,bottom[0]->mutable_gpu_diff());
Dtype loss_weights_ = top[0]->cpu_diff()[0] / (num*1*height*width);
caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff());
}
}
else
{
caffe_gpu_set(bottom[0]->count(),Dtype(0),bottom[0]->mutable_gpu_diff());
}
}
template <typename Dtype>
void WGdLossLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(WGdLossLayer);
} // namespace caffe
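// Illustrative reference (hypothetical names, separate from the layer above): with every
// mask entry set to 1, the discriminator-phase forward above reduces to the Wasserstein
// critic estimate mean(first half of bottom) - mean(second half); the +1 / -1 gradients in
// the backward kernel suggest the first half holds generated (fake) scores and the second
// half real scores. A CPU version of that reduction:
#include <cstdio>
#include <vector>

double w_critic_loss(const std::vector<double> &fake_scores,
                     const std::vector<double> &real_scores) {
    double mean_fake = 0.0, mean_real = 0.0;
    for (double s : fake_scores) mean_fake += s;
    for (double s : real_scores) mean_real += s;
    mean_fake /= fake_scores.size();
    mean_real /= real_scores.size();
    return mean_fake - mean_real;  // what top[0] ends up holding in that case
}

int main() {
    printf("%f\n", w_critic_loss({0.2, 0.4}, {1.0, 2.0}));  // 0.3 - 1.5 = -1.2
    return 0;
}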
|
6d4bb40cfd7dbe702e3a0523425c1ab67e8e67aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__device__ inline float devInsiders(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return max(interS / Sa, interS/Sb);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(hipGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
| 6d4bb40cfd7dbe702e3a0523425c1ab67e8e67aa.cu | // ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__device__ inline float devInsiders(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return max(interS / Sa, interS/Sb);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
CUDA_CHECK(cudaGetDevice(¤t_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
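// Illustrative driver (separate from the file pair above; assumes it is compiled together
// with the file above and that gpu_nms.hpp declares _nms with the signature used there).
// Boxes are rows of 5 floats (x1, y1, x2, y2, score) and are expected to be pre-sorted by
// descending score, since the kernel only suppresses later boxes against earlier ones.
#include <cstdio>
#include "gpu_nms.hpp"

int main() {
    // three boxes: the second heavily overlaps the first, the third is far away
    const float boxes[3 * 5] = {
          0.f,   0.f, 100.f, 100.f, 0.9f,
          5.f,   5.f, 105.f, 105.f, 0.8f,
        200.f, 200.f, 300.f, 300.f, 0.7f,
    };
    int keep[3];
    int num_kept = 0;
    _nms(keep, &num_kept, boxes, /*boxes_num=*/3, /*boxes_dim=*/5,
         /*nms_overlap_thresh=*/0.5f, /*device_id=*/0);
    printf("kept %d boxes:", num_kept);
    for (int i = 0; i < num_kept; ++i) printf(" %d", keep[i]);
    printf("\n");  // expected: kept 2 boxes: 0 2
    return 0;
}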
|
d7145c78a6e741d357ca007db7a5a8e785fd3dab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/bnll_layer.hpp"
namespace caffe {
const float kBNLL_THRESHOLD = 50.;
template <typename Ftype>
__global__ void BNLLForward(const int n, const Ftype* in, Ftype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Ftype, typename Btype>
void BNLLLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::thread_stream(),
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Btype>
__global__ void BNLLBackward(const int n, const Btype* in_diff,
const Btype* in_data, Btype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
float expval = exp(min(in_data[index], Btype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Ftype, typename Btype>
void BNLLLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
if (propagate_down[0]) {
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( BNLLBackward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::thread_stream(),
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(BNLLLayer);
} // namespace caffe
| d7145c78a6e741d357ca007db7a5a8e785fd3dab.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/bnll_layer.hpp"
namespace caffe {
const float kBNLL_THRESHOLD = 50.;
template <typename Ftype>
__global__ void BNLLForward(const int n, const Ftype* in, Ftype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ?
in[index] + log(1. + exp(-in[index])) :
log(1. + exp(in[index]));
}
}
template <typename Ftype, typename Btype>
void BNLLLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>(
count, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Btype>
__global__ void BNLLBackward(const int n, const Btype* in_diff,
const Btype* in_data, Btype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
float expval = exp(min(in_data[index], Btype(kBNLL_THRESHOLD)));
out_diff[index] = in_diff[index] * expval / (expval + 1.);
}
}
template <typename Ftype, typename Btype>
void BNLLLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
if (propagate_down[0]) {
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
const Btype* top_diff = top[0]->gpu_diff<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
BNLLBackward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>(
count, top_diff, bottom_data, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(BNLLLayer);
} // namespace caffe
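// Illustrative reference (separate from the layer above): BNLL is the softplus function
// f(x) = log(1 + exp(x)), and the branch in the forward kernel is the usual numerically
// stable rewrite f(x) = max(x, 0) + log(1 + exp(-|x|)). A tiny CPU check:
#include <cmath>
#include <cstdio>

double bnll_forward(double x) {
    return x > 0 ? x + std::log(1.0 + std::exp(-x))
                 : std::log(1.0 + std::exp(x));
}

int main() {
    // ~0, log(2) = 0.693..., ~30 -- large positive inputs no longer overflow exp()
    printf("%g %g %g\n", bnll_forward(-30.0), bnll_forward(0.0), bnll_forward(30.0));
    return 0;
}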
|
71fe25b407ce93a881a02450715eeaf7e520a57e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (C) Mellanox Technologies Ltd. 2020-2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "../mc_cuda.h"
#include "utils/ucc_math.h"
#ifdef __cplusplus
}
#endif
#include "mc_cuda_reduce_ops.h"
#define CUDA_REDUCE_WITH_OP(NAME, OP) \
template <typename T> \
__global__ void UCC_REDUCE_CUDA_ ## NAME (const T *s1, const T *s2, T *d, \
size_t count) \
{ \
size_t start = blockIdx.x * blockDim.x + threadIdx.x; \
size_t step = blockDim.x * gridDim.x; \
for (size_t i = start; i < count; i+=step) { \
d[i] = OP(s1[i], s2[i]); \
} \
} \
#define CUDA_REDUCE_WITH_OP_SPECIALIZED(NAME, OP, TYPE) \
template <> \
__global__ void UCC_REDUCE_CUDA_ ## NAME (const TYPE *s1, const TYPE *s2, \
TYPE *d, size_t count) \
{ \
size_t start = blockIdx.x * blockDim.x + threadIdx.x; \
size_t step = blockDim.x * gridDim.x; \
for (size_t i = start; i < count; i+=step) { \
d[i] = OP(s1[i], s2[i]); \
} \
} \
CUDA_REDUCE_WITH_OP(MAX, DO_OP_MAX)
CUDA_REDUCE_WITH_OP(MIN, DO_OP_MIN)
CUDA_REDUCE_WITH_OP(SUM, DO_OP_SUM)
CUDA_REDUCE_WITH_OP(PROD, DO_OP_PROD)
CUDA_REDUCE_WITH_OP(LAND, DO_OP_LAND)
CUDA_REDUCE_WITH_OP(BAND, DO_OP_BAND)
CUDA_REDUCE_WITH_OP(LOR, DO_OP_LOR)
CUDA_REDUCE_WITH_OP(BOR, DO_OP_BOR)
CUDA_REDUCE_WITH_OP(LXOR, DO_OP_LXOR)
CUDA_REDUCE_WITH_OP(BXOR, DO_OP_BXOR)
CUDA_REDUCE_WITH_OP_SPECIALIZED(MAX, DO_OP_MAX_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(MIN, DO_OP_MIN_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(SUM, DO_OP_SUM_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(PROD, DO_OP_PROD_HALF, __half)
#define LAUNCH_KERNEL(NAME, type, src1, src2, dest, count, s, b, t) \
do { \
        hipLaunchKernelGGL((UCC_REDUCE_CUDA_ ## NAME<type>), dim3(b), dim3(t), 0, s, src1, src2, \
                           dest, count); \
} while(0)
#define DT_REDUCE_INT(type, op, src1_p, src2_p, dest_p, count, s, b, t) do { \
const type *sbuf1 = (type *)src1_p; \
const type *sbuf2= (type *)src2_p; \
type *dest = (type *)dest_p; \
switch(op) { \
case UCC_OP_MAX: \
LAUNCH_KERNEL(MAX, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_MIN: \
LAUNCH_KERNEL(MIN, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_SUM: \
LAUNCH_KERNEL(SUM, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_PROD: \
LAUNCH_KERNEL(PROD, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LAND: \
LAUNCH_KERNEL(LAND, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BAND: \
LAUNCH_KERNEL(BAND, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LOR: \
LAUNCH_KERNEL(LOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BOR: \
LAUNCH_KERNEL(BOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LXOR: \
LAUNCH_KERNEL(LXOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BXOR: \
LAUNCH_KERNEL(BXOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
default: \
mc_error(&ucc_mc_cuda.super, "int dtype does not support " \
"requested reduce op: %d", op); \
return UCC_ERR_NOT_SUPPORTED; \
} \
} while(0)
#define DT_REDUCE_FLOAT(type, op, src1_p, src2_p, dest_p, count, s, b, t) do { \
const type *sbuf1 = (const type *)src1_p; \
const type *sbuf2 = (const type *)src2_p; \
type *dest = (type *)dest_p; \
switch(op) { \
case UCC_OP_MAX: \
LAUNCH_KERNEL(MAX, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_MIN: \
LAUNCH_KERNEL(MIN, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_SUM: \
LAUNCH_KERNEL(SUM, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_PROD: \
LAUNCH_KERNEL(PROD, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
default: \
mc_error(&ucc_mc_cuda.super, "float dtype does not support " \
"requested reduce op: %d", op); \
return UCC_ERR_NOT_SUPPORTED; \
} \
} while(0)
#ifdef __cplusplus
extern "C" {
#endif
ucc_status_t ucc_mc_cuda_reduce(const void *src1, const void *src2, void *dst,
size_t count, ucc_datatype_t dt,
ucc_reduction_op_t op)
{
    int th = MC_CUDA_CONFIG->reduce_num_threads;
    unsigned long bk = (count + th - 1)/th;
hipStream_t stream;
UCC_MC_CUDA_INIT_STREAM();
stream = ucc_mc_cuda.stream;
if (MC_CUDA_CONFIG->reduce_num_blocks != UCC_ULUNITS_AUTO) {
bk = ucc_min(bk, MC_CUDA_CONFIG->reduce_num_blocks);
}
switch (dt)
{
case UCC_DT_INT16:
DT_REDUCE_INT(int16_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_INT32:
DT_REDUCE_INT(int32_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_INT64:
DT_REDUCE_INT(int64_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT16:
ucc_assert(2 == sizeof(__half));
DT_REDUCE_FLOAT(__half, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT32:
ucc_assert(4 == sizeof(float));
DT_REDUCE_FLOAT(float, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT64:
ucc_assert(8 == sizeof(double));
DT_REDUCE_FLOAT(double, op, src1, src2, dst, count, stream, bk, th);
break;
default:
mc_error(&ucc_mc_cuda.super, "unsupported reduction type (%d)", dt);
return UCC_ERR_NOT_SUPPORTED;
}
CUDACHECK(hipGetLastError());
CUDACHECK(hipStreamSynchronize(stream));
return UCC_OK;
}
#ifdef __cplusplus
}
#endif
| 71fe25b407ce93a881a02450715eeaf7e520a57e.cu | /**
* Copyright (C) Mellanox Technologies Ltd. 2020-2021. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "../mc_cuda.h"
#include "utils/ucc_math.h"
#ifdef __cplusplus
}
#endif
#include "mc_cuda_reduce_ops.h"
#define CUDA_REDUCE_WITH_OP(NAME, OP) \
template <typename T> \
__global__ void UCC_REDUCE_CUDA_ ## NAME (const T *s1, const T *s2, T *d, \
size_t count) \
{ \
size_t start = blockIdx.x * blockDim.x + threadIdx.x; \
size_t step = blockDim.x * gridDim.x; \
for (size_t i = start; i < count; i+=step) { \
d[i] = OP(s1[i], s2[i]); \
} \
} \
#define CUDA_REDUCE_WITH_OP_SPECIALIZED(NAME, OP, TYPE) \
template <> \
__global__ void UCC_REDUCE_CUDA_ ## NAME (const TYPE *s1, const TYPE *s2, \
TYPE *d, size_t count) \
{ \
size_t start = blockIdx.x * blockDim.x + threadIdx.x; \
size_t step = blockDim.x * gridDim.x; \
for (size_t i = start; i < count; i+=step) { \
d[i] = OP(s1[i], s2[i]); \
} \
} \
CUDA_REDUCE_WITH_OP(MAX, DO_OP_MAX)
CUDA_REDUCE_WITH_OP(MIN, DO_OP_MIN)
CUDA_REDUCE_WITH_OP(SUM, DO_OP_SUM)
CUDA_REDUCE_WITH_OP(PROD, DO_OP_PROD)
CUDA_REDUCE_WITH_OP(LAND, DO_OP_LAND)
CUDA_REDUCE_WITH_OP(BAND, DO_OP_BAND)
CUDA_REDUCE_WITH_OP(LOR, DO_OP_LOR)
CUDA_REDUCE_WITH_OP(BOR, DO_OP_BOR)
CUDA_REDUCE_WITH_OP(LXOR, DO_OP_LXOR)
CUDA_REDUCE_WITH_OP(BXOR, DO_OP_BXOR)
CUDA_REDUCE_WITH_OP_SPECIALIZED(MAX, DO_OP_MAX_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(MIN, DO_OP_MIN_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(SUM, DO_OP_SUM_HALF, __half)
CUDA_REDUCE_WITH_OP_SPECIALIZED(PROD, DO_OP_PROD_HALF, __half)
#define LAUNCH_KERNEL(NAME, type, src1, src2, dest, count, s, b, t) \
do { \
UCC_REDUCE_CUDA_ ## NAME<type> <<<b, t, 0, s>>>(src1, src2, \
dest, count); \
} while(0)
#define DT_REDUCE_INT(type, op, src1_p, src2_p, dest_p, count, s, b, t) do { \
const type *sbuf1 = (type *)src1_p; \
const type *sbuf2= (type *)src2_p; \
type *dest = (type *)dest_p; \
switch(op) { \
case UCC_OP_MAX: \
LAUNCH_KERNEL(MAX, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_MIN: \
LAUNCH_KERNEL(MIN, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_SUM: \
LAUNCH_KERNEL(SUM, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_PROD: \
LAUNCH_KERNEL(PROD, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LAND: \
LAUNCH_KERNEL(LAND, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BAND: \
LAUNCH_KERNEL(BAND, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LOR: \
LAUNCH_KERNEL(LOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BOR: \
LAUNCH_KERNEL(BOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_LXOR: \
LAUNCH_KERNEL(LXOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_BXOR: \
LAUNCH_KERNEL(BXOR, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
default: \
mc_error(&ucc_mc_cuda.super, "int dtype does not support " \
"requested reduce op: %d", op); \
return UCC_ERR_NOT_SUPPORTED; \
} \
} while(0)
#define DT_REDUCE_FLOAT(type, op, src1_p, src2_p, dest_p, count, s, b, t) do { \
const type *sbuf1 = (const type *)src1_p; \
const type *sbuf2 = (const type *)src2_p; \
type *dest = (type *)dest_p; \
switch(op) { \
case UCC_OP_MAX: \
LAUNCH_KERNEL(MAX, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_MIN: \
LAUNCH_KERNEL(MIN, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_SUM: \
LAUNCH_KERNEL(SUM, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
case UCC_OP_PROD: \
LAUNCH_KERNEL(PROD, type, sbuf1, sbuf2, dest, count, s, b, t); \
break; \
default: \
mc_error(&ucc_mc_cuda.super, "float dtype does not support " \
"requested reduce op: %d", op); \
return UCC_ERR_NOT_SUPPORTED; \
} \
} while(0)
#ifdef __cplusplus
extern "C" {
#endif
ucc_status_t ucc_mc_cuda_reduce(const void *src1, const void *src2, void *dst,
size_t count, ucc_datatype_t dt,
ucc_reduction_op_t op)
{
    int th = MC_CUDA_CONFIG->reduce_num_threads;
    unsigned long bk = (count + th - 1)/th;
cudaStream_t stream;
UCC_MC_CUDA_INIT_STREAM();
stream = ucc_mc_cuda.stream;
if (MC_CUDA_CONFIG->reduce_num_blocks != UCC_ULUNITS_AUTO) {
bk = ucc_min(bk, MC_CUDA_CONFIG->reduce_num_blocks);
}
switch (dt)
{
case UCC_DT_INT16:
DT_REDUCE_INT(int16_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_INT32:
DT_REDUCE_INT(int32_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_INT64:
DT_REDUCE_INT(int64_t, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT16:
ucc_assert(2 == sizeof(__half));
DT_REDUCE_FLOAT(__half, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT32:
ucc_assert(4 == sizeof(float));
DT_REDUCE_FLOAT(float, op, src1, src2, dst, count, stream, bk, th);
break;
case UCC_DT_FLOAT64:
ucc_assert(8 == sizeof(double));
DT_REDUCE_FLOAT(double, op, src1, src2, dst, count, stream, bk, th);
break;
default:
mc_error(&ucc_mc_cuda.super, "unsupported reduction type (%d)", dt);
return UCC_ERR_NOT_SUPPORTED;
}
CUDACHECK(cudaGetLastError());
CUDACHECK(cudaStreamSynchronize(stream));
return UCC_OK;
}
#ifdef __cplusplus
}
#endif
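// Self-contained illustrative sketch (plain CUDA, hypothetical names, separate from the
// file pair above): the UCC_REDUCE_CUDA_* kernels are element-wise binary reductions over
// two equally sized source buffers, launched through LAUNCH_KERNEL as a grid-stride loop.
// A standalone equivalent of the float SUM case:
#include <cstdio>
#include <cuda_runtime.h>

template <typename T>
__global__ void reduce_sum_pair(const T *s1, const T *s2, T *d, size_t count) {
    size_t start = blockIdx.x * blockDim.x + threadIdx.x;
    size_t step  = blockDim.x * gridDim.x;
    for (size_t i = start; i < count; i += step) {
        d[i] = s1[i] + s2[i];  // DO_OP_SUM specialised to '+'
    }
}

int main() {
    const size_t count = 1 << 16;
    float *s1, *s2, *d;
    cudaMallocManaged(&s1, count * sizeof(float));
    cudaMallocManaged(&s2, count * sizeof(float));
    cudaMallocManaged(&d,  count * sizeof(float));
    for (size_t i = 0; i < count; ++i) { s1[i] = 1.0f; s2[i] = (float)i; }
    const int threads = 256;                              // cf. reduce_num_threads above
    const int blocks  = (int)((count + threads - 1) / threads);
    reduce_sum_pair<float><<<blocks, threads>>>(s1, s2, d, count);
    cudaDeviceSynchronize();
    printf("d[100] = %f\n", d[100]);                      // expect 101
    cudaFree(s1); cudaFree(s2); cudaFree(d);
    return 0;
}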
|
268367d6626e008302f98b14b4f234427b694bec.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime_api.h>
#include<stdio.h>
#include<fstream>
#include<sstream>
#include<string>
#include<vector>
#include<algorithm>
#include<iterator>
using namespace std;
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
// structure for trajectory
struct trajectory {
    // a struct holding all the points that make up a trajectory
vector<int> trajectoryPoints;
    float* distance = new float[1]; // needs room for one float; new float[0] cannot hold the copied result
};
// global function for the GPU to calculate the total distance in the entire trajectory
__global__ void euclidean(int* points, int stride, float* distArray, float* distance, int length) {
int i = threadIdx.x;
int index = i * stride;
int squaredSum = 0;
// allow multi-dimensional
for (int j = 0; j < stride; j++) {
squaredSum += (points[index + j] - points[stride + index + j]) * (points[index + j] - points[stride + index + j]);
}
//distArray[i] = hypotf((points[index] - points[stride + index]), (points[index + 1] - points[stride + index + 1]));
distArray[i] = sqrtf(squaredSum);
__syncthreads(); // sync all the threads before performing a reduction
// reduce
float sum = 0;
if (threadIdx.x == 0) {
for (int k = 0;k < length;k++) {
sum += distArray[k];
}
*distance = sum;
}
}
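// Reading aid: this kernel is launched with a single block and one thread per segment,
// matching the call in main(), i.e. euclidean<<<1, num_of_stops>>>(d_points, stride,
// d_distArray, d_res, num_of_stops); thread 0 then accumulates distArray serially after
// the barrier (a tree reduction would also work, but that is not what this code does).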
__global__ static void bitonicsort(float * values, int lineNos)
{
extern __shared__ float shared[];
const unsigned int tid = threadIdx.x;
shared[tid] = values[tid];
__syncthreads();
for (unsigned int k = 2;k <= lineNos ;k *= 2) {
for (unsigned int j = k / 2; j > 0; j /= 2)
{
unsigned int ixj = tid ^ j;
if (ixj > tid) {
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
float temp = shared[tid];
shared[tid] = shared[ixj];
shared[ixj] = temp;
}
}
else
{
if (shared[tid] < shared[ixj])
{
float temp = shared[tid];
shared[tid] = shared[ixj];
shared[ixj] = temp;
}
}
}
__syncthreads();
}
}
values[tid] = shared[tid];
}
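// Reading aid: launched below as bitonicsort<<<1, num_of_rows, num_of_rows*sizeof(float)>>>,
// where the third launch argument sizes the dynamic shared-memory array `shared`.
// Note that a bitonic network written this way sorts correctly only when the element
// count is a power of two and fits within a single thread block.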
int main(int argc, char** argv)
{
// variables used everywhere
int num_of_stops = 0;
int num_of_rows = 0;
// accept commandline arguments or exit if none provided (filename, from the same folder)
	if (argc < 2 || argv[1][0] == '\0') {
printf("Error Reading File\n");
exit(0);
}
// read file using ifstream
std::ifstream myFile(argv[1]);
vector<float> values;
vector<trajectory> trajectories;
float * d_values;
/*while (!std::feof(myFile)) {
std::getline(myFile);
fscanf(myFile, "%d\n", &values[i]);
for (int j = 0; j < ) {
}
}*/
string line = "";
// open file and start parsing line by line
while (getline(myFile, line))
{
trajectory trajTemp;
stringstream lineTokens(line);
string temp; // breaking line into string
vector<int> temp1;
// parse line and extract numbers from the line
while (getline(lineTokens, temp, ' ')) {
temp1.push_back(atoi(temp.c_str()));
}
if (2 == temp1.size()) {
// from the first line, acquire the number of stops
num_of_rows = temp1[0];
num_of_stops = temp1[1];
}
else {
// this applies for the lines after the first line
// calculating stride
int stride = temp1.size() / num_of_stops;
// fill in the origin point
for (int i = 0; i < stride; i++) {
temp1.insert(temp1.begin(),0);
}
// calculating parallel euclidean distance using GPU for the entire trajectory
// this performs an outer loop parallelization by loop distribution
// i.e. I broke the Euclidean distance calculation and sorting in 2 different GPU operations
// this ensures that calculation heavy operations are efficiently performed by the GPU.
int* d_points;
float* d_distArray;
float* d_res;
hipMalloc(&d_points, temp1.size() * sizeof(int));
hipMalloc(&d_distArray, num_of_stops * sizeof(float));
hipMalloc(&d_res, sizeof(float));
hipMemcpy(d_points, temp1.data(), temp1.size() * sizeof(int), hipMemcpyHostToDevice);
euclidean << <1, num_of_stops >> > (d_points, stride, d_distArray, d_res, num_of_stops);
hipMemcpy(trajTemp.distance, d_res, sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_points);
hipFree(d_distArray);
hipFree(d_res);
values.push_back(*trajTemp.distance);
// add all points to the temporary trajectory vector
trajTemp.trajectoryPoints = temp1;
// push current trajectory to our trajectories vector
trajectories.push_back(trajTemp);
}
}
// close the input file
myFile.close();
hipMalloc(&d_values, num_of_rows * sizeof(float));
hipMemcpy(d_values, values.data(), sizeof(float) * num_of_rows, hipMemcpyHostToDevice);
bitonicsort << <1, num_of_rows, num_of_rows * sizeof(float)>> > (d_values, num_of_rows);
float* sortedValues = new float[num_of_rows];
hipMemcpy(sortedValues, d_values, sizeof(float) * num_of_rows, hipMemcpyDeviceToHost);
// uncomment this snippet to debug the code and see output of the sorted array coming from the GPU
/*printf("\n---------------------------------\n");
printf("Here is the sorted array from GPU: \n");
for (int i = 0; i < num_of_rows; i++)
{
printf("%f, ", sortedValues[i]);
}
printf("\n---------------------------------\n");*/
// write the output to a file
std::ofstream output_file("./output.txt");
for (int i = 0; i < num_of_rows; i++)
{
		// the trajectories are not sorted in the data structure but we now have the sorted array
vector<int> trajectoryToPrint;
for (int j = 0; j < trajectories.size(); j++) {
if (*trajectories[j].distance == sortedValues[i]) {
trajectoryToPrint = trajectories[j].trajectoryPoints;
}
}
// write vector to file
ostream_iterator<int> output_iterator(output_file, " ");
copy(trajectoryToPrint.begin()+2, trajectoryToPrint.end(), output_iterator);
output_file << "\n";
}
// free memory
hipFree(d_values);
	delete[] sortedValues; // allocated with new[], so release with delete[] rather than free()
}
| 268367d6626e008302f98b14b4f234427b694bec.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda_runtime_api.h>
#include<stdio.h>
#include<fstream>
#include<sstream>
#include<string>
#include<vector>
#include<algorithm>
#include<iterator>
using namespace std;
#ifndef __CUDACC_RTC__
#define __CUDACC_RTC__
#endif
// structure for trajectory
struct trajectory {
    // a struct holding all the points that make up a trajectory
vector<int> trajectoryPoints;
    float* distance = new float[1]; // needs room for one float; new float[0] cannot hold the copied result
};
// global function for the GPU to calculate the total distance in the entire trajectory
__global__ void euclidean(int* points, int stride, float* distArray, float* distance, int length) {
int i = threadIdx.x;
int index = i * stride;
int squaredSum = 0;
// allow multi-dimensional
for (int j = 0; j < stride; j++) {
squaredSum += (points[index + j] - points[stride + index + j]) * (points[index + j] - points[stride + index + j]);
}
//distArray[i] = hypotf((points[index] - points[stride + index]), (points[index + 1] - points[stride + index + 1]));
distArray[i] = sqrtf(squaredSum);
__syncthreads(); // sync all the threads before performing a reduction
// reduce
float sum = 0;
if (threadIdx.x == 0) {
for (int k = 0;k < length;k++) {
sum += distArray[k];
}
*distance = sum;
}
}
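// Reading aid: this kernel is launched with a single block and one thread per segment,
// matching the call in main(), i.e. euclidean<<<1, num_of_stops>>>(d_points, stride,
// d_distArray, d_res, num_of_stops); thread 0 then accumulates distArray serially after
// the barrier (a tree reduction would also work, but that is not what this code does).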
__global__ static void bitonicsort(float * values, int lineNos)
{
extern __shared__ float shared[];
const unsigned int tid = threadIdx.x;
shared[tid] = values[tid];
__syncthreads();
for (unsigned int k = 2;k <= lineNos ;k *= 2) {
for (unsigned int j = k / 2; j > 0; j /= 2)
{
unsigned int ixj = tid ^ j;
if (ixj > tid) {
if ((tid & k) == 0)
{
if (shared[tid] > shared[ixj])
{
float temp = shared[tid];
shared[tid] = shared[ixj];
shared[ixj] = temp;
}
}
else
{
if (shared[tid] < shared[ixj])
{
float temp = shared[tid];
shared[tid] = shared[ixj];
shared[ixj] = temp;
}
}
}
__syncthreads();
}
}
values[tid] = shared[tid];
}
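// Reading aid: launched below as bitonicsort<<<1, num_of_rows, num_of_rows*sizeof(float)>>>,
// where the third launch argument sizes the dynamic shared-memory array `shared`.
// Note that a bitonic network written this way sorts correctly only when the element
// count is a power of two and fits within a single thread block.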
int main(int argc, char** argv)
{
// variables used everywhere
int num_of_stops = 0;
int num_of_rows = 0;
// accept commandline arguments or exit if none provided (filename, from the same folder)
	if (argc < 2 || argv[1][0] == '\0') {
printf("Error Reading File\n");
exit(0);
}
// read file using ifstream
std::ifstream myFile(argv[1]);
vector<float> values;
vector<trajectory> trajectories;
float * d_values;
/*while (!std::feof(myFile)) {
std::getline(myFile);
fscanf(myFile, "%d\n", &values[i]);
for (int j = 0; j < ) {
}
}*/
string line = "";
// open file and start parsing line by line
while (getline(myFile, line))
{
trajectory trajTemp;
stringstream lineTokens(line);
string temp; // breaking line into string
vector<int> temp1;
// parse line and extract numbers from the line
while (getline(lineTokens, temp, ' ')) {
temp1.push_back(atoi(temp.c_str()));
}
if (2 == temp1.size()) {
// from the first line, acquire the number of stops
num_of_rows = temp1[0];
num_of_stops = temp1[1];
}
else {
// this applies for the lines after the first line
// calculating stride
int stride = temp1.size() / num_of_stops;
// fill in the origin point
for (int i = 0; i < stride; i++) {
temp1.insert(temp1.begin(),0);
}
// calculating parallel euclidean distance using GPU for the entire trajectory
// this performs an outer loop parallelization by loop distribution
// i.e. I broke the Euclidean distance calculation and sorting in 2 different GPU operations
// this ensures that calculation heavy operations are efficiently performed by the GPU.
int* d_points;
float* d_distArray;
float* d_res;
cudaMalloc(&d_points, temp1.size() * sizeof(int));
cudaMalloc(&d_distArray, num_of_stops * sizeof(float));
cudaMalloc(&d_res, sizeof(float));
cudaMemcpy(d_points, temp1.data(), temp1.size() * sizeof(int), cudaMemcpyHostToDevice);
euclidean << <1, num_of_stops >> > (d_points, stride, d_distArray, d_res, num_of_stops);
cudaMemcpy(trajTemp.distance, d_res, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_points);
cudaFree(d_distArray);
cudaFree(d_res);
values.push_back(*trajTemp.distance);
// add all points to the temporary trajectory vector
trajTemp.trajectoryPoints = temp1;
// push current trajectory to our trajectories vector
trajectories.push_back(trajTemp);
}
}
// close the input file
myFile.close();
cudaMalloc(&d_values, num_of_rows * sizeof(float));
cudaMemcpy(d_values, values.data(), sizeof(float) * num_of_rows, cudaMemcpyHostToDevice);
bitonicsort << <1, num_of_rows, num_of_rows * sizeof(float)>> > (d_values, num_of_rows);
float* sortedValues = new float[num_of_rows];
cudaMemcpy(sortedValues, d_values, sizeof(float) * num_of_rows, cudaMemcpyDeviceToHost);
// uncomment this snippet to debug the code and see output of the sorted array coming from the GPU
/*printf("\n---------------------------------\n");
printf("Here is the sorted array from GPU: \n");
for (int i = 0; i < num_of_rows; i++)
{
printf("%f, ", sortedValues[i]);
}
printf("\n---------------------------------\n");*/
// write the output to a file
std::ofstream output_file("./output.txt");
for (int i = 0; i < num_of_rows; i++)
{
		// the trajectories are not sorted in the data structure but we now have the sorted array
vector<int> trajectoryToPrint;
for (int j = 0; j < trajectories.size(); j++) {
if (*trajectories[j].distance == sortedValues[i]) {
trajectoryToPrint = trajectories[j].trajectoryPoints;
}
}
// write vector to file
ostream_iterator<int> output_iterator(output_file, " ");
copy(trajectoryToPrint.begin()+2, trajectoryToPrint.end(), output_iterator);
output_file << "\n";
}
// free memory
cudaFree(d_values);
	delete[] sortedValues; // allocated with new[], so release with delete[] rather than free()
}
|
6424ba2ba1498bdedfb32bf5261d43902b70377c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 3
#define LS_X_BLOCK_MAX_X_SIZE 160
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_BOUNDARY_PERIODIC 1
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_CopyXWindowPeriodicUnalignedSites_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_CopyXWindowPeriodicUnalignedAprons_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
void cu_CopyXWindowPeriodicUnalignedSites(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowPeriodicUnalignedSites_kernel), dim3(gridSize),dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_CopyXWindowPeriodicUnalignedSites_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
// Copy the x window from shared memory to device memory.
copyXWindowToLattice(outLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
}
void cu_CopyXWindowPeriodicUnalignedAprons(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(hipMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(hipMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(hipMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowPeriodicUnalignedAprons_kernel), dim3(gridSize),dim3(threadBlockSize), 0, 0, (unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(hipStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(hipMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), hipMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(hipFree(outLattice));
CUDA_EXCEPTION_CHECK(hipFree(inLattice));
}
__global__ void cu_CopyXWindowPeriodicUnalignedAprons_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
if (latticeXIndex < latticeXSize)
{
outLattice[latticeIndex] = 0;
outLattice[latticeIndex+latticeXYZSize] = 0;
// If this is the first part of the block, load the leading apron.
if (windowIndex < 2*LS_APRON_SIZE)
{
outLattice[latticeIndex] = window[windowIndex-LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex-LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
// If this is the last part of the block, load the trailing apron.
int threadBlockWidth = ((bx+1)*blockDim.x <= latticeXSize)?(blockDim.x):(latticeXSize-(bx*blockDim.x));
if (windowIndex >= threadBlockWidth)
{
outLattice[latticeIndex] = window[windowIndex+LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
}
}
| 6424ba2ba1498bdedfb32bf5261d43902b70377c.cu | /*
* University of Illinois Open Source License
* Copyright 2010 Luthey-Schulten Group,
* All rights reserved.
*
* Developed by: Luthey-Schulten Group
* University of Illinois at Urbana-Champaign
* http://www.scs.uiuc.edu/~schulten
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the Software), to deal with
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
*
* - Neither the names of the Luthey-Schulten Group, University of Illinois at
* Urbana-Champaign, nor the names of its contributors may be used to endorse or
* promote products derived from this Software without specific prior written
* permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS WITH THE SOFTWARE.
*
* Author(s): Elijah Roberts
*/
#include "lm/Cuda.h"
#define LS_WORDS_PER_SITE 2
#define LS_APRON_SIZE 3
#define LS_X_BLOCK_MAX_X_SIZE 160
#define LS_Y_BLOCK_X_SIZE 16
#define LS_Y_BLOCK_Y_SIZE 8
#define LS_Z_BLOCK_X_SIZE 16
#define LS_Z_BLOCK_Z_SIZE 8
#define LS_BOUNDARY_PERIODIC 1
#include "lm/rdme/dev/lattice_sim_1d_dev.cu"
__global__ void cu_CopyXWindowPeriodicUnalignedSites_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
__global__ void cu_CopyXWindowPeriodicUnalignedAprons_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize);
void cu_CopyXWindowPeriodicUnalignedSites(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowPeriodicUnalignedSites_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_CopyXWindowPeriodicUnalignedSites_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
// Copy the x window from shared memory to device memory.
copyXWindowToLattice(outLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
}
void cu_CopyXWindowPeriodicUnalignedAprons(unsigned int * host_inLattice, unsigned int * host_outLattice, unsigned int latticeXSize, unsigned int latticeYSize, unsigned int latticeZSize)
{
void* inLattice;
void* outLattice;
CUDA_EXCEPTION_CHECK(cudaMalloc(&inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMalloc(&outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
CUDA_EXCEPTION_CHECK(cudaMemcpy(inLattice, host_inLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyHostToDevice));
CUDA_EXCEPTION_CHECK(cudaMemset(outLattice, 0xFF, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int)));
unsigned int gridXSize;
dim3 gridSize, threadBlockSize;
calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, LS_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize);
CUDA_EXCEPTION_EXECUTE((cu_CopyXWindowPeriodicUnalignedAprons_kernel<<<gridSize,threadBlockSize>>>((unsigned int*)inLattice, (unsigned int*)outLattice, gridXSize, latticeXSize, latticeYSize, latticeXSize*latticeYSize*latticeZSize)));
CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(0));
CUDA_EXCEPTION_CHECK(cudaMemcpy(host_outLattice, outLattice, latticeXSize*latticeYSize*latticeZSize*LS_WORDS_PER_SITE*sizeof(unsigned int), cudaMemcpyDeviceToHost));
CUDA_EXCEPTION_CHECK(cudaFree(outLattice));
CUDA_EXCEPTION_CHECK(cudaFree(inLattice));
}
__global__ void cu_CopyXWindowPeriodicUnalignedAprons_kernel(const unsigned int* inLattice, unsigned int* outLattice, const unsigned int gridXSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeXYZSize)
{
__shared__ unsigned int bx, by, bz;
calculateBlockPosition(&bx, &by, &bz, gridXSize);
// Figure out the offset of this thread in the lattice and the lattice segment.
unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x;
unsigned int latticeIndex = (bz*latticeXSize*latticeYSize) + (by*latticeXSize) + latticeXIndex;
unsigned int windowIndex = threadIdx.x+LS_APRON_SIZE;
///////////////////////////////////////////
// Load the lattice into shared memory. //
///////////////////////////////////////////
// Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle.
__shared__ unsigned int window[LS_X_WINDOW_SIZE*LS_WORDS_PER_SITE];
// Copy the x window from device memory into shared memory.
copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, latticeXSize, latticeXYZSize, windowIndex);
__syncthreads();
if (latticeXIndex < latticeXSize)
{
outLattice[latticeIndex] = 0;
outLattice[latticeIndex+latticeXYZSize] = 0;
// If this is the first part of the block, load the leading apron.
if (windowIndex < 2*LS_APRON_SIZE)
{
outLattice[latticeIndex] = window[windowIndex-LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex-LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
// If this is the last part of the block, load the trailing apron.
int threadBlockWidth = ((bx+1)*blockDim.x <= latticeXSize)?(blockDim.x):(latticeXSize-(bx*blockDim.x));
if (windowIndex >= threadBlockWidth)
{
outLattice[latticeIndex] = window[windowIndex+LS_APRON_SIZE];
outLattice[latticeIndex+latticeXYZSize] = window[windowIndex+LS_APRON_SIZE+LS_X_WINDOW_SIZE];
}
}
}
|
9f8954aaa8101e6210a54e8e32114af5206b3b4f.hip | // !!! This is a file automatically generated by hipify!!!
// libs NV
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// generic libs
#include <math.h>
// lib spec
#include "kernelMedian.cu"
// image formats
#define UCHAR (unsigned char)0x10
#define USHORT (unsigned short)0x1000
// max line length in a pgm file
#define SIZE_LINE_TEXT 256
void checkError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Erreur CUDA : %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/**
* \fn int type_image_ppm(int *prof, int *i_dim, int *j_dim, int *level, char *file_name)
 * \brief Returns the type of the ppm image and some of its characteristics
 *
 * \param[out] prof image depth: 1 for pgm, 3 for ppm, 0 otherwise
 * \param[out] i_dim returns the vertical dimension of the image (if NULL, only prof is returned)
 * \param[out] j_dim returns the horizontal dimension of the image
 * \param[out] level returns the dynamic range of the image
 * \param[in] file_name image file
 *
 * \return 1 if ok, 0 otherwise
*
*/
int type_image_ppm(int *prof, unsigned int *i_dim, unsigned int *j_dim, int *level, char *file_name)
{
char buffer[SIZE_LINE_TEXT] ;
FILE *file ;
*prof = 0 ;
file = fopen(file_name, "rb");
if (file == NULL)
return 0 ;
// read the first line
fgets(buffer, SIZE_LINE_TEXT, file);
/* pgm */
if ((buffer[0] == 'P') & (buffer[1] == '5'))
*prof = 1 ; // GGG
/* ppm */
if ((buffer[0] == 'P') & (buffer[1] == '6'))
*prof = 3 ; // RGBRGBRGB
/* unsupported type */
if (*prof == 0) return 0 ;
/* for a type-only query such as */
/* ret = type_image_ppm(&prof, NULL, NULL, NULL, file_name) */
if (i_dim == NULL)
return 1 ;
/* skip comment lines */
fgets(buffer, SIZE_LINE_TEXT, file);
while ((buffer[0] == '#')|(buffer[0] == '\n'))
fgets(buffer, SIZE_LINE_TEXT, file);
/* read the image dimensions */
sscanf(buffer, "%d %d", j_dim, i_dim) ;
fgets(buffer, SIZE_LINE_TEXT, file);
sscanf(buffer, "%d", level) ;
fclose(file);
return 1 ;
}
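/* Usage sketch (mirrors main() at the bottom of this file; "image.pgm" is only a
 * placeholder name for illustration):
 *   int prof, level; unsigned int H, L;
 *   if (type_image_ppm(&prof, &H, &L, &level, (char*)"image.pgm")) { ... }
 */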
/**
* \fn void load_pgm2int(int **image, int i_dim, int j_dim,
* int nb_level, char *fichier_image)
 * \brief reads an 8- or 16-bit pgm
 *
 * \param[out] image
 * \param[in] i_dim vertical dimension of the image
 * \param[in] j_dim horizontal dimension of the image
 * \param[in] nb_level dynamic range of the image
 * \param[in] fichier_image image file
*
*
*/
template<class IMG_TYPE>
void load_pgm2uw(IMG_TYPE *image, int i_dim, int j_dim, char *fichier_image)
{
int i, j ;
char buffer[SIZE_LINE_TEXT] ;
FILE *file = fopen(fichier_image, "rb");
fgets(buffer, SIZE_LINE_TEXT, file); /* P5 */
/* skip comment lines */
fgets(buffer, SIZE_LINE_TEXT, file);
while ((buffer[0] == '#')|(buffer[0] == '\n'))
fgets(buffer, SIZE_LINE_TEXT, file);
/* last line read: the dimensions */
fgets(buffer, SIZE_LINE_TEXT, file); /* dynamic range */
/* data */
// file stored as char or as short; convert accordingly
IMG_TYPE * ligne;
ligne = (IMG_TYPE *)malloc(sizeof(IMG_TYPE)*j_dim) ;
for (i=0;i<i_dim;i++)
{
fread(ligne, sizeof(IMG_TYPE), j_dim, file);
for (j=0;j<j_dim;j++)
image[i*j_dim+j] = (IMG_TYPE)(ligne[j]);
}
free(ligne);
fclose(file);
}
template<class IMG_TYPE>
void save_2pgm(char *fichier_image, IMG_TYPE *image, int j_dim, int i_dim)
{
int i, j ;
FILE *file=fopen(fichier_image,"wb");
// pgm header
// format
fprintf(file, "P5\n") ;
fprintf(file, "# AND - DISC\n") ;
fprintf(file, "# FEMTO-ST Institute - Belfort - France\n") ;
// size
fprintf(file, "%d %d\n", j_dim, i_dim) ;
// dynamic range
unsigned short dyn = (1<<sizeof(IMG_TYPE)*8) -1;
printf("save2pgm dyn=%d\n",dyn);
IMG_TYPE *ligne;
fprintf(file, "%d\n" , dyn );
ligne = (IMG_TYPE *)malloc(sizeof(IMG_TYPE)*j_dim );
for (i=0;i<i_dim;i++)
{
for (j=0;j<j_dim;j++)
ligne[j] = (IMG_TYPE)(image[i*j_dim + j]) ;
fwrite(ligne, sizeof(IMG_TYPE), j_dim, file);
}
free(ligne) ;
fclose(file);
}
template<class IMG_TYPE>
void run_test(int argc, char **argv, int r, int ppt, IMG_TYPE flag){
// assuming this is the one
int gpuId = 0;
if (argc > 2)
gpuId = atoi(argv[1]);
hipSetDevice( gpuId );
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, gpuId);
// for time measurements
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// input file name
char * image_name = argv[argc-1] ;
// CPU
IMG_TYPE * h_datac=NULL ; // primary input image
IMG_TYPE * h_datacPadded=NULL ; // padded memory area for input image
unsigned int H, L, size; // image dimensions and size
IMG_TYPE * h_out_gpu_c ; // output image ready for hard disk write
dim3 dimBlock, dimGrid ; // grid dimensions
int bsx=32, bsy=8 ; // default values of thread block dimensions
int ct=0; // counter for mean execution time
int profImage;
int depth;
int bitDepth;
// GPU data output
IMG_TYPE *d_outc ;
IMG_TYPE *d_inc ;
// dummy alloc to avoid possible high latency when first using the GPU
short * d_bidon ;
hipMalloc( (void**) &d_bidon, sizeof(short)) ;
if (type_image_ppm(&profImage, &H, &L, &depth, image_name)){
bitDepth = log2(1.0*(depth+1));
// loads image from hard disk
sdkStartTimer(&timer);
h_datac = (IMG_TYPE*)malloc(H*L*sizeof(IMG_TYPE));
//sdkLoadPGM(image_name, &h_datac, &L, &H);
load_pgm2uw(h_datac, H, L, image_name);
sdkStopTimer(&timer);
// memory size of the image (CPU side)
size = H * L * sizeof( IMG_TYPE );
// loading summary
printf("\n***** CONVOMED SUMMARY *****\n") ;
printf("GPU : %s\n", prop.name);
printf("Image %d bits %s (%d x %d) pixels = %d Bytes loaded in %f ms,\n", bitDepth, image_name, L, H, size, sdkGetTimerValue(&timer));
}
sdkResetTimer(&timer);
sdkStartTimer(&timer);
// GPU memory allocations
size_t Hpitch ; // pitch in bytes returned by the pitched allocation below
checkCudaErrors(hipMalloc( (void**) &d_outc, H*L*sizeof(IMG_TYPE) )) ;
checkError("Alloc dout_c") ;
checkCudaErrors(hipMallocPitch( (void**) &d_inc, &Hpitch, (size_t)((L+2*r)*sizeof(IMG_TYPE)), (size_t)(H+2*r) )) ; // height is a row count, not bytes
checkError("Alloc d_inc");
sdkStopTimer(&timer);
printf("GPU memory allocations done in %f ms\n", sdkGetTimerValue(&timer)) ;
// PAGED LOCKED mem
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(hipHostMalloc((void**)&h_out_gpu_c, H*L*sizeof(IMG_TYPE), hipHostMallocDefault)) ;
h_datacPadded = (IMG_TYPE *)malloc((H+2*r)*Hpitch*sizeof(IMG_TYPE)) ;
if (h_datacPadded != NULL) printf("ALLOC padded mem CPU OK\n");
sdkStopTimer(&timer);
printf("CPU memory allocations done in %f ms\n", sdkGetTimerValue(&timer)) ;
int i, j ;
int h_dim = Hpitch/sizeof(IMG_TYPE) ;
for (i=0; i<H; i++)
for (j=0; j<L; j++)
h_datacPadded[(i+r)*h_dim+j+r] = 1.0*h_datac[i*L+j] ;
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(hipMemcpy( d_inc, h_datacPadded, (H+2*r)*Hpitch, hipMemcpyHostToDevice)) ;
checkError("Copie h_datac en GMEM --> d_inc");
sdkStopTimer(&timer);
printf("Input image copied into global memory in %f ms\n", sdkGetTimerValue(&timer)) ;
sdkResetTimer(&timer) ;
/*****************************
* Kernels calls
*****************************/
checkError("Config cache");
dimBlock = dim3(bsx,bsy,1) ;
dimGrid = dim3( (L/dimBlock.x)/1, (H/dimBlock.y)/ppt, 1 ) ;
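// Grid sizing note: one thread column per output pixel in x, and H/(blockDim.y*ppt)
// rows of blocks in y, i.e. each thread presumably covers `ppt` output rows
// (the actual per-thread loop lives in kernelMedian.cu, which is not shown here).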
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for (ct=0; ct<100 ; ct++)
hipLaunchKernelGGL(( medianFilterSharedKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_inc, d_outc, L, h_dim) ;
hipDeviceSynchronize() ;
sdkStopTimer(&timer);
printf("Mean runtime (on %d executions): %f ms - soit %.0f Mpixels/s \n", (ct), sdkGetTimerValue(&timer)/(ct), L*H/(1000.0*sdkGetTimerValue(&timer)/(ct))) ;
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(hipMemcpy((void*)h_out_gpu_c , d_outc, H*L*sizeof(IMG_TYPE), hipMemcpyDeviceToHost)) ;
checkError("Copie D_out depuis GMEM vers mem CPU");
sdkStopTimer(&timer);
printf("Ouput image (image_out.pgm) copied from GPU to CPU in %f ms\n", sdkGetTimerValue(&timer)) ;
printf("***** END OF CONVOMED EXECUTION *****\n\n");
//sdkSavePGM("image_out.pgm", h_out_gpu_c, L, H) ;
save_2pgm((char*)("image_out.pgm"), h_out_gpu_c, L, H);
checkError("Writing img on disk");
hipFree(d_inc) ;
hipFree(d_outc);
hipHostFree(h_out_gpu_c);
}
int main(int argc, char **argv){
// mask radius
int r=2 ;
//pixels per thread
int ppt=2 ;
unsigned int H, L;
int profImage, depth, bitDepth;
char * image_name = argv[argc-1] ;
if (type_image_ppm(&profImage, &H, &L, &depth, image_name)){
bitDepth = log2(1.0*(depth+1));
switch(bitDepth){
case 8:
run_test(argc, argv, r, ppt, UCHAR);
break;
case 16:
run_test(argc, argv, r, ppt, USHORT);
break;
}
}
return EXIT_SUCCESS ;
}
| 9f8954aaa8101e6210a54e8e32114af5206b3b4f.cu | // libs NV
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// generic libs
#include <math.h>
// lib spec
#include "kernelMedian.cu"
// image formats
#define UCHAR (unsigned char)0x10
#define USHORT (unsigned short)0x1000
// max line length in a pgm file
#define SIZE_LINE_TEXT 256
void checkError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Erreur CUDA : %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
/**
* \fn int type_image_ppm(int *prof, int *i_dim, int *j_dim, int *level, char *file_name)
 * \brief Returns the type of the ppm image and some of its characteristics
 *
 * \param[out] prof image depth: 1 for pgm, 3 for ppm, 0 otherwise
 * \param[out] i_dim returns the vertical dimension of the image (if NULL, only prof is returned)
 * \param[out] j_dim returns the horizontal dimension of the image
 * \param[out] level returns the dynamic range of the image
 * \param[in] file_name image file
 *
 * \return 1 if ok, 0 otherwise
*
*/
int type_image_ppm(int *prof, unsigned int *i_dim, unsigned int *j_dim, int *level, char *file_name)
{
char buffer[SIZE_LINE_TEXT] ;
FILE *file ;
*prof = 0 ;
file = fopen(file_name, "rb");
if (file == NULL)
return 0 ;
// read the first line
fgets(buffer, SIZE_LINE_TEXT, file);
/* pgm */
if ((buffer[0] == 'P') & (buffer[1] == '5'))
*prof = 1 ; // GGG
/* ppm */
if ((buffer[0] == 'P') & (buffer[1] == '6'))
*prof = 3 ; // RGBRGBRGB
/* unsupported type */
if (*prof == 0) return 0 ;
/* for a type-only query such as */
/* ret = type_image_ppm(&prof, NULL, NULL, NULL, file_name) */
if (i_dim == NULL)
return 1 ;
/* skip comment lines */
fgets(buffer, SIZE_LINE_TEXT, file);
while ((buffer[0] == '#')|(buffer[0] == '\n'))
fgets(buffer, SIZE_LINE_TEXT, file);
/* read the image dimensions */
sscanf(buffer, "%d %d", j_dim, i_dim) ;
fgets(buffer, SIZE_LINE_TEXT, file);
sscanf(buffer, "%d", level) ;
fclose(file);
return 1 ;
}
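/* Usage sketch (mirrors main() at the bottom of this file; "image.pgm" is only a
 * placeholder name for illustration):
 *   int prof, level; unsigned int H, L;
 *   if (type_image_ppm(&prof, &H, &L, &level, (char*)"image.pgm")) { ... }
 */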
/**
* \fn void load_pgm2int(int **image, int i_dim, int j_dim,
* int nb_level, char *fichier_image)
 * \brief reads an 8- or 16-bit pgm
 *
 * \param[out] image
 * \param[in] i_dim vertical dimension of the image
 * \param[in] j_dim horizontal dimension of the image
 * \param[in] nb_level dynamic range of the image
 * \param[in] fichier_image image file
*
*
*/
template<class IMG_TYPE>
void load_pgm2uw(IMG_TYPE *image, int i_dim, int j_dim, char *fichier_image)
{
int i, j ;
char buffer[SIZE_LINE_TEXT] ;
FILE *file = fopen(fichier_image, "rb");
fgets(buffer, SIZE_LINE_TEXT, file); /* P5 */
/* skip comment lines */
fgets(buffer, SIZE_LINE_TEXT, file);
while ((buffer[0] == '#')|(buffer[0] == '\n'))
fgets(buffer, SIZE_LINE_TEXT, file);
/* last line read: the dimensions */
fgets(buffer, SIZE_LINE_TEXT, file); /* dynamic range */
/* data */
// file stored as char or as short; convert accordingly
IMG_TYPE * ligne;
ligne = (IMG_TYPE *)malloc(sizeof(IMG_TYPE)*j_dim) ;
for (i=0;i<i_dim;i++)
{
fread(ligne, sizeof(IMG_TYPE), j_dim, file);
for (j=0;j<j_dim;j++)
image[i*j_dim+j] = (IMG_TYPE)(ligne[j]);
}
free(ligne);
fclose(file);
}
template<class IMG_TYPE>
void save_2pgm(char *fichier_image, IMG_TYPE *image, int j_dim, int i_dim)
{
int i, j ;
FILE *file=fopen(fichier_image,"wb");
// pgm header
// format
fprintf(file, "P5\n") ;
fprintf(file, "# AND - DISC\n") ;
fprintf(file, "# FEMTO-ST Institute - Belfort - France\n") ;
// size
fprintf(file, "%d %d\n", j_dim, i_dim) ;
// dynamic range
unsigned short dyn = (1<<sizeof(IMG_TYPE)*8) -1;
printf("save2pgm dyn=%d\n",dyn);
IMG_TYPE *ligne;
fprintf(file, "%d\n" , dyn );
ligne = (IMG_TYPE *)malloc(sizeof(IMG_TYPE)*j_dim );
for (i=0;i<i_dim;i++)
{
for (j=0;j<j_dim;j++)
ligne[j] = (IMG_TYPE)(image[i*j_dim + j]) ;
fwrite(ligne, sizeof(IMG_TYPE), j_dim, file);
}
free(ligne) ;
fclose(file);
}
template<class IMG_TYPE>
void run_test(int argc, char **argv, int r, int ppt, IMG_TYPE flag){
// assuming this is the one
int gpuId = 0;
if (argc > 2)
gpuId = atoi(argv[1]);
cudaSetDevice( gpuId );
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, gpuId);
// for time measurements
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
// input file name
char * image_name = argv[argc-1] ;
// CPU
IMG_TYPE * h_datac=NULL ; // primary input image
IMG_TYPE * h_datacPadded=NULL ; // padded memory area for input image
unsigned int H, L, size; // image dimensions and size
IMG_TYPE * h_out_gpu_c ; // output image ready for hard disk write
dim3 dimBlock, dimGrid ; // grid dimensions
int bsx=32, bsy=8 ; // default values of thread block dimensions
int ct=0; // counter for mean execution time
int profImage;
int depth;
int bitDepth;
// GPU data output
IMG_TYPE *d_outc ;
IMG_TYPE *d_inc ;
// dummy alloc to avoid possible high latency when first using the GPU
short * d_bidon ;
cudaMalloc( (void**) &d_bidon, sizeof(short)) ;
if (type_image_ppm(&profImage, &H, &L, &depth, image_name)){
bitDepth = log2(1.0*(depth+1));
// loads image from hard disk
sdkStartTimer(&timer);
h_datac = (IMG_TYPE*)malloc(H*L*sizeof(IMG_TYPE));
//sdkLoadPGM(image_name, &h_datac, &L, &H);
load_pgm2uw(h_datac, H, L, image_name);
sdkStopTimer(&timer);
// memory size of the image (CPU side)
size = H * L * sizeof( IMG_TYPE );
// loading summary
printf("\n***** CONVOMED SUMMARY *****\n") ;
printf("GPU : %s\n", prop.name);
printf("Image %d bits %s (%d x %d) pixels = %d Bytes loaded in %f ms,\n", bitDepth, image_name, L, H, size, sdkGetTimerValue(&timer));
}
sdkResetTimer(&timer);
sdkStartTimer(&timer);
// GPU memory allocations
size_t Hpitch ; // pitch in bytes returned by the pitched allocation below
checkCudaErrors(cudaMalloc( (void**) &d_outc, H*L*sizeof(IMG_TYPE) )) ;
checkError("Alloc dout_c") ;
checkCudaErrors(cudaMallocPitch( (void**) &d_inc, &Hpitch, (size_t)((L+2*r)*sizeof(IMG_TYPE)), (size_t)(H+2*r) )) ; // height is a row count, not bytes
checkError("Alloc d_inc");
sdkStopTimer(&timer);
printf("GPU memory allocations done in %f ms\n", sdkGetTimerValue(&timer)) ;
// PAGED LOCKED mem
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(cudaHostAlloc((void**)&h_out_gpu_c, H*L*sizeof(IMG_TYPE), cudaHostAllocDefault)) ;
h_datacPadded = (IMG_TYPE *)malloc((H+2*r)*Hpitch*sizeof(IMG_TYPE)) ;
if (h_datacPadded != NULL) printf("ALLOC padded mem CPU OK\n");
sdkStopTimer(&timer);
printf("CPU memory allocations done in %f ms\n", sdkGetTimerValue(&timer)) ;
int i, j ;
int h_dim = Hpitch/sizeof(IMG_TYPE) ;
for (i=0; i<H; i++)
for (j=0; j<L; j++)
h_datacPadded[(i+r)*h_dim+j+r] = 1.0*h_datac[i*L+j] ;
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(cudaMemcpy( d_inc, h_datacPadded, (H+2*r)*Hpitch, cudaMemcpyHostToDevice)) ;
checkError("Copie h_datac en GMEM --> d_inc");
sdkStopTimer(&timer);
printf("Input image copied into global memory in %f ms\n", sdkGetTimerValue(&timer)) ;
sdkResetTimer(&timer) ;
/*****************************
* Kernels calls
*****************************/
checkError("Config cache");
dimBlock = dim3(bsx,bsy,1) ;
dimGrid = dim3( (L/dimBlock.x)/1, (H/dimBlock.y)/ppt, 1 ) ;
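// Grid sizing note: one thread column per output pixel in x, and H/(blockDim.y*ppt)
// rows of blocks in y, i.e. each thread presumably covers `ppt` output rows
// (the actual per-thread loop lives in kernelMedian.cu, which is not shown here).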
sdkResetTimer(&timer);
sdkStartTimer(&timer);
for (ct=0; ct<100 ; ct++)
medianFilterSharedKernel<<< dimGrid, dimBlock, 0 >>>(d_inc, d_outc, L, h_dim) ;
cudaThreadSynchronize() ;
sdkStopTimer(&timer);
printf("Mean runtime (on %d executions): %f ms - soit %.0f Mpixels/s \n", (ct), sdkGetTimerValue(&timer)/(ct), L*H/(1000.0*sdkGetTimerValue(&timer)/(ct))) ;
sdkResetTimer(&timer);
sdkStartTimer(&timer);
checkCudaErrors(cudaMemcpy((void*)h_out_gpu_c , d_outc, H*L*sizeof(IMG_TYPE), cudaMemcpyDeviceToHost)) ;
checkError("Copie D_out depuis GMEM vers mem CPU");
sdkStopTimer(&timer);
printf("Ouput image (image_out.pgm) copied from GPU to CPU in %f ms\n", sdkGetTimerValue(&timer)) ;
printf("***** END OF CONVOMED EXECUTION *****\n\n");
//sdkSavePGM("image_out.pgm", h_out_gpu_c, L, H) ;
save_2pgm((char*)("image_out.pgm"), h_out_gpu_c, L, H);
checkError("Writing img on disk");
cudaFree(d_inc) ;
cudaFree(d_outc);
cudaFreeHost(h_out_gpu_c);
}
int main(int argc, char **argv){
// mask radius
int r=2 ;
//pixels per thread
int ppt=2 ;
unsigned int H, L;
int profImage, depth, bitDepth;
char * image_name = argv[argc-1] ;
if (type_image_ppm(&profImage, &H, &L, &depth, image_name)){
bitDepth = log2(1.0*(depth+1));
switch(bitDepth){
case 8:
run_test(argc, argv, r, ppt, UCHAR);
break;
case 16:
run_test(argc, argv, r, ppt, USHORT);
break;
}
}
return EXIT_SUCCESS ;
}
|
1f2978e2abdaf8845061cbbe1e57f6ef1f7f3be5.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2017 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2017 UT-Battelle, LLC.
// Copyright 2017 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#include <cstdlib>
#include <mutex>
#include <vtkm/cont/Logging.h>
#include <vtkm/cont/cuda/ErrorCuda.h>
#include <vtkm/cont/cuda/internal/CudaAllocator.h>
#define NO_VTKM_MANAGED_MEMORY "NO_VTKM_MANAGED_MEMORY"
#include <mutex>
#include <vector>
VTKM_THIRDPARTY_PRE_INCLUDE
#include <hip/hip_runtime.h>
VTKM_THIRDPARTY_POST_INCLUDE
// These static vars are in an anon namespace to work around MSVC linker issues.
namespace
{
#if CUDART_VERSION >= 8000
// Has CudaAllocator::Initialize been called by any thread?
static std::once_flag IsInitialized;
#endif
// True if concurrent pageable managed memory is not disabled by user via a system
// environment variable and all devices support it.
static bool ManagedMemoryEnabled = false;
// Avoid overhead of hipMemAdvise and hipMemPrefetchAsync for small buffers.
// This value should be > 0 or else these functions will error out.
static std::size_t Threshold = 1 << 20;
}
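// Note: Threshold above (1 << 20 bytes = 1 MiB) is compared against allocation sizes in
// the PrepareFor* methods below; smaller buffers skip the hipMemAdvise/hipMemPrefetchAsync
// calls entirely, since the per-call overhead would outweigh any benefit for tiny copies.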
namespace vtkm
{
namespace cont
{
namespace cuda
{
namespace internal
{
bool CudaAllocator::UsingManagedMemory()
{
CudaAllocator::Initialize();
return ManagedMemoryEnabled;
}
bool CudaAllocator::IsDevicePointer(const void* ptr)
{
CudaAllocator::Initialize();
if (!ptr)
{
return false;
}
hipPointerAttribute_t attr;
hipError_t err = hipPointerGetAttributes(&attr, ptr);
// This function will return invalid value if the pointer is unknown to the
// cuda runtime. Manually catch this value since it's not really an error.
if (err == hipErrorInvalidValue)
{
hipGetLastError(); // Clear the error so we don't raise it later...
return false;
}
VTKM_CUDA_CALL(err /*= hipPointerGetAttributes(&attr, ptr)*/);
return attr.devicePointer == ptr;
}
bool CudaAllocator::IsManagedPointer(const void* ptr)
{
if (!ptr || !ManagedMemoryEnabled)
{
return false;
}
hipPointerAttribute_t attr;
hipError_t err = hipPointerGetAttributes(&attr, ptr);
// This function will return invalid value if the pointer is unknown to the
// cuda runtime. Manually catch this value since it's not really an error.
if (err == hipErrorInvalidValue)
{
hipGetLastError(); // Clear the error so we don't raise it later...
return false;
}
VTKM_CUDA_CALL(err /*= hipPointerGetAttributes(&attr, ptr)*/);
#if CUDART_VERSION < 10000 // isManaged deprecated in CUDA 10.
return attr.isManaged != 0;
#else // attr.type doesn't exist before CUDA 10
return attr.type == cudaMemoryTypeManaged;
#endif
}
void* CudaAllocator::Allocate(std::size_t numBytes)
{
CudaAllocator::Initialize();
// When numBytes is zero hipMallocManaged returns an error and the behavior
// of hipMalloc is not documented. Just return nullptr.
if (numBytes == 0)
{
return nullptr;
}
void* ptr = nullptr;
if (ManagedMemoryEnabled)
{
VTKM_CUDA_CALL(hipMallocManaged(&ptr, numBytes));
}
else
{
VTKM_CUDA_CALL(hipMalloc(&ptr, numBytes));
}
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Allocated CUDA array of %s at %p.",
vtkm::cont::GetSizeString(numBytes).c_str(),
ptr);
}
return ptr;
}
void* CudaAllocator::AllocateUnManaged(std::size_t numBytes)
{
void* ptr = nullptr;
VTKM_CUDA_CALL(hipMalloc(&ptr, numBytes));
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Allocated CUDA array of %s at %p.",
vtkm::cont::GetSizeString(numBytes).c_str(),
ptr);
}
return ptr;
}
void CudaAllocator::Free(void* ptr)
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing CUDA allocation at %p.", ptr);
VTKM_CUDA_CALL(hipFree(ptr));
}
void CudaAllocator::FreeDeferred(void* ptr, std::size_t numBytes)
{
static std::mutex deferredMutex;
static std::vector<void*> deferredPointers;
static std::size_t deferredSize = 0;
constexpr std::size_t bufferLimit = 2 << 24; // 32MB buffer (2 << 24 bytes)
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Deferring free of CUDA allocation at %p of %s.",
ptr,
vtkm::cont::GetSizeString(numBytes).c_str());
}
std::vector<void*> toFree;
// critical section
{
std::lock_guard<std::mutex> lock(deferredMutex);
deferredPointers.push_back(ptr);
deferredSize += numBytes;
if (deferredSize >= bufferLimit)
{
toFree.swap(deferredPointers);
deferredSize = 0;
}
}
for (auto&& p : toFree)
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing deferred CUDA allocation at %p.", p);
VTKM_CUDA_CALL(hipFree(p));
}
}
void CudaAllocator::PrepareForControl(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
// TODO these hints need to be benchmarked and adjusted once we start
// sharing the pointers between cont/exec
VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetAccessedBy, hipCpuDeviceId));
VTKM_CUDA_CALL(hipMemPrefetchAsync(ptr, numBytes, hipCpuDeviceId, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForInput(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(hipGetDevice(&dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetReadMostly, dev));
VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(hipMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForOutput(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(hipGetDevice(&dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseUnsetReadMostly, dev));
VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(hipMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForInPlace(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(hipGetDevice(&dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseUnsetReadMostly, dev));
VTKM_CUDA_CALL(hipMemAdvise(ptr, numBytes, hipMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(hipMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::Initialize()
{
#if CUDART_VERSION >= 8000
std::call_once(IsInitialized, []() {
bool managedMemorySupported = true;
int numDevices;
VTKM_CUDA_CALL(hipGetDeviceCount(&numDevices));
if (numDevices == 0)
{
return;
}
// Check all devices, use the feature set supported by all
bool managed = true;
hipDeviceProp_t prop;
for (int i = 0; i < numDevices && managed; ++i)
{
VTKM_CUDA_CALL(hipGetDeviceProperties(&prop, i));
// We check for concurrentManagedAccess, as devices with only the
// managedAccess property have extra synchronization requirements.
managed = managed && prop.concurrentManagedAccess;
}
managedMemorySupported = managed;
// Check if users want to disable managed memory
#pragma warning(push)
// getenv is not thread safe on windows but since it's inside a call_once block so
// it's fine to suppress the warning here.
#pragma warning(disable : 4996)
const char* buf = std::getenv(NO_VTKM_MANAGED_MEMORY);
#pragma warning(pop)
if (buf != nullptr)
{
ManagedMemoryEnabled = (std::string(buf) != "1");
}
ManagedMemoryEnabled = ManagedMemoryEnabled && managedMemorySupported;
});
#endif
}
}
}
}
} // end namespace vtkm::cont::cuda::internal
| 1f2978e2abdaf8845061cbbe1e57f6ef1f7f3be5.cu | //============================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2017 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2017 UT-Battelle, LLC.
// Copyright 2017 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//============================================================================
#include <cstdlib>
#include <mutex>
#include <vtkm/cont/Logging.h>
#include <vtkm/cont/cuda/ErrorCuda.h>
#include <vtkm/cont/cuda/internal/CudaAllocator.h>
#define NO_VTKM_MANAGED_MEMORY "NO_VTKM_MANAGED_MEMORY"
#include <mutex>
#include <vector>
VTKM_THIRDPARTY_PRE_INCLUDE
#include <cuda_runtime.h>
VTKM_THIRDPARTY_POST_INCLUDE
// These static vars are in an anon namespace to work around MSVC linker issues.
namespace
{
#if CUDART_VERSION >= 8000
// Has CudaAllocator::Initialize been called by any thread?
static std::once_flag IsInitialized;
#endif
// True if concurrent pageable managed memory is not disabled by user via a system
// environment variable and all devices support it.
static bool ManagedMemoryEnabled = false;
// Avoid overhead of cudaMemAdvise and cudaMemPrefetchAsync for small buffers.
// This value should be > 0 or else these functions will error out.
static std::size_t Threshold = 1 << 20;
}
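// Note: Threshold above (1 << 20 bytes = 1 MiB) is compared against allocation sizes in
// the PrepareFor* methods below; smaller buffers skip the cudaMemAdvise/cudaMemPrefetchAsync
// calls entirely, since the per-call overhead would outweigh any benefit for tiny copies.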
namespace vtkm
{
namespace cont
{
namespace cuda
{
namespace internal
{
bool CudaAllocator::UsingManagedMemory()
{
CudaAllocator::Initialize();
return ManagedMemoryEnabled;
}
bool CudaAllocator::IsDevicePointer(const void* ptr)
{
CudaAllocator::Initialize();
if (!ptr)
{
return false;
}
cudaPointerAttributes attr;
cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
// This function will return invalid value if the pointer is unknown to the
// cuda runtime. Manually catch this value since it's not really an error.
if (err == cudaErrorInvalidValue)
{
cudaGetLastError(); // Clear the error so we don't raise it later...
return false;
}
VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/);
return attr.devicePointer == ptr;
}
bool CudaAllocator::IsManagedPointer(const void* ptr)
{
if (!ptr || !ManagedMemoryEnabled)
{
return false;
}
cudaPointerAttributes attr;
cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
// This function will return invalid value if the pointer is unknown to the
// cuda runtime. Manually catch this value since it's not really an error.
if (err == cudaErrorInvalidValue)
{
cudaGetLastError(); // Clear the error so we don't raise it later...
return false;
}
VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/);
#if CUDART_VERSION < 10000 // isManaged deprecated in CUDA 10.
return attr.isManaged != 0;
#else // attr.type doesn't exist before CUDA 10
return attr.type == cudaMemoryTypeManaged;
#endif
}
void* CudaAllocator::Allocate(std::size_t numBytes)
{
CudaAllocator::Initialize();
// When numBytes is zero cudaMallocManaged returns an error and the behavior
// of cudaMalloc is not documented. Just return nullptr.
if (numBytes == 0)
{
return nullptr;
}
void* ptr = nullptr;
if (ManagedMemoryEnabled)
{
VTKM_CUDA_CALL(cudaMallocManaged(&ptr, numBytes));
}
else
{
VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes));
}
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Allocated CUDA array of %s at %p.",
vtkm::cont::GetSizeString(numBytes).c_str(),
ptr);
}
return ptr;
}
void* CudaAllocator::AllocateUnManaged(std::size_t numBytes)
{
void* ptr = nullptr;
VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes));
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Allocated CUDA array of %s at %p.",
vtkm::cont::GetSizeString(numBytes).c_str(),
ptr);
}
return ptr;
}
void CudaAllocator::Free(void* ptr)
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing CUDA allocation at %p.", ptr);
VTKM_CUDA_CALL(cudaFree(ptr));
}
void CudaAllocator::FreeDeferred(void* ptr, std::size_t numBytes)
{
static std::mutex deferredMutex;
static std::vector<void*> deferredPointers;
static std::size_t deferredSize = 0;
  constexpr std::size_t bufferLimit = 2 << 24; // 32MB buffer
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
"Deferring free of CUDA allocation at %p of %s.",
ptr,
vtkm::cont::GetSizeString(numBytes).c_str());
}
std::vector<void*> toFree;
// critical section
{
std::lock_guard<std::mutex> lock(deferredMutex);
deferredPointers.push_back(ptr);
deferredSize += numBytes;
if (deferredSize >= bufferLimit)
{
toFree.swap(deferredPointers);
deferredSize = 0;
}
}
for (auto&& p : toFree)
{
VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing deferred CUDA allocation at %p.", p);
VTKM_CUDA_CALL(cudaFree(p));
}
}
void CudaAllocator::PrepareForControl(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
// TODO these hints need to be benchmarked and adjusted once we start
// sharing the pointers between cont/exec
VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId));
VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, cudaCpuDeviceId, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForInput(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(cudaGetDevice(&dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetReadMostly, dev));
VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForOutput(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(cudaGetDevice(&dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev));
VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::PrepareForInPlace(const void* ptr, std::size_t numBytes)
{
if (IsManagedPointer(ptr) && numBytes >= Threshold)
{
#if CUDART_VERSION >= 8000
int dev;
VTKM_CUDA_CALL(cudaGetDevice(&dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
// VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev));
VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
#endif // CUDA >= 8.0
}
}
void CudaAllocator::Initialize()
{
#if CUDART_VERSION >= 8000
std::call_once(IsInitialized, []() {
bool managedMemorySupported = true;
int numDevices;
VTKM_CUDA_CALL(cudaGetDeviceCount(&numDevices));
if (numDevices == 0)
{
return;
}
// Check all devices, use the feature set supported by all
bool managed = true;
cudaDeviceProp prop;
for (int i = 0; i < numDevices && managed; ++i)
{
VTKM_CUDA_CALL(cudaGetDeviceProperties(&prop, i));
// We check for concurrentManagedAccess, as devices with only the
// managedAccess property have extra synchronization requirements.
managed = managed && prop.concurrentManagedAccess;
}
managedMemorySupported = managed;
// Check if users want to disable managed memory
#pragma warning(push)
    // getenv is not thread safe on Windows, but since it's inside a call_once block
    // it's fine to suppress the warning here.
#pragma warning(disable : 4996)
const char* buf = std::getenv(NO_VTKM_MANAGED_MEMORY);
#pragma warning(pop)
if (buf != nullptr)
{
ManagedMemoryEnabled = (std::string(buf) != "1");
}
ManagedMemoryEnabled = ManagedMemoryEnabled && managedMemorySupported;
});
#endif
}
}
}
}
} // end namespace vtkm::cont::cuda::internal
|
ebcb6061eca544400c011b5846e03c024beb1b9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include <hip/hip_runtime_api.h>
__global__ void parallel_for(const int n, double* da) {
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < n) {
double dummy = 123.456;
da[tid] = dummy + 123.456*dummy;
}
}
int main()
{
const int N = 10000000;
int blockSize = 256;
int numBlocks = (N + blockSize -1) / blockSize;
double* da;
hipMalloc((void**)&da, sizeof(double)*N);
//warm up
for(int j=0; j<10; j++)
{
hipLaunchKernelGGL(( parallel_for), dim3(numBlocks), dim3(blockSize), 0, 0, N, da);
}
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::duration<float> fsec;
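  // make sure the warm-up launches have finished before starting the timer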
hipDeviceSynchronize();
auto start_clock = Time::now();
hipProfilerStart();
for(int j=0; j<10; j++)
{
hipLaunchKernelGGL(( parallel_for), dim3(numBlocks), dim3(blockSize), 0, 0, N, da);
}
hipDeviceSynchronize();
hipProfilerStop();
auto finish_clock = Time::now();
fsec fs = finish_clock - start_clock;
std::cout << "time taken for cuda parallel for (msecs):" << fs.count()*1e3 << std::endl;
hipFree(da);
return 0;
}
| ebcb6061eca544400c011b5846e03c024beb1b9c.cu | #include <iostream>
#include <chrono>
#include <cuda_profiler_api.h>
__global__ void parallel_for(const int n, double* da) {
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < n) {
double dummy = 123.456;
da[tid] = dummy + 123.456*dummy;
}
}
int main()
{
const int N = 10000000;
int blockSize = 256;
int numBlocks = (N + blockSize -1) / blockSize;
double* da;
cudaMalloc((void**)&da, sizeof(double)*N);
//warm up
for(int j=0; j<10; j++)
{
parallel_for<<<numBlocks, blockSize>>>(N, da);
}
typedef std::chrono::high_resolution_clock Time;
typedef std::chrono::duration<float> fsec;
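  // make sure the warm-up launches have finished before starting the timer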
cudaDeviceSynchronize();
auto start_clock = Time::now();
cudaProfilerStart();
for(int j=0; j<10; j++)
{
parallel_for<<<numBlocks, blockSize>>>(N, da);
}
cudaDeviceSynchronize();
cudaProfilerStop();
auto finish_clock = Time::now();
fsec fs = finish_clock - start_clock;
std::cout << "time taken for cuda parallel for (msecs):" << fs.count()*1e3 << std::endl;
cudaFree(da);
return 0;
}
|
6a5361260f208338c35728b86b42cb8c66f1fa15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************
Copyright 2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include "sobel.h"
#include "SDKBitMap.h"
#include "kernels.hip"
static bool compare(const float *refData, const float *data,
const int length, const float epsilon = 1e-6f)
{
float error = 0.0f;
float ref = 0.0f;
for(int i = 1; i < length; ++i)
{
float diff = refData[i] - data[i];
// if (diff != 0) printf("mismatch @%d: %f %f\n", i, refData[i] , data[i]);
error += diff * diff;
ref += refData[i] * refData[i];
}
float normRef = sqrtf((float) ref);
if (fabs((float) ref) < 1e-7f)
{
return false;
}
float normError = sqrtf((float) error);
error = normError / normRef;
return error < epsilon;
}
int main(int argc, char * argv[])
{
if (argc != 3) {
printf("Usage: %s <path to file> <repeat>\n", argv[0]);
return 1;
}
const char* filePath = argv[1];
const int iterations = atoi(argv[2]);
// load input bitmap image
SDKBitMap inputBitmap;
inputBitmap.load(filePath);
// error if image did not load
if(!inputBitmap.isLoaded())
{
printf("Failed to load input image!");
return SDK_FAILURE;
}
// get width and height of input image
const int height = inputBitmap.getHeight();
const int width = inputBitmap.getWidth();
const int pixelSize = sizeof(uchar4);
const int imageSize = width * height * pixelSize;
printf("Image height = %d and width = %d\n", height, width);
// allocate memory for input image data
uchar4 *inputImageData = (uchar4*) malloc (imageSize);
if (inputImageData == NULL)
printf("Failed to allocate memory! (inputImageData)");
// allocate memory for output image data
uchar4 *outputImageData = (uchar4*) malloc (imageSize);
if (outputImageData == NULL)
printf("Failed to allocate memory! (outputImageData)");
// initialize the output
memset(outputImageData, 0, imageSize);
// get the pointer to pixel data
uchar4 *pixelData = inputBitmap.getPixels();
if(pixelData == NULL)
printf("Failed to read pixel Data!");
// Copy pixel data into inputImageData
memcpy(inputImageData, pixelData, imageSize);
// allocate memory for verification output
uchar4* verificationOutput = (uchar4*) malloc (imageSize);
if (verificationOutput == NULL)
printf("verificationOutput heap allocation failed!");
// initialize the output
memset(verificationOutput, 0, imageSize);
// Create memory object for input Image
uchar4 *inputImageBuffer;
hipMalloc((void**)&inputImageBuffer, imageSize);
hipMemcpy(inputImageBuffer, inputImageData, imageSize, hipMemcpyHostToDevice);
uchar4 *outputImageBuffer;
hipMalloc((void**)&outputImageBuffer, imageSize);
hipMemset(outputImageBuffer, 0, imageSize);
// Enqueue a kernel run call.
const int blockSizeX = 16;
const int blockSizeY = 16;
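  // NOTE: the grid setup assumes width and height are multiples of 16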
dim3 grid (width/blockSizeX, height/blockSizeY);
dim3 block (blockSizeX, blockSizeY);
printf("Executing kernel for %d iterations", iterations);
printf("-------------------------------------------\n");
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int i = 0; i < iterations; i++)
{
hipLaunchKernelGGL(sobel_filter, grid, block, 0, 0, inputImageBuffer, outputImageBuffer, width, height);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / iterations);
hipMemcpy(outputImageData, outputImageBuffer, imageSize, hipMemcpyDeviceToHost);
hipFree(inputImageBuffer);
hipFree(outputImageBuffer);
// reference implementation
reference (verificationOutput, inputImageData, width, height, pixelSize);
float *outputDevice = (float*) malloc (imageSize * sizeof(float));
if (outputDevice == NULL)
printf("Failed to allocate host memory! (outputDevice)");
float *outputReference = (float*) malloc (imageSize * sizeof(float));
if (outputReference == NULL)
printf("Failed to allocate host memory!" "(outputReference)");
// copy uchar data to float array
for(int i = 0; i < width * height; i++)
{
outputDevice[i * 4 + 0] = outputImageData[i].x;
outputDevice[i * 4 + 1] = outputImageData[i].y;
outputDevice[i * 4 + 2] = outputImageData[i].z;
outputDevice[i * 4 + 3] = outputImageData[i].w;
outputReference[i * 4 + 0] = verificationOutput[i].x;
outputReference[i * 4 + 1] = verificationOutput[i].y;
outputReference[i * 4 + 2] = verificationOutput[i].z;
outputReference[i * 4 + 3] = verificationOutput[i].w;
}
// compare the results and see if they match
if(compare(outputReference, outputDevice, imageSize))
printf("PASS\n");
else
printf("FAIL\n");
free(outputDevice);
free(outputReference);
free(verificationOutput);
free(inputImageData);
free(outputImageData);
return SDK_SUCCESS;
}
| 6a5361260f208338c35728b86b42cb8c66f1fa15.cu | /**********************************************************************
Copyright ©2013 Advanced Micro Devices, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************/
#include "sobel.h"
#include "SDKBitMap.h"
#include "kernels.cu"
static bool compare(const float *refData, const float *data,
const int length, const float epsilon = 1e-6f)
{
float error = 0.0f;
float ref = 0.0f;
for(int i = 1; i < length; ++i)
{
float diff = refData[i] - data[i];
// if (diff != 0) printf("mismatch @%d: %f %f\n", i, refData[i] , data[i]);
error += diff * diff;
ref += refData[i] * refData[i];
}
float normRef = sqrtf((float) ref);
if (fabs((float) ref) < 1e-7f)
{
return false;
}
float normError = sqrtf((float) error);
error = normError / normRef;
return error < epsilon;
}
int main(int argc, char * argv[])
{
if (argc != 3) {
printf("Usage: %s <path to file> <repeat>\n", argv[0]);
return 1;
}
const char* filePath = argv[1];
const int iterations = atoi(argv[2]);
// load input bitmap image
SDKBitMap inputBitmap;
inputBitmap.load(filePath);
// error if image did not load
if(!inputBitmap.isLoaded())
{
printf("Failed to load input image!");
return SDK_FAILURE;
}
// get width and height of input image
const int height = inputBitmap.getHeight();
const int width = inputBitmap.getWidth();
const int pixelSize = sizeof(uchar4);
const int imageSize = width * height * pixelSize;
printf("Image height = %d and width = %d\n", height, width);
// allocate memory for input image data
uchar4 *inputImageData = (uchar4*) malloc (imageSize);
if (inputImageData == NULL)
printf("Failed to allocate memory! (inputImageData)");
// allocate memory for output image data
uchar4 *outputImageData = (uchar4*) malloc (imageSize);
if (outputImageData == NULL)
printf("Failed to allocate memory! (outputImageData)");
// initialize the output
memset(outputImageData, 0, imageSize);
// get the pointer to pixel data
uchar4 *pixelData = inputBitmap.getPixels();
if(pixelData == NULL)
printf("Failed to read pixel Data!");
// Copy pixel data into inputImageData
memcpy(inputImageData, pixelData, imageSize);
// allocate memory for verification output
uchar4* verificationOutput = (uchar4*) malloc (imageSize);
if (verificationOutput == NULL)
printf("verificationOutput heap allocation failed!");
// initialize the output
memset(verificationOutput, 0, imageSize);
// Create memory object for input Image
uchar4 *inputImageBuffer;
hipMalloc((void**)&inputImageBuffer, imageSize);
hipMemcpy(inputImageBuffer, inputImageData, imageSize, hipMemcpyHostToDevice);
uchar4 *outputImageBuffer;
hipMalloc((void**)&outputImageBuffer, imageSize);
hipMemset(outputImageBuffer, 0, imageSize);
// Enqueue a kernel run call.
const int blockSizeX = 16;
const int blockSizeY = 16;
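  // NOTE: the grid setup assumes width and height are multiples of 16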
dim3 grid (width/blockSizeX, height/blockSizeY);
dim3 block (blockSizeX, blockSizeY);
printf("Executing kernel for %d iterations", iterations);
printf("-------------------------------------------\n");
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int i = 0; i < iterations; i++)
{
hipLaunchKernelGGL(sobel_filter, grid, block, 0, 0, inputImageBuffer, outputImageBuffer, width, height);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (us)\n", (time * 1e-3f) / iterations);
hipMemcpy(outputImageData, outputImageBuffer, imageSize, hipMemcpyDeviceToHost);
hipFree(inputImageBuffer);
hipFree(outputImageBuffer);
// reference implementation
reference (verificationOutput, inputImageData, width, height, pixelSize);
float *outputDevice = (float*) malloc (imageSize * sizeof(float));
if (outputDevice == NULL)
printf("Failed to allocate host memory! (outputDevice)");
float *outputReference = (float*) malloc (imageSize * sizeof(float));
if (outputReference == NULL)
printf("Failed to allocate host memory!" "(outputReference)");
// copy uchar data to float array
for(int i = 0; i < width * height; i++)
{
outputDevice[i * 4 + 0] = outputImageData[i].x;
outputDevice[i * 4 + 1] = outputImageData[i].y;
outputDevice[i * 4 + 2] = outputImageData[i].z;
outputDevice[i * 4 + 3] = outputImageData[i].w;
outputReference[i * 4 + 0] = verificationOutput[i].x;
outputReference[i * 4 + 1] = verificationOutput[i].y;
outputReference[i * 4 + 2] = verificationOutput[i].z;
outputReference[i * 4 + 3] = verificationOutput[i].w;
}
// compare the results and see if they match
if(compare(outputReference, outputDevice, imageSize))
printf("PASS\n");
else
printf("FAIL\n");
free(outputDevice);
free(outputReference);
free(verificationOutput);
free(inputImageData);
free(outputImageData);
return SDK_SUCCESS;
}
|
678ced24395c4d218f3c3ed297f40db0883264aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<iostream>
#include<limits.h>
#include<algorithm>
#include<sys/time.h>
using namespace std;
#define INF INT_MAX-1
int m;
int rowSize;
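// number of tiles per dimension at each recursion depth; INT_MAX forces the flat base-case kernels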
int tilesize[3] = {2, 4, INT_MAX};
void print_matrix(float *d)
{
int i, j;
for (i = 0; i < 32; i++)
{
for (j = 0; j < 32; j++)
printf("%0.1f\t", d[i * rowSize + j]);
puts("");
}
}
__global__ void Dloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if (j >= rowSize)
return;
//int i = xRowStart + rowSize*blockIdx.y;
int i = blockIdx.y * blockDim.y + threadIdx.y + xRowStart;
if (i >= rowSize)
return;
__shared__ int intermed;
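    // each block works on a single row i; for every k, thread 0 loads d_a[i][k] once
    // and shares it with the whole block through shared memory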
for (int k = vRowStart; k < (vRowStart + currSize); k++) {
if (threadIdx.x == 0) {
intermed = d_a[i * rowSize + k];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j] = fmin(d_a[i * rowSize + j], intermed + d_a[k * rowSize + j]);
}
}
void FW_D_loop(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize <= 1024)
threadsPerBlock = currSize;
else
threadsPerBlock = 1024;
dim3 blocksPerGrid( currSize / threadsPerBlock , currSize);
hipLaunchKernelGGL(( Dloop_FW) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//hipDeviceSynchronize();
}
void DFW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_D_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
hipDeviceSynchronize();
}
}
}
__global__ void Cloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
__shared__ int intermed;
int i = blockIdx.x * blockDim.x + threadIdx.x + xRowStart;
if (i >= rowSize)
return;
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int j = xColStart; j < (xColStart + currSize); j++)
{
if (threadIdx.x == 0) {
intermed = d_a[k * rowSize + j];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j ] = fmin( d_a[i * rowSize + j ], d_a[i * rowSize + k] + intermed);
}
}
}
void FW_C_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize <= 1024)
threadsPerBlock = currSize;
else
threadsPerBlock = 1024;
int noOfBlocks = currSize / threadsPerBlock;
hipLaunchKernelGGL(( Cloop_FW) , dim3(noOfBlocks), dim3(threadsPerBlock), 0, 0, d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//hipDeviceSynchronize();
}
void CFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_C_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int i = 1; i <= r; i++) {
CFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
}
hipDeviceSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (j != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
hipDeviceSynchronize();
}
}
}
__global__ void Bloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
__shared__ int intermed;
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if (j >= rowSize)
return;
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int i = xRowStart; i < (xRowStart + currSize); i++)
{
if (threadIdx.x == 0) {
intermed = d_a[i * rowSize + k];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j ] = fmin(intermed + d_a[k * rowSize + j], d_a[i * rowSize + j]);
}
}
}
void FW_B_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize < 1024)
{
threadsPerBlock = currSize;
}
else
{
threadsPerBlock = 1024;
}
int noOfBlocks = currSize / threadsPerBlock;
hipLaunchKernelGGL(( Bloop_FW) , dim3(noOfBlocks), dim3(threadsPerBlock), 0, 0, d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//hipDeviceSynchronize();
}
void BFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_B_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int j = 1; j <= r; j++) {
BFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
hipDeviceSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (i != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
hipDeviceSynchronize();
}
}
}
__global__ void Aloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
/*int col = blockIdx.x * blockDim.x + threadIdx.x;
if(col >= rowSize)
return;
*/
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int i = xRowStart; i < (xRowStart + currSize); i++)
{
for (int j = xColStart; j < (xColStart + currSize); j++)
{
if (i != j && j != k && i != k)
d_a[i * rowSize + j] = fmin( d_a[i * rowSize + k] + d_a[k * rowSize + j] , d_a[i * rowSize + j]);
}
}
}
}
void FW_A_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
hipLaunchKernelGGL(( Aloop_FW) , dim3(1), dim3(1), 0, 0, d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
}
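// Recursive blocked Floyd-Warshall: AFW processes the diagonal tile, BFW the tiles
// in its row, CFW the tiles in its column, and DFW the remaining tiles, recursing
// until the tiles are small enough for the flat base-case kernels above.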
void AFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_A_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
AFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
for (int j = 1; j <= r; j++) {
if (j != k)
BFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
for (int i = 1; i <= r; i++) {
if (i != k)
CFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
}
hipDeviceSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (i != k && j != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
hipDeviceSynchronize();
}
}
}
int main(int argc, char *argv[])
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j;
hipError_t err = hipSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = hipMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for hipMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", hipGetErrorString(err), 3);
return 1;
}
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
err = hipMemcpy(d_a, a, totalSize, hipMemcpyHostToDevice);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
AFW(d_a, 0, 0, 0, 0, 0, 0, rowSize, 0, rowSize);
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
err = hipMemcpy(a, d_a, totalSize, hipMemcpyDeviceToHost);
print_matrix(a);
return 0;
}
| 678ced24395c4d218f3c3ed297f40db0883264aa.cu | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<iostream>
#include<limits.h>
#include<algorithm>
#include<sys/time.h>
using namespace std;
#define INF INT_MAX-1
int m;
int rowSize;
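// number of tiles per dimension at each recursion depth; INT_MAX forces the flat base-case kernels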
int tilesize[3] = {2, 4, INT_MAX};
void print_matrix(float *d)
{
int i, j;
for (i = 0; i < 32; i++)
{
for (j = 0; j < 32; j++)
printf("%0.1f\t", d[i * rowSize + j]);
puts("");
}
}
__global__ void Dloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if (j >= rowSize)
return;
//int i = xRowStart + rowSize*blockIdx.y;
int i = blockIdx.y * blockDim.y + threadIdx.y + xRowStart;
if (i >= rowSize)
return;
__shared__ int intermed;
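    // each block works on a single row i; for every k, thread 0 loads d_a[i][k] once
    // and shares it with the whole block through shared memory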
for (int k = vRowStart; k < (vRowStart + currSize); k++) {
if (threadIdx.x == 0) {
intermed = d_a[i * rowSize + k];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j] = fmin(d_a[i * rowSize + j], intermed + d_a[k * rowSize + j]);
}
}
void FW_D_loop(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize <= 1024)
threadsPerBlock = currSize;
else
threadsPerBlock = 1024;
dim3 blocksPerGrid( currSize / threadsPerBlock , currSize);
Dloop_FW <<< blocksPerGrid, threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//cudaThreadSynchronize();
}
void DFW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_D_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
__global__ void Cloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
__shared__ int intermed;
int i = blockIdx.x * blockDim.x + threadIdx.x + xRowStart;
if (i >= rowSize)
return;
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int j = xColStart; j < (xColStart + currSize); j++)
{
if (threadIdx.x == 0) {
intermed = d_a[k * rowSize + j];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j ] = fmin( d_a[i * rowSize + j ], d_a[i * rowSize + k] + intermed);
}
}
}
void FW_C_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize <= 1024)
threadsPerBlock = currSize;
else
threadsPerBlock = 1024;
int noOfBlocks = currSize / threadsPerBlock;
Cloop_FW <<< noOfBlocks, threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//cudaThreadSynchronize();
}
void CFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_C_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int i = 1; i <= r; i++) {
CFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
}
cudaThreadSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (j != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
__global__ void Bloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
__shared__ int intermed;
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if (j >= rowSize)
return;
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int i = xRowStart; i < (xRowStart + currSize); i++)
{
if (threadIdx.x == 0) {
intermed = d_a[i * rowSize + k];
}
__syncthreads();
if (i != j && j != k && i != k)
d_a[i * rowSize + j ] = fmin(intermed + d_a[k * rowSize + j], d_a[i * rowSize + j]);
}
}
}
void FW_B_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int threadsPerBlock;
if (currSize < 1024)
{
threadsPerBlock = currSize;
}
else
{
threadsPerBlock = 1024;
}
int noOfBlocks = currSize / threadsPerBlock;
Bloop_FW <<< noOfBlocks, threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
//cudaThreadSynchronize();
}
void BFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_B_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
for (int j = 1; j <= r; j++) {
BFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
cudaThreadSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (i != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
__global__ void Aloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
/*int col = blockIdx.x * blockDim.x + threadIdx.x;
if(col >= rowSize)
return;
*/
for (int k = vRowStart; k < (vRowStart + currSize); k++)
{
for (int i = xRowStart; i < (xRowStart + currSize); i++)
{
for (int j = xColStart; j < (xColStart + currSize); j++)
{
if (i != j && j != k && i != k)
d_a[i * rowSize + j] = fmin( d_a[i * rowSize + k] + d_a[k * rowSize + j] , d_a[i * rowSize + j]);
}
}
}
}
void FW_A_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
Aloop_FW <<< 1, 1>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
}
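// Recursive blocked Floyd-Warshall: AFW processes the diagonal tile, BFW the tiles
// in its row, CFW the tiles in its column, and DFW the remaining tiles, recursing
// until the tiles are small enough for the flat base-case kernels above.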
void AFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_A_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize / r;
for (int k = 1; k <= r; k++) {
AFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
for (int j = 1; j <= r; j++) {
if (j != k)
BFW(d_a, xRowStart + (k - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (k - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
for (int i = 1; i <= r; i++) {
if (i != k)
CFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (k - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (k - 1)*newsize, newsize, d + 1, rowSize);
}
cudaThreadSynchronize();
for (int i = 1; i <= r; i++) {
for (int j = 1; j <= r; j++) {
if (i != k && j != k)
DFW(d_a, xRowStart + (i - 1)*newsize, xColStart + (j - 1)*newsize, uRowStart + (i - 1)*newsize, uColStart + (k - 1)*newsize, vRowStart + (k - 1)*newsize, vColStart + (j - 1)*newsize, newsize, d + 1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
int main(int argc, char *argv[])
{
float *d_a;
float *a;
size_t pitch;
rowSize = atoi(argv[1]);
int colSize = rowSize;
int i, j;
cudaError_t err = cudaSuccess;
size_t totalSize = rowSize * colSize * sizeof(float);
a = (float *) malloc(totalSize);
if (!a)
{
printf("Unable to allocate memory for host array\n");
return 1;
}
err = cudaMallocPitch(&d_a, &pitch, rowSize * sizeof(float), colSize);
if (!d_a)
{
printf("memory failed for cudaMalloc");
return 1;
}
if (err != 0) {
printf("%s-%d", cudaGetErrorString(err), 3);
return 1;
}
for (i = 0; i < rowSize; i++)
for (j = 0; j < colSize; j++)
{
if (i == j) {
a[i * rowSize + j] = 0;
}
else {
a[i * rowSize + j] = (i + j) % 5 ? (i + j) : (i + j) % 7;
}
}
err = cudaMemcpy(d_a, a, totalSize, cudaMemcpyHostToDevice);
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
AFW(d_a, 0, 0, 0, 0, 0, 0, rowSize, 0, rowSize);
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
err = cudaMemcpy(a, d_a, totalSize, cudaMemcpyDeviceToHost);
print_matrix(a);
return 0;
}
|
02e4b25e05b375fca690f876f82a8400a037e299.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define WIDTH 16
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
int main(int argc, char *argv[])
{
int width = WIDTH;
float M[WIDTH][WIDTH] = {0};
float N[WIDTH][WIDTH] = {0};
float P[WIDTH][WIDTH] = {0};
float MxN[WIDTH][WIDTH] = {0};
int pass = 1;
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
M[i][j] = rand() % 30;
N[i][j] = rand() % 30;
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
for (int k = 0; k < width; ++k) {
MxN[i][j] += M[i][k] * N[k][j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
MatMul((float *)M, (float *)N, (float *)P, width);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
if(MxN[i][j] != P[i][j]) {
printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, MxN[i][j], i, j, P[i][j]);
pass = 0;
}
}
}
printf("Test %s\n", (pass)?"PASSED":"FAILED");
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width)
{
// Thread row and column within matrix
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
// Multiply M and N
for (int k = 0; k < width; ++k) {
float Melement = *(Md + row*width + k);
float Nelement = *(Nd + k*width + col);
Pvalue += Melement * Nelement;
}
// Write Pvalue to device memory
// Each thread writes one element
*(Pd + row*width + col) = Pvalue;
}
// Matrix multiplication - Host code
void MatMul(float *M, float *N, float *P, int width)
{
size_t size = width * width * sizeof(float);
float *Md, *Nd, *Pd;
// Allocate and Load M, N to device memory
hipMalloc((void **)&Md, size);
hipMemcpy(Md, M, size, hipMemcpyHostToDevice);
hipMalloc((void **)&Nd, size);
hipMemcpy(Nd, N, size, hipMemcpyHostToDevice);
// Allocate P on the device
hipMalloc((void **)&Pd, size);
// Setup the execution configuration
dim3 dimGrid(1, 1);
dim3 dimBlock(width, width);
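    // a single block of width x width threads, so this kernel only works for width <= 32 (1024-thread limit)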
// Get start time event
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Invoke kernel
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, width);
hipError_t cuda_err = hipGetLastError();
if ( hipSuccess != cuda_err ){
printf("before kernel call: error = %s\n", hipGetErrorString (cuda_err));
exit(1) ;
}
// Get stop time event
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Compute execution time
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
// Read P from device memory
hipMemcpy(P, Pd, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
}
| 02e4b25e05b375fca690f876f82a8400a037e299.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 16
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
int main(int argc, char *argv[])
{
int width = WIDTH;
float M[WIDTH][WIDTH] = {0};
float N[WIDTH][WIDTH] = {0};
float P[WIDTH][WIDTH] = {0};
float MxN[WIDTH][WIDTH] = {0};
int pass = 1;
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
M[i][j] = rand() % 30;
N[i][j] = rand() % 30;
}
}
struct timeval starttime, endtime;
gettimeofday(&starttime, NULL);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
for (int k = 0; k < width; ++k) {
MxN[i][j] += M[i][k] * N[k][j];
}
}
}
gettimeofday(&endtime, NULL);
double executime;
executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
printf("CPU time: %13lf msec\n", executime);
MatMul((float *)M, (float *)N, (float *)P, width);
for (int i = 0; i < width; ++i) {
for (int j = 0; j < width; ++j) {
if(MxN[i][j] != P[i][j]) {
printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, MxN[i][j], i, j, P[i][j]);
pass = 0;
}
}
}
printf("Test %s\n", (pass)?"PASSED":"FAILED");
return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width)
{
// Thread row and column within matrix
int row = threadIdx.y;
int col = threadIdx.x;
// Each thread computes one element of P
// by accumulating results into Pvalue
float Pvalue = 0;
// Multiply M and N
for (int k = 0; k < width; ++k) {
float Melement = *(Md + row*width + k);
float Nelement = *(Nd + k*width + col);
Pvalue += Melement * Nelement;
}
// Write Pvalue to device memory
// Each thread writes one element
*(Pd + row*width + col) = Pvalue;
}
// Matrix multiplication - Host code
void MatMul(float *M, float *N, float *P, int width)
{
size_t size = width * width * sizeof(float);
float *Md, *Nd, *Pd;
// Allocate and Load M, N to device memory
cudaMalloc((void **)&Md, size);
cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
cudaMalloc((void **)&Nd, size);
cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
// Allocate P on the device
cudaMalloc((void **)&Pd, size);
// Setup the execution configuration
dim3 dimGrid(1, 1);
dim3 dimBlock(width, width);
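    // a single block of width x width threads, so this kernel only works for width <= 32 (1024-thread limit)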
// Get start time event
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Invoke kernel
MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
cudaError_t cuda_err = cudaGetLastError();
if ( cudaSuccess != cuda_err ){
printf("before kernel call: error = %s\n", cudaGetErrorString (cuda_err));
exit(1) ;
}
// Get stop time event
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Compute execution time
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Read P from device memory
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
}
|
811bae3ba8eefcd340b036cb97e8f2ba77233508.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rmac_regions_op.h"
namespace cub {
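// Order key-value pairs by value, breaking ties toward the larger key;
// used by the arg-min block reduction below.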
template <typename KeyT, typename ValueT>
inline __host__ __device__ bool operator<(
const hipcub::KeyValuePair<KeyT, ValueT>& kv1,
const hipcub::KeyValuePair<KeyT, ValueT>& kv2) {
return (kv1.value < kv2.value) ||
(kv1.value == kv2.value && kv2.key < kv1.key);
}
} // namespace cub
namespace caffe2 {
namespace {
__global__ void NumRMACRegionsKernel(
const int W,
const int H,
const int min_step,
const int max_step,
const float overlap,
const int scales,
int* num_rois_data) {
// steps(idx) regions for long dimension
typedef hipcub::KeyValuePair<int, float> KeyValuePair; // <step, value>
KeyValuePair kv, min_kv;
min_kv.value = FLT_MAX;
// Local reduction
int minW = min(H, W);
int diff = max(H, W) - minW;
CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) {
kv.key = min_step + index;
float b = diff / (1.0 * kv.key);
kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap);
if (kv < min_kv) {
min_kv = kv;
}
}
// Block-wise arg-min reduction to find step
int step;
{
typedef hipcub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
min_kv = BlockReduce(temp_storage).Reduce(min_kv, hipcub::Min());
__shared__ int step_shared;
if (threadIdx.x == 0) {
step_shared = min_kv.key;
}
__syncthreads();
step = step_shared;
}
// Region overplus per dimension
int Wd = (W > H) ? step : 0;
int Hd = (H > W) ? step : 0;
// Local reduction to compute the total number of rois at all scales
int num_rois = 0;
CUDA_1D_KERNEL_LOOP(index, scales) {
int l = index + 1;
int region_size = 2 * minW / (l + 1);
num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0;
}
// Block-wise sum reduction to compute num_rois at all scales
{
typedef hipcub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
num_rois = BlockReduce(temp_storage).Sum(num_rois);
}
if (threadIdx.x == 0) {
num_rois_data[0] = num_rois;
num_rois_data[1] = Wd;
num_rois_data[2] = Hd;
}
}
__global__ void RMACRegionsKernel(
const int W,
const int H,
const int N,
const int* num_rois_data,
float* output) {
int num_rois = num_rois_data[0];
int Wd = num_rois_data[1];
int Hd = num_rois_data[2];
// Block-wide temp shared storage for intermediate ROI results to avoid
// uncoalesced writes to global mem
__shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5];
CUDA_1D_KERNEL_LOOP(index, N) {
int batch_id = index / num_rois;
int roi_id = index % num_rois;
int roi[5];
roi[0] = batch_id;
// Find the scale corresponding to this index and the roi_id relative
// to the scale.
int l = 0;
int num_rois_at_scale = 0;
do {
roi_id -= num_rois_at_scale;
l++;
num_rois_at_scale = (l + Wd) * (l + Hd);
} while (roi_id - num_rois_at_scale >= 0);
int region_size = 2 * min(H, W) / (l + 1);
float bw =
(l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0;
float bh =
(l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0;
int i = roi_id / (l + Hd);
int j = roi_id % (l + Hd);
roi[1] = bw * i;
roi[2] = bh * j;
// Careful with the borders
if (roi[1] + region_size > W) {
roi[1] -= (roi[1] + region_size - W);
}
if (roi[2] + region_size > H) {
roi[2] -= (roi[2] + region_size - H);
}
roi[3] = roi[1] + region_size - 1;
roi[4] = roi[2] + region_size - 1;
// Writing directly to output (global memory) will result in uncoalesced
// writes. Write output to shared mem first and then write ROI results to
// global output in a coalesced manner.
__syncthreads(); // Since output_shared is reused across loop iterations
for (int i = 0; i < 5; ++i) {
output_shared[threadIdx.x * 5 + i] = roi[i];
}
__syncthreads();
int offset = index - threadIdx.x;
float* output_offset = output + offset * 5;
int num_threads = min(blockDim.x, N - offset); // Active threads in block
for (int i = 0; i < 5; ++i) {
output_offset[num_threads * i + threadIdx.x] =
output_shared[num_threads * i + threadIdx.x];
}
}
}
} // namespace
template <>
bool RMACRegionsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(0); // Input tensor
// RoIs
if (X.numel() == 0) {
return true;
}
int batch_size = X.dim32(0);
int H = X.dim32(2);
int W = X.dim32(3);
// Compute number of regions
int min_step = 1;
int max_step = 6;
ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd
hipLaunchKernelGGL(( NumRMACRegionsKernel),
dim3(1),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
W,
H,
min_step,
max_step,
overlap_,
scales_,
num_rois_.mutable_data<int>());
// Bit awkward, but the size of the output tensor depends on the output of
// NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU
// to Resize() output appropriately.
int num_rois = 0;
context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois);
int N = batch_size * num_rois;
auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2]
// Compute region coordinates
hipLaunchKernelGGL(( RMACRegionsKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
W, H, N, num_rois_.data<int>(), output->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>);
} // namespace caffe2
| 811bae3ba8eefcd340b036cb97e8f2ba77233508.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rmac_regions_op.h"
namespace cub {
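// Order key-value pairs by value, breaking ties toward the larger key;
// used by the arg-min block reduction below.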
template <typename KeyT, typename ValueT>
inline __host__ __device__ bool operator<(
const cub::KeyValuePair<KeyT, ValueT>& kv1,
const cub::KeyValuePair<KeyT, ValueT>& kv2) {
return (kv1.value < kv2.value) ||
(kv1.value == kv2.value && kv2.key < kv1.key);
}
} // namespace cub
namespace caffe2 {
namespace {
__global__ void NumRMACRegionsKernel(
const int W,
const int H,
const int min_step,
const int max_step,
const float overlap,
const int scales,
int* num_rois_data) {
// steps(idx) regions for long dimension
typedef cub::KeyValuePair<int, float> KeyValuePair; // <step, value>
KeyValuePair kv, min_kv;
min_kv.value = FLT_MAX;
// Local reduction
int minW = min(H, W);
int diff = max(H, W) - minW;
CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) {
kv.key = min_step + index;
float b = diff / (1.0 * kv.key);
kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap);
if (kv < min_kv) {
min_kv = kv;
}
}
// Block-wise arg-min reduction to find step
int step;
{
typedef cub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
min_kv = BlockReduce(temp_storage).Reduce(min_kv, cub::Min());
__shared__ int step_shared;
if (threadIdx.x == 0) {
step_shared = min_kv.key;
}
__syncthreads();
step = step_shared;
}
// Region overplus per dimension
int Wd = (W > H) ? step : 0;
int Hd = (H > W) ? step : 0;
// Local reduction to compute the total number of rois at all scales
int num_rois = 0;
CUDA_1D_KERNEL_LOOP(index, scales) {
int l = index + 1;
int region_size = 2 * minW / (l + 1);
num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0;
}
// Block-wise sum reduction to compute num_rois at all scales
{
typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
num_rois = BlockReduce(temp_storage).Sum(num_rois);
}
if (threadIdx.x == 0) {
num_rois_data[0] = num_rois;
num_rois_data[1] = Wd;
num_rois_data[2] = Hd;
}
}
__global__ void RMACRegionsKernel(
const int W,
const int H,
const int N,
const int* num_rois_data,
float* output) {
int num_rois = num_rois_data[0];
int Wd = num_rois_data[1];
int Hd = num_rois_data[2];
// Block-wide temp shared storage for intermediate ROI results to avoid
// uncoalesced writes to global mem
__shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5];
CUDA_1D_KERNEL_LOOP(index, N) {
int batch_id = index / num_rois;
int roi_id = index % num_rois;
int roi[5];
roi[0] = batch_id;
// Find the scale corresponding to this index and the roi_id relative
// to the scale.
int l = 0;
int num_rois_at_scale = 0;
do {
roi_id -= num_rois_at_scale;
l++;
num_rois_at_scale = (l + Wd) * (l + Hd);
} while (roi_id - num_rois_at_scale >= 0);
int region_size = 2 * min(H, W) / (l + 1);
float bw =
(l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0;
float bh =
(l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0;
int i = roi_id / (l + Hd);
int j = roi_id % (l + Hd);
roi[1] = bw * i;
roi[2] = bh * j;
// Careful with the borders
if (roi[1] + region_size > W) {
roi[1] -= (roi[1] + region_size - W);
}
if (roi[2] + region_size > H) {
roi[2] -= (roi[2] + region_size - H);
}
roi[3] = roi[1] + region_size - 1;
roi[4] = roi[2] + region_size - 1;
// Writing directly to output (global memory) will result in uncoalesced
// writes. Write output to shared mem first and then write ROI results to
// global output in a coalesced manner.
__syncthreads(); // Since output_shared is reused across loop iterations
for (int i = 0; i < 5; ++i) {
output_shared[threadIdx.x * 5 + i] = roi[i];
}
__syncthreads();
int offset = index - threadIdx.x;
float* output_offset = output + offset * 5;
int num_threads = min(blockDim.x, N - offset); // Active threads in block
for (int i = 0; i < 5; ++i) {
output_offset[num_threads * i + threadIdx.x] =
output_shared[num_threads * i + threadIdx.x];
}
}
}
} // namespace
template <>
bool RMACRegionsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(0); // Input tensor
// RoIs
if (X.numel() == 0) {
return true;
}
int batch_size = X.dim32(0);
int H = X.dim32(2);
int W = X.dim32(3);
// Compute number of regions
int min_step = 1;
int max_step = 6;
ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd
NumRMACRegionsKernel<<<
1,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
W,
H,
min_step,
max_step,
overlap_,
scales_,
num_rois_.mutable_data<int>());
// Bit awkward, but the size of the output tensor depends on the output of
// NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU
// to Resize() output appropriately.
int num_rois = 0;
context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois);
int N = batch_size * num_rois;
auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2]
// Compute region coordinates
RMACRegionsKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
W, H, N, num_rois_.data<int>(), output->template mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>);
} // namespace caffe2
|
f019e45343c825b9436bcad24e982955f533fef0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef CUDA
#include <device_types.h>
#include "Common/CUDATools.h"
#include "Common/CUDADeviceTools.h"
template<typename T, size_t threadsPerBlock>
__global__ void kernel_reductionSum(T *data, T *sum, int count, int repeatCount)
{
__shared__ T ssum[threadsPerBlock];
for (int i = 0; i < repeatCount; i++)
{
unsigned int threadID = threadIdx.x;
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
ssum[threadID] = 0;
T *pBase = data + blockIdx.x * countPerBlock;
T *pValue = pBase + threadID;
T *pValueMax = pBase + countPerBlock;
if (pValueMax > data + count)
pValueMax = data + count;
T *pResult = sum + blockIdx.x;
while (pValue < pValueMax)
{
ssum[threadID] += *pValue;
pValue += blockDim.x;
}
__syncthreads();
for (int i = blockDim.x >> 1; i > 16; i >>= 1)
{
if (threadID < i)
ssum[threadID] += ssum[threadID + i];
__syncthreads();
}
#ifdef CUDA50_
T value = ssum[threadID];
if (sizeof(T) == sizeof(int))
{
value += __shfl_xor((T)value, 16, 32);
value += __shfl_xor((T)value, 8, 32);
value += __shfl_xor((T)value, 4, 32);
value += __shfl_xor((T)value, 2, 32);
value += __shfl_xor((T)value, 1, 32);
} else
if (sizeof(T) == sizeof(double))
{
//!!
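// One possible way to complete this branch (a sketch, not enabled or
// verified here): shuffle the double as two 32-bit halves and reassemble,
// e.g. for the first butterfly step:
// int hi = __double2hiint(value), lo = __double2loint(value);
// hi = __shfl_xor(hi, 16, 32);
// lo = __shfl_xor(lo, 16, 32);
// value += __hiloint2double(hi, lo);
// repeated for lane masks 8, 4, 2 and 1 as in the int case above.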
}
if (threadID == 0)
*pResult = value;
#else
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
if (threadID < i)
ssum[threadID] += ssum[threadID + i];
__syncthreads();
}
if (threadID == 0)
*pResult = ssum[threadID];
#endif
__syncthreads();
}
}
template<typename T>
__global__ void kernel_alignedRead(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
size_t inc = blockDim.x;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
T sum = 0;
while (p < pmax)
{
sum += *p;
p += inc;
}
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f\n", sum);
}
}
template<typename T>
__global__ void kernel_notAlignedRead(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
size_t inc = 1;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
T sum = 0;
while (p < pmax)
{
sum += *p;
p += inc;
}
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f\n", sum);
}
}
template<typename T>
__global__ void kernel_alignedWrite(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
size_t inc = blockDim.x;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
while (p < pmax)
{
*p = 0;
p += inc;
}
}
}
template<typename T>
__global__ void kernel_notAlignedWrite(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
size_t inc = 1;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
while (p < pmax)
{
*p = 0;
p += inc;
}
}
}
template<typename T>
void cuda_alignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
hipLaunchKernelGGL(( kernel_alignedRead<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}
template<typename T>
void cuda_notAlignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
hipLaunchKernelGGL(( kernel_notAlignedRead<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}
template<typename T>
void cuda_alignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
hipLaunchKernelGGL(( kernel_alignedWrite<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}
template<typename T>
void cuda_notAlignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
hipLaunchKernelGGL(( kernel_notAlignedWrite<T>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, count, repeatCount);
}
template<typename T>
void cuda_reductionSum(T *data, T *sum, T *temp, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
switch (threadsPerBlock)
{
case 1:
case 2:
case 4:
case 8:
case 16:
case 32:
hipLaunchKernelGGL(( kernel_reductionSum<T, 32>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 32>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
case 64:
hipLaunchKernelGGL(( kernel_reductionSum<T, 64>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 64>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
case 128:
hipLaunchKernelGGL(( kernel_reductionSum<T, 128>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 128>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
case 256:
hipLaunchKernelGGL(( kernel_reductionSum<T, 256>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 256>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
case 512:
hipLaunchKernelGGL(( kernel_reductionSum<T, 512>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 512>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
case 1024:
hipLaunchKernelGGL(( kernel_reductionSum<T, 1024>), dim3(blockCount), dim3(threadsPerBlock), 0, 0, data, temp, count, repeatCount);
hipLaunchKernelGGL(( kernel_reductionSum<T, 1024>), dim3(1), dim3(threadsPerBlock), 0, 0, temp, sum, blockCount, repeatCount);
break;
}
}
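// Example of how this dispatcher might be invoked from host code
// (hypothetical sizes, using the deviceMem helper seen below in
// cuda_doDynamicTinyTask):
// int blocks = 64, threads = 256;
// deviceMem<float> d_data(count), d_sum(1), d_temp(blocks);
// cuda_reductionSum<float>(d_data.dptr, d_sum.dptr, d_temp.dptr, count, 1, blocks, threads);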
__global__ void kernel_doTinyTask(int a, int b)
{
int sum = a + b;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%d", sum);
}
void cuda_doTinyTask(int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doTinyTask), dim3(blockCount), dim3(threadCount), 0, 0, blockCount, threadCount);
}
template<typename T>
__global__ void kernel_doAdd(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = i;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f", value);
}
}
/*
template<>
__global__ void kernel_doAdd<float>(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
float4 value;
value.x = i;
value.y = value.x + 1.0f;
value.z = value.x + 2.0f;
value.w = value.x + 3.0f;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value.x, value.y, value.z, value.w);
}
}
*/
template<typename T>
void cuda_doAdd(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doAdd<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doAdd_indep(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = (T)1.0 + i, value3 = (T)2.0 + i, value4 = (T)3.0 + i;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f, %f, %f, %f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doAdd_indep(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doAdd_indep<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doMad(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = i + (T)1.0, value3 = i + (T)2.0, value4 = i + (T)3.0;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doMad(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doMad<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doMadSF(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = i + (T)1.0, value3 = i + (T)2.0, value4 = i + (T)3.0;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doMadSF(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doMadSF<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doMul(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)i;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
template<typename T>
void cuda_doMul(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doMul<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doDiv(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)i + (T)1.2345;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
template<typename T>
void cuda_doDiv(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doDiv<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
template<typename T>
__global__ void kernel_doSin(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)1.0 + i;
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
/*
template<>
__global__ void kernel_doSin<float>(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
float value = 1.0f + i;
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
*/
template<typename T>
void cuda_doSin(int count, int blockCount, int threadCount)
{
hipLaunchKernelGGL(( kernel_doSin<T>), dim3(blockCount), dim3(threadCount), 0, 0, count);
}
#ifdef CUDA50
template<bool waitForCompletion>
__global__ void kernel_doDynamicTinyTask(int blockCount, int threadCount,
double *time)
{
DTimingCounter counter;
DTimingClearAndStart(counter);
for (int i = 0; i < 1000; i++)
{
hipLaunchKernelGGL(( kernel_doTinyTask), dim3(blockCount), dim3(threadCount), 0, 0, i, i);
if (waitForCompletion)
hipDeviceSynchronize();
}
DTimingFinish(counter);
*time = DTimingSeconds(counter) / 1000;
}
double cuda_doDynamicTinyTask(int blockCount, int threadCount, bool waitForCompletion)
{
deviceMem<double> d_time(1);
if (waitForCompletion)
hipLaunchKernelGGL(( kernel_doDynamicTinyTask<true>), dim3(1), dim3(1), 0, 0, blockCount, threadCount, d_time.dptr);
else
hipLaunchKernelGGL(( kernel_doDynamicTinyTask<false>), dim3(1), dim3(1), 0, 0, blockCount, threadCount, d_time.dptr);
cudaSafeCall(hipDeviceSynchronize());
double result;
d_time.copyTo(&result);
return result;
}
#endif
// template instantiation
template void cuda_reductionSum<int>(int *, int *, int *, int, int, int, int);
template void cuda_reductionSum<__int64>(__int64 *, __int64 *, __int64 *, int, int, int, int);
template void cuda_reductionSum<float>(float *, float *, float *, int, int, int, int);
template void cuda_reductionSum<double>(double *, double *, double *, int, int, int, int);
template void cuda_alignedRead<int>(int *, int, int, int, int);
template void cuda_alignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedRead<float>(float *, int, int, int, int);
template void cuda_alignedRead<double>(double *, int, int, int, int);
template void cuda_notAlignedRead<int>(int *, int, int, int, int);
template void cuda_notAlignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedRead<float>(float *, int, int, int, int);
template void cuda_notAlignedRead<double>(double *, int, int, int, int);
template void cuda_alignedWrite<int>(int *, int, int, int, int);
template void cuda_alignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedWrite<float>(float *, int, int, int, int);
template void cuda_alignedWrite<double>(double *, int, int, int, int);
template void cuda_notAlignedWrite<int>(int *, int, int, int, int);
template void cuda_notAlignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedWrite<float>(float *, int, int, int, int);
template void cuda_notAlignedWrite<double>(double *, int, int, int, int);
template void cuda_doAdd<int>(int, int, int);
template void cuda_doAdd<__int64>(int, int, int);
template void cuda_doAdd<float>(int, int, int);
template void cuda_doAdd<double>(int, int, int);
template void cuda_doAdd_indep<int>(int, int, int);
template void cuda_doAdd_indep<__int64>(int, int, int);
template void cuda_doAdd_indep<float>(int, int, int);
template void cuda_doAdd_indep<double>(int, int, int);
template void cuda_doMad<int>(int, int, int);
template void cuda_doMad<__int64>(int, int, int);
template void cuda_doMad<float>(int, int, int);
template void cuda_doMad<double>(int, int, int);
template void cuda_doMadSF<float>(int, int, int);
template void cuda_doMadSF<double>(int, int, int);
template void cuda_doMul<int>(int, int, int);
template void cuda_doMul<__int64>(int, int, int);
template void cuda_doMul<float>(int, int, int);
template void cuda_doMul<double>(int, int, int);
template void cuda_doDiv<int>(int, int, int);
template void cuda_doDiv<__int64>(int, int, int);
template void cuda_doDiv<float>(int, int, int);
template void cuda_doDiv<double>(int, int, int);
template void cuda_doSin<float>(int, int, int);
template void cuda_doSin<double>(int, int, int);
#endif | f019e45343c825b9436bcad24e982955f533fef0.cu | #ifdef CUDA
#include <device_types.h>
#include "Common/CUDATools.h"
#include "Common/CUDADeviceTools.h"
template<typename T, size_t threadsPerBlock>
__global__ void kernel_reductionSum(T *data, T *sum, int count, int repeatCount)
{
__shared__ T ssum[threadsPerBlock];
for (int i = 0; i < repeatCount; i++)
{
unsigned int threadID = threadIdx.x;
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
ssum[threadID] = 0;
T *pBase = data + blockIdx.x * countPerBlock;
T *pValue = pBase + threadID;
T *pValueMax = pBase + countPerBlock;
if (pValueMax > data + count)
pValueMax = data + count;
T *pResult = sum + blockIdx.x;
while (pValue < pValueMax)
{
ssum[threadID] += *pValue;
pValue += blockDim.x;
}
__syncthreads();
for (int i = blockDim.x >> 1; i > 16; i >>= 1)
{
if (threadID < i)
ssum[threadID] += ssum[threadID + i];
__syncthreads();
}
#ifdef CUDA50_
T value = ssum[threadID];
if (sizeof(T) == sizeof(int))
{
value += __shfl_xor((T)value, 16, 32);
value += __shfl_xor((T)value, 8, 32);
value += __shfl_xor((T)value, 4, 32);
value += __shfl_xor((T)value, 2, 32);
value += __shfl_xor((T)value, 1, 32);
} else
if (sizeof(T) == sizeof(double))
{
//!!
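// One possible way to complete this branch (a sketch, not enabled or
// verified here): shuffle the double as two 32-bit halves and reassemble,
// e.g. for the first butterfly step:
// int hi = __double2hiint(value), lo = __double2loint(value);
// hi = __shfl_xor(hi, 16, 32);
// lo = __shfl_xor(lo, 16, 32);
// value += __hiloint2double(hi, lo);
// repeated for lane masks 8, 4, 2 and 1 as in the int case above.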
}
if (threadID == 0)
*pResult = value;
#else
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
if (threadID < i)
ssum[threadID] += ssum[threadID + i];
__syncthreads();
}
if (threadID == 0)
*pResult = ssum[threadID];
#endif
__syncthreads();
}
}
template<typename T>
__global__ void kernel_alignedRead(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
size_t inc = blockDim.x;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
T sum = 0;
while (p < pmax)
{
sum += *p;
p += inc;
}
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f\n", sum);
}
}
template<typename T>
__global__ void kernel_notAlignedRead(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
size_t inc = 1;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
T sum = 0;
while (p < pmax)
{
sum += *p;
p += inc;
}
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f\n", sum);
}
}
template<typename T>
__global__ void kernel_alignedWrite(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
T *pmax = data + blockIdx.x * countPerBlock + countPerBlock;
size_t inc = blockDim.x;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x;
while (p < pmax)
{
*p = 0;
p += inc;
}
}
}
template<typename T>
__global__ void kernel_notAlignedWrite(T *data, int count, int repeatCount)
{
unsigned int countPerBlock = (count + gridDim.x - 1) / gridDim.x;
unsigned int countPerThread = (countPerBlock + blockDim.x - 1) / blockDim.x;
T *pmax = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread + countPerThread;
size_t inc = 1;
for (int i = 0; i < repeatCount; i++)
{
T *p = data + blockIdx.x * countPerBlock + threadIdx.x * countPerThread;
while (p < pmax)
{
*p = 0;
p += inc;
}
}
}
template<typename T>
void cuda_alignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
kernel_alignedRead<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}
template<typename T>
void cuda_notAlignedRead(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
kernel_notAlignedRead<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}
template<typename T>
void cuda_alignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
kernel_alignedWrite<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}
template<typename T>
void cuda_notAlignedWrite(T *data, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
kernel_notAlignedWrite<T><<<blockCount, threadsPerBlock>>>(data, count, repeatCount);
}
template<typename T>
void cuda_reductionSum(T *data, T *sum, T *temp, int count, int repeatCount, int blockCount, int threadsPerBlock)
{
switch (threadsPerBlock)
{
case 1:
case 2:
case 4:
case 8:
case 16:
case 32:
kernel_reductionSum<T, 32><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 32><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
case 64:
kernel_reductionSum<T, 64><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 64><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
case 128:
kernel_reductionSum<T, 128><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 128><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
case 256:
kernel_reductionSum<T, 256><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 256><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
case 512:
kernel_reductionSum<T, 512><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 512><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
case 1024:
kernel_reductionSum<T, 1024><<<blockCount, threadsPerBlock>>>(data, temp, count, repeatCount);
kernel_reductionSum<T, 1024><<<1, threadsPerBlock>>>(temp, sum, blockCount, repeatCount);
break;
}
}
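// Example of how this dispatcher might be invoked from host code
// (hypothetical sizes, using the deviceMem helper seen below in
// cuda_doDynamicTinyTask):
// int blocks = 64, threads = 256;
// deviceMem<float> d_data(count), d_sum(1), d_temp(blocks);
// cuda_reductionSum<float>(d_data.dptr, d_sum.dptr, d_temp.dptr, count, 1, blocks, threads);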
__global__ void kernel_doTinyTask(int a, int b)
{
int sum = a + b;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%d", sum);
}
void cuda_doTinyTask(int blockCount, int threadCount)
{
kernel_doTinyTask<<<blockCount, threadCount>>>(blockCount, threadCount);
}
template<typename T>
__global__ void kernel_doAdd(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = i;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
value = value + value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f", value);
}
}
/*
template<>
__global__ void kernel_doAdd<float>(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
float4 value;
value.x = i;
value.y = value.x + 1.0f;
value.z = value.x + 2.0f;
value.w = value.x + 3.0f;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
value.x = value.x + value.x;
value.y = value.y + value.y;
value.z = value.z + value.z;
value.w = value.w + value.w;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value.x, value.y, value.z, value.w);
}
}
*/
template<typename T>
void cuda_doAdd(int count, int blockCount, int threadCount)
{
kernel_doAdd<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doAdd_indep(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = (T)1.0 + i, value3 = (T)2.0 + i, value4 = (T)3.0 + i;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
value1 = value1 + value1;
value2 = value2 + value2;
value3 = value3 + value3;
value4 = value4 + value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f, %f, %f, %f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doAdd_indep(int count, int blockCount, int threadCount)
{
kernel_doAdd_indep<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doMad(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = i + (T)1.0, value3 = i + (T)2.0, value4 = i + (T)3.0;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
value1 = value1 + value1 * value1;
value2 = value2 + value2 * value2;
value3 = value3 + value3 * value3;
value4 = value4 + value4 * value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doMad(int count, int blockCount, int threadCount)
{
kernel_doMad<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doMadSF(int count)
{
int bulkCount = count >> 6;
for (int i = 0; i < bulkCount; i++)
{
T value1 = i, value2 = i + (T)1.0, value3 = i + (T)2.0, value4 = i + (T)3.0;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
value1 = value1 + value1 * value1;
value2 = sin(value2);
value3 = sqrt(value3);
value4 = value4 / value4;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("sum: %f,%f,%f,%f", value1, value2, value3, value4);
}
}
template<typename T>
void cuda_doMadSF(int count, int blockCount, int threadCount)
{
kernel_doMadSF<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doMul(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)i;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
value = value * value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
template<typename T>
void cuda_doMul(int count, int blockCount, int threadCount)
{
kernel_doMul<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doDiv(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)i + (T)1.2345;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
value = value / value;
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
template<typename T>
void cuda_doDiv(int count, int blockCount, int threadCount)
{
kernel_doDiv<T><<<blockCount, threadCount>>>(count);
}
template<typename T>
__global__ void kernel_doSin(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
T value = (T)1.0 + i;
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
value = sin(value);
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
/*
template<>
__global__ void kernel_doSin<float>(int count)
{
int bulkCount = count >> 5;
for (int i = 0; i < bulkCount; i++)
{
float value = 1.0f + i;
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
value = sinf(value);
if (threadIdx.x > 1024) // to avoid removal by optimization
printf("%f", value);
}
}
*/
template<typename T>
void cuda_doSin(int count, int blockCount, int threadCount)
{
kernel_doSin<T><<<blockCount, threadCount>>>(count);
}
#ifdef CUDA50
template<bool waitForCompletion>
__global__ void kernel_doDynamicTinyTask(int blockCount, int threadCount,
double *time)
{
DTimingCounter counter;
DTimingClearAndStart(counter);
for (int i = 0; i < 1000; i++)
{
kernel_doTinyTask<<<blockCount, threadCount>>>(i, i);
if (waitForCompletion)
cudaDeviceSynchronize();
}
DTimingFinish(counter);
*time = DTimingSeconds(counter) / 1000;
}
double cuda_doDynamicTinyTask(int blockCount, int threadCount, bool waitForCompletion)
{
deviceMem<double> d_time(1);
if (waitForCompletion)
kernel_doDynamicTinyTask<true><<<1, 1>>>(blockCount, threadCount, d_time.dptr);
else
kernel_doDynamicTinyTask<false><<<1, 1>>>(blockCount, threadCount, d_time.dptr);
cudaSafeCall(cudaThreadSynchronize());
double result;
d_time.copyTo(&result);
return result;
}
#endif
// template instantiation
template void cuda_reductionSum<int>(int *, int *, int *, int, int, int, int);
template void cuda_reductionSum<__int64>(__int64 *, __int64 *, __int64 *, int, int, int, int);
template void cuda_reductionSum<float>(float *, float *, float *, int, int, int, int);
template void cuda_reductionSum<double>(double *, double *, double *, int, int, int, int);
template void cuda_alignedRead<int>(int *, int, int, int, int);
template void cuda_alignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedRead<float>(float *, int, int, int, int);
template void cuda_alignedRead<double>(double *, int, int, int, int);
template void cuda_notAlignedRead<int>(int *, int, int, int, int);
template void cuda_notAlignedRead<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedRead<float>(float *, int, int, int, int);
template void cuda_notAlignedRead<double>(double *, int, int, int, int);
template void cuda_alignedWrite<int>(int *, int, int, int, int);
template void cuda_alignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_alignedWrite<float>(float *, int, int, int, int);
template void cuda_alignedWrite<double>(double *, int, int, int, int);
template void cuda_notAlignedWrite<int>(int *, int, int, int, int);
template void cuda_notAlignedWrite<__int64>(__int64 *, int, int, int, int);
template void cuda_notAlignedWrite<float>(float *, int, int, int, int);
template void cuda_notAlignedWrite<double>(double *, int, int, int, int);
template void cuda_doAdd<int>(int, int, int);
template void cuda_doAdd<__int64>(int, int, int);
template void cuda_doAdd<float>(int, int, int);
template void cuda_doAdd<double>(int, int, int);
template void cuda_doAdd_indep<int>(int, int, int);
template void cuda_doAdd_indep<__int64>(int, int, int);
template void cuda_doAdd_indep<float>(int, int, int);
template void cuda_doAdd_indep<double>(int, int, int);
template void cuda_doMad<int>(int, int, int);
template void cuda_doMad<__int64>(int, int, int);
template void cuda_doMad<float>(int, int, int);
template void cuda_doMad<double>(int, int, int);
template void cuda_doMadSF<float>(int, int, int);
template void cuda_doMadSF<double>(int, int, int);
template void cuda_doMul<int>(int, int, int);
template void cuda_doMul<__int64>(int, int, int);
template void cuda_doMul<float>(int, int, int);
template void cuda_doMul<double>(int, int, int);
template void cuda_doDiv<int>(int, int, int);
template void cuda_doDiv<__int64>(int, int, int);
template void cuda_doDiv<float>(int, int, int);
template void cuda_doDiv<double>(int, int, int);
template void cuda_doSin<float>(int, int, int);
template void cuda_doSin<double>(int, int, int);
#endif |
71eec64516be94303717468febebd3835ac9a2ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// 16CO145 Sumukha PK
// 16CO234 Prajval M
#include "wb.h"
#include <iostream>
#include <stdio.h>
#define NUM_BINS 4096
#define SIZE 1024
#define BIN_CAP 127
#define CUDA_CHECK(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void hist(int *d_ip, int *d_bin, int inputLength)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < inputLength)
atomicAdd(&d_bin[d_ip[idx]], 1);
}
__global__ void saturate(int * d_bin)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(d_bin[idx] > BIN_CAP)
d_bin[idx] = BIN_CAP;
}
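// Alternative sketch (hypothetical, not wired into main below): privatize the
// histogram in shared memory so most atomic traffic stays on-chip, then flush
// each block's partial counts to the global bins once. NUM_BINS ints (16 KB)
// fit in shared memory, and the same grid/block shape used for hist would work.
__global__ void hist_shared(int *d_ip, int *d_bin, int inputLength)
{
__shared__ int s_bin[NUM_BINS];
for (int i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
s_bin[i] = 0;
__syncthreads();
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < inputLength)
atomicAdd(&s_bin[d_ip[idx]], 1);
__syncthreads();
for (int i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
atomicAdd(&d_bin[i], s_bin[i]);
}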
int main(int argc, char *argv[])
{
int inputLength;
int *hostInput;
int *hostBins;
int *deviceInput;
int *deviceBins;
/* Read input arguments here */
wbArg_t args = {argc, argv};
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *)wbImport(wbArg_getInputFile(args, 3), &inputLength);
hostBins = (int *)malloc(NUM_BINS * sizeof(int));
for (int i = 0; i < NUM_BINS; i++)
hostBins[i] = 0;
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void **)&deviceInput, inputLength * sizeof(int));
hipMalloc((void **)&deviceBins, NUM_BINS * sizeof(int));
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, inputLength * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(deviceBins, hostBins, NUM_BINS * sizeof(int), hipMemcpyHostToDevice);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
long long int d_x = inputLength > SIZE ? (long long int)ceil(inputLength/(float)SIZE) : 1;
hipLaunchKernelGGL(( hist), dim3(d_x), dim3(SIZE), 0, 0, deviceInput, deviceBins, inputLength);
hipLaunchKernelGGL(( saturate), dim3(4), dim3(SIZE), 0, 0, deviceBins);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostInput, deviceInput, inputLength * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(int), hipMemcpyDeviceToHost);
CUDA_CHECK(hipDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
hipFree(deviceInput);
hipFree(deviceBins);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
} | 71eec64516be94303717468febebd3835ac9a2ea.cu | // 16CO145 Sumukha PK
// 16CO234 Prajval M
#include "wb.h"
#include <iostream>
#include <stdio.h>
#define NUM_BINS 4096
#define SIZE 1024
#define BIN_CAP 127
#define CUDA_CHECK(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
file, line);
if (abort)
exit(code);
}
}
__global__ void hist(int *d_ip, int *d_bin, int inputLength)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < inputLength)
atomicAdd(&d_bin[d_ip[idx]], 1);
}
__global__ void saturate(int * d_bin)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(d_bin[idx] > BIN_CAP)
d_bin[idx] = BIN_CAP;
}
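// Alternative sketch (hypothetical, not wired into main below): privatize the
// histogram in shared memory so most atomic traffic stays on-chip, then flush
// each block's partial counts to the global bins once. NUM_BINS ints (16 KB)
// fit in shared memory, and the same grid/block shape used for hist would work.
__global__ void hist_shared(int *d_ip, int *d_bin, int inputLength)
{
__shared__ int s_bin[NUM_BINS];
for (int i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
s_bin[i] = 0;
__syncthreads();
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < inputLength)
atomicAdd(&s_bin[d_ip[idx]], 1);
__syncthreads();
for (int i = threadIdx.x; i < NUM_BINS; i += blockDim.x)
atomicAdd(&d_bin[i], s_bin[i]);
}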
int main(int argc, char *argv[])
{
int inputLength;
int *hostInput;
int *hostBins;
int *deviceInput;
int *deviceBins;
/* Read input arguments here */
wbArg_t args = {argc, argv};
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (int *)wbImport(wbArg_getInputFile(args, 3), &inputLength);
hostBins = (int *)malloc(NUM_BINS * sizeof(int));
for (int i = 0; i < NUM_BINS; i++)
hostBins[i] = 0;
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbLog(TRACE, "The number of bins is ", NUM_BINS);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceInput, inputLength * sizeof(int));
cudaMalloc((void **)&deviceBins, NUM_BINS * sizeof(int));
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceBins, hostBins, NUM_BINS * sizeof(int), cudaMemcpyHostToDevice);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Launch kernel
// ----------------------------------------------------------
wbLog(TRACE, "Launching kernel");
wbTime_start(Compute, "Performing CUDA computation");
//@@ Perform kernel computation here
long long int d_x = inputLength > SIZE ? (long long int)ceil(inputLength/(float)SIZE) : 1;
hist<<<d_x, SIZE>>>(deviceInput, deviceBins, inputLength);
saturate<<<4, SIZE>>>(deviceBins);
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostInput, deviceInput, inputLength * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(int), cudaMemcpyDeviceToHost);
CUDA_CHECK(cudaDeviceSynchronize());
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceBins);
wbTime_stop(GPU, "Freeing GPU Memory");
// Verify correctness
// -----------------------------------------------------
wbSolution(args, hostBins, NUM_BINS);
free(hostBins);
free(hostInput);
return 0;
} |
62a0ddc136f36fd277012b527ef4b8be4f1f2d7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#define SIZE_OF_GRID 512
double * neville_s(double *,double *, double *,int, int);//serial implementation
double * load_k0(double *,double *, double *, int , int, int, int);//prepares and launches kernel0
__global__ void kernel0(double *, double *, double *, double *, int, int);
double * load_k1(double *,double *, double *, int , int, int, int);//prepares and launches kernel1
__global__ void kernel1(double *, double *, double *, double *, int, int);
int main (int argc, char *argv[]){
double *px, *py;
int N, Nx, ii, dimx, dimy;
double *x, *y,*y1, *y2, PI, step;
clock_t start;
float cpu_time, gpu0_time, gpu1_time, err_medio;
hipEvent_t start0, stop0, start1, stop1;
PI = 4*atan(1.0);
Nx = 200000;
x = (double *)malloc(Nx*sizeof(double));
srand(123);
for (ii = 0; ii < Nx; ii++)
x[ii] = PI * rand() / (double) RAND_MAX;
N = 32; // N-1 is the degree of the polynomial
px = (double *)malloc(N*sizeof(double));
py = (double *)malloc(N*sizeof(double));
// lookup table: sin() between
// 0 and PI
step = 1.0 / (N-1);
for (ii = 0; ii < N; ii++){
px[ii] = ii*step*PI;
py[ii] = sin(px[ii]);
}
//serial implementation
start = clock();
y = neville_s(px,py,x,N,Nx);
start = clock() - start;
cpu_time = start/(float)CLOCKS_PER_SEC;
cpu_time *= 1000.0;//convert to milliseconds
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y[ii] - sin(x[ii]));
printf("CPU time: %12.10f [ms]\n\terrore medio: %.15f\n", cpu_time,err_medio);
//compute the grid size
if(Nx < 513){//one-dimensional grid
dimx = Nx;
dimy = 1;
}
else{//two-dimensional grid (max 512*512 points)
dimx = 512;
dimy = (int)(Nx/512) + 1;
}
//kernel 0 implementation
hipEventCreate(&start0);
hipEventCreate(&stop0);
hipEventRecord( start0, 0 );
y1 = load_k0(px,py,x,N,Nx,dimx,dimy);
hipEventRecord( stop0, 0 );
hipEventSynchronize( stop0 );
hipEventElapsedTime( &gpu0_time, start0, stop0 );
hipEventDestroy(start0);
hipEventDestroy(stop0);
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y1[ii] - sin(x[ii]));
printf("kernel0: % 12.3f [ms], speedup: %3.0f.\n",gpu0_time, cpu_time/gpu0_time);
printf("\terrore medio: %.15f\n",err_medio);
//kernel 1 implementation
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventRecord( start1, 0 );
y2 = load_k1(px,py,x,N,Nx,dimx,dimy);
hipEventRecord( stop1, 0 );
hipEventSynchronize( stop1 );
hipEventElapsedTime( &gpu1_time, start1, stop1 );
hipEventDestroy(start1);
hipEventDestroy(stop1);
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y2[ii] - sin(x[ii]));
printf("kernel1: % 12.3f [ms], speedup: %3.0f.\n",gpu1_time, cpu_time/gpu1_time);
printf("\terrore medio: %.15f\n",err_medio);
free(px);
free(py);
free(x);
free(y);
free(y1);
free(y2);
return 0;
}
/*implementation of the serial algorithm*/
double * neville_s(double * px,double * py, double * x,int N, int Nx){
double * y;//holds f(x)
double * s;//work vector used by the algorithm
int ii,jj,kk;//indices
//memory allocation
y = (double *)malloc(sizeof(double)*Nx);
s = (double *)malloc(sizeof(double)*N);
//implementation of the method
for(ii = 0; ii<Nx; ii++){
//copy the values of py into s
for(jj = 0; jj<N; jj++) s[jj] = py[jj];
//Neville's algorithm
for(jj = 1; jj<=N-1; jj++){
for(kk = 0; kk<=N-jj-1; kk++){
s[kk]=((px[kk+jj] - x[ii])*s[kk] + (x[ii]-px[kk])*s[kk+1])/(px[kk+jj] - px[kk]);
}
}
//s[0] holds the result of the interpolation
y[ii] = s[0];
}
free(s);
return y;
}
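/* For reference, the in-place update above is Neville's recurrence: writing
P[k][j] for the interpolating polynomial through (px[k],py[k]),...,(px[k+j],py[k+j]),
P[k][j](x) = ((px[k+j]-x)*P[k][j-1](x) + (x-px[k])*P[k+1][j-1](x)) / (px[k+j]-px[k]),
with P[k][0] = py[k]; s[] holds one column of the tableau, so after the loops
s[0] equals P[0][N-1](x), the degree N-1 interpolant evaluated at x. */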
/* prepares and hands over the data that
kernel 0 will then use */
double * load_k0(double *px,double * py,double *x,int N, int Nx,int dimx,int dimy){
double * px_d, * py_d, * x_d, * y_d, * y;
//int N_d, Nx_d;
//allocate device memory
checkCudaErrors(hipMalloc((void **) &px_d, sizeof(double)*N));
checkCudaErrors(hipMalloc((void **) &py_d, sizeof(double)*N));
checkCudaErrors(hipMalloc((void **) &x_d, sizeof(double)*Nx));
checkCudaErrors(hipMalloc((void **) &y_d, sizeof(double)*Nx));
//copy the data the kernel needs to the device
checkCudaErrors(hipMemcpy(px_d, px, sizeof(double)*N, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(py_d, py, sizeof(double)*N, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(x_d, x, sizeof(double)*Nx, hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(N_d, N, sizeof(int), hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(Nx_d, Nx, sizeof(int), hipMemcpyHostToDevice));
//allocate the vector that will hold the result
y = (double *)malloc(sizeof(double)*Nx);
//define the grid and block dimensions
dim3 dimBlock(N, 1, 1);
dim3 dimGrid(dimx, dimy, 1);
//launch kernel0
hipLaunchKernelGGL(( kernel0) , dim3(dimGrid), dim3(dimBlock) , 0, 0, px_d, py_d, x_d, y_d, N, Nx);
//copy back the results produced by the kernel
checkCudaErrors( hipMemcpy(y, y_d, sizeof(double)*Nx, hipMemcpyDeviceToHost) );
return y;
}
//kernel 0 implementation
__global__ void kernel0(double *px, double *py, double *x, double *y, int N, int Nx){
unsigned int t_index,b_index;
double x_blk;//value of the point to interpolate for this block
__shared__ double s_x[32];
__shared__ double s_y[32];
int ii, jj;//generic indices
//compute the indices
t_index = threadIdx.x;//thread index
b_index = blockIdx.x + blockIdx.y * gridDim.x;//block index
if(b_index < Nx){//keep only the first Nx blocks
x_blk = x[b_index];//get the x value our block has to interpolate
//copy the values of px and py into shared memory
s_x[t_index] = px[t_index];
s_y[t_index] = py[t_index];
//interpolation done by thread 0 only
if(t_index == 0){
//Neville's algorithm
for(ii = 1; ii<N; ii++){
for(jj = 0; jj<N-ii; jj++){
s_y[jj]=(s_y[jj]*(s_x[jj+ii] - x_blk) + s_y[jj+1]*(x_blk-s_x[jj]))/(s_x[jj+ii] - s_x[jj]);
}
}
y[b_index] = s_y[0];//store the result
}
}
}
/* prepares and hands over the data that
kernel 1 will then use */
double * load_k1(double *px,double * py,double *x,int N, int Nx,int dimx,int dimy){
double * px_d, * py_d, * x_d, * y_d, * y;
//allocate device memory
checkCudaErrors(hipMalloc((void **) &px_d, sizeof(double)*N));
checkCudaErrors(hipMalloc((void **) &py_d, sizeof(double)*N));
checkCudaErrors(hipMalloc((void **) &x_d, sizeof(double)*Nx));
checkCudaErrors(hipMalloc((void **) &y_d, sizeof(double)*Nx));
//copy the data the kernel needs to the device
checkCudaErrors(hipMemcpy(px_d, px, sizeof(double)*N, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(py_d, py, sizeof(double)*N, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(x_d, x, sizeof(double)*Nx, hipMemcpyHostToDevice));
//allocate the vector that will hold the result
y = (double *)malloc(sizeof(double)*Nx);
//define the grid and block dimensions
dim3 dimBlock(N, 1, 1);
dim3 dimGrid(dimx, dimy, 1);
//launch kernel1
hipLaunchKernelGGL(( kernel1) , dim3(dimGrid), dim3(dimBlock) , 0, 0, px_d, py_d, x_d, y_d, N, Nx);
//copy back the results produced by the kernel
checkCudaErrors( hipMemcpy(y, y_d, sizeof(double)*Nx, hipMemcpyDeviceToHost) );
return y;
}
// kernel 1 implementation
__global__ void kernel1(double *px, double *py, double *x, double *y, int N, int Nx){
	unsigned int t_index,b_index;
	double x_blk;// value of the point this block has to interpolate
	__shared__ double s_x[32];
	__shared__ double s_y[32];
	int ii;// generic index
	double cpy1, cpy2;// local copies of the s values this thread needs
	// compute the indices
	t_index = threadIdx.x;// thread index
	b_index = blockIdx.x + blockIdx.y * gridDim.x;// block index
	if(b_index < Nx){// keep only the first Nx blocks
		x_blk = x[b_index];// fetch the x value our block has to interpolate
		// copy the values of px/py into shared memory
		s_x[t_index] = px[t_index];
		s_y[t_index] = py[t_index];
		// apply Neville's algorithm
		for(ii = 0; ii < N -1; ii++){
			// copy the values we need before other threads modify them
			cpy1 = s_y[t_index];
			// the modulo lets the last thread read s_y[0] instead of running
			// past the array; that thread contributes nothing to the result,
			// and the number of such "idle" threads grows by 1 each sweep
			cpy2 = s_y[(t_index + 1)%N];
			// before touching the vector, make sure every thread has copied
			// the values it needs for the update
			__syncthreads();
			// compute the new s value
			s_y[t_index] = ((s_x[(t_index+ii+1)%N]-x_blk)*cpy1 + (x_blk-s_x[t_index])*cpy2)/ (s_x[(t_index+ii+1)%N]-s_x[t_index]);
		}
		// write the value to y
		if(t_index == 0)
			y[b_index] = s_y[0];
	}
}
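/* Note on the update scheme in kernel1: at sweep ii every thread first takes
   a private snapshot of the two shared values it needs (cpy1 = s_y[t],
   cpy2 = s_y[t+1]), all threads then pass the barrier, and only afterwards is
   s_y[t] overwritten, so no thread can read a value a neighbour has already
   updated in the same sweep. After sweep ii, entries s_y[0 .. N-ii-2] hold the
   degree-(ii+1) Neville interpolants; the last ii+1 threads compute values
   that are never read (the wrapped %N indices only keep them in bounds). */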
| 62a0ddc136f36fd277012b527ef4b8be4f1f2d7e.cu | #include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <helper_cuda.h>
#define SIZE_OF_GRID 512
double * neville_s(double *,double *, double *,int, int);// serial implementation
double * load_k0(double *,double *, double *, int , int, int, int);// prepares and launches kernel0
__global__ void kernel0(double *, double *, double *, double *, int, int);
double * load_k1(double *,double *, double *, int , int, int, int);// prepares and launches kernel1
__global__ void kernel1(double *, double *, double *, double *, int, int);
int main (int argc, char *argv[]){
double *px, *py;
int N, Nx, ii, dimx, dimy;
double *x, *y,*y1, *y2, PI, step;
clock_t start;
float cpu_time, gpu0_time, gpu1_time, err_medio;
cudaEvent_t start0, stop0, start1, stop1;
PI = 4*atan(1.0);
Nx = 200000;
x = (double *)malloc(Nx*sizeof(double));
srand(123);
for (ii = 0; ii < Nx; ii++)
x[ii] = PI * rand() / (double) RAND_MAX;
	N = 32; // N-1 is the degree of the interpolating polynomial
px = (double *)malloc(N*sizeof(double));
py = (double *)malloc(N*sizeof(double));
	// lookup table: sin() between
	// 0 and PI
step = 1.0 / (N-1);
for (ii = 0; ii < N; ii++){
px[ii] = ii*step*PI;
py[ii] = sin(px[ii]);
}
	// serial implementation
start = clock();
y = neville_s(px,py,x,N,Nx);
start = clock() - start;
cpu_time = start/(float)CLOCKS_PER_SEC;
	cpu_time *= 1000.0;// convert to milliseconds
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y[ii] - sin(x[ii]));
printf("CPU time: %12.10f [ms]\n\terrore medio: %.15f\n", cpu_time,err_medio);
	// work out the grid size
	if(Nx < 513){// one-dimensional grid
		dimx = Nx;
		dimy = 1;
	}
	else{// two-dimensional grid (max 512*512 points)
		dimx = 512;
		dimy = (int)(Nx/512) + 1;
	}
	// kernel 0 version
cudaEventCreate(&start0);
cudaEventCreate(&stop0);
cudaEventRecord( start0, 0 );
y1 = load_k0(px,py,x,N,Nx,dimx,dimy);
cudaEventRecord( stop0, 0 );
cudaEventSynchronize( stop0 );
cudaEventElapsedTime( &gpu0_time, start0, stop0 );
cudaEventDestroy(start0);
cudaEventDestroy(stop0);
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y1[ii] - sin(x[ii]));
printf("kernel0: % 12.3f [ms], speedup: %3.0f.\n",gpu0_time, cpu_time/gpu0_time);
printf("\terrore medio: %.15f\n",err_medio);
	// kernel 1 version
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord( start1, 0 );
y2 = load_k1(px,py,x,N,Nx,dimx,dimy);
cudaEventRecord( stop1, 0 );
cudaEventSynchronize( stop1 );
cudaEventElapsedTime( &gpu1_time, start1, stop1 );
cudaEventDestroy(start1);
cudaEventDestroy(stop1);
err_medio = 0;
for(ii = 0; ii < Nx; ii++) err_medio += fabs(y2[ii] - sin(x[ii]));
printf("kernel1: % 12.3f [ms], speedup: %3.0f.\n",gpu1_time, cpu_time/gpu1_time);
printf("\terrore medio: %.15f\n",err_medio);
free(px);
free(py);
free(x);
free(y);
free(y1);
free(y2);
return 0;
}
/* serial implementation of the algorithm */
double * neville_s(double * px,double * py, double * x,int N, int Nx){
	double * y;// holds f(x)
	double * s;// work vector used by the algorithm
	int ii,jj,kk;// generic indices
	// memory allocation
	y = (double *)malloc(sizeof(double)*Nx);
	s = (double *)malloc(sizeof(double)*N);
	// main loop of the method
	for(ii = 0; ii<Nx; ii++){
		// copy the values of py into s
		for(jj = 0; jj<N; jj++) s[jj] = py[jj];
		// Neville's algorithm
		for(jj = 1; jj<=N-1; jj++){
			for(kk = 0; kk<=N-jj-1; kk++){
				s[kk]=((px[kk+jj] - x[ii])*s[kk] + (x[ii]-px[kk])*s[kk+1])/(px[kk+jj] - px[kk]);
			}
		}
		// s[0] now holds the result of the interpolation
		y[ii] = s[0];
	}
	free(s);
	return y;
}
/* prepares and hands over the data that
kernel 0 will then use */
double * load_k0(double *px,double * py,double *x,int N, int Nx,int dimx,int dimy){
	double * px_d, * py_d, * x_d, * y_d, * y;
	//int N_d, Nx_d;
	// allocate device memory
	checkCudaErrors(cudaMalloc((void **) &px_d, sizeof(double)*N));
	checkCudaErrors(cudaMalloc((void **) &py_d, sizeof(double)*N));
	checkCudaErrors(cudaMalloc((void **) &x_d, sizeof(double)*Nx));
	checkCudaErrors(cudaMalloc((void **) &y_d, sizeof(double)*Nx));
	// copy the data we need to the device
	checkCudaErrors(cudaMemcpy(px_d, px, sizeof(double)*N, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(py_d, py, sizeof(double)*N, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(x_d, x, sizeof(double)*Nx, cudaMemcpyHostToDevice));
	//checkCudaErrors(cudaMemcpy(N_d, N, sizeof(int), cudaMemcpyHostToDevice));
	//checkCudaErrors(cudaMemcpy(Nx_d, Nx, sizeof(int), cudaMemcpyHostToDevice));
	// allocate the vector that will hold the result
	y = (double *)malloc(sizeof(double)*Nx);
	// define the grid and block dimensions
	dim3 dimBlock(N, 1, 1);
	dim3 dimGrid(dimx, dimy, 1);
	// launch kernel0
	kernel0 <<< dimGrid, dimBlock >>> (px_d, py_d, x_d, y_d, N, Nx);
	// copy back the results produced by the kernel
	checkCudaErrors( cudaMemcpy(y, y_d, sizeof(double)*Nx, cudaMemcpyDeviceToHost) );
	return y;
}
// kernel 0 implementation
__global__ void kernel0(double *px, double *py, double *x, double *y, int N, int Nx){
	unsigned int t_index,b_index;
	double x_blk;// value of the point this block has to interpolate
	__shared__ double s_x[32];
	__shared__ double s_y[32];
	int ii, jj;// generic indices
	// compute the indices
	t_index = threadIdx.x;// thread index
	b_index = blockIdx.x + blockIdx.y * gridDim.x;// block index
	if(b_index < Nx){// keep only the first Nx blocks
		x_blk = x[b_index];// fetch the x value our block has to interpolate
		// copy the values of px/py into shared memory
		s_x[t_index] = px[t_index];
		s_y[t_index] = py[t_index];
		// interpolation done by thread 0
		if(t_index == 0){
			// Neville's algorithm
			for(ii = 1; ii<N; ii++){
				for(jj = 0; jj<N-ii; jj++){
					s_y[jj]=(s_y[jj]*(s_x[jj+ii] - x_blk) + s_y[jj+1]*(x_blk-s_x[jj]))/(s_x[jj+ii] - s_x[jj]);
				}
			}
			y[b_index] = s_y[0];// store the result
		}
	}
}
/* prepares and hands over the data that
kernel 1 will then use */
double * load_k1(double *px,double * py,double *x,int N, int Nx,int dimx,int dimy){
	double * px_d, * py_d, * x_d, * y_d, * y;
	// allocate device memory
	checkCudaErrors(cudaMalloc((void **) &px_d, sizeof(double)*N));
	checkCudaErrors(cudaMalloc((void **) &py_d, sizeof(double)*N));
	checkCudaErrors(cudaMalloc((void **) &x_d, sizeof(double)*Nx));
	checkCudaErrors(cudaMalloc((void **) &y_d, sizeof(double)*Nx));
	// copy the data we need to the device
	checkCudaErrors(cudaMemcpy(px_d, px, sizeof(double)*N, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(py_d, py, sizeof(double)*N, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(x_d, x, sizeof(double)*Nx, cudaMemcpyHostToDevice));
	// allocate the vector that will hold the result
	y = (double *)malloc(sizeof(double)*Nx);
	// define the grid and block dimensions
	dim3 dimBlock(N, 1, 1);
	dim3 dimGrid(dimx, dimy, 1);
	// launch kernel1
	kernel1 <<< dimGrid, dimBlock >>> (px_d, py_d, x_d, y_d, N, Nx);
	// copy back the results produced by the kernel
	checkCudaErrors( cudaMemcpy(y, y_d, sizeof(double)*Nx, cudaMemcpyDeviceToHost) );
	return y;
}
// kernel 1 implementation
__global__ void kernel1(double *px, double *py, double *x, double *y, int N, int Nx){
	unsigned int t_index,b_index;
	double x_blk;// value of the point this block has to interpolate
	__shared__ double s_x[32];
	__shared__ double s_y[32];
	int ii;// generic index
	double cpy1, cpy2;// local copies of the s values this thread needs
	// compute the indices
	t_index = threadIdx.x;// thread index
	b_index = blockIdx.x + blockIdx.y * gridDim.x;// block index
	if(b_index < Nx){// keep only the first Nx blocks
		x_blk = x[b_index];// fetch the x value our block has to interpolate
		// copy the values of px/py into shared memory
		s_x[t_index] = px[t_index];
		s_y[t_index] = py[t_index];
		// apply Neville's algorithm
		for(ii = 0; ii < N -1; ii++){
			// copy the values we need before other threads modify them
			cpy1 = s_y[t_index];
			// the modulo lets the last thread read s_y[0] instead of running
			// past the array; that thread contributes nothing to the result,
			// and the number of such "idle" threads grows by 1 each sweep
			cpy2 = s_y[(t_index + 1)%N];
			// before touching the vector, make sure every thread has copied
			// the values it needs for the update
			__syncthreads();
			// compute the new s value
			s_y[t_index] = ((s_x[(t_index+ii+1)%N]-x_blk)*cpy1 + (x_blk-s_x[t_index])*cpy2)/ (s_x[(t_index+ii+1)%N]-s_x[t_index]);
		}
		// write the value to y
		if(t_index == 0)
			y[b_index] = s_y[0];
	}
}
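/* Minimal end-to-end usage sketch for this translation unit (it mirrors what
   main() above already does; dimx/dimy follow the same grid-size rule used
   there, i.e. at most 512*512 target points and N <= 32 knots):

       double *y0 = load_k0(px, py, x, N, Nx, dimx, dimy);  // serial Neville per block
       double *y1 = load_k1(px, py, x, N, Nx, dimx, dimy);  // parallel sweeps per block
       // both should agree with neville_s(px, py, x, N, Nx) up to rounding
*/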
|
b34886e4a2362aa44952d0e8d663ea710912a64b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <float.h>
#include <vector>
#include <cassert>
#include <cstdio>
#include <algorithm>
#include "CUDA_ConvNN.h"
#include "CUDA_ConvNN.cuh"
#include "CUDA_ConvNN_Layer.h"
#include "CUDA_Common.h"
using namespace std;
//#define TIMING
bool g_use_fast = true;
namespace CNN
{
CudaConvNN::CudaConvNN()
{
m_momentum_init = false;
m_batch_size = 0;
m_image_width = 0;
m_image_channels = 0;
d_images = NULL;
d_labels = NULL;
hipEventCreate(&m_start);
hipEventCreate(&m_stop);
}
CudaConvNN::~CudaConvNN()
{
if(d_images) {
hipFree(d_images);
}
if(d_labels) {
hipFree(d_labels);
}
for(size_t i=0; i < m_layers.size(); i++) {
m_layers[i].CleanUp();
}
}
void CudaConvNN::Init(int image_width, int channels, int categories, int batch_size)
{
assert(m_layers.empty());
assert(image_width > 0);
assert(channels > 0);
assert(categories > 0);
assert(batch_size > 0);
CUDA_SAFE_CALL(hipMalloc((void**)&d_images, batch_size * image_width * image_width * channels * sizeof(REAL)));
CUDA_SAFE_CALL(hipMalloc((void**)&d_labels, batch_size * categories * sizeof(REAL)));
m_batch_size = batch_size;
m_image_width = image_width;
m_image_channels = channels;
}
void CudaConvNN::AddLayer(LayerType type, int conv_size, int out_channels)
{
assert(type == CONV_RELU || type == CONV_TANH || type == CONV_ABS_TANH || type == CONV_SIGMOID);
assert(d_images);
assert(d_labels);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_image_width = m_image_width;
L.in_channels = m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
L.in_image_width = m_layers.back().out_image_width;
L.in_channels = m_layers.back().out_channels;
}
L.conv_size = conv_size;
L.out_channels = out_channels;
L.InitConv(type);
L.InitRandWeights(0.1);
m_layers.push_back(L);
}
void CudaConvNN::AddLayer(LayerType type, int out_size)
{
assert(type == NN_LINEAR || type == NN_RELU || type == NN_TANH || type == NN_ABS_TANH || type == NN_SIGMOID);
assert(d_images);
assert(d_labels);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_size = m_image_width * m_image_width * m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
if(m_layers.back().out_size) {
L.in_size = m_layers.back().out_size;
}
else {
L.in_size = m_layers.back().out_size_per_sample;
}
}
L.out_size = out_size;
L.InitNN(type);
L.InitRandWeights(0.1);
m_layers.push_back(L);
}
void CudaConvNN::AddLayer(LayerType type)
{
assert(type == MAX_POOL || type == AVG_POOL || type == NN_SOFTMAX);
assert(d_images);
assert(d_labels);
assert(m_image_width > 0);
assert(m_image_channels > 0);
assert(m_batch_size > 0);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_image_width = m_image_width;
L.in_channels = m_image_channels;
L.in_size = m_image_width * m_image_width * m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
L.in_image_width = m_layers.back().out_image_width;
L.in_channels = m_layers.back().out_channels;
L.in_size = m_layers.back().out_size;
}
if(type == MAX_POOL || type == AVG_POOL) {
L.InitPool(type);
}
else if(type == NN_SOFTMAX) {
L.InitSoftMax();
}
else {
assert(0);
}
m_layers.push_back(L);
}
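/* A minimal usage sketch for this class; the layer sizes below are
   illustrative assumptions, not values taken from this file. Build the
   network once, then loop batches through FeedForward()/BackProp():

       CudaConvNN net;
       net.Init(32, 3, 10, 128);          // 32x32 RGB images, 10 classes, batch of 128
       net.AddLayer(CONV_RELU, 5, 16);    // 5x5 convolution, 16 output channels
       net.AddLayer(MAX_POOL);
       net.AddLayer(NN_RELU, 64);         // fully connected hidden layer
       net.AddLayer(NN_LINEAR, 10);
       net.AddLayer(NN_SOFTMAX);
       net.SetImages(h_images, batch_image_bytes);   // host buffers, sizes in bytes
       net.SetLabels(h_labels, batch_label_bytes);
       net.FeedForward();
       net.BackProp(0.01, 0.9);           // learning rate, momentum
*/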
void CudaConvNN::FeedForward()
{
// Forward pass
for(size_t l=0; l < m_layers.size(); l++) {
Layer &L = m_layers[l];
L.ZeroOutput();
L.ZeroSumGrad();
L.ZeroMaxPool();
#ifdef TIMING
hipEventRecord(m_start, 0);
REAL time;
#endif
if(L.type == CONV_RELU || L.type == CONV_LINEAR || L.type == CONV_TANH || L.type == CONV_ABS_TANH || L.type == CONV_SIGMOID) {
if(g_use_fast && L.in_image_width == 32) {
// assert(L.out_image_width*L.out_image_width < NUM_THREADS);
#ifdef TIMING
printf("FAST CONVOLVE - 32\n");
#endif
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( ConvolveFast<32>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 24) {
// assert(L.out_image_width*L.out_image_width < NUM_THREADS);
#ifdef TIMING
printf("FAST CONVOLVE - 32\n");
#endif
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( ConvolveFast<24>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 14) {
#ifdef TIMING
printf("FAST CONVOLVE - 14\n");
#endif
assert(L.out_image_width*L.out_image_width < NUM_THREADS);
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( ConvolveFast<14>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 10) {
#ifdef TIMING
printf("FAST CONVOLVE - 14\n");
#endif
assert(L.out_image_width*L.out_image_width < NUM_THREADS);
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( ConvolveFast<10>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else {
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( Convolve), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Convolve: %f ms\n", l, time);
#endif
}
else if(L.type == MAX_POOL) {
L.ZeroMaxPool();
int iter = ceil((REAL)L.out_channels*L.out_image_width*L.out_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( MaxPool), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_in, L.in_image_width, L.in_channels, L.d_out, L.d_mask);
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Max pool: %f ms\n", l, time);
#endif
}
else if(L.type == AVG_POOL) {
int iter = ceil((REAL)L.out_channels*L.out_image_width*L.out_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( AvgPool), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_in, L.in_image_width, L.in_channels, L.d_out, L.d_mask);
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Avg pool: %f ms\n", l, time);
#endif
}
else if(L.type == NN_LINEAR || L.type == NN_RELU || L.type == NN_TANH || L.type == NN_ABS_TANH || L.type == NN_SIGMOID) {
int iter = ceil((REAL)L.out_size / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( NN), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.type, L.d_in, L.in_size,
L.d_weights, L.out_size, L.d_biases,
L.d_out, L.d_out_deriv);
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, NN: %f ms\n", l, time);
#endif
}
else if(L.type == NN_SOFTMAX) {
hipLaunchKernelGGL(( SoftMax), dim3(m_batch_size), dim3(1), 0, 0, L.d_in, L.in_size, L.d_out);
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, NN_SOFTMAX: %f ms\n", l, time);
#endif
}
else {
printf("Layer not supported\n");
assert(0);
}
}
}
size_t CudaConvNN::TotalMemUsed()
{
size_t sum=0;
for(size_t i=0; i < m_layers.size(); i++) {
sum += m_layers[i].total_mem_used;
}
return sum;
}
void CudaConvNN::BackProp(REAL learning_rate, REAL momentum_rate)
{
// Backward pass
for(int l=(int)m_layers.size()-1; l >= 0; l--) {
Layer &L = m_layers[l];
REAL *prev_grad = NULL;
if(l >= 1) {
prev_grad = m_layers[l-1].d_grad;
}
#ifdef TIMING
hipEventRecord(m_start, 0);
REAL time;
#endif
if(L.type == CONV_RELU || L.type == CONV_LINEAR || L.type == CONV_TANH || L.type == CONV_ABS_TANH || L.type == CONV_SIGMOID) {
if(g_use_fast && L.in_image_width == 32) {
#ifdef TIMING
printf("FAST BACKPROP - 32\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConvFast<32>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0,
in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 24) {
#ifdef TIMING
printf("FAST BACKPROP - 32\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConvFast<24>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0,
in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 14) {
#ifdef TIMING
printf("FAST BACKPROP - 14\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
assert(L.in_image_width*L.in_image_width < NUM_THREADS);
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConvFast<14>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0,
in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 10) {
#ifdef TIMING
printf("FAST BACKPROP - 14\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
assert(L.in_image_width*L.in_image_width < NUM_THREADS);
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConvFast<10>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0,
in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else {
#ifdef TIMING
printf("SLOW: %d\n", L.in_image_width);
#endif
int iter = ceil((REAL)L.in_channels*L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConv), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Convolve 1: %f ms\n", l, time);
#endif
if(prev_grad) {
hipEventRecord(m_start, 0);
if(L.out_image_width == 10) {
#ifdef TIMING
printf("FAST BACKPROP2 - 10\n");
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConv2Fast<10>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
else if(L.out_image_width == 6) {
#ifdef TIMING
printf("FAST BACKPROP2 - 6\n");
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConv2Fast<6>), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
else {
#ifdef TIMING
printf("SLOW BACKPROP2 - %d\n", L.out_image_width);
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropConv2), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Convolve 2: %f ms\n", l, time);
#endif
}
}
else if(L.type == MAX_POOL) {
hipLaunchKernelGGL(( BackpropMaxPool), dim3(m_batch_size), dim3(L.out_image_width), 0, 0, L.d_grad, L.out_image_width, L.out_channels, L.d_mask, prev_grad);
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Max Pool: %f ms\n", l, time);
#endif
}
else if(L.type == AVG_POOL) {
hipLaunchKernelGGL(( BackpropAvgPool), dim3(m_batch_size), dim3(L.out_image_width), 0, 0, L.d_grad, L.out_image_width, L.out_channels, L.d_mask, prev_grad);
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Avg Pool: %f ms\n", l, time);
#endif
}
else if(L.type == NN_LINEAR || L.type == NN_RELU || L.type == NN_TANH || L.type == NN_ABS_TANH || L.type == NN_SIGMOID) {
int iter = ceil((REAL)L.out_size/NUM_THREADS);
// Weights
for(int i=0; i < iter; i++) {
int thread_offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropNN), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, thread_offset,
L.d_grad,
L.d_in, L.in_size,
L.d_out_deriv, L.out_size,
L.d_weights, L.weight_rows, L.weight_cols,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
if(prev_grad) {
int iter = ceil((REAL)L.weight_cols/NUM_THREADS);
for(int i=0; i < iter; i++) {
int thread_offset = i*NUM_THREADS;
hipLaunchKernelGGL(( BackpropNN2), dim3(m_batch_size), dim3(NUM_THREADS), 0, 0, thread_offset,
L.d_grad,
L.d_in, L.in_size,
L.d_out_deriv, L.out_size,
L.d_weights, L.weight_rows, L.weight_cols,
prev_grad);
}
}
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop NN: %f ms\n", l, time);
#endif
}
else if(L.type == NN_SOFTMAX) {
assert(L.in_size < NUM_THREADS);
assert(L.d_out);
assert(d_labels);
assert(prev_grad);
hipLaunchKernelGGL(( Y_minus_target), dim3(m_batch_size), dim3(L.in_size), 0, 0, L.d_out, d_labels, L.in_size, prev_grad);
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Softmax: %f ms\n", l, time);
#endif
}
else {
printf("Layer type not implemented yet\n");
assert(0);
}
}
// Standard backprop
for(size_t l=0; l < m_layers.size(); l++) {
Layer &L = m_layers[l];
if(L.weight_size == 0) {
continue;
}
#ifdef TIMING
hipEventRecord(m_start, 0);
REAL time;
#endif
int blocks = ceil((REAL)L.weight_size/NUM_THREADS);
hipLaunchKernelGGL(( AvgGrads), dim3(blocks), dim3(NUM_THREADS), 0, 0, L.d_sum_weight_grad, L.weight_size, m_batch_size, L.d_delta_weight);
blocks = ceil((REAL)L.bias_size/NUM_THREADS);
hipLaunchKernelGGL(( AvgGrads), dim3(blocks), dim3(NUM_THREADS), 0, 0, L.d_sum_bias_grad, L.bias_size, m_batch_size, L.d_delta_bias);
// Weights
blocks = ceil((REAL)L.weight_size/NUM_THREADS);
hipLaunchKernelGGL(( UpdateWeights), dim3(blocks), dim3(NUM_THREADS), 0, 0, L.d_momentum_delta_weight, L.d_delta_weight, L.weight_size, learning_rate, momentum_rate, L.d_weights);
// Bias
blocks = ceil((REAL)L.bias_size/NUM_THREADS);
hipLaunchKernelGGL(( UpdateWeights), dim3(blocks), dim3(NUM_THREADS), 0, 0, L.d_momentum_delta_bias, L.d_delta_bias, L.bias_size, learning_rate, momentum_rate, L.d_biases);
#ifdef TIMING
hipEventRecord(m_stop, 0);
hipEventSynchronize(m_stop);
hipEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Weight update: %f ms\n", l, time);
#endif
}
}
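/* The AvgGrads and UpdateWeights kernels used above are declared in
   CUDA_ConvNN.cuh and are not shown in this file. From the call sites, the
   intent is: average the per-sample gradient sums over the batch, then apply
   a momentum update of the (assumed) classic form
       v = momentum_rate * v - learning_rate * g;   w += v;
   for both the weights and the biases of every trainable layer. */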
void CudaConvNN::SaveWeights()
{
for(size_t i=0; i < m_layers.size(); i++) {
Layer &L = m_layers[i];
if(L.weight_size == 0) {
continue;
}
char file[128];
vector <REAL> weights;
// Weights
{
sprintf(file, "layer_%02d_weights.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetWeights(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Momentum weights
{
sprintf(file, "layer_%02d_momentum_weights.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetMomentumWeights(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Bias
{
sprintf(file, "layer_%02d_biases.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetBiases(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Momentum Bias
{
sprintf(file, "layer_%02d_momentum_biases.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetMomentumBiases(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
}
}
bool CudaConvNN::LoadWeights()
{
for(size_t i=0; i < m_layers.size(); i++) {
Layer &L = m_layers[i];
if(L.weight_size == 0) {
continue;
}
char file[128];
// Weights
{
sprintf(file, "layer_%02d_weights.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.weight_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetWeights(&weights[0]);
fclose(fp);
}
		// Momentum Weights
{
sprintf(file, "layer_%02d_momentum_weights.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.weight_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetMomentumWeights(&weights[0]);
fclose(fp);
}
// Bias
{
sprintf(file, "layer_%02d_biases.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.bias_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetBiases(&weights[0]);
fclose(fp);
}
// Momentum Bias
{
sprintf(file, "layer_%02d_momentum_biases.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.bias_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetMomentumBiases(&weights[0]);
fclose(fp);
}
}
return true;
}
void CudaConvNN::SetImages(const REAL *data, size_t size)
{
assert(d_images);
assert(size);
CUDA_SAFE_CALL(hipMemcpy(d_images, data, size, hipMemcpyHostToDevice));
}
void CudaConvNN::SetLabels(const REAL *data, size_t size)
{
assert(d_labels);
assert(size);
CUDA_SAFE_CALL(hipMemcpy(d_labels, data, size, hipMemcpyHostToDevice));
}
void CudaConvNN::CheckValues()
{
for(size_t i=0; i < m_layers.size(); i++) {
m_layers[i].CheckValues();
}
}
} // namespace
| b34886e4a2362aa44952d0e8d663ea710912a64b.cu | #include <cstdio>
#include <float.h>
#include <vector>
#include <cassert>
#include <cstdio>
#include <algorithm>
#include "CUDA_ConvNN.h"
#include "CUDA_ConvNN.cuh"
#include "CUDA_ConvNN_Layer.h"
#include "CUDA_Common.h"
using namespace std;
//#define TIMING
bool g_use_fast = true;
namespace CNN
{
CudaConvNN::CudaConvNN()
{
m_momentum_init = false;
m_batch_size = 0;
m_image_width = 0;
m_image_channels = 0;
d_images = NULL;
d_labels = NULL;
cudaEventCreate(&m_start);
cudaEventCreate(&m_stop);
}
CudaConvNN::~CudaConvNN()
{
if(d_images) {
cudaFree(d_images);
}
if(d_labels) {
cudaFree(d_labels);
}
for(size_t i=0; i < m_layers.size(); i++) {
m_layers[i].CleanUp();
}
}
void CudaConvNN::Init(int image_width, int channels, int categories, int batch_size)
{
assert(m_layers.empty());
assert(image_width > 0);
assert(channels > 0);
assert(categories > 0);
assert(batch_size > 0);
CUDA_SAFE_CALL(cudaMalloc((void**)&d_images, batch_size * image_width * image_width * channels * sizeof(REAL)));
CUDA_SAFE_CALL(cudaMalloc((void**)&d_labels, batch_size * categories * sizeof(REAL)));
m_batch_size = batch_size;
m_image_width = image_width;
m_image_channels = channels;
}
void CudaConvNN::AddLayer(LayerType type, int conv_size, int out_channels)
{
assert(type == CONV_RELU || type == CONV_TANH || type == CONV_ABS_TANH || type == CONV_SIGMOID);
assert(d_images);
assert(d_labels);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_image_width = m_image_width;
L.in_channels = m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
L.in_image_width = m_layers.back().out_image_width;
L.in_channels = m_layers.back().out_channels;
}
L.conv_size = conv_size;
L.out_channels = out_channels;
L.InitConv(type);
L.InitRandWeights(0.1);
m_layers.push_back(L);
}
void CudaConvNN::AddLayer(LayerType type, int out_size)
{
assert(type == NN_LINEAR || type == NN_RELU || type == NN_TANH || type == NN_ABS_TANH || type == NN_SIGMOID);
assert(d_images);
assert(d_labels);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_size = m_image_width * m_image_width * m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
if(m_layers.back().out_size) {
L.in_size = m_layers.back().out_size;
}
else {
L.in_size = m_layers.back().out_size_per_sample;
}
}
L.out_size = out_size;
L.InitNN(type);
L.InitRandWeights(0.1);
m_layers.push_back(L);
}
void CudaConvNN::AddLayer(LayerType type)
{
assert(type == MAX_POOL || type == AVG_POOL || type == NN_SOFTMAX);
assert(d_images);
assert(d_labels);
assert(m_image_width > 0);
assert(m_image_channels > 0);
assert(m_batch_size > 0);
Layer L;
if(m_layers.empty()) {
L.num_images = m_batch_size;
L.d_in = d_images;
L.in_image_width = m_image_width;
L.in_channels = m_image_channels;
L.in_size = m_image_width * m_image_width * m_image_channels;
}
else {
L.num_images = m_layers.back().num_images;
L.d_in = m_layers.back().d_out;
L.in_image_width = m_layers.back().out_image_width;
L.in_channels = m_layers.back().out_channels;
L.in_size = m_layers.back().out_size;
}
if(type == MAX_POOL || type == AVG_POOL) {
L.InitPool(type);
}
else if(type == NN_SOFTMAX) {
L.InitSoftMax();
}
else {
assert(0);
}
m_layers.push_back(L);
}
void CudaConvNN::FeedForward()
{
// Forward pass
for(size_t l=0; l < m_layers.size(); l++) {
Layer &L = m_layers[l];
L.ZeroOutput();
L.ZeroSumGrad();
L.ZeroMaxPool();
#ifdef TIMING
cudaEventRecord(m_start, 0);
REAL time;
#endif
if(L.type == CONV_RELU || L.type == CONV_LINEAR || L.type == CONV_TANH || L.type == CONV_ABS_TANH || L.type == CONV_SIGMOID) {
if(g_use_fast && L.in_image_width == 32) {
// assert(L.out_image_width*L.out_image_width < NUM_THREADS);
#ifdef TIMING
printf("FAST CONVOLVE - 32\n");
#endif
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
ConvolveFast<32><<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 24) {
// assert(L.out_image_width*L.out_image_width < NUM_THREADS);
#ifdef TIMING
printf("FAST CONVOLVE - 32\n");
#endif
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
ConvolveFast<24><<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 14) {
#ifdef TIMING
printf("FAST CONVOLVE - 14\n");
#endif
assert(L.out_image_width*L.out_image_width < NUM_THREADS);
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
ConvolveFast<14><<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else if(g_use_fast && L.in_image_width == 10) {
#ifdef TIMING
printf("FAST CONVOLVE - 14\n");
#endif
assert(L.out_image_width*L.out_image_width < NUM_THREADS);
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
ConvolveFast<10><<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
else {
int iter = ceil((REAL)L.out_image_width*L.out_image_width*L.out_channels / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
Convolve<<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_image_width ,L.in_channels,
L.d_weights, L.conv_size, L.out_channels, L.d_biases,
L.d_out, L.d_out_deriv);
}
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Convolve: %f ms\n", l, time);
#endif
}
else if(L.type == MAX_POOL) {
L.ZeroMaxPool();
int iter = ceil((REAL)L.out_channels*L.out_image_width*L.out_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
MaxPool<<<m_batch_size, NUM_THREADS>>>(offset, L.d_in, L.in_image_width, L.in_channels, L.d_out, L.d_mask);
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Max pool: %f ms\n", l, time);
#endif
}
else if(L.type == AVG_POOL) {
int iter = ceil((REAL)L.out_channels*L.out_image_width*L.out_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
AvgPool<<<m_batch_size, NUM_THREADS>>>(offset, L.d_in, L.in_image_width, L.in_channels, L.d_out, L.d_mask);
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Avg pool: %f ms\n", l, time);
#endif
}
else if(L.type == NN_LINEAR || L.type == NN_RELU || L.type == NN_TANH || L.type == NN_ABS_TANH || L.type == NN_SIGMOID) {
int iter = ceil((REAL)L.out_size / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
NN<<<m_batch_size, NUM_THREADS>>>(offset, L.type, L.d_in, L.in_size,
L.d_weights, L.out_size, L.d_biases,
L.d_out, L.d_out_deriv);
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, NN: %f ms\n", l, time);
#endif
}
else if(L.type == NN_SOFTMAX) {
SoftMax<<<m_batch_size, 1>>>(L.d_in, L.in_size, L.d_out);
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, NN_SOFTMAX: %f ms\n", l, time);
#endif
}
else {
printf("Layer not supported\n");
assert(0);
}
}
}
size_t CudaConvNN::TotalMemUsed()
{
size_t sum=0;
for(size_t i=0; i < m_layers.size(); i++) {
sum += m_layers[i].total_mem_used;
}
return sum;
}
void CudaConvNN::BackProp(REAL learning_rate, REAL momentum_rate)
{
// Backward pass
for(int l=(int)m_layers.size()-1; l >= 0; l--) {
Layer &L = m_layers[l];
REAL *prev_grad = NULL;
if(l >= 1) {
prev_grad = m_layers[l-1].d_grad;
}
#ifdef TIMING
cudaEventRecord(m_start, 0);
REAL time;
#endif
if(L.type == CONV_RELU || L.type == CONV_LINEAR || L.type == CONV_TANH || L.type == CONV_ABS_TANH || L.type == CONV_SIGMOID) {
if(g_use_fast && L.in_image_width == 32) {
#ifdef TIMING
printf("FAST BACKPROP - 32\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
BackpropConvFast<32><<<m_batch_size, NUM_THREADS>>>
(in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 24) {
#ifdef TIMING
printf("FAST BACKPROP - 32\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
BackpropConvFast<24><<<m_batch_size, NUM_THREADS>>>
(in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 14) {
#ifdef TIMING
printf("FAST BACKPROP - 14\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
assert(L.in_image_width*L.in_image_width < NUM_THREADS);
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
BackpropConvFast<14><<<m_batch_size, NUM_THREADS>>>
(in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else if(g_use_fast && L.in_image_width == 10) {
#ifdef TIMING
printf("FAST BACKPROP - 14\n");
#endif
// Faster version - if the image is small enough it can be cached fully in shared memory
assert(L.in_image_width*L.in_image_width < NUM_THREADS);
for(int in_idx=0; in_idx < L.in_channels; in_idx++) {
int iter = ceil((REAL)L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int j=0; j < iter; j++) {
int offset = j*NUM_THREADS;
BackpropConvFast<10><<<m_batch_size, NUM_THREADS>>>
(in_idx, offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
}
else {
#ifdef TIMING
printf("SLOW: %d\n", L.in_image_width);
#endif
int iter = ceil((REAL)L.in_channels*L.out_channels*L.conv_size*L.conv_size / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
BackpropConv<<<m_batch_size, NUM_THREADS>>>(offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Convolve 1: %f ms\n", l, time);
#endif
if(prev_grad) {
cudaEventRecord(m_start, 0);
if(L.out_image_width == 10) {
#ifdef TIMING
printf("FAST BACKPROP2 - 10\n");
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
BackpropConv2Fast<10><<<m_batch_size, NUM_THREADS>>>(offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
else if(L.out_image_width == 6) {
#ifdef TIMING
printf("FAST BACKPROP2 - 6\n");
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
BackpropConv2Fast<6><<<m_batch_size, NUM_THREADS>>>(offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
else {
#ifdef TIMING
printf("SLOW BACKPROP2 - %d\n", L.out_image_width);
#endif
int iter = ceil((REAL)L.in_channels*L.in_image_width*L.in_image_width / NUM_THREADS);
for(int i=0; i < iter; i++) {
int offset = i*NUM_THREADS;
BackpropConv2<<<m_batch_size, NUM_THREADS>>>(offset, L.d_grad, L.d_out_deriv, L.d_weights,
L.d_in, L.in_image_width, L.in_channels,
L.out_channels, L.conv_size,
prev_grad);
}
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Convolve 2: %f ms\n", l, time);
#endif
}
}
else if(L.type == MAX_POOL) {
BackpropMaxPool<<<m_batch_size, L.out_image_width>>>(L.d_grad, L.out_image_width, L.out_channels, L.d_mask, prev_grad);
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Max Pool: %f ms\n", l, time);
#endif
}
else if(L.type == AVG_POOL) {
BackpropAvgPool<<<m_batch_size, L.out_image_width>>>(L.d_grad, L.out_image_width, L.out_channels, L.d_mask, prev_grad);
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Avg Pool: %f ms\n", l, time);
#endif
}
else if(L.type == NN_LINEAR || L.type == NN_RELU || L.type == NN_TANH || L.type == NN_ABS_TANH || L.type == NN_SIGMOID) {
int iter = ceil((REAL)L.out_size/NUM_THREADS);
// Weights
for(int i=0; i < iter; i++) {
int thread_offset = i*NUM_THREADS;
BackpropNN<<<m_batch_size, NUM_THREADS>>>(thread_offset,
L.d_grad,
L.d_in, L.in_size,
L.d_out_deriv, L.out_size,
L.d_weights, L.weight_rows, L.weight_cols,
L.d_sum_weight_grad, L.d_sum_bias_grad);
}
if(prev_grad) {
int iter = ceil((REAL)L.weight_cols/NUM_THREADS);
for(int i=0; i < iter; i++) {
int thread_offset = i*NUM_THREADS;
BackpropNN2<<<m_batch_size, NUM_THREADS>>>(thread_offset,
L.d_grad,
L.d_in, L.in_size,
L.d_out_deriv, L.out_size,
L.d_weights, L.weight_rows, L.weight_cols,
prev_grad);
}
}
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop NN: %f ms\n", l, time);
#endif
}
else if(L.type == NN_SOFTMAX) {
assert(L.in_size < NUM_THREADS);
assert(L.d_out);
assert(d_labels);
assert(prev_grad);
Y_minus_target<<<m_batch_size, L.in_size>>>(L.d_out, d_labels, L.in_size, prev_grad);
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Backprop Softmax: %f ms\n", l, time);
#endif
}
else {
printf("Layer type not implemented yet\n");
assert(0);
}
}
// Standard backprop
for(size_t l=0; l < m_layers.size(); l++) {
Layer &L = m_layers[l];
if(L.weight_size == 0) {
continue;
}
#ifdef TIMING
cudaEventRecord(m_start, 0);
REAL time;
#endif
int blocks = ceil((REAL)L.weight_size/NUM_THREADS);
AvgGrads<<<blocks, NUM_THREADS>>>(L.d_sum_weight_grad, L.weight_size, m_batch_size, L.d_delta_weight);
blocks = ceil((REAL)L.bias_size/NUM_THREADS);
AvgGrads<<<blocks, NUM_THREADS>>>(L.d_sum_bias_grad, L.bias_size, m_batch_size, L.d_delta_bias);
// Weights
blocks = ceil((REAL)L.weight_size/NUM_THREADS);
UpdateWeights<<<blocks, NUM_THREADS>>>(L.d_momentum_delta_weight, L.d_delta_weight, L.weight_size, learning_rate, momentum_rate, L.d_weights);
// Bias
blocks = ceil((REAL)L.bias_size/NUM_THREADS);
UpdateWeights<<<blocks, NUM_THREADS>>>(L.d_momentum_delta_bias, L.d_delta_bias, L.bias_size, learning_rate, momentum_rate, L.d_biases);
#ifdef TIMING
cudaEventRecord(m_stop, 0);
cudaEventSynchronize(m_stop);
cudaEventElapsedTime(&time, m_start, m_stop);
printf("Layer %d, Weight update: %f ms\n", l, time);
#endif
}
}
void CudaConvNN::SaveWeights()
{
for(size_t i=0; i < m_layers.size(); i++) {
Layer &L = m_layers[i];
if(L.weight_size == 0) {
continue;
}
char file[128];
vector <REAL> weights;
// Weights
{
sprintf(file, "layer_%02d_weights.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetWeights(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Momentum weights
{
sprintf(file, "layer_%02d_momentum_weights.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetMomentumWeights(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Bias
{
sprintf(file, "layer_%02d_biases.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetBiases(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
// Momentum Bias
{
sprintf(file, "layer_%02d_momentum_biases.raw", (int)i);
FILE *fp = fopen(file, "w+");
assert(fp);
L.GetMomentumBiases(weights);
fwrite(&weights[0], weights.size()*sizeof(REAL), 1, fp);
fclose(fp);
}
}
}
bool CudaConvNN::LoadWeights()
{
for(size_t i=0; i < m_layers.size(); i++) {
Layer &L = m_layers[i];
if(L.weight_size == 0) {
continue;
}
char file[128];
// Weights
{
sprintf(file, "layer_%02d_weights.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.weight_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetWeights(&weights[0]);
fclose(fp);
}
		// Momentum Weights
{
sprintf(file, "layer_%02d_momentum_weights.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.weight_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetMomentumWeights(&weights[0]);
fclose(fp);
}
// Bias
{
sprintf(file, "layer_%02d_biases.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.bias_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetBiases(&weights[0]);
fclose(fp);
}
// Momentum Bias
{
sprintf(file, "layer_%02d_momentum_biases.raw", (int)i);
FILE *fp = fopen(file, "r");
if(fp == NULL) {
return false;
}
vector <REAL> weights(L.bias_size);
size_t n = fread(&weights[0], 1, weights.size()*sizeof(REAL), fp);
assert(n == weights.size()*sizeof(REAL));
L.SetMomentumBiases(&weights[0]);
fclose(fp);
}
}
return true;
}
void CudaConvNN::SetImages(const REAL *data, size_t size)
{
assert(d_images);
assert(size);
CUDA_SAFE_CALL(cudaMemcpy(d_images, data, size, cudaMemcpyHostToDevice));
}
void CudaConvNN::SetLabels(const REAL *data, size_t size)
{
assert(d_labels);
assert(size);
CUDA_SAFE_CALL(cudaMemcpy(d_labels, data, size, cudaMemcpyHostToDevice));
}
void CudaConvNN::CheckValues()
{
for(size_t i=0; i < m_layers.size(); i++) {
m_layers[i].CheckValues();
}
}
} // namespace
|
79bb08f3d4efd743dbfdc90a506406171f1bd716.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Enda Carroll
// Sept 2019
// Function declarations for cuThomasConstantBatch routine to solve batches of tridiagonal systems
// Copyright 2019 Enda Carroll
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// User Libraries and Headers
// ---------------------------------------------------------------------
#include "cuThomasBatch.h"
/**
 * Function to perform a prefactorization of the LHS using the Thomas algorithm (performed on the host)
 *
 * @param la Lower diagonal of the LHS matrix - array of length n
 * @param lb Main diagonal of the LHS matrix - array of length n
 * @param lc Upper diagonal of the LHS matrix - array of length n
* @param n Size of the system being solved
*/
void thomasFactorConstantBatch(double* la, double* lb, double* lc, int n) {
int rowCurrent;
int rowPrevious;
rowCurrent = 0;
// First row
lb[rowCurrent] = lb[rowCurrent];
lc[rowCurrent] = lc[rowCurrent] / lb[rowCurrent];
for (int i = 1; i < n - 1; ++i) {
rowPrevious = rowCurrent;
rowCurrent += 1;
la[rowCurrent] = la[rowCurrent];
lb[rowCurrent] = lb[rowCurrent] - la[rowCurrent]*lc[rowPrevious];
lc[rowCurrent] = lc[rowCurrent] / lb[rowCurrent];
}
rowPrevious = rowCurrent;
rowCurrent += 1;
// Last row
la[rowCurrent] = la[rowCurrent];
lb[rowCurrent] = lb[rowCurrent] - la[rowCurrent]*lc[rowPrevious];
}
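/* After this factorization the arrays hold exactly the quantities the batched
   solve below consumes: lb[i] is the modified pivot b'_i = b_i - a_i * c'_{i-1}
   and lc[i] is the scaled upper diagonal c'_i = c_i / b'_i (lc[n-1] is left
   untouched because the last row has no upper-diagonal entry). A host-side
   sketch, assuming a single tridiagonal LHS shared by the whole batch:

       thomasFactorConstantBatch(la, lb, lc, n);   // once, on the host
       // then copy la, lb, lc to the device and launch cuThomasConstantBatch
*/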
/**
 * Kernel to solve a prefactorized system using the Thomas algorithm
 *
 * @param la Lower diagonal of the LHS matrix - array of length n
 * @param lb Main diagonal of the LHS matrix - array of length n
 * @param lc Upper diagonal of the LHS matrix - array of length n
* @param d RHS array - size n by m
* @param n Size of the system being solved
* @param m Size of the batch
*/
__global__ void cuThomasConstantBatch(double* la, double* lb, double* lc, double* d, int n, int m ) {
int rowCurrent;
int rowPrevious;
int rowAhead;
// set the current row
rowCurrent = threadIdx.x + blockDim.x*blockIdx.x;
int i = 0;
if ( rowCurrent < m )
{
//----- Forward Sweep
d[rowCurrent] = d[rowCurrent] / lb[i];
#pragma unroll
for (i = 1; i < n; ++i) {
rowPrevious = rowCurrent;
rowCurrent += m;
d[rowCurrent] = (d[rowCurrent] - la[i]*d[rowPrevious]) / (lb[i]);
}
//----- Back Sub
d[rowCurrent] = d[rowCurrent];
#pragma unroll
for (i = n - 2; i >= 0; --i) {
rowAhead = rowCurrent;
rowCurrent -= m;
d[rowCurrent] = d[rowCurrent] - lc[i] * d[rowAhead];
}
}
}
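/* Launch sketch for the batched solve (la_d/lb_d/lc_d/d_d are assumed device
   copies of the factorized diagonals and the RHS; the block size of 128 is an
   illustrative choice). The RHS is interleaved: element i of system j lives at
   d[i*m + j], so consecutive threads access consecutive memory locations.

       int threads = 128;
       int blocks  = (m + threads - 1) / threads;   // one thread per system
       hipLaunchKernelGGL(cuThomasConstantBatch, dim3(blocks), dim3(threads), 0, 0,
                          la_d, lb_d, lc_d, d_d, n, m);
*/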
| 79bb08f3d4efd743dbfdc90a506406171f1bd716.cu | // Enda Carroll
// Sept 2019
// Function declarations for cuThomasConstantBatch routine to solve batches of tridiagonal systems
// Copyright 2019 Enda Carroll
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ---------------------------------------------------------------------
// Standard Libraries and Headers
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// User Libraries and Headers
// ---------------------------------------------------------------------
#include "cuThomasBatch.h"
/**
 * Function to perform a prefactorization of the LHS using the Thomas algorithm (performed on the host)
 *
 * @param la Lower diagonal of the LHS matrix - array of length n
 * @param lb Main diagonal of the LHS matrix - array of length n
 * @param lc Upper diagonal of the LHS matrix - array of length n
* @param n Size of the system being solved
*/
void thomasFactorConstantBatch(double* la, double* lb, double* lc, int n) {
int rowCurrent;
int rowPrevious;
rowCurrent = 0;
// First row
lb[rowCurrent] = lb[rowCurrent];
lc[rowCurrent] = lc[rowCurrent] / lb[rowCurrent];
for (int i = 1; i < n - 1; ++i) {
rowPrevious = rowCurrent;
rowCurrent += 1;
la[rowCurrent] = la[rowCurrent];
lb[rowCurrent] = lb[rowCurrent] - la[rowCurrent]*lc[rowPrevious];
lc[rowCurrent] = lc[rowCurrent] / lb[rowCurrent];
}
rowPrevious = rowCurrent;
rowCurrent += 1;
// Last row
la[rowCurrent] = la[rowCurrent];
lb[rowCurrent] = lb[rowCurrent] - la[rowCurrent]*lc[rowPrevious];
}
/**
 * Kernel to solve a prefactorized system using the Thomas algorithm
 *
 * @param la Lower diagonal of the LHS matrix - array of length n
 * @param lb Main diagonal of the LHS matrix - array of length n
 * @param lc Upper diagonal of the LHS matrix - array of length n
* @param d RHS array - size n by m
* @param n Size of the system being solved
* @param m Size of the batch
*/
__global__ void cuThomasConstantBatch(double* la, double* lb, double* lc, double* d, int n, int m ) {
int rowCurrent;
int rowPrevious;
int rowAhead;
// set the current row
rowCurrent = threadIdx.x + blockDim.x*blockIdx.x;
int i = 0;
if ( rowCurrent < m )
{
//----- Forward Sweep
d[rowCurrent] = d[rowCurrent] / lb[i];
#pragma unroll
for (i = 1; i < n; ++i) {
rowPrevious = rowCurrent;
rowCurrent += m;
d[rowCurrent] = (d[rowCurrent] - la[i]*d[rowPrevious]) / (lb[i]);
}
//----- Back Sub
d[rowCurrent] = d[rowCurrent];
#pragma unroll
for (i = n - 2; i >= 0; --i) {
rowAhead = rowCurrent;
rowCurrent -= m;
d[rowCurrent] = d[rowCurrent] - lc[i] * d[rowAhead];
}
}
}
|
b06abe423524367b566d4dd1f02ce05e7145885e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@author Mark Gates
@generated from zlaset.cu normal z -> c, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clacpy, clag2z, clag2z, cgeadd.
*/
__global__
void claset_full(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || offdiag == diag));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to claset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to clacpy, zlat2c, clat2z.
*/
__global__
void claset_lower(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to claset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to clacpy, zlat2c, clat2z.
*/
__global__
void claset_upper(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/**
Purpose
-------
CLASET_STREAM initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as CLASET, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA is set.
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag COMPLEX
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag COMPLEX
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C"
void magmablas_claset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( (m-1)/BLK_X + 1, (n-1)/BLK_Y + 1 );
if (uplo == MagmaLower)
hipLaunchKernelGGL(( claset_lower), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
else if (uplo == MagmaUpper)
hipLaunchKernelGGL(( claset_upper), dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
else
hipLaunchKernelGGL(( claset_full) , dim3(grid), dim3(threads), 0, queue , m, n, offdiag, diag, dA, ldda);
}
/**
@see magmablas_claset_q
@ingroup magma_caux2
********************************************************************/
extern "C"
void magmablas_claset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *dA, magma_int_t ldda )
{
magmablas_claset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
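// Editor's usage sketch (not part of the original MAGMA source): a minimal,
// hedged example of calling the interface above to set a device matrix to the
// identity (zeros off the diagonal, ones on the diagonal). The allocation
// helpers magma_cmalloc/magma_free and the MAGMA_C_* constants are assumed to
// be provided by the MAGMA headers already included here.
static void example_claset_identity( magma_int_t m, magma_int_t n )
{
    magmaFloatComplex *dA;
    magma_int_t ldda = m;                       // simple choice; LDDA >= max(1,M)
    if ( MAGMA_SUCCESS != magma_cmalloc( &dA, ldda*n ) )
        return;                                 // allocation failed; nothing to demonstrate
    // Full matrix: OFFDIAG = 0 everywhere off the diagonal, DIAG = 1 on it.
    magmablas_claset( MagmaFull, m, n, MAGMA_C_ZERO, MAGMA_C_ONE, dA, ldda );
    magma_free( dA );
}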
| b06abe423524367b566d4dd1f02ce05e7145885e.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@author Mark Gates
@generated from zlaset.cu normal z -> c, Tue Sep 2 12:38:16 2014
*/
#include "common_magma.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to clacpy, clag2z, clag2z, cgeadd.
*/
__global__
void claset_full(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || offdiag == diag));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to claset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to clacpy, zlat2c, clat2z.
*/
__global__
void claset_lower(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/*
Similar to claset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to clacpy, zlat2c, clat2z.
*/
__global__
void claset_upper(
int m, int n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/**
Purpose
-------
CLASET_STREAM initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
This is the same as CLASET, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of the matrix dA is set.
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag COMPLEX
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag COMPLEX
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C"
void magmablas_claset_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1 );
dim3 grid( (m-1)/BLK_X + 1, (n-1)/BLK_Y + 1 );
if (uplo == MagmaLower)
claset_lower<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
else if (uplo == MagmaUpper)
claset_upper<<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
else
claset_full <<< grid, threads, 0, queue >>> (m, n, offdiag, diag, dA, ldda);
}
/**
@see magmablas_claset_q
@ingroup magma_caux2
********************************************************************/
extern "C"
void magmablas_claset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaFloatComplex offdiag, magmaFloatComplex diag,
magmaFloatComplex *dA, magma_int_t ldda )
{
magmablas_claset_q( uplo, m, n, offdiag, diag, dA, ldda, magma_stream );
}
|