hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
cd4e77bc74e7bd32832047eed651a148c25f5a97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved.
#include <stdio.h>
#include <stdlib.h>
#include "pytorch_cuda_helper.hpp"
#include "voxelization_cuda_kernel.cuh"
int HardVoxelizeForwardCUDAKernelLauncher(
const at::Tensor &points, at::Tensor &voxels, at::Tensor &coors,
at::Tensor &num_points_per_voxel, const std::vector<float> voxel_size,
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim = 3) {
// current version takes about 0.04s for one frame on cpu
// check device
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int num_points = points.size(0);
const int num_features = points.size(1);
const float voxel_x = voxel_size[0];
const float voxel_y = voxel_size[1];
const float voxel_z = voxel_size[2];
const float coors_x_min = coors_range[0];
const float coors_y_min = coors_range[1];
const float coors_z_min = coors_range[2];
const float coors_x_max = coors_range[3];
const float coors_y_max = coors_range[4];
const float coors_z_max = coors_range[5];
const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
// map points to voxel coors
at::Tensor temp_coors =
at::zeros({num_points, NDim}, points.options().dtype(at::kInt));
dim3 grid(::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
dim3 block(512);
// 1. link point to corresponding voxel coors
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "hard_voxelize_kernel", ([&] {
hipLaunchKernelGGL(( dynamic_voxelize_kernel<scalar_t, int>), dim3(grid), dim3(block), 0, stream,
points.contiguous().data_ptr<scalar_t>(),
temp_coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
coors_z_max, grid_x, grid_y, grid_z, num_points, num_features,
NDim);
}));
AT_CUDA_CHECK(hipGetLastError());
// 2. map point to the idx of the corresponding voxel, find duplicate coor
// create some temporary variables
auto point_to_pointidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
auto point_to_voxelidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
dim3 map_grid(::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
dim3 map_block(512);
AT_DISPATCH_ALL_TYPES(
temp_coors.scalar_type(), "determin_duplicate", ([&] {
hipLaunchKernelGGL(( point_to_voxelidx_kernel<int>), dim3(map_grid), dim3(map_block), 0, stream,
temp_coors.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
point_to_pointidx.contiguous().data_ptr<int>(), max_points,
max_voxels, num_points, NDim);
}));
AT_CUDA_CHECK(hipGetLastError());
// 3. determine voxel num and voxel's coor index
// doing this logic on the CUDA device accelerates it about 10 times
auto coor_to_voxelidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
auto voxel_num = at::zeros(
{
1,
},
points.options().dtype(at::kInt)); // must be zero from the beginning
AT_DISPATCH_ALL_TYPES(temp_coors.scalar_type(), "determin_duplicate", ([&] {
hipLaunchKernelGGL(( determin_voxel_num<int>), dim3(1), dim3(1), 0, stream,
num_points_per_voxel.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
point_to_pointidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
voxel_num.contiguous().data_ptr<int>(),
max_points, max_voxels, num_points);
}));
AT_CUDA_CHECK(hipGetLastError());
// 4. copy point features to voxels
// Step 4 & 5 could be parallel
auto pts_output_size = num_points * num_features;
dim3 cp_grid(::min(at::cuda::ATenCeilDiv(pts_output_size, 512), 4096));
dim3 cp_block(512);
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "assign_point_to_voxel", ([&] {
hipLaunchKernelGGL(( assign_point_to_voxel<float, int>), dim3(cp_grid), dim3(cp_block), 0, stream,
pts_output_size, points.contiguous().data_ptr<float>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
voxels.contiguous().data_ptr<float>(), max_points, num_features,
num_points, NDim);
}));
// hipDeviceSynchronize();
// AT_CUDA_CHECK(hipGetLastError());
// 5. copy coors of each voxel
auto coors_output_size = num_points * NDim;
dim3 coors_cp_grid(
::min(at::cuda::ATenCeilDiv(coors_output_size, 512), 4096));
dim3 coors_cp_block(512);
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "assign_point_to_voxel", ([&] {
hipLaunchKernelGGL(( assign_voxel_coors<float, int>)
, dim3(coors_cp_grid), dim3(coors_cp_block), 0, stream,
coors_output_size, temp_coors.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
coors.contiguous().data_ptr<int>(), num_points, NDim);
}));
AT_CUDA_CHECK(hipGetLastError());
auto voxel_num_cpu = voxel_num.to(at::kCPU);
int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0];
return voxel_num_int;
}
void DynamicVoxelizeForwardCUDAKernelLauncher(
const at::Tensor &points, at::Tensor &coors,
const std::vector<float> voxel_size, const std::vector<float> coors_range,
const int NDim = 3) {
// current version takes about 0.04s for one frame on cpu
// check device
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int num_points = points.size(0);
const int num_features = points.size(1);
const float voxel_x = voxel_size[0];
const float voxel_y = voxel_size[1];
const float voxel_z = voxel_size[2];
const float coors_x_min = coors_range[0];
const float coors_y_min = coors_range[1];
const float coors_z_min = coors_range[2];
const float coors_x_max = coors_range[3];
const float coors_y_max = coors_range[4];
const float coors_z_max = coors_range[5];
const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
const int col_blocks = at::cuda::ATenCeilDiv(num_points, THREADS_PER_BLOCK);
dim3 blocks(col_blocks);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_ALL_TYPES(points.scalar_type(), "dynamic_voxelize_kernel", [&] {
hipLaunchKernelGGL(( dynamic_voxelize_kernel<scalar_t, int>), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<scalar_t>(),
coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim);
});
AT_CUDA_CHECK(hipGetLastError());
}
| cd4e77bc74e7bd32832047eed651a148c25f5a97.cu | // Copyright (c) OpenMMLab. All rights reserved.
#include <stdio.h>
#include <stdlib.h>
#include "pytorch_cuda_helper.hpp"
#include "voxelization_cuda_kernel.cuh"
int HardVoxelizeForwardCUDAKernelLauncher(
const at::Tensor &points, at::Tensor &voxels, at::Tensor &coors,
at::Tensor &num_points_per_voxel, const std::vector<float> voxel_size,
const std::vector<float> coors_range, const int max_points,
const int max_voxels, const int NDim = 3) {
// current version takes about 0.04s for one frame on cpu
// check device
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int num_points = points.size(0);
const int num_features = points.size(1);
const float voxel_x = voxel_size[0];
const float voxel_y = voxel_size[1];
const float voxel_z = voxel_size[2];
const float coors_x_min = coors_range[0];
const float coors_y_min = coors_range[1];
const float coors_z_min = coors_range[2];
const float coors_x_max = coors_range[3];
const float coors_y_max = coors_range[4];
const float coors_z_max = coors_range[5];
const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
// map points to voxel coors
at::Tensor temp_coors =
at::zeros({num_points, NDim}, points.options().dtype(at::kInt));
dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
dim3 block(512);
// 1. link point to corresponding voxel coors
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "hard_voxelize_kernel", ([&] {
dynamic_voxelize_kernel<scalar_t, int><<<grid, block, 0, stream>>>(
points.contiguous().data_ptr<scalar_t>(),
temp_coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
coors_z_max, grid_x, grid_y, grid_z, num_points, num_features,
NDim);
}));
AT_CUDA_CHECK(cudaGetLastError());
// 2. map point to the idx of the corresponding voxel, find duplicate coor
// create some temporary variables
auto point_to_pointidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
auto point_to_voxelidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
dim3 map_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096));
dim3 map_block(512);
AT_DISPATCH_ALL_TYPES(
temp_coors.scalar_type(), "determin_duplicate", ([&] {
point_to_voxelidx_kernel<int><<<map_grid, map_block, 0, stream>>>(
temp_coors.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
point_to_pointidx.contiguous().data_ptr<int>(), max_points,
max_voxels, num_points, NDim);
}));
AT_CUDA_CHECK(cudaGetLastError());
// 3. determine voxel num and voxel's coor index
// doing this logic on the CUDA device accelerates it about 10 times
auto coor_to_voxelidx = -at::ones(
{
num_points,
},
points.options().dtype(at::kInt));
auto voxel_num = at::zeros(
{
1,
},
points.options().dtype(at::kInt)); // must be zero from the beginning
AT_DISPATCH_ALL_TYPES(temp_coors.scalar_type(), "determin_duplicate", ([&] {
determin_voxel_num<int><<<1, 1, 0, stream>>>(
num_points_per_voxel.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
point_to_pointidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
voxel_num.contiguous().data_ptr<int>(),
max_points, max_voxels, num_points);
}));
AT_CUDA_CHECK(cudaGetLastError());
// 4. copy point features to voxels
// Step 4 & 5 could be parallel
auto pts_output_size = num_points * num_features;
dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(pts_output_size, 512), 4096));
dim3 cp_block(512);
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "assign_point_to_voxel", ([&] {
assign_point_to_voxel<float, int><<<cp_grid, cp_block, 0, stream>>>(
pts_output_size, points.contiguous().data_ptr<float>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
voxels.contiguous().data_ptr<float>(), max_points, num_features,
num_points, NDim);
}));
// cudaDeviceSynchronize();
// AT_CUDA_CHECK(cudaGetLastError());
// 5. copy coors of each voxel
auto coors_output_size = num_points * NDim;
dim3 coors_cp_grid(
std::min(at::cuda::ATenCeilDiv(coors_output_size, 512), 4096));
dim3 coors_cp_block(512);
AT_DISPATCH_ALL_TYPES(
points.scalar_type(), "assign_point_to_voxel", ([&] {
assign_voxel_coors<float, int>
<<<coors_cp_grid, coors_cp_block, 0, stream>>>(
coors_output_size, temp_coors.contiguous().data_ptr<int>(),
point_to_voxelidx.contiguous().data_ptr<int>(),
coor_to_voxelidx.contiguous().data_ptr<int>(),
coors.contiguous().data_ptr<int>(), num_points, NDim);
}));
AT_CUDA_CHECK(cudaGetLastError());
auto voxel_num_cpu = voxel_num.to(at::kCPU);
int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0];
return voxel_num_int;
}
void DynamicVoxelizeForwardCUDAKernelLauncher(
const at::Tensor &points, at::Tensor &coors,
const std::vector<float> voxel_size, const std::vector<float> coors_range,
const int NDim = 3) {
// current version takes about 0.04s for one frame on cpu
// check device
at::cuda::CUDAGuard device_guard(points.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int num_points = points.size(0);
const int num_features = points.size(1);
const float voxel_x = voxel_size[0];
const float voxel_y = voxel_size[1];
const float voxel_z = voxel_size[2];
const float coors_x_min = coors_range[0];
const float coors_y_min = coors_range[1];
const float coors_z_min = coors_range[2];
const float coors_x_max = coors_range[3];
const float coors_y_max = coors_range[4];
const float coors_z_max = coors_range[5];
const int grid_x = round((coors_x_max - coors_x_min) / voxel_x);
const int grid_y = round((coors_y_max - coors_y_min) / voxel_y);
const int grid_z = round((coors_z_max - coors_z_min) / voxel_z);
const int col_blocks = at::cuda::ATenCeilDiv(num_points, THREADS_PER_BLOCK);
dim3 blocks(col_blocks);
dim3 threads(THREADS_PER_BLOCK);
AT_DISPATCH_ALL_TYPES(points.scalar_type(), "dynamic_voxelize_kernel", [&] {
dynamic_voxelize_kernel<scalar_t, int><<<blocks, threads, 0, stream>>>(
points.contiguous().data_ptr<scalar_t>(),
coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z,
coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max,
coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim);
});
AT_CUDA_CHECK(cudaGetLastError());
}
|
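The pair above illustrates the transformation that every row of this table documents: hipify leaves kernel bodies untouched and mechanically rewrites the CUDA runtime API (`cudaStream_t` to `hipStream_t`, `cudaGetLastError` to `hipGetLastError`, the ATen CUDA guard to its HIP "masquerading" counterpart) and the triple-chevron launch into `hipLaunchKernelGGL`. A minimal sketch of that correspondence follows; the kernel name `scale_kernel` and the `__HIPCC__` guard are illustrative assumptions, not part of the files above.

```cpp
// Illustrative sketch only: the CUDA launch form (as in the *.cu column) and
// the HIP form that hipify emits (as in the *.hip column), for a toy kernel.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scale_kernel(float* data, int n, float factor) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, int n, float factor) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
#ifdef __HIPCC__
  // HIP form produced by hipify: kernel, grid, block, shared mem, stream, args.
  hipLaunchKernelGGL((scale_kernel), grid, block, 0, 0, d_data, n, factor);
#else
  // Original CUDA form.
  scale_kernel<<<grid, block, 0, 0>>>(d_data, n, factor);
#endif
}
```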
fea11d2ec403c8e71a513978d2eb3e0813d3737c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x: input [bS, iC, iH, iW]
// y: gradO [bS, iC, oH, oW]
// z: gradI [bS, iC, iH, iW] -> gradI is output in this function
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
Nd4jLong coord2, coord3;
__shared__ int rank, kHeff, kWeff, iH, iW, kProd;
__shared__ Nd4jLong yLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
yLen = shape::length(yShapeInfo);
rank = 4;
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iH = xShapeInfo[3];
iW = xShapeInfo[4];
kProd = kH * kW;
}
__syncthreads();
const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;
if(yInd >= yLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(yInd, yShapeInfo, coords);
const auto yOffset = shape::getOffset(yShapeInfo, coords);
int hstart = coords[2] * sH - pH;
int wstart = coords[3] * sW - pW;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
coord2 = hstart;
coord3 = wstart;
T max = -DataTypeUtils::max<T>();
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) {
max = val;
coord2 = coords[2];
coord3 = coords[3];
}
}
}
coords[2] = coord2;
coords[3] = coord3;
auto zOffset = shape::getOffset(zShapeInfo, coords);
sd::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]);
//z[zOffset] += y[yOffset];
}
break;
/*** avg ***/
case 1: {
T val = y[yOffset];
if (extraParam0 == 0) //Exclude padding
val /= sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
else if (extraParam0 == 1) //Include padding
val /= kProd;
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sd::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
T val = y[yOffset];
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
val *= sd::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) {
const auto xOffset = shape::getOffset(xShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
sd::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * sd::math::nd4j_sgn<T,T>(x[xOffset]));
}
}
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const int poolingMode, const int extraParam0) {
hipLaunchKernelGGL(( pooling2dBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling2dBP(sd::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// initial zeroing of gradI
gradI.nullify();
PointersManager manager(block.launchContext(), "pooling2dBP");
const int threadsPerBlock = 256;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&input, &gradO});
manager.synchronize();
}
}
} | fea11d2ec403c8e71a513978d2eb3e0813d3737c.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected])
//
#include <ops/declarable/helpers/convolutions.h>
#include <helpers/PointersManager.h>
#include <math/templatemath.h>
namespace sd {
namespace ops {
//////////////////////////////////////////////////////////////////////////
template <typename T>
__global__ static void pooling2dBPCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// x: input [bS, iC, iH, iW]
// y: gradO [bS, iC, oH, oW]
// z: gradI [bS, iC, iH, iW] -> gradI is output in this function
const T* x = reinterpret_cast<const T*>(vx);
const T* y = reinterpret_cast<const T*>(vy);
T* z = reinterpret_cast<T*>(vz);
Nd4jLong coord2, coord3;
__shared__ int rank, kHeff, kWeff, iH, iW, kProd;
__shared__ Nd4jLong yLen, *sharedMem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
yLen = shape::length(yShapeInfo);
rank = 4;
kHeff = kH + (kH - 1) * (dH - 1);
kWeff = kW + (kW - 1) * (dW - 1);
iH = xShapeInfo[3];
iW = xShapeInfo[4];
kProd = kH * kW;
}
__syncthreads();
const auto yInd = threadIdx.x + blockIdx.x * blockDim.x;
if(yInd >= yLen)
return;
auto coords = sharedMem + threadIdx.x * rank;
shape::index2coords(yInd, yShapeInfo, coords);
const auto yOffset = shape::getOffset(yShapeInfo, coords);
int hstart = coords[2] * sH - pH;
int wstart = coords[3] * sW - pW;
int hend = hstart + kHeff;
int wend = wstart + kWeff;
if(hstart < 0)
hstart += dH * ((-hstart + dH - 1) / dH);
if(wstart < 0)
wstart += dW * ((-wstart + dW - 1) / dW);
if(hend > iH)
hend -= dH * ((hend - iH + dH - 1) / dH);
if(wend > iW)
wend -= dW * ((wend - iW + dW - 1) / dW);
switch (poolingMode) {
/*** max ***/
case 0: {
coord2 = hstart;
coord3 = wstart;
T max = -DataTypeUtils::max<T>();
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW){
T val = x[shape::getOffset(xShapeInfo, coords)];
if (val > max) {
max = val;
coord2 = coords[2];
coord3 = coords[3];
}
}
}
coords[2] = coord2;
coords[3] = coord3;
auto zOffset = shape::getOffset(zShapeInfo, coords);
sd::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], y[yOffset]);
//z[zOffset] += y[yOffset];
}
break;
/*** avg ***/
case 1: {
T val = y[yOffset];
if (extraParam0 == 0) //Exclude padding
val /= sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation
else if (extraParam0 == 1) //Include padding
val /= kProd;
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sd::math::atomics::nd4j_atomicAdd<T>(&z[shape::getOffset(zShapeInfo, coords)], val);
}
break;
/*** pnorm ***/
case 2: {
T sum = static_cast<T>(0.);
T val = y[yOffset];
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH)
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW)
sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0);
val *= sd::math::nd4j_pow<T,T,T>(sum, ((T)1.f - extraParam0) / extraParam0);
for (coords[2] = hstart; coords[2] < hend; coords[2] += dH) {
for (coords[3] = wstart; coords[3] < wend; coords[3] += dW) {
const auto xOffset = shape::getOffset(xShapeInfo, coords);
const auto zOffset = shape::getOffset(zShapeInfo, coords);
sd::math::atomics::nd4j_atomicAdd<T>(&z[zOffset], val * sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[xOffset]), extraParam0 - 1.f) * sd::math::nd4j_sgn<T,T>(x[xOffset]));
}
}
}
break;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void pooling2dBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo,
const void* vy, const Nd4jLong* yShapeInfo,
void* vz, const Nd4jLong* zShapeInfo,
const int kH, const int kW,
const int sH, const int sW,
const int pH, const int pW,
const int dH, const int dW,
const int poolingMode, const int extraParam0) {
pooling2dBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0);
}
//////////////////////////////////////////////////////////////////////////
void ConvolutionUtils::pooling2dBP(sd::graph::Context& block, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int poolingMode, const int extraParam0) {
// initial zeroing of gradI
gradI.nullify();
PointersManager manager(block.launchContext(), "pooling2dBP");
const int threadsPerBlock = 256;
const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = gradO.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&gradI}, {&input, &gradO});
BUILD_SINGLE_SELECTOR(input.dataType(), pooling2dBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), kH, kW, sH, sW, pH, pW, dH, dW, poolingMode, extraParam0), FLOAT_TYPES);
NDArray::registerSpecialUse({&gradI}, {&input, &gradO});
manager.synchronize();
}
}
} |
8d0038a8bd15f822f21d67d0ae7e5a9fcf10f2b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernUpSweep(int n_pot, int* data, int d)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_d = 1 << (d + 1);
int k = index * temp_d;
if (k >= n_pot) {
return;
}
int power1 = 1 << (d + 1);
int power2 = 1 << d;
data[k + power1 - 1] += data[k + power2 - 1];
}
__global__ void kernDownSweep(int n_pot, int* data, int d)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_d = 1 << (d + 1);
int k = index * temp_d;
if (k >= n_pot) {
return;
}
int power1 = 1 << (d + 1);
int power2 = 1 << d;
int t = data[k + power2 - 1];
data[k + power2 - 1] = data[k + power1 - 1];
data[k + power1 - 1] += t;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int *dev_data;
// Get power of two length
int logValue = ilog2ceil(n);
int n_pot = 1 << logValue;
// CUDA memory arrangement and error checking
hipMalloc((void**)&dev_data, n_pot * sizeof(int));
checkCUDAError("hipMalloc dev_data failed!");
hipMemset(dev_data, 0, n_pot * sizeof(int));
checkCUDAError("hipMemset dev_data failed!");
hipMemcpy(dev_data, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy from idata to dev_data failed!");
timer().startGpuTimer();
// Up-Sweep
for(int d = 0; d <= ilog2ceil(n) - 1; ++d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpSweep), dim3(blocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_data, d);
}
// Down-Sweep
hipMemset(dev_data + n_pot - 1, 0, sizeof(int));
for(int d = ilog2ceil(n) - 1; d >=0; --d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernDownSweep), dim3(blocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_data, d);
}
timer().endGpuTimer();
hipMemcpy(odata, dev_data, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy from dev_data to idata failed!");
hipFree(dev_data);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int* dev_idata;
int* dev_boolData;
int* dev_indices;
int* dev_odata;
int logValue = ilog2ceil(n);
int n_pot = 1 << logValue;
hipMalloc((void**)&dev_idata, n_pot * sizeof(int));
checkCUDAError("hipMalloc dev_idata failed!");
hipMalloc((void**)&dev_boolData, n_pot * sizeof(int));
checkCUDAError("hipMalloc dev_boolData failed!");
hipMalloc((void**)&dev_indices, n_pot * sizeof(int));
checkCUDAError("hipMalloc dev_indices failed!");
hipMemset(dev_idata, 0, n_pot * sizeof(int));
checkCUDAError("hipMemset dev_idata failed!");
hipMemcpy(dev_idata, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy from idata to dev_idata failed!");
timer().startGpuTimer();
dim3 mapBlocksPerGrid((n_pot + blockSize - 1) / blockSize);
// Compute temporary array containing 1 and 0
hipLaunchKernelGGL(( Common::kernMapToBoolean), dim3(mapBlocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_boolData, dev_idata);
hipMemcpy(dev_indices, dev_boolData, n_pot * sizeof(int), hipMemcpyDeviceToDevice);
checkCUDAError("hipMemcpy from dev_boolData to dev_indices failed!");
// Run exclusive scan on mapped array
// Up-Sweep
for(int d = 0; d <= ilog2ceil(n) - 1; ++d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernUpSweep), dim3(blocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_indices, d);
}
// Down-Sweep
hipMemset(dev_indices + n_pot - 1, 0, sizeof(int));
for(int d = ilog2ceil(n) - 1; d >=0; --d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
hipLaunchKernelGGL(( kernDownSweep), dim3(blocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_indices, d);
}
// Scatter
int arrayCount = 0;
hipMemcpy(&arrayCount, dev_indices + n_pot - 1, sizeof(int), hipMemcpyDeviceToHost);
hipMalloc((void**)&dev_odata, arrayCount * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipLaunchKernelGGL(( Common::kernScatter), dim3(mapBlocksPerGrid), dim3(blockSize), 0, 0, n_pot, dev_odata, dev_idata, dev_boolData, dev_indices);
timer().endGpuTimer();
hipMemcpy(odata, dev_odata, arrayCount * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy from dev_odata to odata failed!");
hipFree(dev_idata);
hipFree(dev_boolData);
hipFree(dev_indices);
hipFree(dev_odata);
return arrayCount;
}
}
}
| 8d0038a8bd15f822f21d67d0ae7e5a9fcf10f2b4.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
__global__ void kernUpSweep(int n_pot, int* data, int d)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_d = 1 << (d + 1);
int k = index * temp_d;
if (k >= n_pot) {
return;
}
int power1 = 1 << (d + 1);
int power2 = 1 << d;
data[k + power1 - 1] += data[k + power2 - 1];
}
__global__ void kernDownSweep(int n_pot, int* data, int d)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int temp_d = 1 << (d + 1);
int k = index * temp_d;
if (k >= n_pot) {
return;
}
int power1 = 1 << (d + 1);
int power2 = 1 << d;
int t = data[k + power2 - 1];
data[k + power2 - 1] = data[k + power1 - 1];
data[k + power1 - 1] += t;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int *dev_data;
// Get power of two length
int logValue = ilog2ceil(n);
int n_pot = 1 << logValue;
// CUDA memory arrangement and error checking
cudaMalloc((void**)&dev_data, n_pot * sizeof(int));
checkCUDAError("cudaMalloc dev_data failed!");
cudaMemset(dev_data, 0, n_pot * sizeof(int));
checkCUDAError("cudaMemset dev_data failed!");
cudaMemcpy(dev_data, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy from idata to dev_data failed!");
timer().startGpuTimer();
// Up-Sweep
for(int d = 0; d <= ilog2ceil(n) - 1; ++d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
kernUpSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_data, d);
}
// Down-Sweep
cudaMemset(dev_data + n_pot - 1, 0, sizeof(int));
for(int d = ilog2ceil(n) - 1; d >=0; --d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
kernDownSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_data, d);
}
timer().endGpuTimer();
cudaMemcpy(odata, dev_data, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy from dev_data to idata failed!");
cudaFree(dev_data);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, int *odata, const int *idata) {
int* dev_idata;
int* dev_boolData;
int* dev_indices;
int* dev_odata;
int logValue = ilog2ceil(n);
int n_pot = 1 << logValue;
cudaMalloc((void**)&dev_idata, n_pot * sizeof(int));
checkCUDAError("cudaMalloc dev_idata failed!");
cudaMalloc((void**)&dev_boolData, n_pot * sizeof(int));
checkCUDAError("cudaMalloc dev_boolData failed!");
cudaMalloc((void**)&dev_indices, n_pot * sizeof(int));
checkCUDAError("cudaMalloc dev_indices failed!");
cudaMemset(dev_idata, 0, n_pot * sizeof(int));
checkCUDAError("cudaMemset dev_idata failed!");
cudaMemcpy(dev_idata, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy from idata to dev_idata failed!");
timer().startGpuTimer();
dim3 mapBlocksPerGrid((n_pot + blockSize - 1) / blockSize);
// Compute temporary array containing 1 and 0
Common::kernMapToBoolean<<<mapBlocksPerGrid, blockSize>>>(n_pot, dev_boolData, dev_idata);
cudaMemcpy(dev_indices, dev_boolData, n_pot * sizeof(int), cudaMemcpyDeviceToDevice);
checkCUDAError("cudaMemcpy from dev_boolData to dev_indices failed!");
// Run exclusive scan on mapped array
// Up-Sweep
for(int d = 0; d <= ilog2ceil(n) - 1; ++d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
kernUpSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_indices, d);
}
// Down-Sweep
cudaMemset(dev_indices + n_pot - 1, 0, sizeof(int));
for(int d = ilog2ceil(n) - 1; d >=0; --d)
{
dim3 blocksPerGrid((n_pot / pow(2, d + 1) + blockSize - 1) / blockSize);
kernDownSweep<<<blocksPerGrid, blockSize>>>(n_pot, dev_indices, d);
}
// Scatter
int arrayCount = 0;
cudaMemcpy(&arrayCount, dev_indices + n_pot - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaMalloc((void**)&dev_odata, arrayCount * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
Common::kernScatter<<<mapBlocksPerGrid, blockSize>>>(n_pot, dev_odata, dev_idata, dev_boolData, dev_indices);
timer().endGpuTimer();
cudaMemcpy(odata, dev_odata, arrayCount * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy from dev_odata to odata failed!");
cudaFree(dev_idata);
cudaFree(dev_boolData);
cudaFree(dev_indices);
cudaFree(dev_odata);
return arrayCount;
}
}
}
|
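The `scan` above follows the classic Blelloch work-efficient pattern: an up-sweep reduction followed by a down-sweep with the root cleared. The index arithmetic in `kernUpSweep`/`kernDownSweep` is easier to check against a host-side reference of the same two phases; the sketch below is such a reference (plain host C++, power-of-two length assumed, names chosen here for illustration), not part of the files above.

```cpp
#include <cassert>
#include <vector>

// Host reference of the up-sweep/down-sweep phases used by kernUpSweep and
// kernDownSweep above. Produces an exclusive prefix sum in place; the length
// must be a power of two (the GPU code pads to n_pot for the same reason).
void blelloch_scan_reference(std::vector<int>& data) {
  const int n = static_cast<int>(data.size());
  // Up-sweep: data[k + 2^(d+1) - 1] += data[k + 2^d - 1]
  for (int stride = 1; stride < n; stride *= 2)
    for (int k = 0; k + 2 * stride - 1 < n; k += 2 * stride)
      data[k + 2 * stride - 1] += data[k + stride - 1];
  // Down-sweep: clear the root, then swap/accumulate partial sums down the tree.
  data[n - 1] = 0;
  for (int stride = n / 2; stride >= 1; stride /= 2)
    for (int k = 0; k + 2 * stride - 1 < n; k += 2 * stride) {
      const int t = data[k + stride - 1];
      data[k + stride - 1] = data[k + 2 * stride - 1];
      data[k + 2 * stride - 1] += t;
    }
}

int main() {
  std::vector<int> v = {3, 1, 7, 0, 4, 1, 6, 3};
  blelloch_scan_reference(v);
  // Exclusive scan of {3,1,7,0,4,1,6,3} is {0,3,4,11,11,15,16,22}.
  assert((v == std::vector<int>{0, 3, 4, 11, 11, 15, 16, 22}));
  return 0;
}
```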
b7967c047bb00e892eef00db7bc082bc97ae179b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgemm_reduce.cu normal z -> s, Sat Nov 15 19:53:57 2014
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ dA, int lda,
const float* __restrict__ dB, int ldb,
float beta,
float * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ float sum[BLK_K][BLK_M+1][BLK_N+1];
float lsum;
/* w := v**H * C */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
| b7967c047bb00e892eef00db7bc082bc97ae179b.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@generated from zgemm_reduce.cu normal z -> s, Sat Nov 15 19:53:57 2014
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ dA, int lda,
const float* __restrict__ dB, int ldb,
float beta,
float * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ float sum[BLK_K][BLK_M+1][BLK_N+1];
float lsum;
/* w := v**H * C */
lsum = MAGMA_S_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
magmaFloat_const_ptr dA, magma_int_t ldda,
magmaFloat_const_ptr dB, magma_int_t lddb,
float beta,
magmaFloat_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
|
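The docstring above defines the operation as C := alpha*A^T*B + beta*C with A of size k-by-m and B of size k-by-n, tuned for m, n << k, so each thread block reduces a long dot product over k. A plain host reference of the same arithmetic is sketched below; it assumes real values and tightly packed column-major storage for brevity (the MAGMA routine additionally takes leading dimensions and conjugates A for complex types), and is purely illustrative.

```cpp
// Host reference for C := alpha * A^T * B + beta * C, with
//   A: k x m, B: k x n, C: m x n, all column-major and tightly packed.
// Element (l, i) of A is A[l + i * k]; element (i, j) of C is C[i + j * m].
void gemm_reduce_reference(int m, int n, int k, float alpha,
                           const float* A, const float* B,
                           float beta, float* C) {
  for (int j = 0; j < n; ++j)
    for (int i = 0; i < m; ++i) {
      float sum = 0.0f;
      for (int l = 0; l < k; ++l)
        sum += A[l + i * k] * B[l + j * k];   // column i of A dot column j of B
      C[i + j * m] = alpha * sum + beta * C[i + j * m];
    }
}
```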
ce5debc997457e968193227eb4ccd41af6af2982.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* University of Pittsburgh
* Department of Computer Science
* CS1645: Introduction to HPC Systems
* Instructor: Xiaolong Cui
* This is a skeleton for implementing prefix sum using GPU, inspired
* by nvidia course of similar name.
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#define N 512
/*
* You should implement the parallel scan function here!
*/
__global__ void parallel_scan(float *g_odata, float *g_idata, int n) {
}
/*
* Fills an array a with n random floats.
*/
void random_floats(float* a, int n) {
float d;
// Comment out this line if you want consistent "random".
srand(time(NULL));
for (int i = 0; i < n; ++i) {
d = rand() % 8;
a[i] = ((rand() % 64) / (d > 0 ? d : 1));
}
}
/*
* Simple Serial implementation of exclusive scan.
*/
void serial_scan(float* out, float* in, int n) {
float total_sum = 0;
out[0] = 0;
for (int i = 1; i < n; i++) {
total_sum += in[i-1];
out[i] = out[i-1] + in[i-1];
}
if (total_sum != out[n-1]) {
printf("Warning: exceeding accuracy of float.\n");
}
}
/*
* This is a simple function that confirms that the output of the scan
* function matches that of a golden image (array).
*/
bool printError(float *gold_out, float *test_out, bool show_all) {
bool firstFail = true;
bool error = false;
float epislon = 0.1;
float diff = 0.0;
for (int i = 0; i < N; ++i) {
diff = abs(gold_out[i] - test_out[i]);
if ((diff > epislon) && firstFail) {
printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff);
firstFail = show_all;
error = true;
}
}
return error;
}
int main(void) {
float *in, *out, *gold_out; // host
int size = sizeof(float) * N;
in = (float *)malloc(size);
random_floats(in, N);
out = (float *)malloc(size);
gold_out = (float *)malloc(size);
// ***********
// RUN SERIAL SCAN
// ***********
serial_scan(gold_out, in, N);
// ***********
// RUN PARALLEL SCAN
// ***********
if (printError(gold_out, out, false)) {
printf("ERROR: The parallel scan function failed to produce proper output.\n");
} else {
printf("CONGRATS: The parallel scan function produced proper output.\n");
}
return 0;
}
| ce5debc997457e968193227eb4ccd41af6af2982.cu | /**
* University of Pittsburgh
* Department of Computer Science
* CS1645: Introduction to HPC Systems
* Instructor: Xiaolong Cui
* This is a skeleton for implementing prefix sum using GPU, inspired
* by nvidia course of similar name.
*/
#include <stdio.h>
#include <math.h>
#include <string.h>
#define N 512
/*
* You should implement the parallel scan function here!
*/
__global__ void parallel_scan(float *g_odata, float *g_idata, int n) {
}
/*
* Fills an array a with n random floats.
*/
void random_floats(float* a, int n) {
float d;
// Comment out this line if you want consistent "random".
srand(time(NULL));
for (int i = 0; i < n; ++i) {
d = rand() % 8;
a[i] = ((rand() % 64) / (d > 0 ? d : 1));
}
}
/*
* Simple Serial implementation of exclusive scan.
*/
void serial_scan(float* out, float* in, int n) {
float total_sum = 0;
out[0] = 0;
for (int i = 1; i < n; i++) {
total_sum += in[i-1];
out[i] = out[i-1] + in[i-1];
}
if (total_sum != out[n-1]) {
printf("Warning: exceeding accuracy of float.\n");
}
}
/*
* This is a simple function that confirms that the output of the scan
* function matches that of a golden image (array).
*/
bool printError(float *gold_out, float *test_out, bool show_all) {
bool firstFail = true;
bool error = false;
float epislon = 0.1;
float diff = 0.0;
for (int i = 0; i < N; ++i) {
diff = abs(gold_out[i] - test_out[i]);
if ((diff > epislon) && firstFail) {
printf("ERROR: gold_out[%d] = %f != test_out[%d] = %f // diff = %f \n", i, gold_out[i], i, test_out[i], diff);
firstFail = show_all;
error = true;
}
}
return error;
}
int main(void) {
float *in, *out, *gold_out; // host
int size = sizeof(float) * N;
in = (float *)malloc(size);
random_floats(in, N);
out = (float *)malloc(size);
gold_out = (float *)malloc(size);
// ***********
// RUN SERIAL SCAN
// ***********
serial_scan(gold_out, in, N);
// ***********
// RUN PARALLEL SCAN
// ***********
if (printError(gold_out, out, false)) {
printf("ERROR: The parallel scan function failed to produce proper output.\n");
} else {
printf("CONGRATS: The parallel scan function produced proper output.\n");
}
return 0;
}
|
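In the skeleton above, `parallel_scan` is intentionally left empty and `main` never launches it, so only the serial reference runs. For orientation, one possible single-block body is sketched below (CUDA form shown; it is not part of the original files). It uses a double-buffered Hillis-Steele sweep in shared memory and produces the same exclusive scan as `serial_scan` for n up to the block size; a launch would need 2 * n * sizeof(float) bytes of dynamic shared memory.

```cpp
// One possible implementation of the empty parallel_scan above: a
// double-buffered Hillis-Steele sweep in shared memory. Assumes a single
// block of n threads; the result is an exclusive scan, like serial_scan.
__global__ void parallel_scan_sketch(float* g_odata, float* g_idata, int n) {
  extern __shared__ float temp[];                 // 2 * n floats
  const int tid = threadIdx.x;
  int pout = 0, pin = 1;
  // Shift input right by one and seed with 0 to make the scan exclusive.
  temp[pout * n + tid] = (tid > 0) ? g_idata[tid - 1] : 0.0f;
  __syncthreads();
  for (int offset = 1; offset < n; offset *= 2) {
    pout = 1 - pout;                              // swap read/write buffers
    pin = 1 - pout;
    if (tid >= offset)
      temp[pout * n + tid] = temp[pin * n + tid] + temp[pin * n + tid - offset];
    else
      temp[pout * n + tid] = temp[pin * n + tid];
    __syncthreads();
  }
  g_odata[tid] = temp[pout * n + tid];
}

// Hypothetical launch from main(), after copying `in` to device memory d_in:
//   parallel_scan_sketch<<<1, N, 2 * N * sizeof(float)>>>(d_out, d_in, N);
```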
b422fa5b28fcb67a4073b90b784361bc71311b17.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/common.h"
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/mass_update.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
namespace dogm
{
__device__ float predict_free_mass(const GridCell& grid_cell, float m_occ_pred, float alpha = 0.9)
{
return min(alpha * grid_cell.free_mass, 1.0f - m_occ_pred);
}
__device__ float2 update_masses(float m_occ_pred, float m_free_pred, const MeasurementCell& meas_cell)
{
float unknown_pred = 1.0 - m_occ_pred - m_free_pred;
float meas_unknown = 1.0 - meas_cell.free_mass - meas_cell.occ_mass;
float K = m_free_pred * meas_cell.occ_mass + m_occ_pred * meas_cell.free_mass;
float occ_mass =
(m_occ_pred * meas_unknown + unknown_pred * meas_cell.occ_mass + m_occ_pred * meas_cell.occ_mass) / (1.0 - K);
float free_mass =
(m_free_pred * meas_unknown + unknown_pred * meas_cell.free_mass + m_free_pred * meas_cell.free_mass) /
(1.0 - K);
return make_float2(occ_mass, free_mass);
}
__device__ float separate_newborn_part(float m_occ_pred, float m_occ_up, float p_B)
{
return (m_occ_up * p_B * (1.0 - m_occ_pred)) / (m_occ_pred + p_B * (1.0 - m_occ_pred));
}
__device__ void store_values(float rho_b, float rho_p, float m_free_up, float m_occ_up, float m_occ_pred,
GridCell* __restrict__ grid_cell_array, int i)
{
grid_cell_array[i].pers_occ_mass = rho_p;
grid_cell_array[i].new_born_occ_mass = rho_b;
grid_cell_array[i].free_mass = m_free_up;
grid_cell_array[i].occ_mass = m_occ_up;
grid_cell_array[i].pred_occ_mass = m_occ_pred;
}
__device__ void normalize_weights(const ParticlesSoA& particle_array, float* __restrict__ weight_array, int start_idx,
int end_idx, float occ_pred)
{
for (int i = start_idx; i < end_idx + 1; i++)
{
weight_array[i] = weight_array[i] / occ_pred;
particle_array.weight[i] = weight_array[i];
}
}
__global__ void gridCellPredictionUpdateKernel(GridCell* __restrict__ grid_cell_array, ParticlesSoA particle_array,
float* __restrict__ weight_array,
const float* __restrict__ weight_array_accum,
const MeasurementCell* __restrict__ meas_cell_array,
float* __restrict__ born_masses_array, float p_B, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
int start_idx = grid_cell_array[i].start_idx;
int end_idx = grid_cell_array[i].end_idx;
if (start_idx != -1)
{
float m_occ_pred = subtract(weight_array_accum, start_idx, end_idx);
if (m_occ_pred > 1.0f)
{
// printf("Predicted mass greater 1. Mass is: %f\n", m_occ_pred);
normalize_weights(particle_array, weight_array, start_idx, end_idx, m_occ_pred);
m_occ_pred = 1.0f;
}
float m_free_pred = predict_free_mass(grid_cell_array[i], m_occ_pred);
float2 masses_up = update_masses(m_occ_pred, m_free_pred, meas_cell_array[i]);
float rho_b = separate_newborn_part(m_occ_pred, masses_up.x, p_B);
float rho_p = masses_up.x - rho_b;
born_masses_array[i] = rho_b;
// printf("Rho B: %f\n", rho_b);
store_values(rho_b, rho_p, masses_up.y, masses_up.x, m_occ_pred, grid_cell_array, i);
}
else
{
float m_occ = grid_cell_array[i].occ_mass;
float m_free = predict_free_mass(grid_cell_array[i], m_occ);
float2 masses_up = update_masses(m_occ, m_free, meas_cell_array[i]);
born_masses_array[i] = 0.0f;
store_values(0.0f, masses_up.x, masses_up.y, masses_up.x, 0.0f, grid_cell_array, i);
}
}
}
} /* namespace dogm */
| b422fa5b28fcb67a4073b90b784361bc71311b17.cu | // Copyright (c) 2020 Michael Koesel and respective contributors
// SPDX-License-Identifier: MIT
// See accompanying LICENSE file for detailed information
#include "dogm/common.h"
#include "dogm/cuda_utils.h"
#include "dogm/dogm_types.h"
#include "dogm/kernel/mass_update.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
namespace dogm
{
__device__ float predict_free_mass(const GridCell& grid_cell, float m_occ_pred, float alpha = 0.9)
{
return min(alpha * grid_cell.free_mass, 1.0f - m_occ_pred);
}
__device__ float2 update_masses(float m_occ_pred, float m_free_pred, const MeasurementCell& meas_cell)
{
float unknown_pred = 1.0 - m_occ_pred - m_free_pred;
float meas_unknown = 1.0 - meas_cell.free_mass - meas_cell.occ_mass;
float K = m_free_pred * meas_cell.occ_mass + m_occ_pred * meas_cell.free_mass;
float occ_mass =
(m_occ_pred * meas_unknown + unknown_pred * meas_cell.occ_mass + m_occ_pred * meas_cell.occ_mass) / (1.0 - K);
float free_mass =
(m_free_pred * meas_unknown + unknown_pred * meas_cell.free_mass + m_free_pred * meas_cell.free_mass) /
(1.0 - K);
return make_float2(occ_mass, free_mass);
}
__device__ float separate_newborn_part(float m_occ_pred, float m_occ_up, float p_B)
{
return (m_occ_up * p_B * (1.0 - m_occ_pred)) / (m_occ_pred + p_B * (1.0 - m_occ_pred));
}
__device__ void store_values(float rho_b, float rho_p, float m_free_up, float m_occ_up, float m_occ_pred,
GridCell* __restrict__ grid_cell_array, int i)
{
grid_cell_array[i].pers_occ_mass = rho_p;
grid_cell_array[i].new_born_occ_mass = rho_b;
grid_cell_array[i].free_mass = m_free_up;
grid_cell_array[i].occ_mass = m_occ_up;
grid_cell_array[i].pred_occ_mass = m_occ_pred;
}
__device__ void normalize_weights(const ParticlesSoA& particle_array, float* __restrict__ weight_array, int start_idx,
int end_idx, float occ_pred)
{
for (int i = start_idx; i < end_idx + 1; i++)
{
weight_array[i] = weight_array[i] / occ_pred;
particle_array.weight[i] = weight_array[i];
}
}
__global__ void gridCellPredictionUpdateKernel(GridCell* __restrict__ grid_cell_array, ParticlesSoA particle_array,
float* __restrict__ weight_array,
const float* __restrict__ weight_array_accum,
const MeasurementCell* __restrict__ meas_cell_array,
float* __restrict__ born_masses_array, float p_B, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
int start_idx = grid_cell_array[i].start_idx;
int end_idx = grid_cell_array[i].end_idx;
if (start_idx != -1)
{
float m_occ_pred = subtract(weight_array_accum, start_idx, end_idx);
if (m_occ_pred > 1.0f)
{
// printf("Predicted mass greater 1. Mass is: %f\n", m_occ_pred);
normalize_weights(particle_array, weight_array, start_idx, end_idx, m_occ_pred);
m_occ_pred = 1.0f;
}
float m_free_pred = predict_free_mass(grid_cell_array[i], m_occ_pred);
float2 masses_up = update_masses(m_occ_pred, m_free_pred, meas_cell_array[i]);
float rho_b = separate_newborn_part(m_occ_pred, masses_up.x, p_B);
float rho_p = masses_up.x - rho_b;
born_masses_array[i] = rho_b;
// printf("Rho B: %f\n", rho_b);
store_values(rho_b, rho_p, masses_up.y, masses_up.x, m_occ_pred, grid_cell_array, i);
}
else
{
float m_occ = grid_cell_array[i].occ_mass;
float m_free = predict_free_mass(grid_cell_array[i], m_occ);
float2 masses_up = update_masses(m_occ, m_free, meas_cell_array[i]);
born_masses_array[i] = 0.0f;
store_values(0.0f, masses_up.x, masses_up.y, masses_up.x, 0.0f, grid_cell_array, i);
}
}
}
} /* namespace dogm */
|
76cf26d26f08dcb8382c44839ba74b65dde9b944.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/operators/channel_shuffle_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y) {
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (offset < HxW) {
#if __CUDA_ARCH__ >= 350
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
#else
Y[(n * C + blockIdx.z) * HxW + offset] =
X[(n * C + g * K + k) * HxW + offset];
#endif
}
}
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) {
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
#if __CUDA_ARCH__ >= 350
sdata[i] = __ldg(X + offset + i);
#else
sdata[i] = X[offset + i];
#endif
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, HxW, X_data, Y_data);
} else {
const dim3 dim_grid(N, S, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, HxW, X_data, Y_data);
}
return true;
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int ndim = X.ndim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int outer_size = N * HxW;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (C <= 32) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else if (C <= 128) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else if (C <= 512) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
G, K, X_data, Y_data);
} else {
const std::array<int, 3> dims = {N * HxW, G, K};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), X_data, Y_data, &context_);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, HxW, dY_data, dX_data);
} else {
const dim3 dim_grid(N, S, C);
hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>)
, dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, HxW, dY_data, dX_data);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int ndim = dY.ndim();
const int N = dY.dim32(0);
const int C = dY.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int outer_size = N * HxW;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (C <= 32) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else if (C <= 128) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else if (C <= 512) {
hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>)
, dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
K, G, dY_data, dX_data);
} else {
const std::array<int, 3> dims = {N * HxW, K, G};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), dY_data, dX_data, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ChannelShuffleGradient,
ChannelShuffleGradientOp<float, CUDAContext>);
} // namespace caffe2
| 76cf26d26f08dcb8382c44839ba74b65dde9b944.cu | #include "caffe2/operators/channel_shuffle_op.h"
#include <array>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, bool kNFirst>
__global__ void ChannelShuffleNCHWKernel(
const int G,
const int K,
const int HxW,
const T* X,
T* Y) {
const int C = G * K;
const int n = kNFirst ? blockIdx.x : blockIdx.y;
const int s = kNFirst ? blockIdx.y : blockIdx.x;
const int g = blockIdx.z % G;
const int k = blockIdx.z / G;
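  // Output channel blockIdx.z (== k * G + g) is filled from input channel (g * K + k),
  // i.e. the (G, K) grouping of the C = G * K channels is transposed.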
const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x;
if (offset < HxW) {
#if __CUDA_ARCH__ >= 350
Y[(n * C + blockIdx.z) * HxW + offset] =
__ldg(X + (n * C + g * K + k) * HxW + offset);
#else
Y[(n * C + blockIdx.z) * HxW + offset] =
X[(n * C + g * K + k) * HxW + offset];
#endif
}
}
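// NHWC variant: each block handles one spatial position, stages its C channel values in
// shared memory, and writes them back in shuffled order.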
template <typename T, int kSharedSize>
__global__ void
ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) {
__shared__ T sdata[kSharedSize];
const int C = G * K;
const int offset = blockIdx.x * C;
for (int i = threadIdx.x; i < C; i += blockDim.x) {
#if __CUDA_ARCH__ >= 350
sdata[i] = __ldg(X + offset + i);
#else
sdata[i] = X[offset + i];
#endif
}
__syncthreads();
for (int i = threadIdx.x; i < C; i += blockDim.x) {
const int g = i % G;
const int k = i / G;
Y[offset + i] = sdata[g * K + k];
}
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int N = X.dim32(0);
const int C = X.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
ChannelShuffleNCHWKernel<float, false>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, HxW, X_data, Y_data);
} else {
const dim3 dim_grid(N, S, C);
ChannelShuffleNCHWKernel<float, true>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, HxW, X_data, Y_data);
}
return true;
}
template <>
bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X);
const int ndim = X.ndim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (X.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = X.size() / (N * C);
const int outer_size = N * HxW;
const float* X_data = X.data<float>();
float* Y_data = Y->mutable_data<float>();
if (C <= 32) {
ChannelShuffleNHWCKernel<float, 32>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else if (C <= 128) {
ChannelShuffleNHWCKernel<float, 128>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else if (C <= 512) {
ChannelShuffleNHWCKernel<float, 512>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
G, K, X_data, Y_data);
} else {
const std::array<int, 3> dims = {N * HxW, G, K};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), X_data, Y_data, &context_);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int N = dY.dim32(0);
const int C = dY.dim32(1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (N <= kCUDAGridDimMaxY) {
const dim3 dim_grid(S, N, C);
ChannelShuffleNCHWKernel<float, false>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, HxW, dY_data, dX_data);
} else {
const dim3 dim_grid(N, S, C);
ChannelShuffleNCHWKernel<float, true>
<<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, HxW, dY_data, dX_data);
}
return true;
}
template <>
bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() {
const auto& dY = Input(0);
auto* dX = Output(0);
dX->ResizeLike(dY);
const int ndim = dY.ndim();
const int N = dY.dim32(0);
const int C = dY.dim32(ndim - 1);
const int G = this->group_;
CAFFE_ENFORCE_EQ(C % G, 0);
if (dY.size() == 0) {
return true;
}
const int K = C / G;
const int HxW = dY.size() / (N * C);
const int outer_size = N * HxW;
const float* dY_data = dY.data<float>();
float* dX_data = dX->mutable_data<float>();
if (C <= 32) {
ChannelShuffleNHWCKernel<float, 32>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else if (C <= 128) {
ChannelShuffleNHWCKernel<float, 128>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else if (C <= 512) {
ChannelShuffleNHWCKernel<float, 512>
<<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
K, G, dY_data, dX_data);
} else {
const std::array<int, 3> dims = {N * HxW, K, G};
const std::array<int, 3> axes = {0, 2, 1};
math::Transpose<float, CUDAContext>(
3, dims.data(), axes.data(), dY_data, dX_data, &context_);
}
return true;
}
REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
ChannelShuffleGradient,
ChannelShuffleGradientOp<float, CUDAContext>);
} // namespace caffe2
|
3407f0ee31ee8337e1d23923d468cbfb49c1e495.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[2,2,1] --blockDim=[2,2,2]
#include "printf.h"
#define CUPRINTF cuPrintf
__global__ void testKernel(int val)
{
CUPRINTF("\tValue is:%d\n", val);
}
| 3407f0ee31ee8337e1d23923d468cbfb49c1e495.cu | //pass
//--gridDim=[2,2,1] --blockDim=[2,2,2]
#include "printf.h"
#define CUPRINTF cuPrintf
__global__ void testKernel(int val)
{
CUPRINTF("\tValue is:%d\n", val);
}
|
1abf11eda73798285e36581fff00c902b794b3d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <hip/hip_runtime.h>
void initialize(int *menacc, int *womenacc, int *menpre, int *womenlock, int n) {
int i;
for(i=0; i<=n; i++) {
menacc[i] = -1;
womenacc[i] = -1;
menpre[i] = 1;
womenlock[i] = 0;
}
}
// kernel-1 implementation
__global__ void stable_matching(int n, int *d_men, int *d_women,
int *d_menacc, int *d_womenacc, int *d_menpre, int *d_matched, int *d_womenlock) {
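    // One thread per man j (1-based). Each still-unmatched man proposes to the next woman on
    // his preference list; the per-woman spinlock d_womenlock serializes competing proposals.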
int j = threadIdx.x + 1, idx;
idx = d_men[j*(n+1) + d_menpre[j]];
if(j <= n && d_menacc[j] == -1) {
bool isSet = false;
do {
if ((isSet = (atomicCAS(&d_womenlock[idx], 0, 1) == 0))) {
if(d_womenacc[idx] == -1) {
d_womenacc[idx] = j;
d_menacc[j] = idx;
atomicAdd(d_matched, 1);
}
else if(d_women[idx*(n+1) + d_womenacc[idx]] > d_women[idx*(n+1) + j]) {
d_menacc[d_womenacc[idx]] = -1;
d_menacc[j] = idx;
d_womenacc[idx] = j;
}
}
if(isSet) {
atomicCAS(&d_womenlock[idx], 1, 0);
}
} while(!isSet);
d_menpre[j]++;
}
}
int main()
{
int n,i,j,k;
int matched=0;
int *d_matched;
int *men, *women;
int *menacc, *womenacc, *menpre, *womenlock;
int *d_men, *d_women;
int *d_menacc, *d_womenacc, *d_menpre, *d_womenlock;
clock_t beg, end;
double time_taken;
scanf("%d",&n);
men = (int *) malloc((n+1)*(n+1)*sizeof(int));
women = (int *) malloc((n+1)*(n+1)*sizeof(int));
menacc = (int *) malloc((n+1)*sizeof(int));
womenacc = (int *) malloc((n+1)*sizeof(int));
womenlock = (int *) malloc((n+1)*sizeof(int));
menpre = (int *) malloc((n+1)*sizeof(int));
hipMalloc(&d_men, (n+1)*(n+1)*sizeof(int));
hipMalloc(&d_women, (n+1)*(n+1)*sizeof(int));
hipMalloc(&d_menacc, (n+1)*sizeof(int));
hipMalloc(&d_womenacc, (n+1)*sizeof(int));
hipMalloc(&d_womenlock, (n+1)*sizeof(int));
hipMalloc(&d_menpre, (n+1)*sizeof(int));
hipMalloc(&d_matched, sizeof(int));
initialize(menacc, womenacc, menpre, womenlock, n);
beg = clock();
for(i=1; i<=n; i++) {
for(j=0; j<=n; j++) {
scanf("%d", &men[i*(n+1) + j]);
}
}
for(i=1; i<=n; i++) {
for(j=0; j<=n; j++) {
scanf("%d", &k);
women[i*(n+1) + k] = j;
}
}
end = clock();
time_taken = ((double)(end-beg) * 1000000)/CLOCKS_PER_SEC;
printf("read time : %f us, ", time_taken);
hipMemcpy(d_men, men, (n+1)*(n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_women, women, (n+1)*(n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_menacc, menacc, (n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_womenlock, womenlock, (n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_womenacc, womenacc, (n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_menpre, menpre, (n+1)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_matched, &matched, sizeof(int), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
hipEventRecord(start,0);
int ct=0;
while(matched != n) {
ct++;
hipLaunchKernelGGL(( stable_matching) , dim3(1), dim3(n) , 0, 0, n, d_men, d_women, d_menacc, d_womenacc, d_menpre, d_matched, d_womenlock);
hipMemcpy(&matched, d_matched, sizeof(int), hipMemcpyDeviceToHost);
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(menacc, d_menacc, (n+1)*sizeof(int), hipMemcpyDeviceToHost);
printf("compute time : %f us\n", milliseconds*1000);
printf("count : %d\n", ct);
for(j=1;j<=n;j++)
printf("%d %d\n", j, menacc[j]);
free(men); free(women);
free(menacc); free(womenacc); free(menpre); free(womenlock);
hipFree(d_men); hipFree(d_women); hipFree(d_matched);
hipFree(d_menacc); hipFree(d_womenacc); hipFree(d_menpre); hipFree(d_womenlock);
return 0;
}
| 1abf11eda73798285e36581fff00c902b794b3d2.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <cuda.h>
void initialize(int *menacc, int *womenacc, int *menpre, int *womenlock, int n) {
int i;
for(i=0; i<=n; i++) {
menacc[i] = -1;
womenacc[i] = -1;
menpre[i] = 1;
womenlock[i] = 0;
}
}
// kernel-1 implementation
__global__ void stable_matching(int n, int *d_men, int *d_women,
int *d_menacc, int *d_womenacc, int *d_menpre, int *d_matched, int *d_womenlock) {
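    // One thread per man j (1-based). Each still-unmatched man proposes to the next woman on
    // his preference list; the per-woman spinlock d_womenlock serializes competing proposals.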
int j = threadIdx.x + 1, idx;
idx = d_men[j*(n+1) + d_menpre[j]];
if(j <= n && d_menacc[j] == -1) {
bool isSet = false;
do {
if ((isSet = (atomicCAS(&d_womenlock[idx], 0, 1) == 0))) {
if(d_womenacc[idx] == -1) {
d_womenacc[idx] = j;
d_menacc[j] = idx;
atomicAdd(d_matched, 1);
}
else if(d_women[idx*(n+1) + d_womenacc[idx]] > d_women[idx*(n+1) + j]) {
d_menacc[d_womenacc[idx]] = -1;
d_menacc[j] = idx;
d_womenacc[idx] = j;
}
}
if(isSet) {
atomicCAS(&d_womenlock[idx], 1, 0);
}
} while(!isSet);
d_menpre[j]++;
}
}
int main()
{
int n,i,j,k;
int matched=0;
int *d_matched;
int *men, *women;
int *menacc, *womenacc, *menpre, *womenlock;
int *d_men, *d_women;
int *d_menacc, *d_womenacc, *d_menpre, *d_womenlock;
clock_t beg, end;
double time_taken;
scanf("%d",&n);
men = (int *) malloc((n+1)*(n+1)*sizeof(int));
women = (int *) malloc((n+1)*(n+1)*sizeof(int));
menacc = (int *) malloc((n+1)*sizeof(int));
womenacc = (int *) malloc((n+1)*sizeof(int));
womenlock = (int *) malloc((n+1)*sizeof(int));
menpre = (int *) malloc((n+1)*sizeof(int));
cudaMalloc(&d_men, (n+1)*(n+1)*sizeof(int));
cudaMalloc(&d_women, (n+1)*(n+1)*sizeof(int));
cudaMalloc(&d_menacc, (n+1)*sizeof(int));
cudaMalloc(&d_womenacc, (n+1)*sizeof(int));
cudaMalloc(&d_womenlock, (n+1)*sizeof(int));
cudaMalloc(&d_menpre, (n+1)*sizeof(int));
cudaMalloc(&d_matched, sizeof(int));
initialize(menacc, womenacc, menpre, womenlock, n);
beg = clock();
for(i=1; i<=n; i++) {
for(j=0; j<=n; j++) {
scanf("%d", &men[i*(n+1) + j]);
}
}
for(i=1; i<=n; i++) {
for(j=0; j<=n; j++) {
scanf("%d", &k);
women[i*(n+1) + k] = j;
}
}
end = clock();
time_taken = ((double)(end-beg) * 1000000)/CLOCKS_PER_SEC;
printf("read time : %f us, ", time_taken);
cudaMemcpy(d_men, men, (n+1)*(n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_women, women, (n+1)*(n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_menacc, menacc, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_womenlock, womenlock, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_womenacc, womenacc, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_menpre, menpre, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_matched, &matched, sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start,0);
int ct=0;
while(matched != n) {
ct++;
stable_matching <<< 1, n >>>(n, d_men, d_women, d_menacc, d_womenacc, d_menpre, d_matched, d_womenlock);
cudaMemcpy(&matched, d_matched, sizeof(int), cudaMemcpyDeviceToHost);
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(menacc, d_menacc, (n+1)*sizeof(int), cudaMemcpyDeviceToHost);
printf("compute time : %f us\n", milliseconds*1000);
printf("count : %d\n", ct);
for(j=1;j<=n;j++)
printf("%d %d\n", j, menacc[j]);
free(men); free(women);
free(menacc); free(womenacc); free(menpre); free(womenlock);
cudaFree(d_men); cudaFree(d_women); cudaFree(d_matched);
cudaFree(d_menacc); cudaFree(d_womenacc); cudaFree(d_menpre); cudaFree(d_womenlock);
return 0;
}
|
a7942adfb13dc25f3451a1fa6f9915c54b7257a2.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>
namespace at::native {
namespace {
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardshrink_cuda",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
} // namespace
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
} // namespace at::native
| a7942adfb13dc25f3451a1fa6f9915c54b7257a2.cu | #define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES
#include <ATen/native/Activation.h>
#include <cmath>
#include <thrust/tuple.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
void hardshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"hardshrink_cuda",
[&]() {
auto lambd = value.to<scalar_t>();
gpu_kernel(iter, [lambd] GPU_LAMBDA(scalar_t a) -> scalar_t {
return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
});
});
}
} // namespace
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
} // namespace at::native
|
48ecc0c0527cfe169e244b192876e78d4432292e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Run in a single block
// Calculate bilayer COM
extern "C" __global__ void computeOrigin(
const real4* __restrict__ posq,
double* origin,
double* origin_buffer,
const int* __restrict__ particles_for_origin,
const double* __restrict__ mass_for_origin
) {
int threadIndex = threadIdx.x;
origin_buffer[threadIndex] = 0.0;
// Calculate COM
for (int index=threadIndex; index<ORIGIN_NUM_ATOMS; index+=blockDim.x) {
origin_buffer[threadIndex] += mass_for_origin[index] * posq[particles_for_origin[index]].z;
}
__syncthreads();
// Do parallel reduction for origin_buffer
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
origin_buffer[threadIndex] += origin_buffer[threadIndex + stride];
}
__syncthreads();
}
__syncthreads();
if (threadIndex == 0) {
origin[0] = origin_buffer[0] / total_mass_origin;
}
}
// Run in a single block
extern "C" __global__ void computePreFFtotal(
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current
) {
int threadIndex = threadIdx.x;
// Init A_current
    // Set the A buffers to 0.0 to avoid problems in the reduction step if blockDim.x > Nq_xray or Nq_neutron
for (int i=0; i<Nq_xray; ++i){
A_real_xray_current[i*blockDim.x + threadIndex] = 0.0;
A_complex_xray_current[i*blockDim.x + threadIndex] = 0.0;
}
for (int i=0; i<Nq_neutron; ++i){
A_real_neutron_current[i*blockDim.x + threadIndex] = 0.0;
A_complex_neutron_current[i*blockDim.x + threadIndex] = 0.0;
}
}
// Calculate form factor components for the current system
extern "C" __global__ void computeFFtotal(
const real4* __restrict__ posq,
double* origin,
const float* __restrict__ box,
const int* __restrict__ is_water,
const float* __restrict__ xray_strength,
const float* __restrict__ neutron_strength,
const char* __restrict__ atom_names,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current
) {
int threadIndex = threadIdx.x;
// Now the system itself. A_real_current, A_complex_current
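    // Each atom inside the cutoff adds its cos/sin contribution into a per-threadIdx slot;
    // the slots are shared across blocks (hence the atomics) and reduced in computePostFFtotal.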
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Natoms; index+=blockDim.x * gridDim.x) {
// Wrap the coords of the atom
float z = posq[index].z - origin[0];
if (fabsf(z) > box[2]/2.0) {
if (z<0.0) {
z = z - box[2]*floorf((z - 0.5*(box[2]))/box[2]);
}
if (z>=0.0) {
z = z - box[2]*floorf((z + 0.5*(box[2]))/box[2]);
}
}
if (fabsf(z)<=cutoff[0]){
// X-ray
for (int i=0; i<Nq_xray; ++i){
atomicAdd(&A_real_xray_current[i*blockDim.x + threadIndex], xray_strength[index*Nq_xray + i] * cosf(xray_qs[i]*z));
atomicAdd(&A_complex_xray_current[i*blockDim.x + threadIndex], xray_strength[index*Nq_xray + i] * sinf(xray_qs[i]*z));
}
// Neutron
for (int i=0; i<Nq_neutron; ++i){
atomicAdd(&A_real_neutron_current[i*blockDim.x + threadIndex], neutron_strength[index*Nq_neutron + i] * cosf(neutron_qs[i]*z));
atomicAdd(&A_complex_neutron_current[i*blockDim.x + threadIndex], neutron_strength[index*Nq_neutron + i] * sinf(neutron_qs[i]*z));
}
}
}
}
// Run in a single block
extern "C" __global__ void computePostFFtotal(
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current,
double* A_real_xray_out,
double* A_real_neutron_out,
double* A_complex_xray_out,
double* A_complex_neutron_out,
double* A_sqr_xray_out,
double* A_sqr_neutron_out
) {
int threadIndex = threadIdx.x;
// Reduce to have one value for each q and calculate forces
// Reduce X-ray
for (int i=0; i<Nq_xray; ++i){
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
A_real_xray_current[threadIndex + i*blockDim.x] += A_real_xray_current[threadIndex + i*blockDim.x + stride];
A_complex_xray_current[threadIndex + i*blockDim.x] += A_complex_xray_current[threadIndex + i*blockDim.x + stride];
}
__syncthreads();
}
}
__syncthreads();
    // The reduced value for the i-th q now sits in A_real_xray_current[i*blockDim.x]
    // Copy these into the first Nq_xray elements of A_real_xray_out for later use
for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {
A_real_xray_out[index] = A_real_xray_current[index*blockDim.x];
A_complex_xray_out[index] = A_complex_xray_current[index*blockDim.x];
A_sqr_xray_out[index] = A_real_xray_current[index*blockDim.x]*A_real_xray_current[index*blockDim.x] + A_complex_xray_current[index*blockDim.x]*A_complex_xray_current[index*blockDim.x];
}
// Reduce neutron
for (int i=0; i<Nq_neutron; ++i){
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
A_real_neutron_current[threadIndex + i*blockDim.x] += A_real_neutron_current[threadIndex + i*blockDim.x + stride];
A_complex_neutron_current[threadIndex + i*blockDim.x] += A_complex_neutron_current[threadIndex + i*blockDim.x + stride];
}
__syncthreads();
}
}
__syncthreads();
    // The reduced value for the i-th q now sits in A_real_neutron_current[i*blockDim.x]
    // Copy these into the first Nq_neutron elements of A_real_neutron_out for later use
for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {
A_real_neutron_out[index] = A_real_neutron_current[index*blockDim.x];
A_complex_neutron_out[index] = A_complex_neutron_current[index*blockDim.x];
A_sqr_neutron_out[index] = A_real_neutron_current[index*blockDim.x]*A_real_neutron_current[index*blockDim.x] + A_complex_neutron_current[index*blockDim.x]*A_complex_neutron_current[index*blockDim.x];
}
}
// Here we precalculate B_real/B_sqr
extern "C" __global__ void computePreForce(
const real4* __restrict__ posq,
const float* __restrict__ box,
const float* __restrict__ xray_h_strength,
const float* __restrict__ xray_o_strength,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ neutron_o_strength,
const float* __restrict__ neutron_h_strength,
const float* __restrict__ neutron_d_strength,
const double* __restrict__ w_dens,
const double* __restrict__ w_dens_sqr,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* B_real_xray_current,
double* B_real_neutron_current,
double* B_sqr_xray_current,
double* B_sqr_neutron_current
) {
int threadIndex = threadIdx.x;
// Get B_real
// Xray
for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {
const float wXrayStrength = 2.0 * xray_h_strength[index] * (1.0 + (-0.48)*exp(-xray_qs[index]*xray_qs[index]/(2*0.22*0.22))) +
xray_o_strength[index] * (1.0 + 0.12*exp(-xray_qs[index]*xray_qs[index]/(2*0.22*0.22)));
B_real_xray_current[index] = 2.0 * w_dens[0] * box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index];
B_sqr_xray_current[index] = w_dens_sqr[0] * (2.0*box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index])
* (2.0*box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index]);
}
// Neutron
for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {
const double w_neutr_scatt_streng = neutron_o_strength[0] + 2.0*(d_parts[index] * neutron_d_strength[0] + (1.0 - d_parts[index]) * neutron_h_strength[0]);
B_real_neutron_current[index] = 2.0 * w_dens[0] * box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index];
B_sqr_neutron_current[index] = w_dens_sqr[0] * (2.0*box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index])
* (2.0*box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index]);
}
}
// Calculate force
extern "C" __global__ void computeForce(
const real4* __restrict__ posq,
const float* __restrict__ alpha,
double* origin,
const double* __restrict__ k_xray,
const double* __restrict__ k_neutron,
const double* __restrict__ T,
const float* __restrict__ box,
const int* __restrict__ is_water,
const float* __restrict__ xray_strength,
const float* __restrict__ neutron_strength,
const char* __restrict__ atom_names,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* B_real_xray_current,
double* B_real_neutron_current,
double* A_real_xray_out,
double* A_real_neutron_out,
double* A_complex_xray_out,
double* A_complex_neutron_out,
double* F_total_xray,
double* F_total_neutron,
const int* __restrict__ particles,
const double* __restrict__ F_exp_xray,
const double* __restrict__ F_exp_neutron,
const double* __restrict__ delta_F_exp_xray,
const double* __restrict__ delta_F_exp_neutron,
unsigned long long* __restrict__ forceBuffer
) {
int threadIndex = threadIdx.x;
    const double const_xray = (-2.0) * alpha[0] * k_xray[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_xray); // k_Boltzmann (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
    const double const_neutron = (-2.0) * alpha[0] * k_neutron[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_neutron); // k_Boltzmann (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
for (int index=blockIdx.x * blockDim.x + threadIndex; index<particles_size; index+=blockDim.x * gridDim.x) {
// Wrap the coords of the atom
float z = posq[particles[index]].z - origin[0];
if (fabsf(z) > box[2]/2.0) {
if (z<0.0) {
z = z - box[2]*floorf((z - 0.5*(box[2]))/box[2]);
}
if (z>=0.0) {
z = z - box[2]*floorf((z + 0.5*(box[2]))/box[2]);
}
}
if (fabsf(z)<=cutoff[0]){
double scatt_streng_for_force;
// Collect force for the atom number particles[index]
double force = 0.0;
// X-ray
for (int i=0; i<Nq_xray; ++i){
scatt_streng_for_force = xray_strength[particles[index]*Nq_xray + i];
force += const_xray * ((F_total_xray[i] - F_exp_xray[i]) / (delta_F_exp_xray[i]*delta_F_exp_xray[i])) *
((1.0/F_total_xray[i]) * scatt_streng_for_force * xray_qs[i] *
(-sinf(xray_qs[i]*z)*(A_real_xray_out[i] - B_real_xray_current[i]) + cosf(xray_qs[i]*z)*A_complex_xray_out[i]));
}
// Neutron
for (int i=0; i<Nq_neutron; ++i){
scatt_streng_for_force = neutron_strength[particles[index]*Nq_neutron + i];
force += const_neutron * ((F_total_neutron[i] - F_exp_neutron[(i)]) / (delta_F_exp_neutron[i]*delta_F_exp_neutron[i])) *
((1.0/F_total_neutron[i]) * scatt_streng_for_force * neutron_qs[i] *
(-sinf(neutron_qs[i]*z)*(A_real_neutron_out[i] - B_real_neutron_current[i]) + cosf(neutron_qs[i]*z)*A_complex_neutron_out[i]));
}
// Add the result
atomicAdd(&forceBuffer[particles[index]+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long)(force*0x100000000)));
}
}
}
// Calculate energy
extern "C" __global__ void computeEnergy(
const float* __restrict__ alpha,
float* energy_buffer,
const double* __restrict__ k_xray,
const double* __restrict__ k_neutron,
const double* __restrict__ T,
double* F_total_xray,
double* F_total_neutron,
const double* __restrict__ F_exp_xray,
const double* __restrict__ F_exp_neutron,
const double* __restrict__ delta_F_exp_xray,
const double* __restrict__ delta_F_exp_neutron,
real* __restrict__ energyBuffer
) {
int threadIndex = threadIdx.x;
// Compute energy
// Zero out
energy_buffer[threadIndex] = 0.0;
    for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {// k_B (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
energy_buffer[threadIndex] += alpha[0] * k_xray[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_xray) * ((F_total_xray[index] - F_exp_xray[index])*(F_total_xray[index] - F_exp_xray[index]) / (delta_F_exp_xray[index]*delta_F_exp_xray[index]));
}
__syncthreads();
    for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {// k_B (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
energy_buffer[threadIndex] += alpha[0] * k_neutron[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_neutron) * ((F_total_neutron[index] - F_exp_neutron[index])*(F_total_neutron[index] - F_exp_neutron[index]) / (delta_F_exp_neutron[index]*delta_F_exp_neutron[index]));
}
__syncthreads();
// Reduce energy
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
energy_buffer[threadIndex] += energy_buffer[threadIndex + stride];
}
__syncthreads();
}
if (threadIndex == 0) {
energyBuffer[0] += energy_buffer[0];
}
}
// This kernel is used if we set on_gpu flag to true for updateParametersInContext(context, on_gpu)
// If on_gpu is set to false we do the form factor calculation on the host
extern "C" __global__ void computeGlobalFtotal(
const double* __restrict__ A_real_xray_out,
const double* __restrict__ A_complex_xray_out,
const double* __restrict__ A_sqr_xray_out,
const double* __restrict__ A_real_neutron_out,
const double* __restrict__ A_complex_neutron_out,
const double* __restrict__ A_sqr_neutron_out,
const double* __restrict__ B_real_xray_global,
const double* __restrict__ B_sqr_xray_global,
const double* __restrict__ B_real_neutron_global,
const double* __restrict__ B_sqr_neutron_global,
double* F_total_xray,
double* F_total_neutron
) {
int threadIndex = threadIdx.x;
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Nq_xray; index+=blockDim.x * gridDim.x) {
double intens = A_real_xray_out[index]*A_real_xray_out[index] + A_complex_xray_out[index]*A_complex_xray_out[index]
+ B_real_xray_global[index]*B_real_xray_global[index] - 2.0*A_real_xray_out[index]*B_real_xray_global[index]
+ A_sqr_xray_out[index] - A_real_xray_out[index]*A_real_xray_out[index] - A_complex_xray_out[index]*A_complex_xray_out[index]
- B_sqr_xray_global[index] + B_real_xray_global[index]*B_real_xray_global[index];
F_total_xray[index] = copysignf(1.0, intens)*sqrt(fabs(intens));
}
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Nq_neutron; index+=blockDim.x * gridDim.x) {
double intens = A_real_neutron_out[index]*A_real_neutron_out[index] + A_complex_neutron_out[index]*A_complex_neutron_out[index]
+ B_real_neutron_global[index]*B_real_neutron_global[index] - 2.0*A_real_neutron_out[index]*B_real_neutron_global[index]
+ A_sqr_neutron_out[index] - A_real_neutron_out[index]*A_real_neutron_out[index] - A_complex_neutron_out[index]*A_complex_neutron_out[index]
- B_sqr_neutron_global[index] + B_real_neutron_global[index]*B_real_neutron_global[index];
F_total_neutron[index] = copysignf(1.0, intens)*sqrt(fabs(intens));
}
__syncthreads();
}
| 48ecc0c0527cfe169e244b192876e78d4432292e.cu | #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
__device__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Run in a single block
// Calculate bilayer COM
extern "C" __global__ void computeOrigin(
const real4* __restrict__ posq,
double* origin,
double* origin_buffer,
const int* __restrict__ particles_for_origin,
const double* __restrict__ mass_for_origin
) {
int threadIndex = threadIdx.x;
origin_buffer[threadIndex] = 0.0;
// Calculate COM
for (int index=threadIndex; index<ORIGIN_NUM_ATOMS; index+=blockDim.x) {
origin_buffer[threadIndex] += mass_for_origin[index] * posq[particles_for_origin[index]].z;
}
__syncthreads();
// Do parallel reduction for origin_buffer
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
origin_buffer[threadIndex] += origin_buffer[threadIndex + stride];
}
__syncthreads();
}
__syncthreads();
if (threadIndex == 0) {
origin[0] = origin_buffer[0] / total_mass_origin;
}
}
// Run in a single block
extern "C" __global__ void computePreFFtotal(
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current
) {
int threadIndex = threadIdx.x;
// Init A_current
    // Set the A buffers to 0.0 to avoid problems in the reduction step if blockDim.x > Nq_xray or Nq_neutron
for (int i=0; i<Nq_xray; ++i){
A_real_xray_current[i*blockDim.x + threadIndex] = 0.0;
A_complex_xray_current[i*blockDim.x + threadIndex] = 0.0;
}
for (int i=0; i<Nq_neutron; ++i){
A_real_neutron_current[i*blockDim.x + threadIndex] = 0.0;
A_complex_neutron_current[i*blockDim.x + threadIndex] = 0.0;
}
}
// Calculate form factor components for the current system
extern "C" __global__ void computeFFtotal(
const real4* __restrict__ posq,
double* origin,
const float* __restrict__ box,
const int* __restrict__ is_water,
const float* __restrict__ xray_strength,
const float* __restrict__ neutron_strength,
const char* __restrict__ atom_names,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current
) {
int threadIndex = threadIdx.x;
// Now the system itself. A_real_current, A_complex_current
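    // Each atom inside the cutoff adds its cos/sin contribution into a per-threadIdx slot;
    // the slots are shared across blocks (hence the atomics) and reduced in computePostFFtotal.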
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Natoms; index+=blockDim.x * gridDim.x) {
// Wrap the coords of the atom
float z = posq[index].z - origin[0];
if (fabsf(z) > box[2]/2.0) {
if (z<0.0) {
z = z - box[2]*floorf((z - 0.5*(box[2]))/box[2]);
}
if (z>=0.0) {
z = z - box[2]*floorf((z + 0.5*(box[2]))/box[2]);
}
}
if (fabsf(z)<=cutoff[0]){
// X-ray
for (int i=0; i<Nq_xray; ++i){
atomicAdd(&A_real_xray_current[i*blockDim.x + threadIndex], xray_strength[index*Nq_xray + i] * cosf(xray_qs[i]*z));
atomicAdd(&A_complex_xray_current[i*blockDim.x + threadIndex], xray_strength[index*Nq_xray + i] * sinf(xray_qs[i]*z));
}
// Neutron
for (int i=0; i<Nq_neutron; ++i){
atomicAdd(&A_real_neutron_current[i*blockDim.x + threadIndex], neutron_strength[index*Nq_neutron + i] * cosf(neutron_qs[i]*z));
atomicAdd(&A_complex_neutron_current[i*blockDim.x + threadIndex], neutron_strength[index*Nq_neutron + i] * sinf(neutron_qs[i]*z));
}
}
}
}
// Run in a single block
extern "C" __global__ void computePostFFtotal(
double* A_real_xray_current,
double* A_real_neutron_current,
double* A_complex_xray_current,
double* A_complex_neutron_current,
double* A_real_xray_out,
double* A_real_neutron_out,
double* A_complex_xray_out,
double* A_complex_neutron_out,
double* A_sqr_xray_out,
double* A_sqr_neutron_out
) {
int threadIndex = threadIdx.x;
// Reduce to have one value for each q and calculate forces
// Reduce X-ray
for (int i=0; i<Nq_xray; ++i){
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
A_real_xray_current[threadIndex + i*blockDim.x] += A_real_xray_current[threadIndex + i*blockDim.x + stride];
A_complex_xray_current[threadIndex + i*blockDim.x] += A_complex_xray_current[threadIndex + i*blockDim.x + stride];
}
__syncthreads();
}
}
__syncthreads();
    // The reduced value for the i-th q now sits in A_real_xray_current[i*blockDim.x]
    // Copy these into the first Nq_xray elements of A_real_xray_out for later use
for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {
A_real_xray_out[index] = A_real_xray_current[index*blockDim.x];
A_complex_xray_out[index] = A_complex_xray_current[index*blockDim.x];
A_sqr_xray_out[index] = A_real_xray_current[index*blockDim.x]*A_real_xray_current[index*blockDim.x] + A_complex_xray_current[index*blockDim.x]*A_complex_xray_current[index*blockDim.x];
}
// Reduce neutron
for (int i=0; i<Nq_neutron; ++i){
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
A_real_neutron_current[threadIndex + i*blockDim.x] += A_real_neutron_current[threadIndex + i*blockDim.x + stride];
A_complex_neutron_current[threadIndex + i*blockDim.x] += A_complex_neutron_current[threadIndex + i*blockDim.x + stride];
}
__syncthreads();
}
}
__syncthreads();
    // The reduced value for the i-th q now sits in A_real_neutron_current[i*blockDim.x]
    // Copy these into the first Nq_neutron elements of A_real_neutron_out for later use
for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {
A_real_neutron_out[index] = A_real_neutron_current[index*blockDim.x];
A_complex_neutron_out[index] = A_complex_neutron_current[index*blockDim.x];
A_sqr_neutron_out[index] = A_real_neutron_current[index*blockDim.x]*A_real_neutron_current[index*blockDim.x] + A_complex_neutron_current[index*blockDim.x]*A_complex_neutron_current[index*blockDim.x];
}
}
// Here we precalculate B_real/B_sqr
extern "C" __global__ void computePreForce(
const real4* __restrict__ posq,
const float* __restrict__ box,
const float* __restrict__ xray_h_strength,
const float* __restrict__ xray_o_strength,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ neutron_o_strength,
const float* __restrict__ neutron_h_strength,
const float* __restrict__ neutron_d_strength,
const double* __restrict__ w_dens,
const double* __restrict__ w_dens_sqr,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* B_real_xray_current,
double* B_real_neutron_current,
double* B_sqr_xray_current,
double* B_sqr_neutron_current
) {
int threadIndex = threadIdx.x;
// Get B_real
// Xray
for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {
const float wXrayStrength = 2.0 * xray_h_strength[index] * (1.0 + (-0.48)*exp(-xray_qs[index]*xray_qs[index]/(2*0.22*0.22))) +
xray_o_strength[index] * (1.0 + 0.12*exp(-xray_qs[index]*xray_qs[index]/(2*0.22*0.22)));
B_real_xray_current[index] = 2.0 * w_dens[0] * box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index];
B_sqr_xray_current[index] = w_dens_sqr[0] * (2.0*box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index])
* (2.0*box[0] * box[1] * wXrayStrength * sinf(xray_qs[index]*cutoff[0]) / xray_qs[index]);
}
// Neutron
for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {
const double w_neutr_scatt_streng = neutron_o_strength[0] + 2.0*(d_parts[index] * neutron_d_strength[0] + (1.0 - d_parts[index]) * neutron_h_strength[0]);
B_real_neutron_current[index] = 2.0 * w_dens[0] * box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index];
B_sqr_neutron_current[index] = w_dens_sqr[0] * (2.0*box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index])
* (2.0*box[0] * box[1] * w_neutr_scatt_streng * sinf(neutron_qs[index]*cutoff[0]) / neutron_qs[index]);
}
}
// Calculate force
extern "C" __global__ void computeForce(
const real4* __restrict__ posq,
const float* __restrict__ alpha,
double* origin,
const double* __restrict__ k_xray,
const double* __restrict__ k_neutron,
const double* __restrict__ T,
const float* __restrict__ box,
const int* __restrict__ is_water,
const float* __restrict__ xray_strength,
const float* __restrict__ neutron_strength,
const char* __restrict__ atom_names,
const float* __restrict__ xray_qs,
const float* __restrict__ neutron_qs,
const float* __restrict__ cutoff,
const float* __restrict__ d_parts,
double* B_real_xray_current,
double* B_real_neutron_current,
double* A_real_xray_out,
double* A_real_neutron_out,
double* A_complex_xray_out,
double* A_complex_neutron_out,
double* F_total_xray,
double* F_total_neutron,
const int* __restrict__ particles,
const double* __restrict__ F_exp_xray,
const double* __restrict__ F_exp_neutron,
const double* __restrict__ delta_F_exp_xray,
const double* __restrict__ delta_F_exp_neutron,
unsigned long long* __restrict__ forceBuffer
) {
int threadIndex = threadIdx.x;
    const double const_xray = (-2.0) * alpha[0] * k_xray[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_xray); // k_Boltzmann (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
    const double const_neutron = (-2.0) * alpha[0] * k_neutron[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_neutron); // k_Boltzmann (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
for (int index=blockIdx.x * blockDim.x + threadIndex; index<particles_size; index+=blockDim.x * gridDim.x) {
// Wrap the coords of the atom
float z = posq[particles[index]].z - origin[0];
if (fabsf(z) > box[2]/2.0) {
if (z<0.0) {
z = z - box[2]*floorf((z - 0.5*(box[2]))/box[2]);
}
if (z>=0.0) {
z = z - box[2]*floorf((z + 0.5*(box[2]))/box[2]);
}
}
if (fabsf(z)<=cutoff[0]){
double scatt_streng_for_force;
// Collect force for the atom number particles[index]
double force = 0.0;
// X-ray
for (int i=0; i<Nq_xray; ++i){
scatt_streng_for_force = xray_strength[particles[index]*Nq_xray + i];
force += const_xray * ((F_total_xray[i] - F_exp_xray[i]) / (delta_F_exp_xray[i]*delta_F_exp_xray[i])) *
((1.0/F_total_xray[i]) * scatt_streng_for_force * xray_qs[i] *
(-sinf(xray_qs[i]*z)*(A_real_xray_out[i] - B_real_xray_current[i]) + cosf(xray_qs[i]*z)*A_complex_xray_out[i]));
}
// Neutron
for (int i=0; i<Nq_neutron; ++i){
scatt_streng_for_force = neutron_strength[particles[index]*Nq_neutron + i];
force += const_neutron * ((F_total_neutron[i] - F_exp_neutron[(i)]) / (delta_F_exp_neutron[i]*delta_F_exp_neutron[i])) *
((1.0/F_total_neutron[i]) * scatt_streng_for_force * neutron_qs[i] *
(-sinf(neutron_qs[i]*z)*(A_real_neutron_out[i] - B_real_neutron_current[i]) + cosf(neutron_qs[i]*z)*A_complex_neutron_out[i]));
}
// Add the result
atomicAdd(&forceBuffer[particles[index]+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long)(force*0x100000000)));
}
}
}
// Calculate energy
extern "C" __global__ void computeEnergy(
const float* __restrict__ alpha,
float* energy_buffer,
const double* __restrict__ k_xray,
const double* __restrict__ k_neutron,
const double* __restrict__ T,
double* F_total_xray,
double* F_total_neutron,
const double* __restrict__ F_exp_xray,
const double* __restrict__ F_exp_neutron,
const double* __restrict__ delta_F_exp_xray,
const double* __restrict__ delta_F_exp_neutron,
real* __restrict__ energyBuffer
) {
int threadIndex = threadIdx.x;
// Compute energy
// Zero out
energy_buffer[threadIndex] = 0.0;
    for (int index=threadIndex; index<Nq_xray; index+=blockDim.x) {// k_B (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
energy_buffer[threadIndex] += alpha[0] * k_xray[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_xray) * ((F_total_xray[index] - F_exp_xray[index])*(F_total_xray[index] - F_exp_xray[index]) / (delta_F_exp_xray[index]*delta_F_exp_xray[index]));
}
__syncthreads();
    for (int index=threadIndex; index<Nq_neutron; index+=blockDim.x) {// k_B (J*K^-1) * N_Avogadro (mol^-1) * 1e-3 (J->kJ) = 0.0083144621 (kJ/(mol*K))
energy_buffer[threadIndex] += alpha[0] * k_neutron[0]* T[0] * (1.380658e-23 * 6.0221367e23 /1e3) * (1.0/Nq_neutron) * ((F_total_neutron[index] - F_exp_neutron[index])*(F_total_neutron[index] - F_exp_neutron[index]) / (delta_F_exp_neutron[index]*delta_F_exp_neutron[index]));
}
__syncthreads();
// Reduce energy
for (unsigned int stride=blockDim.x/2; stride>0; stride>>=1) {
if (threadIndex < stride) {
energy_buffer[threadIndex] += energy_buffer[threadIndex + stride];
}
__syncthreads();
}
if (threadIndex == 0) {
energyBuffer[0] += energy_buffer[0];
}
}
// This kernel is used if we set on_gpu flag to true for updateParametersInContext(context, on_gpu)
// If on_gpu is set to false we do the form factor calculation on the host
extern "C" __global__ void computeGlobalFtotal(
const double* __restrict__ A_real_xray_out,
const double* __restrict__ A_complex_xray_out,
const double* __restrict__ A_sqr_xray_out,
const double* __restrict__ A_real_neutron_out,
const double* __restrict__ A_complex_neutron_out,
const double* __restrict__ A_sqr_neutron_out,
const double* __restrict__ B_real_xray_global,
const double* __restrict__ B_sqr_xray_global,
const double* __restrict__ B_real_neutron_global,
const double* __restrict__ B_sqr_neutron_global,
double* F_total_xray,
double* F_total_neutron
) {
int threadIndex = threadIdx.x;
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Nq_xray; index+=blockDim.x * gridDim.x) {
double intens = A_real_xray_out[index]*A_real_xray_out[index] + A_complex_xray_out[index]*A_complex_xray_out[index]
+ B_real_xray_global[index]*B_real_xray_global[index] - 2.0*A_real_xray_out[index]*B_real_xray_global[index]
+ A_sqr_xray_out[index] - A_real_xray_out[index]*A_real_xray_out[index] - A_complex_xray_out[index]*A_complex_xray_out[index]
- B_sqr_xray_global[index] + B_real_xray_global[index]*B_real_xray_global[index];
F_total_xray[index] = copysignf(1.0, intens)*sqrt(fabs(intens));
}
for (int index=blockIdx.x * blockDim.x + threadIndex; index<Nq_neutron; index+=blockDim.x * gridDim.x) {
double intens = A_real_neutron_out[index]*A_real_neutron_out[index] + A_complex_neutron_out[index]*A_complex_neutron_out[index]
+ B_real_neutron_global[index]*B_real_neutron_global[index] - 2.0*A_real_neutron_out[index]*B_real_neutron_global[index]
+ A_sqr_neutron_out[index] - A_real_neutron_out[index]*A_real_neutron_out[index] - A_complex_neutron_out[index]*A_complex_neutron_out[index]
- B_sqr_neutron_global[index] + B_real_neutron_global[index]*B_real_neutron_global[index];
F_total_neutron[index] = copysignf(1.0, intens)*sqrt(fabs(intens));
}
__syncthreads();
}
|
2d9b2dd43c36e2bf3520dd5855585d7aee42bb24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_hog.hpp"
__constant__ const float gpu_boundary_x[NUM_SECTOR + 1] = {1.00000000, 0.939692616, 0.766044438, 0.499999970, 0.173648104,
-0.173648298, -0.500000060, -0.766044617, -0.939692676, -1.00000000};
__constant__ const float gpu_boundary_y[NUM_SECTOR + 1] = {0.000000000, 0.342020154, 0.642787635, 0.866025448, 0.984807789,
0.984807730, 0.866025388, 0.642787457, 0.342020005, -8.74227766e-008};
__constant__ int gpu_nearest[4] = {-1,-1,1,1};
__constant__ float gpu_w[8] = {0.625, 0.375, 0.875, 0.125,
0.875, 0.125 , 0.625 , 0.375};
__global__ void GetmapofHOG(uchar* in, int width, int height, int channel, float *map, int numFeatures, int stringSize){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
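// For every cell, accumulate the sum of squares of its first NUM_SECTOR orientation bins;
// the result (partOfNorm) is used for block normalization in PCANTFeatureMaps.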
__global__ void getpartOfNorm(float *partOfNorm, float *map, int sizeX){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j;
pos1 = pos2*3*p;
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
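// Normalize each cell against its four neighbouring 2x2 blocks (values truncated at 0.2) and
// reduce to the 31-dimensional feature vector: 18 contrast-sensitive bins, 9 contrast-insensitive
// bins and 4 gradient-energy components.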
__global__ void PCANTFeatureMaps(float *partOfNorm, float *map, float *newData, int sizeX, int xp0){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 );
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = p_partOfNorm * xp0 + jj;
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
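// Batched (multi-image) variants of the kernels above: blockIdx.z selects the image and the
// per-image buffers are addressed through the supplied pitch offsets.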
__global__ void MultiGetmapofHOGPitch(uchar* in0, int width, int height, int channel,
float *map0, int numFeatures, int stringSize, int pitch_in, int pitch_map){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
uchar *in = in0+blockIdx.z*pitch_in;
float *map = map0+blockIdx.z*pitch_map;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
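// MultigetpartOfNormPitch: per-cell energy used later for block normalization; sums the
// squares of the first NUM_SECTOR (contrast-insensitive) bins of each cell with a small
// shared-memory reduction.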
__global__ void MultigetpartOfNormPitch(float *partOfNorm, float *map, int sizeX,
int pitch_partOfNorm, int pitch_map){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j + pitch_partOfNorm*blockIdx.z;
pos1 = (i*sizeX+j)*3*p + pitch_map*blockIdx.z;
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
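// MultiPCANTFeatureMapsPitch: batched, pitched counterpart of PCANTFeatureMaps;
// blockIdx.z selects the image via the pitch_partOfNorm / pitch_map offsets.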
__global__ void MultiPCANTFeatureMapsPitch(float *partOfNorm, float *map, float *newData, int sizeX, int xp0,
int pitch_partOfNorm, int pitch_map){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 )+ pitch_partOfNorm*blockIdx.z;
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = (i1*(sizeX + 2) + j1) * xp0 + jj + pitch_map*blockIdx.z;
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2+ pitch_partOfNorm*blockIdx.z*12*xp;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
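// The remaining Multi* kernels are the non-pitched batched variants: image blockIdx.z is
// addressed through page offsets derived from page_partOfNorm instead of explicit pitches.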
__global__ void MultiGetmapofHOG(uchar* in0, int width, int height, int channel,
float *map0, int numFeatures, int stringSize, int page_partOfNorm){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
uchar *in = in0+blockIdx.z*dw*height;
float *map = map0+blockIdx.z*page_partOfNorm*31;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
__global__ void MultigetpartOfNorm(float *partOfNorm, float *map, int sizeX, int page_partOfNorm){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j + page_partOfNorm*blockIdx.z;
pos1 = (i*sizeX+j)*3*p + page_partOfNorm*blockIdx.z*(3*p+4);
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
__global__ void MultiPCANTFeatureMaps(float *partOfNorm, float *map, float *newData, int sizeX, int xp0, int page_partOfNorm){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 )+ page_partOfNorm*blockIdx.z;
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = (i1*(sizeX + 2) + j1) * xp0 + jj + page_partOfNorm*blockIdx.z*(3*p+4);
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2+ page_partOfNorm*blockIdx.z*12*xp;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
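// Host-side single-image pipeline: upload the image, build the raw orientation histogram
// (GetmapofHOG), compute per-cell energies (getpartOfNorm), normalize and truncate
// (PCANTFeatureMaps), then copy the (sizeX-2) x (sizeY-2) x 31 feature map back to the host.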
int getPcaHogFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMapCaskade **map, int channels)
{
int sizeX, sizeY;
int p, p0, pp, stringSize;
int height, width;
int pos, xp;
uchar *in;
float * partOfNorm; // norm of C(i, j)
float *d_map;
float * finData;
height = image->height;
width = image->width ;
sizeX = (int)width / k;
sizeY = (int)height / k;
p0 = 3 * NUM_SECTOR;
stringSize = sizeX * p0;
p = NUM_SECTOR;
xp = NUM_SECTOR * 3;
pp = NUM_SECTOR * 3 + 4;
dim3 block1((sizeX+3)/4, sizeY);
dim3 thread1(16, 4);
dim3 block2((sizeX+2)/5, sizeY-2);
dim3 thread2(10*NUM_SECTOR, 1);
dim3 block3((sizeX+6)/7, sizeY);
dim3 thread3(63, 1);
hipMalloc((void**)&in, height * width * channels * sizeof(uchar));
hipMalloc((void**)&partOfNorm, sizeof (float) * (sizeX * sizeY));
hipMalloc((void**)&d_map, sizeof (float) * (sizeX * sizeY * p0));
hipMalloc((void**)&finData, sizeof (float) * ((sizeX-2)* (sizeY-2) * NUM_SECTOR * 12));
hipMemcpy(in, image->imageData, height * width * channels * sizeof(uchar), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GetmapofHOG), dim3(block1), dim3(thread1) , 0, 0, in, width, height, channels, d_map, p0, stringSize);
hipDeviceSynchronize();
hipLaunchKernelGGL(( getpartOfNorm), dim3(block3), dim3(thread3) , 0, 0, partOfNorm, d_map, sizeX);
hipLaunchKernelGGL(( PCANTFeatureMaps), dim3(block2), dim3(thread2) , 0, 0, partOfNorm, d_map, finData, sizeX-2, xp);
hipDeviceSynchronize();
allocFeatureMapObject(map, sizeX-2, sizeY-2, pp);
hipMemcpy((*map)->map, finData, sizeof (float) * ((sizeX-2)* (sizeY-2)* pp), hipMemcpyDeviceToHost);
hipFree(in);
hipFree(d_map);
hipFree(partOfNorm);
hipFree(finData);
return LATENT_SVM_OK;
}
/*
PCAHOGMaps::PCAHOGMaps(cv::Size _tmpl_sz){
p0 = 3 * NUM_SECTOR;
p = NUM_SECTOR;
xp = NUM_SECTOR * 3;
pp = NUM_SECTOR * 3 + 4;
height = _tmpl_sz.height;
width = _tmpl_sz.width ;
sizeX = (int)width / 4;
sizeY = (int)height / 4;
stringSize = sizeX * p0;
block1 = dim3((sizeX+3)/4, sizeY);
thread1 = dim3(16, 4);
block2 = dim3((sizeX+2)/5, sizeY-2);
thread2 = dim3(10*NUM_SECTOR, 1);
block3 = dim3((sizeX+6)/7, sizeY);
thread3 = dim3(63, 1);
hipMalloc((void**)&in, height * width * 3 * sizeof(uchar));
hipMalloc((void**)&d_map, sizeof (float) * (sizeX * sizeY * NUM_SECTOR*12));
hipMalloc((void**)&partOfNorm, sizeof (float) * (sizeX * sizeY));
hipMalloc((void**)&finData, sizeof (float) * (sizeX * sizeY * NUM_SECTOR * 12));
}
using namespace KCFTracker
{
int KCFTracker::getMaps(const IplImage* image, const int k, CvLSVMFeatureMapCaskade **map, int channels){
hipMemcpy(in, image->imageData, height * width * channels * sizeof(uchar), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( GetmapofHOG), dim3(block1), dim3(thread1) , 0, 0, in, width, height, channels, d_map, p0, stringSize);
hipDeviceSynchronize();
hipLaunchKernelGGL(( getpartOfNorm), dim3(block3), dim3(thread3) , 0, 0, partOfNorm, d_map, sizeX);
hipLaunchKernelGGL(( PCANTFeatureMaps), dim3(block2), dim3(thread2) , 0, 0, partOfNorm, d_map, finData, sizeX-2, xp);
hipDeviceSynchronize();
allocFeatureMapObject(map, sizeX-2, sizeY-2, pp);
hipMemcpy((*map)->map, finData, sizeof (float) * ((sizeX-2)* (sizeY-2)* pp), hipMemcpyDeviceToHost);
return LATENT_SVM_OK;
}
KCFTracker::~KCFTracker(){
hipFree(in);
hipFree(d_map);
hipFree(partOfNorm);
hipFree(finData);
}
}*/ | 2d9b2dd43c36e2bf3520dd5855585d7aee42bb24.cu | #include "gpu_hog.hpp"
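// Constant tables for the HOG kernels: gpu_boundary_x / gpu_boundary_y are the cosines and
// sines of the NUM_SECTOR orientation-sector boundaries (20-degree steps), gpu_nearest maps
// an intra-cell position to its neighbouring cell offset, and gpu_w holds the bilinear
// interpolation weights for the 4x4 cells.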
__constant__ const float gpu_boundary_x[NUM_SECTOR + 1] = {1.00000000, 0.939692616, 0.766044438, 0.499999970, 0.173648104,
-0.173648298, -0.500000060, -0.766044617, -0.939692676, -1.00000000};
__constant__ const float gpu_boundary_y[NUM_SECTOR + 1] = {0.000000000, 0.342020154, 0.642787635, 0.866025448, 0.984807789,
0.984807730, 0.866025388, 0.642787457, 0.342020005, -8.74227766e-008};
__constant__ int gpu_nearest[4] = {-1,-1,1,1};
__constant__ float gpu_w[8] = {0.625, 0.375, 0.875, 0.125,
0.875, 0.125 , 0.625 , 0.375};
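// GetmapofHOG: single-image version of the batched gradient-histogram kernels above.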
__global__ void GetmapofHOG(uchar* in, int width, int height, int channel, float *map, int numFeatures, int stringSize){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
__global__ void getpartOfNorm(float *partOfNorm, float *map, int sizeX){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j;
pos1 = pos2*3*p;
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
__global__ void PCANTFeatureMaps(float *partOfNorm, float *map, float *newData, int sizeX, int xp0){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 );
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = p_partOfNorm * xp0 + jj;
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
__global__ void MultiGetmapofHOGPitch(uchar* in0, int width, int height, int channel,
float *map0, int numFeatures, int stringSize, int pitch_in, int pitch_map){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
uchar *in = in0+blockIdx.z*pitch_in;
float *map = map0+blockIdx.z*pitch_map;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
__global__ void MultigetpartOfNormPitch(float *partOfNorm, float *map, int sizeX,
int pitch_partOfNorm, int pitch_map){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j + pitch_partOfNorm*blockIdx.z;
pos1 = (i*sizeX+j)*3*p + pitch_map*blockIdx.z;
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
__global__ void MultiPCANTFeatureMapsPitch(float *partOfNorm, float *map, float *newData, int sizeX, int xp0,
int pitch_partOfNorm, int pitch_map){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 )+ pitch_partOfNorm*blockIdx.z;
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = (i1*(sizeX + 2) + j1) * xp0 + jj + pitch_map*blockIdx.z;
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2+ pitch_partOfNorm*blockIdx.z*12*xp;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
__global__ void MultiGetmapofHOG(uchar* in0, int width, int height, int channel,
float *map0, int numFeatures, int stringSize, int page_partOfNorm){
int k = 4;
int i = blockIdx.y;
int ii = threadIdx.y;
int jj = threadIdx.x%k;
int j = blockIdx.x*k+threadIdx.x/k;//x->j
int dx = j*k+jj;//blockIdx.x*blockDim.x+threadIdx.x;//j * k + jj;
int dy = i*k+ii;//i * k + ii;
int alfa0,alfa1;
int offset = (dx+dy*width)*channel;
int dw = width*channel;
float x2,y2,sqrt2;
float x3,y3,sqrt3;
float x,y,tmp_sqrt;
uchar *in = in0+blockIdx.z*dw*height;
float *map = map0+blockIdx.z*page_partOfNorm*31;
if (dy > 0 && dy < height - 1 && dx > 0 && dx < width - 1)
{
x = in[offset+3] - in[offset-3];y = in[offset+dw] - in[offset-dw];
tmp_sqrt = sqrtf(x * x + y * y);
x2 = in[offset+4] - in[offset-2];y2 = in[offset+1+dw] - in[offset+1-dw];
sqrt2 = sqrtf(x2 * x2 + y2 * y2);
x3 = in[offset+5] - in[offset-1];y3 = in[offset+2+dw] - in[offset+2-dw];
sqrt3 = sqrtf(x3 * x3 + y3 * y3);
if(sqrt2>tmp_sqrt){
tmp_sqrt = sqrt2;
x = x2;y = y2;
}
if(sqrt3>tmp_sqrt){
tmp_sqrt = sqrt3;
x = x3;y = y3;
}
float Gmax = gpu_boundary_x[0] * x + gpu_boundary_y[0] * y;
int Gmaxi = 0;
float dotProd;
for (int kk = 0; kk < NUM_SECTOR; kk++)
{
dotProd = gpu_boundary_x[kk] * x + gpu_boundary_y[kk] * y;
if (dotProd > Gmax)
{
Gmax = dotProd;
Gmaxi = kk;
}
else
{
if (-dotProd > Gmax)
{
Gmax = -dotProd;
Gmaxi = kk + NUM_SECTOR;
}
}
}
alfa0 = Gmaxi % NUM_SECTOR;
alfa1 = Gmaxi + NUM_SECTOR;
float rd = tmp_sqrt;
float *mapoffset = map+i * stringSize + j * numFeatures;
int ns = gpu_nearest[ii] * stringSize;
int nn = gpu_nearest[jj] * numFeatures;
float tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2];
atomicAdd(mapoffset + alfa0,tmp0);
atomicAdd(mapoffset + alfa1,tmp0);
int flagi = i + gpu_nearest[ii];
int flagj = j + gpu_nearest[jj];
if ((flagi >= 0) && (flagi <= gridDim.y - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 ];
atomicAdd(mapoffset + ns + alfa0,tmp0);
atomicAdd(mapoffset + ns + alfa1,tmp0);
}
if ((flagj >= 0) && (flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + nn + alfa0,tmp0);
atomicAdd(mapoffset + nn + alfa1,tmp0);
}
if ((flagi >= 0) &&
(flagi <= gridDim.y - 1) &&
(flagj >= 0) &&
(flagj <= width/4 - 1))
{
tmp0 = rd * gpu_w[ii * 2 + 1] * gpu_w[jj * 2 + 1];
atomicAdd(mapoffset + ns + nn + alfa0,tmp0);
atomicAdd(mapoffset + ns + nn + alfa1,tmp0);
}
}
}
__global__ void MultigetpartOfNorm(float *partOfNorm, float *map, int sizeX, int page_partOfNorm){
int p = NUM_SECTOR;
int jj = threadIdx.x%p;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/p;
int j = blockIdx.x*7+djj;//(j = 0; j < sizeX; j++)
int pos1, pos2;
__shared__ float val_vec[63];
pos2 = i*sizeX+j + page_partOfNorm*blockIdx.z;
pos1 = (i*sizeX+j)*3*p + page_partOfNorm*blockIdx.z*(3*p+4);
float tmp = map[pos1 + jj];
int readset = 9*djj;
int tmpoffset = readset+jj;
val_vec[tmpoffset] = tmp*tmp;
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==2){
float val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
partOfNorm[pos2] = val;
}
}
__global__ void MultiPCANTFeatureMaps(float *partOfNorm, float *map, float *newData, int sizeX, int xp0, int page_partOfNorm){
int jj = threadIdx.x%18;//for(ii = 0; ii < 2 * p; ii++)
int i = blockIdx.y;//(i = 1; i <= sizeY; i++)
int djj = threadIdx.x/18;
int j = blockIdx.x*5+djj;//(j = 0; j < sizeX; j++)
int i1 = i+1;
int j1 = j+1;
float valOfNorm;
int pos01, pos2;
int p = NUM_SECTOR;
__shared__ float val_vec[640];
int p_partOfNorm = (i1 )*(sizeX + 2) + (j1 )+ page_partOfNorm*blockIdx.z;
float p00 = partOfNorm[p_partOfNorm];
float p01 = partOfNorm[p_partOfNorm+1];
float p0i = partOfNorm[p_partOfNorm-1];
float p10 = partOfNorm[p_partOfNorm+sizeX + 2];
float pi0 = partOfNorm[p_partOfNorm-sizeX - 2];
pos01 = (i1*(sizeX + 2) + j1) * xp0 + jj + page_partOfNorm*blockIdx.z*(3*p+4);
float map1 = map[pos01 ] ;
float map2 = map[pos01 + p] ;
float nD0, nD4, nD1, nD6, nD2, nD8, nD3, nD10;
float nDMax = 0.2f;float tmp;
valOfNorm = sqrtf(p00+p01+p10+partOfNorm[p_partOfNorm+sizeX+3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD0 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD4 = tmp>nDMax?nDMax:tmp;
int tmpoffset = djj*32+jj;
val_vec[tmpoffset] = nD4;
valOfNorm = sqrtf(p00+p01+pi0+partOfNorm[p_partOfNorm-sizeX-1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD1 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD6 = tmp>nDMax?nDMax:tmp;
val_vec[160+tmpoffset] = nD6;
valOfNorm = sqrtf(p00+p0i+p10+partOfNorm[p_partOfNorm+sizeX+1]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD2 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD8 = tmp>nDMax?nDMax:tmp;
val_vec[320+tmpoffset] = nD8;
valOfNorm = sqrtf(p00+p0i+pi0+partOfNorm[p_partOfNorm-sizeX-3]) + FLT_EPSILON;
if(jj<NUM_SECTOR) {tmp = fdividef(map1, valOfNorm);nD3 = tmp>nDMax?nDMax:tmp;}
tmp = fdividef(map2, valOfNorm);nD10 = tmp>nDMax?nDMax:tmp;
val_vec[480+tmpoffset] = nD10;
int pp2 = NUM_SECTOR * 3 + 4;
int yp = 4;
int xp = NUM_SECTOR;
int k=0;
float val = 0.0f;
pos2 = ((i)*sizeX + j)*pp2+ page_partOfNorm*blockIdx.z*12*xp;
k = jj;
val = nD4+nD6+nD8+nD10;
newData[pos2 + k]= val * 0.5;
if(jj< xp)
{
k = xp * 2 + jj;
val = nD0+nD1+nD2+nD3;
newData[pos2 + k]= val * 0.5;
}
k = xp * 3;
__syncthreads();
int readset = djj*32;
for(int ii=0;ii<yp;ii++){
if(jj<9) val_vec[tmpoffset] += val_vec[tmpoffset+9];
__syncthreads();
if(jj<4) val_vec[tmpoffset] += val_vec[tmpoffset+4];
__syncthreads();
if(jj<2) val_vec[tmpoffset] += val_vec[tmpoffset+2];
__syncthreads();
if(jj==0){
val = val_vec[readset] + val_vec[readset+1] + val_vec[readset+8];
newData[pos2 + k]=val * 0.2357226;
}
k++;
tmpoffset += 160;
readset += 160;
}
}
int getPcaHogFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMapCaskade **map, int channels)
{
int sizeX, sizeY;
int p, p0, pp, stringSize;
int height, width;
int pos, xp;
uchar *in;
float * partOfNorm; // norm of C(i, j)
float *d_map;
float * finData;
height = image->height;
width = image->width ;
sizeX = (int)width / k;
sizeY = (int)height / k;
p0 = 3 * NUM_SECTOR;
stringSize = sizeX * p0;
p = NUM_SECTOR;
xp = NUM_SECTOR * 3;
pp = NUM_SECTOR * 3 + 4;
dim3 block1((sizeX+3)/4, sizeY);
dim3 thread1(16, 4);
dim3 block2((sizeX+2)/5, sizeY-2);
dim3 thread2(10*NUM_SECTOR, 1);
dim3 block3((sizeX+6)/7, sizeY);
dim3 thread3(63, 1);
cudaMalloc((void**)&in, height * width * channels * sizeof(uchar));
cudaMalloc((void**)&partOfNorm, sizeof (float) * (sizeX * sizeY));
cudaMalloc((void**)&d_map, sizeof (float) * (sizeX * sizeY * p0));
cudaMalloc((void**)&finData, sizeof (float) * ((sizeX-2)* (sizeY-2) * NUM_SECTOR * 12));
cudaMemcpy(in, image->imageData, height * width * channels * sizeof(uchar), cudaMemcpyHostToDevice);
GetmapofHOG<<< block1, thread1 >>>(in, width, height, channels, d_map, p0, stringSize);
cudaDeviceSynchronize();
getpartOfNorm<<< block3, thread3 >>>(partOfNorm, d_map, sizeX);
PCANTFeatureMaps<<< block2, thread2 >>>(partOfNorm, d_map, finData, sizeX-2, xp);
cudaDeviceSynchronize();
allocFeatureMapObject(map, sizeX-2, sizeY-2, pp);
cudaMemcpy((*map)->map, finData, sizeof (float) * ((sizeX-2)* (sizeY-2)* pp), cudaMemcpyDeviceToHost);
cudaFree(in);
cudaFree(d_map);
cudaFree(partOfNorm);
cudaFree(finData);
return LATENT_SVM_OK;
}
/*
PCAHOGMaps::PCAHOGMaps(cv::Size _tmpl_sz){
p0 = 3 * NUM_SECTOR;
p = NUM_SECTOR;
xp = NUM_SECTOR * 3;
pp = NUM_SECTOR * 3 + 4;
height = _tmpl_sz.height;
width = _tmpl_sz.width ;
sizeX = (int)width / 4;
sizeY = (int)height / 4;
stringSize = sizeX * p0;
block1 = dim3((sizeX+3)/4, sizeY);
thread1 = dim3(16, 4);
block2 = dim3((sizeX+2)/5, sizeY-2);
thread2 = dim3(10*NUM_SECTOR, 1);
block3 = dim3((sizeX+6)/7, sizeY);
thread3 = dim3(63, 1);
cudaMalloc((void**)&in, height * width * 3 * sizeof(uchar));
cudaMalloc((void**)&d_map, sizeof (float) * (sizeX * sizeY * NUM_SECTOR*12));
cudaMalloc((void**)&partOfNorm, sizeof (float) * (sizeX * sizeY));
cudaMalloc((void**)&finData, sizeof (float) * (sizeX * sizeY * NUM_SECTOR * 12));
}
using namespace KCFTracker
{
int KCFTracker::getMaps(const IplImage* image, const int k, CvLSVMFeatureMapCaskade **map, int channels){
cudaMemcpy(in, image->imageData, height * width * channels * sizeof(uchar), cudaMemcpyHostToDevice);
GetmapofHOG<<< block1, thread1 >>>(in, width, height, channels, d_map, p0, stringSize);
cudaDeviceSynchronize();
getpartOfNorm<<< block3, thread3 >>>(partOfNorm, d_map, sizeX);
PCANTFeatureMaps<<< block2, thread2 >>>(partOfNorm, d_map, finData, sizeX-2, xp);
cudaDeviceSynchronize();
allocFeatureMapObject(map, sizeX-2, sizeY-2, pp);
cudaMemcpy((*map)->map, finData, sizeof (float) * ((sizeX-2)* (sizeY-2)* pp), cudaMemcpyDeviceToHost);
return LATENT_SVM_OK;
}
KCFTracker::~KCFTracker(){
cudaFree(in);
cudaFree(d_map);
cudaFree(partOfNorm);
cudaFree(finData);
}
}*/ |
082fef1e3596d190e898511ac4b312f1d0b75ce2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/* strcpy for use on the GPU (device code) */
__device__ void
dev_strcpy(char *dst, const char *src)
{
while (*dst++ = *src++);
}
/* GPU-side entry point (kernel) */
__global__ void gen_hello(char *A)
{
dev_strcpy(A, "hello");
}
int main()
{
char *d_hello;
/* allocate host memory */
char hello[128];
/* allocate GPU-side (device) memory */
hipMalloc((void**)&d_hello, 128);
/* launch gen_hello */
hipLaunchKernelGGL(( gen_hello), dim3(1),dim3(1), 0, 0, d_hello);
/* copy the data back from the GPU */
hipMemcpy(hello, d_hello, 128, hipMemcpyDeviceToHost);
/* free the allocated memory */
hipFree(d_hello);
/* print the result */
puts(hello);
}
| 082fef1e3596d190e898511ac4b312f1d0b75ce2.cu | #include <stdio.h>
/* strcpy for use on the GPU (device code) */
__device__ void
dev_strcpy(char *dst, const char *src)
{
while (*dst++ = *src++);
}
/* GPU-side entry point (kernel) */
__global__ void gen_hello(char *A)
{
dev_strcpy(A, "hello");
}
int main()
{
char *d_hello;
/* allocate host memory */
char hello[128];
/* allocate GPU-side (device) memory */
cudaMalloc((void**)&d_hello, 128);
/* launch gen_hello */
gen_hello<<<1,1>>>(d_hello);
/* copy the data back from the GPU */
cudaMemcpy(hello, d_hello, 128, cudaMemcpyDeviceToHost);
/* free the allocated memory */
cudaFree(d_hello);
/* print the result */
puts(hello);
}
|
cd5741fb797af864d2344348685399007446d23c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//
// template kernel routine
//
template <int size>
__global__ void my_first_kernel(float *x)
{
float xl[size];
int tid = threadIdx.x + blockDim.x*blockIdx.x;
for (int i=0; i<size; i++) {
xl[i] = expf((float) i*tid);
}
float sum = 0.0f;
for (int i=0; i<size; i++) {
for (int j=0; j<size; j++) {
sum += xl[i]*xl[j];
}
}
x[tid] = sum;
}
//
// CUDA routine to be called by main code
//
extern
int prac6(int nblocks, int nthreads)
{
float *h_x, *d_x;
int nsize, n;
// allocate memory for arrays
nsize = nblocks*nthreads ;
h_x = (float *)malloc(nsize*sizeof(float));
hipMalloc((void **)&d_x, nsize*sizeof(float));
// execute kernel for size=2
hipLaunchKernelGGL(( my_first_kernel<2>), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
hipMemcpy(h_x,d_x,nsize*sizeof(float),hipMemcpyDeviceToHost);
for (n=0; n<nsize; n++) printf(" n, x = %d %g \n",n,h_x[n]);
// execute kernel for size=3
hipLaunchKernelGGL(( my_first_kernel<3>), dim3(nblocks),dim3(nthreads), 0, 0, d_x);
hipMemcpy(h_x,d_x,nsize*sizeof(float),hipMemcpyDeviceToHost);
for (n=0; n<nsize; n++) printf(" n, x = %d %g \n",n,h_x[n]);
// free memory
hipFree(d_x);
free(h_x);
return 0;
}
| cd5741fb797af864d2344348685399007446d23c.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//
// template kernel routine
//
template <int size>
__global__ void my_first_kernel(float *x)
{
float xl[size];
int tid = threadIdx.x + blockDim.x*blockIdx.x;
for (int i=0; i<size; i++) {
xl[i] = expf((float) i*tid);
}
float sum = 0.0f;
for (int i=0; i<size; i++) {
for (int j=0; j<size; j++) {
sum += xl[i]*xl[j];
}
}
x[tid] = sum;
}
//
// CUDA routine to be called by main code
//
extern
int prac6(int nblocks, int nthreads)
{
float *h_x, *d_x;
int nsize, n;
// allocate memory for arrays
nsize = nblocks*nthreads ;
h_x = (float *)malloc(nsize*sizeof(float));
cudaMalloc((void **)&d_x, nsize*sizeof(float));
// execute kernel for size=2
my_first_kernel<2><<<nblocks,nthreads>>>(d_x);
cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
for (n=0; n<nsize; n++) printf(" n, x = %d %g \n",n,h_x[n]);
// execute kernel for size=3
my_first_kernel<3><<<nblocks,nthreads>>>(d_x);
cudaMemcpy(h_x,d_x,nsize*sizeof(float),cudaMemcpyDeviceToHost);
for (n=0; n<nsize; n++) printf(" n, x = %d %g \n",n,h_x[n]);
// free memory
cudaFree(d_x);
free(h_x);
return 0;
}
|
9df4e7b8ea18ea1b291edbef19ec8f9fae41a7c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudnn.h>
#include <hip/hip_fp16.h>
#include <opencv2/opencv.hpp>
#include <stdexcept>
#include <vector>
#define cudaCheckError(status) { cudaAssert(status, __FILE__, __LINE__); }
inline auto cudaAssert(hipError_t status, const char* file, int line) -> void {
if (status != hipSuccess) {
fprintf(stderr, "CUDA API error \"%s\" at %s:%i\n", hipGetErrorString(status), file, line);
exit(EXIT_FAILURE);
}
}
#define cudnnCheckError(status) { cudnnAssert(status, __FILE__, __LINE__); }
inline auto cudnnAssert(cudnnStatus_t status, const char* file, int line) -> void {
if (status != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "CUDNN API error \"%s\" at %s:%i\n", cudnnGetErrorString(status), file, line);
exit(EXIT_FAILURE);
}
}
#define CUDA_1D_KERNEL_LOOP(idx, n) \
for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < (n); \
idx += blockDim.x * gridDim.x)
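// Element-wise precision conversion kernels; each thread converts several elements via the
// grid-stride loop macro above.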
__global__
void float2half(const float* input, std::size_t size, half* output) {
CUDA_1D_KERNEL_LOOP(idx, size) {
output[idx] = __float2half(input[idx]);
}
}
__global__
void half2float(const half* input, std::size_t size, float* output) {
CUDA_1D_KERNEL_LOOP(idx, size) {
output[idx] = __half2float(input[idx]);
}
}
auto main(int argc, const char** argv) -> int {
if (argc != 2) {
std::cout << "usage: conv2d <filename>\n";
return -1;
}
const char* filename = argv[1];
cv::Mat image = cv::imread(filename);
if (image.empty()) {
throw std::runtime_error("cv::imread() failed: image not found");
}
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0.0, 1.0, cv::NORM_MINMAX);
const std::size_t image_size = image.total() * image.channels();
cudnnHandle_t cudnn_handle;
cudnnCheckError(cudnnCreate(&cudnn_handle));
float* d_input = nullptr;
cudaCheckError(hipMalloc(&d_input, image_size * sizeof(float)));
cudaCheckError(hipMemcpy(d_input, image.ptr<float>(0), image_size * sizeof(float), hipMemcpyDefault));
half* fp16_image = nullptr;
cudaCheckError(hipMalloc(&fp16_image, image_size * sizeof(half)));
hipLaunchKernelGGL(( float2half), dim3(1), dim3(64), 0, 0, d_input, image_size, fp16_image);
hipFree(d_input);
cudnnTensorDescriptor_t input_desc;
cudnnCheckError(cudnnCreateTensorDescriptor(&input_desc));
cudnnCheckError(cudnnSetTensor4dDescriptor(
input_desc,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_HALF,
1,
image.channels(),
image.rows,
image.cols
));
cudnnFilterDescriptor_t filter_desc;
cudnnCheckError(cudnnCreateFilterDescriptor(&filter_desc));
cudnnCheckError(cudnnSetFilter4dDescriptor(
filter_desc,
CUDNN_DATA_HALF,
CUDNN_TENSOR_NCHW,
image.channels(),
image.channels(),
3,
3
));
cudnnConvolutionDescriptor_t conv_desc;
cudnnCheckError(cudnnCreateConvolutionDescriptor(&conv_desc));
cudnnCheckError(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_HALF
));
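// CUDNN_TENSOR_OP_MATH allows cuDNN to pick Tensor Core kernels for the FP16 convolution
// where the hardware supports them.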
cudnnCheckError(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
cudnnTensorDescriptor_t output_desc;
cudnnCheckError(cudnnCreateTensorDescriptor(&output_desc));
cudnnCheckError(cudnnSetTensor4dDescriptor(
output_desc,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_HALF,
1,
image.channels(),
image.rows,
image.cols
));
cudnnConvolutionFwdAlgo_t fwd_algo;
int requested_algo_count;
int algo_count;
cudnnCheckError(cudnnGetConvolutionForwardAlgorithmMaxCount(cudnn_handle, &requested_algo_count));
std::vector<cudnnConvolutionFwdAlgoPerf_t> perf_results(requested_algo_count);
cudnnCheckError(cudnnFindConvolutionForwardAlgorithm(
cudnn_handle,
input_desc,
filter_desc,
conv_desc,
output_desc,
requested_algo_count,
&algo_count,
perf_results.data()
));
fwd_algo = perf_results.front().algo;
std::size_t workspace_size = 0;
cudnnCheckError(cudnnGetConvolutionForwardWorkspaceSize(
cudnn_handle,
input_desc,
filter_desc,
conv_desc,
output_desc,
fwd_algo,
&workspace_size
));
void* d_workspace = nullptr;
cudaCheckError(hipMalloc(&d_workspace, workspace_size));
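// 3x3 Gaussian-blur kernel, replicated below for every (output, input) channel pair
// (3 x 3 channels = 9 copies).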
const std::vector<half> filter = {
0.0625, 0.125, 0.0625,
0.125, 0.25, 0.125,
0.0625, 0.125, 0.0625
};
std::vector<half> h_filter;
for (std::size_t idx = 0; idx < 3 * 3; ++idx) {
for (const auto& val : filter) {
h_filter.emplace_back(val);
}
}
half* d_filter = nullptr;
const std::size_t filter_size = h_filter.size();
cudaCheckError(hipMalloc(&d_filter, filter_size * sizeof(half)));
cudaCheckError(hipMemcpy(d_filter, h_filter.data(), filter_size * sizeof(half), hipMemcpyDefault));
half* d_output = nullptr;
cudaCheckError(hipMalloc(&d_output, image_size * sizeof(half)));
constexpr float alpha = 1.0f;
constexpr float beta = 0.0f;
cudnnCheckError(cudnnConvolutionForward(
cudnn_handle,
&alpha,
input_desc,
fp16_image,
filter_desc,
d_filter,
conv_desc,
fwd_algo,
d_workspace,
workspace_size,
&beta,
output_desc,
d_output
));
float* fp32_output = nullptr;
cudaCheckError(hipMalloc(&fp32_output, image_size * sizeof(float)));
hipLaunchKernelGGL(( half2float), dim3(1), dim3(64), 0, 0, d_output, image_size, fp32_output);
hipFree(d_output);
cv::Mat output(image.rows, image.cols, CV_32FC3);
cudaCheckError(hipMemcpy(output.ptr<float>(0), fp32_output, image_size * sizeof(float), hipMemcpyDefault));
cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX);
output.convertTo(output, CV_8UC3);
cv::imshow("output", output);
cv::waitKey();
cv::imwrite("output.png", output);
hipFree(d_filter);
hipFree(d_workspace);
hipFree(fp16_image);
hipFree(fp32_output);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroy(cudnn_handle);
return 0;
}
| 9df4e7b8ea18ea1b291edbef19ec8f9fae41a7c1.cu | #include <cudnn.h>
#include <cuda_fp16.h>
#include <opencv2/opencv.hpp>
#include <stdexcept>
#include <vector>
#define cudaCheckError(status) { cudaAssert(status, __FILE__, __LINE__); }
inline auto cudaAssert(cudaError_t status, const char* file, int line) -> void {
if (status != cudaSuccess) {
fprintf(stderr, "CUDA API error \"%s\" at %s:%i\n", cudaGetErrorString(status), file, line);
exit(EXIT_FAILURE);
}
}
#define cudnnCheckError(status) { cudnnAssert(status, __FILE__, __LINE__); }
inline auto cudnnAssert(cudnnStatus_t status, const char* file, int line) -> void {
if (status != CUDNN_STATUS_SUCCESS) {
fprintf(stderr, "CUDNN API error \"%s\" at %s:%i\n", cudnnGetErrorString(status), file, line);
exit(EXIT_FAILURE);
}
}
#define CUDA_1D_KERNEL_LOOP(idx, n) \
for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < (n); \
idx += blockDim.x * gridDim.x)
__global__
void float2half(const float* input, std::size_t size, half* output) {
CUDA_1D_KERNEL_LOOP(idx, size) {
output[idx] = __float2half(input[idx]);
}
}
__global__
void half2float(const half* input, std::size_t size, float* output) {
CUDA_1D_KERNEL_LOOP(idx, size) {
output[idx] = __half2float(input[idx]);
}
}
auto main(int argc, const char** argv) -> int {
if (argc != 2) {
std::cout << "usage: conv2d <filename>\n";
return -1;
}
const char* filename = argv[1];
cv::Mat image = cv::imread(filename);
if (image.empty()) {
throw std::runtime_error("cv::imread() failed: image not found");
}
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0.0, 1.0, cv::NORM_MINMAX);
const std::size_t image_size = image.total() * image.channels();
cudnnHandle_t cudnn_handle;
cudnnCheckError(cudnnCreate(&cudnn_handle));
float* d_input = nullptr;
cudaCheckError(cudaMalloc(&d_input, image_size * sizeof(float)));
cudaCheckError(cudaMemcpy(d_input, image.ptr<float>(0), image_size * sizeof(float), cudaMemcpyDefault));
half* fp16_image = nullptr;
cudaCheckError(cudaMalloc(&fp16_image, image_size * sizeof(half)));
float2half<<<1, 64>>>(d_input, image_size, fp16_image);
cudaFree(d_input);
cudnnTensorDescriptor_t input_desc;
cudnnCheckError(cudnnCreateTensorDescriptor(&input_desc));
cudnnCheckError(cudnnSetTensor4dDescriptor(
input_desc,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_HALF,
1,
image.channels(),
image.rows,
image.cols
));
cudnnFilterDescriptor_t filter_desc;
cudnnCheckError(cudnnCreateFilterDescriptor(&filter_desc));
cudnnCheckError(cudnnSetFilter4dDescriptor(
filter_desc,
CUDNN_DATA_HALF,
CUDNN_TENSOR_NCHW,
image.channels(),
image.channels(),
3,
3
));
cudnnConvolutionDescriptor_t conv_desc;
cudnnCheckError(cudnnCreateConvolutionDescriptor(&conv_desc));
cudnnCheckError(cudnnSetConvolution2dDescriptor(
conv_desc,
1, 1,
1, 1,
1, 1,
CUDNN_CROSS_CORRELATION,
CUDNN_DATA_HALF
));
cudnnCheckError(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH));
cudnnTensorDescriptor_t output_desc;
cudnnCheckError(cudnnCreateTensorDescriptor(&output_desc));
cudnnCheckError(cudnnSetTensor4dDescriptor(
output_desc,
CUDNN_TENSOR_NHWC,
CUDNN_DATA_HALF,
1,
image.channels(),
image.rows,
image.cols
));
cudnnConvolutionFwdAlgo_t fwd_algo;
int requested_algo_count;
int algo_count;
cudnnCheckError(cudnnGetConvolutionForwardAlgorithmMaxCount(cudnn_handle, &requested_algo_count));
std::vector<cudnnConvolutionFwdAlgoPerf_t> perf_results(requested_algo_count);
cudnnCheckError(cudnnFindConvolutionForwardAlgorithm(
cudnn_handle,
input_desc,
filter_desc,
conv_desc,
output_desc,
requested_algo_count,
&algo_count,
perf_results.data()
));
fwd_algo = perf_results.front().algo;
std::size_t workspace_size = 0;
cudnnCheckError(cudnnGetConvolutionForwardWorkspaceSize(
cudnn_handle,
input_desc,
filter_desc,
conv_desc,
output_desc,
fwd_algo,
&workspace_size
));
void* d_workspace = nullptr;
cudaCheckError(cudaMalloc(&d_workspace, workspace_size));
const std::vector<half> filter = {
0.0625, 0.125, 0.0625,
0.125, 0.25, 0.125,
0.0625, 0.125, 0.0625
};
std::vector<half> h_filter;
for (std::size_t idx = 0; idx < 3 * 3; ++idx) {
for (const auto& val : filter) {
h_filter.emplace_back(val);
}
}
half* d_filter = nullptr;
const std::size_t filter_size = h_filter.size();
cudaCheckError(cudaMalloc(&d_filter, filter_size * sizeof(half)));
cudaCheckError(cudaMemcpy(d_filter, h_filter.data(), filter_size * sizeof(half), cudaMemcpyDefault));
half* d_output = nullptr;
cudaCheckError(cudaMalloc(&d_output, image_size * sizeof(half)));
constexpr float alpha = 1.0f;
constexpr float beta = 0.0f;
cudnnCheckError(cudnnConvolutionForward(
cudnn_handle,
&alpha,
input_desc,
fp16_image,
filter_desc,
d_filter,
conv_desc,
fwd_algo,
d_workspace,
workspace_size,
&beta,
output_desc,
d_output
));
float* fp32_output = nullptr;
cudaCheckError(cudaMalloc(&fp32_output, image_size * sizeof(float)));
half2float<<<1, 64>>>(d_output, image_size, fp32_output);
cudaFree(d_output);
cv::Mat output(image.rows, image.cols, CV_32FC3);
cudaCheckError(cudaMemcpy(output.ptr<float>(0), fp32_output, image_size * sizeof(float), cudaMemcpyDefault));
cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX);
output.convertTo(output, CV_8UC3);
cv::imshow("output", output);
cv::waitKey();
cv::imwrite("output.png", output);
cudaFree(d_filter);
cudaFree(d_workspace);
cudaFree(fp16_image);
cudaFree(fp32_output);
cudnnDestroyTensorDescriptor(input_desc);
cudnnDestroyFilterDescriptor(filter_desc);
cudnnDestroyConvolutionDescriptor(conv_desc);
cudnnDestroyTensorDescriptor(output_desc);
cudnnDestroy(cudnn_handle);
return 0;
}
|
b9a983984cb334366141220d2ab62f5142f878fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
//include CUDA Runtime
#include <hip/hip_runtime.h>
#include <thrust/memory.h>
#include <thrust/system/hip/memory.h>
//thrust includes
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/tabulate.h>
#include <thrust/sort.h>
#include <thrust/random.h>
#include <iostream>
#include <iomanip>
#include <chrono>
int main(int argc, char *argv[]) {
int N = 1e6;
thrust::default_random_engine rng(123456);
thrust::uniform_int_distribution<int> dist(0, 1e9);
while(N <= 1e9)
{
// raw pointer to pinned (page-locked) host memory
int * raw_ptr;
//alloc memory
hipError_t ref = hipHostMalloc(&raw_ptr, N * sizeof(int));
if(ref != hipSuccess)
std::cout << "Malloc failed at N = " << N << std::endl;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if(deviceProp.totalGlobalMem > N*sizeof(int))
{
//Data fits into GPU, create device_ptr
std::cout << "Data fits on GPU" << std::endl;
std::cout << "Data size: " << N << std::endl;
// wrap raw pointer with a device_ptr
thrust::device_ptr<int> dev_ptr(raw_ptr);
for(size_t i = 0; i < N; i++)
{
dev_ptr[i] = dist(rng);
}
std::cout << "Vector is filled with random numbers" << std::endl;
hipDeviceSynchronize();
auto startTime = std::chrono::high_resolution_clock::now();
// Do the sorting thing
thrust::sort(dev_ptr, dev_ptr + N);
hipDeviceSynchronize();
auto endTime= std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = endTime-startTime;
std::cout << "Elapsed time for sorting in ms: "<< diff.count()*1000 << std::endl;
}
else
{
//Data is too big to be moved completely to GPU work on host_ptr
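// Fallback intended as a zero-copy path: the data stays in pinned host memory and is
// mapped into the device address space, so the device-side sort would stream it over PCIe.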
int * h_ptr;
int * d_ptr;
if(hipHostMalloc(&h_ptr, N * sizeof(int)) == hipSuccess)
{
if(hipHostGetDevicePointer(&d_ptr, h_ptr, 0) == hipSuccess)
{
thrust::hip::pointer<int> begin = thrust::hip::pointer<int>(d_ptr);
thrust::hip::pointer<int> end = begin + N;
thrust::tabulate(begin, end, thrust::placeholders::_1 % 1024);
thrust::sort(thrust::hip::par(&d_ptr), begin, end);
}
}
/*
// wrap raw pointer with a device_ptr
thrust::host_ptr<int> host_ptr(raw_ptr);
for(size_t i = 0; i < N; i++)
{
host_ptr[i] = dist(rng);
}
// Do the sorting thingy
thrust::sort(host_ptr.begin(), host_ptr.end());
*/
}
// Free the allocated memory
hipHostFree(raw_ptr);
// Increment N
N = N*10;
}
}
| b9a983984cb334366141220d2ab62f5142f878fd.cu | #include <stdio.h>
//include CUDA Runtime
#include <cuda_runtime.h>
#include <thrust/memory.h>
#include <thrust/system/cuda/memory.h>
//thrust includes
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/tabulate.h>
#include <thrust/sort.h>
#include <thrust/random.h>
#include <iostream>
#include <iomanip>
#include <chrono>
int main(int argc, char *argv[]) {
int N = 1e6;
thrust::default_random_engine rng(123456);
thrust::uniform_int_distribution<int> dist(0, 1e9);
while(N <= 1e9)
{
// raw pointer to pinned (page-locked) host memory
int * raw_ptr;
//alloc memory
cudaError_t ref = cudaMallocHost(&raw_ptr, N * sizeof(int));
if(ref != cudaSuccess)
std::cout << "Malloc failed at N = " << N << std::endl;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if(deviceProp.totalGlobalMem > N*sizeof(int))
{
//Data fits into GPU, create device_ptr
std::cout << "Data fits on GPU" << std::endl;
std::cout << "Data size: " << N << std::endl;
// wrap raw pointer with a device_ptr
thrust::device_ptr<int> dev_ptr(raw_ptr);
for(size_t i = 0; i < N; i++)
{
dev_ptr[i] = dist(rng);
}
std::cout << "Vector is filled with random numbers" << std::endl;
cudaDeviceSynchronize();
auto startTime = std::chrono::high_resolution_clock::now();
// Do the sorting thing
thrust::sort(dev_ptr, dev_ptr + N);
cudaDeviceSynchronize();
auto endTime= std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = endTime-startTime;
std::cout << "Elapsed time for sorting in ms: "<< diff.count()*1000 << std::endl;
}
else
{
//Data is too big to be moved completely to GPU work on host_ptr
int * h_ptr;
int * d_ptr;
if(cudaMallocHost(&h_ptr, N * sizeof(int)) == cudaSuccess)
{
if(cudaHostGetDevicePointer(&d_ptr, h_ptr, 0) == cudaSuccess)
{
thrust::cuda::pointer<int> begin = thrust::cuda::pointer<int>(d_ptr);
thrust::cuda::pointer<int> end = begin + N;
thrust::tabulate(begin, end, thrust::placeholders::_1 % 1024);
thrust::sort(thrust::cuda::par(&d_ptr), begin, end);
}
}
/*
// wrap raw pointer with a device_ptr
thrust::host_ptr<int> host_ptr(raw_ptr);
for(size_t i = 0; i < N; i++)
{
host_ptr[i] = dist(rng);
}
// Do the sorting thingy
thrust::sort(host_ptr.begin(), host_ptr.end());
*/
}
// Free the allocated memory
cudaFreeHost(raw_ptr);
// Increment N
N = N*10;
}
}
|
edd2b248b83042b7837ec15c8e64eeb01cef9362.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yololayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread)
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer)
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize()
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
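// Logist(): logistic sigmoid 1/(1 + e^-x), applied to the objectness, class scores and
// x/y offsets below.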
__device__ float Logist(float data){ return 1.0f /(1.0f + exp(-data)); };
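// CalDetection: decodes one YOLO output scale. Each thread owns one grid cell and loops
// over the 3 anchors: it finds the highest class probability, filters by IGNORE_THRESH,
// decodes the anchor-relative box into input-image coordinates, and atomically appends a
// Detection to the per-batch output buffer (whose first float holds the detection count).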
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
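    // Output layout per batch image: output[b*outputElem] holds a float detection counter
    // (incremented with atomicAdd by CalDetection above), immediately followed by up to
    // MAX_OUTPUT_BBOX_COUNT packed Detection structs. Only the counter is cleared before each
    // launch (see forwardGpu below), so consumers should read at most that many entries.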
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, hipStream_t stream, int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen));
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CUDA_CHECK(hipMemcpy(devAnchor, yolo.anchors, AnchorLen, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(hipFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
}
| edd2b248b83042b7837ec15c8e64eeb01cef9362.cu | #include "yololayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread)
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer)
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize()
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
__device__ float Logist(float data){ return 1.0f /(1.0f + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes,int outputElem) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int total_grid = yoloWidth * yoloHeight;
int bnIdx = idx / total_grid;
idx = idx - total_grid*bnIdx;
int info_len_i = 5 + classes;
const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT);
for (int k = 0; k < 3; ++k) {
int class_id = 0;
float max_cls_prob = 0.0;
for (int i = 5; i < info_len_i; ++i) {
float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]);
if (p > max_cls_prob) {
max_cls_prob = p;
class_id = i - 5;
}
}
float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]);
if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) continue;
float *res_count = output + bnIdx*outputElem;
int count = (int)atomicAdd(res_count, 1);
if (count >= MAX_OUTPUT_BBOX_COUNT) return;
char* data = (char * )res_count + sizeof(float) + count*sizeof(Detection);
Detection* det = (Detection*)(data);
int row = idx / yoloWidth;
int col = idx % yoloWidth;
//Location
det->bbox[0] = (col + Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * INPUT_W / yoloWidth;
det->bbox[1] = (row + Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * INPUT_H / yoloHeight;
det->bbox[2] = exp(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]) * anchors[2*k];
det->bbox[3] = exp(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]) * anchors[2*k + 1];
det->det_confidence = box_prob;
det->class_id = class_id;
det->class_confidence = max_cls_prob;
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize) {
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen));
int outputElem = 1 + MAX_OUTPUT_BBOX_COUNT * sizeof(Detection) / sizeof(float);
for(int idx = 0 ; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx*outputElem, 0, sizeof(float)));
}
int numElem = 0;
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height*batchSize;
if (numElem < mThreadCount)
mThreadCount = numElem;
CUDA_CHECK(cudaMemcpy(devAnchor, yolo.anchors, AnchorLen, cudaMemcpyHostToDevice));
CalDetection<<< (yolo.width*yolo.height*batchSize + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount ,outputElem);
}
CUDA_CHECK(cudaFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
//assert(batchSize == 1);
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float*)outputs[0], stream, batchSize);
return 0;
}
}
|
641086e269da419025f133e27535b441bd667cc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#include <stdio.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
float kernelTime = 0.0f;
float transferTime = 0.0f;
hipEvent_t start, stop;
float elapsedTime;
__device__ bool check(int val, int bound) {
return (val < bound);
}
__global__ void markMatches(int *arr, int *results, int size, int bound) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tid = (blockDim.x * bx) + tx;
for( ; tid < size; tid += blockDim.x * gridDim.x) {
if(check(arr[tid], bound)) {
results[tid] = 1;
} else {
results[tid] = 0;
}
}
}
__global__ void mapMatches(int *arr, int *results, int *prefix, int *final, int size) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tid = (blockDim.x * bx) + tx;
for( ; tid < size; tid += blockDim.x * gridDim.x) {
if(results[tid]) {
final[prefix[tid]] = arr[tid];
}
}
}
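// markMatches + exclusive_scan + mapMatches together form a classic stream compaction:
// markMatches writes a 0/1 flag per element, the exclusive prefix sum of the flags gives
// every surviving element its slot in the output, and mapMatches scatters the matching
// values into that compacted array (see where() below).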
void seedArr(int *arr, int size) {
for(int i = 0; i < size; i++) {
arr[i] = rand() % 100;
}
}
void where(ResultDatabase &resultDB, int size, int coverage) {
int *arr = (int*)malloc(sizeof(int) * size);
int *final;
seedArr(arr, size);
int *d_arr;
int *d_results;
int *d_prefix;
int *d_final;
CUDA_SAFE_CALL(hipMalloc( (void**) &d_arr, sizeof(int) * size));
CUDA_SAFE_CALL(hipMalloc( (void**) &d_results, sizeof(int) * size));
CUDA_SAFE_CALL(hipMalloc( (void**) &d_prefix, sizeof(int) * size));
hipEventRecord(start, 0);
hipMemcpy(d_arr, arr, sizeof(int) * size, hipMemcpyHostToDevice);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
dim3 grid(size / 1024 + 1, 1, 1);
dim3 threads(1024, 1, 1);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( markMatches), dim3(grid), dim3(threads), 0, 0, d_arr, d_results, size, coverage);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
hipEventRecord(start, 0);
thrust::exclusive_scan(thrust::device, d_results, d_results + size, d_prefix);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
int matchSize;
hipEventRecord(start, 0);
hipMemcpy(&matchSize, d_prefix + size - 1, sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
matchSize++;
CUDA_SAFE_CALL(hipMalloc( (void**) &d_final, sizeof(int) * matchSize));
final = (int*)malloc(sizeof(int) * matchSize);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( mapMatches), dim3(grid), dim3(threads), 0, 0, d_arr, d_results, d_prefix, d_final, size);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
hipEventRecord(start, 0);
hipMemcpy(final, d_final, sizeof(int) * matchSize, hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
free(arr);
free(final);
CUDA_SAFE_CALL(hipFree(d_arr));
CUDA_SAFE_CALL(hipFree(d_results));
CUDA_SAFE_CALL(hipFree(d_prefix));
CUDA_SAFE_CALL(hipFree(d_final));
char atts[1024];
sprintf(atts, "size:%d, coverage:%d", size, coverage);
resultDB.AddResult("where_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("where_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("where_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("where_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
}
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("length", OPT_INT, "0", "number of elements in input");
op.addOption("coverage", OPT_INT, "-1", "0 to 100 percentage of elements to allow through where filter");
}
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running Where\n");
srand(7);
bool quiet = op.getOptionBool("quiet");
int size = op.getOptionInt("length");
int coverage = op.getOptionInt("coverage");
if(size == 0 || coverage == -1) {
int sizes[4] = {1000, 10000, 500000000, 1000000000};
int coverages[4] = {20, 30, 40, 80};
size = sizes[op.getOptionInt("size") - 1];
coverage = coverages[op.getOptionInt("size") - 1];
}
if(!quiet) {
printf("Using size=%d, coverage=%d\n", size, coverage);
}
hipEventCreate(&start);
hipEventCreate(&stop);
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
kernelTime = 0.0f;
transferTime = 0.0f;
if(!quiet) {
printf("Pass %d: ", i);
}
where(resultDB, size, coverage);
if(!quiet) {
printf("Done.\n");
}
}
}
| 641086e269da419025f133e27535b441bd667cc9.cu | #include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
#include <stdio.h>
#include <thrust/scan.h>
#include <thrust/execution_policy.h>
float kernelTime = 0.0f;
float transferTime = 0.0f;
cudaEvent_t start, stop;
float elapsedTime;
__device__ bool check(int val, int bound) {
return (val < bound);
}
__global__ void markMatches(int *arr, int *results, int size, int bound) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tid = (blockDim.x * bx) + tx;
for( ; tid < size; tid += blockDim.x * gridDim.x) {
if(check(arr[tid], bound)) {
results[tid] = 1;
} else {
results[tid] = 0;
}
}
}
__global__ void mapMatches(int *arr, int *results, int *prefix, int *final, int size) {
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tid = (blockDim.x * bx) + tx;
for( ; tid < size; tid += blockDim.x * gridDim.x) {
if(results[tid]) {
final[prefix[tid]] = arr[tid];
}
}
}
void seedArr(int *arr, int size) {
for(int i = 0; i < size; i++) {
arr[i] = rand() % 100;
}
}
void where(ResultDatabase &resultDB, int size, int coverage) {
int *arr = (int*)malloc(sizeof(int) * size);
int *final;
seedArr(arr, size);
int *d_arr;
int *d_results;
int *d_prefix;
int *d_final;
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_arr, sizeof(int) * size));
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_results, sizeof(int) * size));
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_prefix, sizeof(int) * size));
cudaEventRecord(start, 0);
cudaMemcpy(d_arr, arr, sizeof(int) * size, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
dim3 grid(size / 1024 + 1, 1, 1);
dim3 threads(1024, 1, 1);
cudaEventRecord(start, 0);
markMatches<<<grid, threads>>>(d_arr, d_results, size, coverage);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
cudaEventRecord(start, 0);
thrust::exclusive_scan(thrust::device, d_results, d_results + size, d_prefix);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
int matchSize;
cudaEventRecord(start, 0);
cudaMemcpy(&matchSize, d_prefix + size - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
matchSize++;
CUDA_SAFE_CALL(cudaMalloc( (void**) &d_final, sizeof(int) * matchSize));
final = (int*)malloc(sizeof(int) * matchSize);
cudaEventRecord(start, 0);
mapMatches<<<grid, threads>>>(d_arr, d_results, d_prefix, d_final, size);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
kernelTime += elapsedTime * 1.e-3;
CHECK_CUDA_ERROR();
cudaEventRecord(start, 0);
cudaMemcpy(final, d_final, sizeof(int) * matchSize, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
transferTime += elapsedTime * 1.e-3;
free(arr);
free(final);
CUDA_SAFE_CALL(cudaFree(d_arr));
CUDA_SAFE_CALL(cudaFree(d_results));
CUDA_SAFE_CALL(cudaFree(d_prefix));
CUDA_SAFE_CALL(cudaFree(d_final));
char atts[1024];
sprintf(atts, "size:%d, coverage:%d", size, coverage);
resultDB.AddResult("where_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("where_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("where_total_time", atts, "sec", kernelTime+transferTime);
resultDB.AddResult("where_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
}
void addBenchmarkSpecOptions(OptionParser &op) {
op.addOption("length", OPT_INT, "0", "number of elements in input");
op.addOption("coverage", OPT_INT, "-1", "0 to 100 percentage of elements to allow through where filter");
}
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running Where\n");
srand(7);
bool quiet = op.getOptionBool("quiet");
int size = op.getOptionInt("length");
int coverage = op.getOptionInt("coverage");
if(size == 0 || coverage == -1) {
int sizes[4] = {1000, 10000, 500000000, 1000000000};
int coverages[4] = {20, 30, 40, 80};
size = sizes[op.getOptionInt("size") - 1];
coverage = coverages[op.getOptionInt("size") - 1];
}
if(!quiet) {
printf("Using size=%d, coverage=%d\n", size, coverage);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
kernelTime = 0.0f;
transferTime = 0.0f;
if(!quiet) {
printf("Pass %d: ", i);
}
where(resultDB, size, coverage);
if(!quiet) {
printf("Done.\n");
}
}
}
|
f18b64734b9f9b7ff4ba3bd4e1f471c8d25456ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
template <typename KeyT, typename ValueT>
size_t cubSortPairsWorkspaceSize(int num_items, int num_segments)
{
size_t temp_storage_bytes = 0;
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
(void*) NULL, temp_storage_bytes,
(const KeyT*) NULL, (KeyT*) NULL,
(const ValueT*) NULL, (ValueT*) NULL,
num_items, // # items
num_segments, // # segments
(const int*) NULL, (const int*) NULL);
return temp_storage_bytes;
}
namespace nvinfer1
{
namespace plugin
{
namespace {
// sort one segment per cta
template<typename T_SCORE, int BLOCK_THREADS, int ELEMENTS_PER_THREAD>
__global__ void blockSortKernel(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments)
{
// Specialize BlockRadixSort for a 1D block
typedef cub::BlockRadixSort<T_SCORE, BLOCK_THREADS, ELEMENTS_PER_THREAD, int> BlockRadixSort;
// Allocate shared memory for BlockRadixSort
__shared__ typename BlockRadixSort::TempStorage temp_storage;
if (blockIdx.x >= num_segments)
return;
int num_active_items = active_count_per_batch[blockIdx.x];
// Obtain a segment of consecutive items that are blocked across threads
T_SCORE thread_keys[ELEMENTS_PER_THREAD];
int thread_values[ELEMENTS_PER_THREAD];
int block_offset = blockIdx.x * stride_items;
cub::LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_in + block_offset, thread_keys, num_active_items, 0);
cub::LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values_in + block_offset, thread_values, num_active_items, -1);
__syncthreads();
// Collectively sort the keys and values among block threads
BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(thread_keys, thread_values);
// Store output in striped fashion
cub::StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_out + block_offset, thread_keys, num_items);
cub::StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values_out + block_offset, thread_values, num_items);
}
/// block sort kernel
template <typename T_SCORE>
void blockSort(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments, hipStream_t stream)
{
if (num_items == 0)
return;
int warps_per_cta = (num_items + 31) / 32;
assert(warps_per_cta <= 8);
dim3 block(warps_per_cta * 32);
dim3 grid(num_segments);
using kernel_func = void (*)(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments);
static const kernel_func kernel_funcs[] = {
&blockSortKernel<T_SCORE, 32, 1>,
&blockSortKernel<T_SCORE, 64, 1>,
&blockSortKernel<T_SCORE, 96, 1>,
&blockSortKernel<T_SCORE, 128, 1>,
&blockSortKernel<T_SCORE, 160, 1>,
&blockSortKernel<T_SCORE, 192, 1>,
&blockSortKernel<T_SCORE, 224, 1>,
&blockSortKernel<T_SCORE, 256, 1>,
};
    hipLaunchKernelGGL(kernel_funcs[warps_per_cta - 1], dim3(grid), dim3(block), 0, stream, d_keys_in, d_keys_out, d_values_in, d_values_out, active_count_per_batch, num_items, stride_items, num_segments);
}
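    // top_k_cuda below is a bit-wise radix selection run by one thread block per batch image.
    // Scanning the 32-bit keys from the most significant bit down, it grows a prefix mask that
    // every top-k key must match: a bit that still leaves more than num_top_k candidates is
    // added to the mask, while a bit that leaves fewer locks those candidates into shared
    // memory, until num_top_k items have been gathered (blockSort then orders them). The keys
    // are float scores reinterpreted as ints; for non-negative IEEE-754 floats the integer
    // ordering of the bit patterns matches the float ordering, so the selection is valid.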
template <int ITEMS_PER_THREAD>
__global__ void top_k_cuda(int *in, int *in_indices, int *out, int* out_indices, int* active_count_per_batch, int items, unsigned int num_top_k, int segments)
{
extern __shared__ uint32_t dynamic_memory[];
uint32_t* selected_items = dynamic_memory;
int32_t* selected_indices = reinterpret_cast<int32_t*>(selected_items + num_top_k);
__shared__ unsigned int selected_count;
unsigned int old_selected_count;
int batch = blockIdx.x;
int first_index = batch * items;
in += first_index;
in_indices += first_index;
out += first_index;
out_indices += first_index;
items = active_count_per_batch[batch];
// Feed input
uint32_t thread_items[ITEMS_PER_THREAD];
int32_t thread_indices[ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
int offset = threadIdx.x + i * blockDim.x;
if (offset < items) {
thread_items[i] = in[offset];
thread_indices[i] = in_indices[offset];
}
else {
thread_items[i] = 0;
thread_indices[i] = -1;
}
}
if (items <= num_top_k) {
if (threadIdx.x == 0) {
active_count_per_batch[batch] = items;
}
// we know that the results are compact, so we can bail out early.
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
int offset = threadIdx.x + i * blockDim.x;
if (offset < num_top_k) {
out[offset] = thread_items[i];
out_indices[offset] = thread_indices[i];
}
else {
return;
}
}
}
uint32_t select_mask = 0;
uint32_t save_mask = 0;
uint32_t save_bit = 0;
if (threadIdx.x == 0) {
selected_count = 0;
old_selected_count = 0;
}
#define MTA_D 0
// iterate over bits
for (int i = 0; i < 32; ++i) {
__syncthreads();
uint32_t bit = select_mask | (1u << (31 - i));
uint32_t &bit_mask = bit;
// determine the number of elements for the current selection mask
for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
if ((thread_items[item] & bit) == bit) {
unsigned int offset = atomicAdd(&selected_count,1);
if (offset < num_top_k) {
selected_items[offset] = thread_items[item];
selected_indices[offset] = thread_indices[item];
}
else {
break;
}
}
}
// remove items from the list
// TODO this has to be something different!
__syncthreads();
int sc = selected_count;
__syncthreads();
if (sc < num_top_k) {
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
if ((thread_items[i] & bit) == bit) {
thread_items[i] = 0;
}
}
}
if (sc == num_top_k || i == 31) {
break;
}
else if (sc > num_top_k)
{
// There are too many bits in the current selection
// Save the current state and go to the next bit
// If there are not enough items left using the next bit
// it's necessary to restart here with the current bit not set
save_mask = bit_mask;
save_bit = i + 1;
select_mask |= bit;
if (threadIdx.x == 0)
{
selected_count = old_selected_count;
}
}
else {
if (save_mask) {
select_mask = save_mask;
i = save_bit;
save_mask = 0;
}
if (threadIdx.x == 0) {
old_selected_count = sc;
}
}
}
__syncthreads();
// store data to global memory
int sc = selected_count;
for (int i = threadIdx.x; i < num_top_k; i += blockDim.x) {
out[i] = (i < sc) ? selected_items[i] : 1;
out_indices[i] = (i < sc && selected_items[0] > 0) ? selected_indices[i] : -1;
}
if (threadIdx.x == 0) {
active_count_per_batch[batch] = num_top_k;
}
}
}
template <typename T_SCORE>
ssdStatus_t topKScoresPerImage_gpu(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
const int num_top_k,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* active_count_per_class,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
uint32_t smem_size = num_top_k * (sizeof(int) + sizeof(uint32_t));
uint32_t num_warps = (num_items_per_image > 1024) ? 32 : (num_items_per_image + 31) / 32;
dim3 block(num_warps * 32);
dim3 grid(num_images);
using top_k_kernel = void (*)(int *in, int *in_indices, int *out, int* out_indices, int* active_count_per_class, int items, unsigned int num_top_k, int segments);
top_k_kernel top_k_kernels[] = {
top_k_cuda<1>,
top_k_cuda<2>,
top_k_cuda<3>,
top_k_cuda<4>,
top_k_cuda<5>,
top_k_cuda<6>,
top_k_cuda<7>,
top_k_cuda<8>,
top_k_cuda<9>,
top_k_cuda<10>,
top_k_cuda<11>,
top_k_cuda<12>,
top_k_cuda<13>,
top_k_cuda<14>,
top_k_cuda<15>,
top_k_cuda<16>,
top_k_cuda<17>,
top_k_cuda<18>,
top_k_cuda<19>,
top_k_cuda<20>,
top_k_cuda<21>,
top_k_cuda<22>,
top_k_cuda<23>,
top_k_cuda<24>,
top_k_cuda<25>,
top_k_cuda<26>,
top_k_cuda<27>,
top_k_cuda<28>,
top_k_cuda<29>,
top_k_cuda<30>,
top_k_cuda<31>,
top_k_cuda<32>,
};
int kernel_index = num_items_per_image / block.x;
while (kernel_index >= 32) {
kernel_index /= 2;
num_warps *= 2;
}
//printf("kernel index image %d\n", kernel_index);
assert(kernel_index < 32);
block.x = num_warps * 32;
    hipLaunchKernelGGL(top_k_kernels[kernel_index], dim3(grid), dim3(block), smem_size, stream, (int*) (unsorted_scores), (int*)unsorted_bbox_indices, (int*) (sorted_scores), (int*)sorted_bbox_indices, (int*)active_count_per_class, num_items_per_image, num_top_k, num_images);
blockSort<T_SCORE>(
(const T_SCORE*) (sorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (sorted_bbox_indices), (int*) (sorted_bbox_indices), (int*) active_count_per_class,
num_top_k, num_items_per_image, num_images, stream
);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG {{{
typedef ssdStatus_t (*tkspiFunc)(hipStream_t,
const int,
const int,
const int,
void*,
void*,
void*,
void*,
void*,
void*);
struct tkspiLaunchConfig
{
DType_t t_score;
tkspiFunc function;
tkspiLaunchConfig(DType_t t_score)
: t_score(t_score)
{
}
tkspiLaunchConfig(DType_t t_score, tkspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const tkspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::vector<tkspiLaunchConfig> tkspiFuncVec;
bool tkspiInit()
{
tkspiFuncVec.push_back(tkspiLaunchConfig(DataType::kFLOAT,
topKScoresPerImage_gpu<float>));
return true;
}
static bool initialized = tkspiInit();
//}}}
ssdStatus_t topKScoresPerImage(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
const int num_top_k,
const DType_t DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* active_count_per_gpu,
void* workspace)
{
tkspiLaunchConfig lc = tkspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < tkspiFuncVec.size(); ++i)
{
if (lc == tkspiFuncVec[i])
{
DEBUG_PRINTF("topKScoresPerImage kernel %d\n", i);
return tkspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
num_top_k,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
active_count_per_gpu,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t topKScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const int num_top_k,
const DType_t DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
} // namespace plugin
} // namespace nvinfer1
| f18b64734b9f9b7ff4ba3bd4e1f471c8d25456ba.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <vector>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
template <typename KeyT, typename ValueT>
size_t cubSortPairsWorkspaceSize(int num_items, int num_segments)
{
size_t temp_storage_bytes = 0;
cub::DeviceSegmentedRadixSort::SortPairsDescending(
(void*) NULL, temp_storage_bytes,
(const KeyT*) NULL, (KeyT*) NULL,
(const ValueT*) NULL, (ValueT*) NULL,
num_items, // # items
num_segments, // # segments
(const int*) NULL, (const int*) NULL);
return temp_storage_bytes;
}
namespace nvinfer1
{
namespace plugin
{
namespace {
// sort one segment per cta
template<typename T_SCORE, int BLOCK_THREADS, int ELEMENTS_PER_THREAD>
__global__ void blockSortKernel(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments)
{
// Specialize BlockRadixSort for a 1D block
typedef cub::BlockRadixSort<T_SCORE, BLOCK_THREADS, ELEMENTS_PER_THREAD, int> BlockRadixSort;
// Allocate shared memory for BlockRadixSort
__shared__ typename BlockRadixSort::TempStorage temp_storage;
if (blockIdx.x >= num_segments)
return;
int num_active_items = active_count_per_batch[blockIdx.x];
// Obtain a segment of consecutive items that are blocked across threads
T_SCORE thread_keys[ELEMENTS_PER_THREAD];
int thread_values[ELEMENTS_PER_THREAD];
int block_offset = blockIdx.x * stride_items;
cub::LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_in + block_offset, thread_keys, num_active_items, 0);
cub::LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values_in + block_offset, thread_values, num_active_items, -1);
__syncthreads();
// Collectively sort the keys and values among block threads
BlockRadixSort(temp_storage).SortDescendingBlockedToStriped(thread_keys, thread_values);
// Store output in striped fashion
cub::StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_keys_out + block_offset, thread_keys, num_items);
cub::StoreDirectStriped<BLOCK_THREADS>(threadIdx.x, d_values_out + block_offset, thread_values, num_items);
}
/// block sort kernel
template <typename T_SCORE>
void blockSort(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments, cudaStream_t stream)
{
if (num_items == 0)
return;
int warps_per_cta = (num_items + 31) / 32;
assert(warps_per_cta <= 8);
dim3 block(warps_per_cta * 32);
dim3 grid(num_segments);
using kernel_func = void (*)(const T_SCORE *d_keys_in, T_SCORE *d_keys_out, const int *d_values_in, int *d_values_out, int* active_count_per_batch, int num_items, int stride_items, int num_segments);
static const kernel_func kernel_funcs[] = {
&blockSortKernel<T_SCORE, 32, 1>,
&blockSortKernel<T_SCORE, 64, 1>,
&blockSortKernel<T_SCORE, 96, 1>,
&blockSortKernel<T_SCORE, 128, 1>,
&blockSortKernel<T_SCORE, 160, 1>,
&blockSortKernel<T_SCORE, 192, 1>,
&blockSortKernel<T_SCORE, 224, 1>,
&blockSortKernel<T_SCORE, 256, 1>,
};
kernel_funcs[warps_per_cta - 1]<<<grid, block, 0, stream>>>(d_keys_in, d_keys_out, d_values_in, d_values_out, active_count_per_batch, num_items, stride_items, num_segments);
}
template <int ITEMS_PER_THREAD>
__global__ void top_k_cuda(int *in, int *in_indices, int *out, int* out_indices, int* active_count_per_batch, int items, unsigned int num_top_k, int segments)
{
extern __shared__ uint32_t dynamic_memory[];
uint32_t* selected_items = dynamic_memory;
int32_t* selected_indices = reinterpret_cast<int32_t*>(selected_items + num_top_k);
__shared__ unsigned int selected_count;
unsigned int old_selected_count;
int batch = blockIdx.x;
int first_index = batch * items;
in += first_index;
in_indices += first_index;
out += first_index;
out_indices += first_index;
items = active_count_per_batch[batch];
// Feed input
uint32_t thread_items[ITEMS_PER_THREAD];
int32_t thread_indices[ITEMS_PER_THREAD];
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
int offset = threadIdx.x + i * blockDim.x;
if (offset < items) {
thread_items[i] = in[offset];
thread_indices[i] = in_indices[offset];
}
else {
thread_items[i] = 0;
thread_indices[i] = -1;
}
}
if (items <= num_top_k) {
if (threadIdx.x == 0) {
active_count_per_batch[batch] = items;
}
// we know that the results are compact, so we can bail out early.
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
int offset = threadIdx.x + i * blockDim.x;
if (offset < num_top_k) {
out[offset] = thread_items[i];
out_indices[offset] = thread_indices[i];
}
else {
return;
}
}
}
uint32_t select_mask = 0;
uint32_t save_mask = 0;
uint32_t save_bit = 0;
if (threadIdx.x == 0) {
selected_count = 0;
old_selected_count = 0;
}
#define MTA_D 0
// iterate over bits
for (int i = 0; i < 32; ++i) {
__syncthreads();
uint32_t bit = select_mask | (1u << (31 - i));
uint32_t &bit_mask = bit;
// determine the number of elements for the current selection mask
for (int item = 0; item < ITEMS_PER_THREAD; ++item) {
if ((thread_items[item] & bit) == bit) {
unsigned int offset = atomicAdd(&selected_count,1);
if (offset < num_top_k) {
selected_items[offset] = thread_items[item];
selected_indices[offset] = thread_indices[item];
}
else {
break;
}
}
}
// remove items from the list
// TODO this has to be something different!
__syncthreads();
int sc = selected_count;
__syncthreads();
if (sc < num_top_k) {
for (int i = 0; i < ITEMS_PER_THREAD; ++i) {
if ((thread_items[i] & bit) == bit) {
thread_items[i] = 0;
}
}
}
if (sc == num_top_k || i == 31) {
break;
}
else if (sc > num_top_k)
{
// There are too many bits in the current selection
// Save the current state and go to the next bit
// If there are not enough items left using the next bit
// it's necessary to restart here with the current bit not set
save_mask = bit_mask;
save_bit = i + 1;
select_mask |= bit;
if (threadIdx.x == 0)
{
selected_count = old_selected_count;
}
}
else {
if (save_mask) {
select_mask = save_mask;
i = save_bit;
save_mask = 0;
}
if (threadIdx.x == 0) {
old_selected_count = sc;
}
}
}
__syncthreads();
// store data to global memory
int sc = selected_count;
for (int i = threadIdx.x; i < num_top_k; i += blockDim.x) {
out[i] = (i < sc) ? selected_items[i] : 1;
out_indices[i] = (i < sc && selected_items[0] > 0) ? selected_indices[i] : -1;
}
if (threadIdx.x == 0) {
active_count_per_batch[batch] = num_top_k;
}
}
}
template <typename T_SCORE>
ssdStatus_t topKScoresPerImage_gpu(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
const int num_top_k,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* active_count_per_class,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
uint32_t smem_size = num_top_k * (sizeof(int) + sizeof(uint32_t));
uint32_t num_warps = (num_items_per_image > 1024) ? 32 : (num_items_per_image + 31) / 32;
dim3 block(num_warps * 32);
dim3 grid(num_images);
using top_k_kernel = void (*)(int *in, int *in_indices, int *out, int* out_indices, int* active_count_per_class, int items, unsigned int num_top_k, int segments);
top_k_kernel top_k_kernels[] = {
top_k_cuda<1>,
top_k_cuda<2>,
top_k_cuda<3>,
top_k_cuda<4>,
top_k_cuda<5>,
top_k_cuda<6>,
top_k_cuda<7>,
top_k_cuda<8>,
top_k_cuda<9>,
top_k_cuda<10>,
top_k_cuda<11>,
top_k_cuda<12>,
top_k_cuda<13>,
top_k_cuda<14>,
top_k_cuda<15>,
top_k_cuda<16>,
top_k_cuda<17>,
top_k_cuda<18>,
top_k_cuda<19>,
top_k_cuda<20>,
top_k_cuda<21>,
top_k_cuda<22>,
top_k_cuda<23>,
top_k_cuda<24>,
top_k_cuda<25>,
top_k_cuda<26>,
top_k_cuda<27>,
top_k_cuda<28>,
top_k_cuda<29>,
top_k_cuda<30>,
top_k_cuda<31>,
top_k_cuda<32>,
};
int kernel_index = num_items_per_image / block.x;
while (kernel_index >= 32) {
kernel_index /= 2;
num_warps *= 2;
}
//printf("kernel index image %d\n", kernel_index);
assert(kernel_index < 32);
block.x = num_warps * 32;
top_k_kernels[kernel_index]<<<grid, block, smem_size, stream>>>((int*) (unsorted_scores), (int*)unsorted_bbox_indices, (int*) (sorted_scores), (int*)sorted_bbox_indices, (int*)active_count_per_class, num_items_per_image, num_top_k, num_images);
blockSort<T_SCORE>(
(const T_SCORE*) (sorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (sorted_bbox_indices), (int*) (sorted_bbox_indices), (int*) active_count_per_class,
num_top_k, num_items_per_image, num_images, stream
);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG {{{
typedef ssdStatus_t (*tkspiFunc)(cudaStream_t,
const int,
const int,
const int,
void*,
void*,
void*,
void*,
void*,
void*);
struct tkspiLaunchConfig
{
DType_t t_score;
tkspiFunc function;
tkspiLaunchConfig(DType_t t_score)
: t_score(t_score)
{
}
tkspiLaunchConfig(DType_t t_score, tkspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const tkspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
using nvinfer1::DataType;
static std::vector<tkspiLaunchConfig> tkspiFuncVec;
bool tkspiInit()
{
tkspiFuncVec.push_back(tkspiLaunchConfig(DataType::kFLOAT,
topKScoresPerImage_gpu<float>));
return true;
}
static bool initialized = tkspiInit();
//}}}
ssdStatus_t topKScoresPerImage(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
const int num_top_k,
const DType_t DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* active_count_per_gpu,
void* workspace)
{
tkspiLaunchConfig lc = tkspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < tkspiFuncVec.size(); ++i)
{
if (lc == tkspiFuncVec[i])
{
DEBUG_PRINTF("topKScoresPerImage kernel %d\n", i);
return tkspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
num_top_k,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
active_count_per_gpu,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t topKScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const int num_top_k,
const DType_t DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
} // namespace plugin
} // namespace nvinfer1
|
d4736130e5eb69dbe0c7a606fd307a72375c2008.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
 * copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <cstdio>
#include <string>
#include <stdexcept>
#include <iostream>
#include "eddl/hardware/gpu/gpu_tensor.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
// CUDA, NVIDIA compute capabilities:
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
// -----------------------------------------------------------------
// GRID
// Maximum dimensionality of grid of thread blocks: 3
// Maximum x-dimension of a grid of thread blocks (2^31)-1
// Maximum y- or z-dimension of a grid of thread blocks: 65535
// THREAD BLOCK
// Maximum dimensionality of thread block: 3
// Maximum x- or y-dimension of a block: 1024
// Maximum z-dimension of a block: 64
//
// Maximum number of threads per block: 1024
// -----------------------------------------------------------------
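// Per-device library handles: gpu_init(device) fills the slot for its device id,
// so the arrays below allow up to 64 GPUs in one process.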
hipblasHandle_t hcublas[64];
hiprandGenerator_t random_generator[64];
hipblasStatus_t bstatus;
hiprandStatus_t rstatus;
static const char *_curandGetErrorEnum(hiprandStatus_t error){
switch (error)
{
case HIPRAND_STATUS_ALLOCATION_FAILED:
return "HIPRAND_STATUS_ALLOCATION_FAILED";
case HIPRAND_STATUS_INITIALIZATION_FAILED:
return "HIPRAND_STATUS_INITIALIZATION_FAILED";
case HIPRAND_STATUS_VERSION_MISMATCH:
return "HIPRAND_STATUS_VERSION_MISMATCH";
case HIPRAND_STATUS_TYPE_ERROR:
return "HIPRAND_STATUS_TYPE_ERROR";
case HIPRAND_STATUS_OUT_OF_RANGE:
return "HIPRAND_STATUS_OUT_OF_RANGE";
case HIPRAND_STATUS_PREEXISTING_FAILURE:
return "HIPRAND_STATUS_PREEXISTING_FAILURE";
case HIPRAND_STATUS_NOT_INITIALIZED:
return "HIPRAND_STATUS_NOT_INITIALIZED";
case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
default:
std::string text = "unknown hiprand error: " + std::to_string(error) + " | (_curandGetErrorEnum)";
throw std::invalid_argument(text);
}
}
void check_cublas(hipblasStatus_t status, const char *f)
{
if ( status!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "error in cublas execution in " + std::string(f) + " | (check_cublas)";
throw std::runtime_error(text);
}
}
void check_curand(hiprandStatus_t status, const char *f)
{
if ( status!= HIPRAND_STATUS_SUCCESS)
{
std::string text = "error in hiprand execution in " + std::string(_curandGetErrorEnum(status)) + " | (check_curand)";
throw std::runtime_error(text);
}
}
void check_cuda(hipError_t err,const char *msg)
{
if(err!=hipSuccess)
{
std::string error_type = hipGetErrorString(err);
std::string text = "[CUDA ERROR]: " + error_type + " ("+ std::to_string(err) + ") raised in " + std::string(msg) + " | (check_cuda)";
throw std::runtime_error(text);
}
}
void gpu_set_device(int device)
{
hipSetDevice(device);
}
void gpu_init(int device)
{
int nDevices;
hipGetDeviceCount(&nDevices);
if (device>nDevices)
{
std::string text = "GPU " + std::to_string(device) + " not available. Number of available GPUs is " + std::to_string(nDevices) + ". Further information running nvidia-smi | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"Selecting GPU device %d\n",device);
hipSetDevice(device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,device);
fprintf(stderr,"EDDLL is running on GPU device %d, %s\n",device,prop.name);
/// CUBLAS
bstatus=hipblasCreate(&(hcublas[device]));
// try to init cublas several times
int i=0;
while ((bstatus!= HIPBLAS_STATUS_SUCCESS)&&(i<10)) {
bstatus=hipblasCreate(&(hcublas[device]));
i++;
fprintf(stderr,".\n");
}
if ( bstatus!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuBlas initialized on GPU device %d, %s\n",device,prop.name);
bstatus = hipblasSetAtomicsMode(hcublas[device],HIPBLAS_ATOMICS_NOT_ALLOWED);
if ( bstatus!= HIPBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas execution getting: NOT IMPLEMENTED | (gpu_init)";
throw std::runtime_error(text);
}
// CURAND
rstatus=hiprandCreateGenerator(&(random_generator[device]),HIPRAND_RNG_PSEUDO_MRG32K3A);
if (rstatus != HIPRAND_STATUS_SUCCESS)
{
std::string text = "error creating random numbers on gpu | (gpu_init)";
throw std::runtime_error(text);
}
rstatus=hiprandSetPseudoRandomGeneratorSeed(random_generator[device],1234);
if (rstatus != HIPRAND_STATUS_SUCCESS) {
std::string text = "error setting the seed for program | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuRand initialized on GPU device %d, %s\n",device,prop.name);
}
float* gpu_create_tensor(int dev,int size)
{
float* devicePointer;
hipSetDevice(dev);
check_cuda(hipMalloc((void**)&devicePointer,size*sizeof(float)),"create_tensor");
return devicePointer;
}
void gpu_delete_tensor(int dev, float* p)
{
hipSetDevice(dev);
check_cuda(hipFree(p),"delete_tensor");
}
void gpu_delete_tensor_int(int dev, int* p)
{
hipSetDevice(dev);
check_cuda(hipFree(p),"delete_tensor_int");
}
int gpu_devices()
{
int nDevices;
hipGetDeviceCount(&nDevices);
return nDevices;
}
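// Illustrative usage sketch (not part of the original EDDL sources): shows the expected
// call order of the helpers above. The device id 0 and the 1024-element size are
// arbitrary assumptions for the example.
static void gpu_usage_example()
{
    gpu_init(0);                            // select device 0 and create its cuBLAS/cuRAND handles
    float* t = gpu_create_tensor(0, 1024);  // allocate 1024 floats on device 0
    gpu_delete_tensor(0, t);                // release the buffer
}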
| d4736130e5eb69dbe0c7a606fd307a72375c2008.cu | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 0.7
* copyright (c) 2020, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre
* Date: April 2020
* Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected])
* All rights reserved
*/
#include <cstdio>
#include <string>
#include <stdexcept>
#include <iostream>
#include "eddl/hardware/gpu/gpu_tensor.h"
#include "eddl/hardware/gpu/gpu_kernels.h"
// CUDA, NVIDIA compute capabilities:
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-capabilities
// -----------------------------------------------------------------
// GRID
// Maximum dimensionality of grid of thread blocks: 3
// Maximum x-dimension of a grid of thread blocks (2^31)-1
// Maximum y- or z-dimension of a grid of thread blocks: 65535
// THREAD BLOCK
// Maximum dimensionality of thread block: 3
// Maximum x- or y-dimension of a block: 1024
// Maximum z-dimension of a block: 64
//
// Maximum number of threads per block: 1024
// -----------------------------------------------------------------
cublasHandle_t hcublas[64];
curandGenerator_t random_generator[64];
cublasStatus_t bstatus;
curandStatus_t rstatus;
static const char *_curandGetErrorEnum(curandStatus_t error){
switch (error)
{
case CURAND_STATUS_ALLOCATION_FAILED:
return "CURAND_STATUS_ALLOCATION_FAILED";
case CURAND_STATUS_INITIALIZATION_FAILED:
return "CURAND_STATUS_INITIALIZATION_FAILED";
case CURAND_STATUS_VERSION_MISMATCH:
return "CURAND_STATUS_VERSION_MISMATCH";
case CURAND_STATUS_TYPE_ERROR:
return "CURAND_STATUS_TYPE_ERROR";
case CURAND_STATUS_OUT_OF_RANGE:
return "CURAND_STATUS_OUT_OF_RANGE";
case CURAND_STATUS_PREEXISTING_FAILURE:
return "CURAND_STATUS_PREEXISTING_FAILURE";
case CURAND_STATUS_NOT_INITIALIZED:
return "CURAND_STATUS_NOT_INITIALIZED";
case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:
return "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED";
case CURAND_STATUS_LENGTH_NOT_MULTIPLE:
return "CURAND_STATUS_LENGTH_NOT_MULTIPLE";
default:
std::string text = "unknown curand error: " + std::to_string(error) + " | (_curandGetErrorEnum)";
throw std::invalid_argument(text);
}
}
void check_cublas(cublasStatus_t status, const char *f)
{
if ( status!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "error in cublas execution in " + std::string(f) + " | (check_cublas)";
throw std::runtime_error(text);
}
}
void check_curand(curandStatus_t status, const char *f)
{
if ( status!= CURAND_STATUS_SUCCESS)
{
std::string text = "error in curand execution in " + std::string(_curandGetErrorEnum(status)) + " | (check_curand)";
throw std::runtime_error(text);
}
}
void check_cuda(cudaError_t err,const char *msg)
{
if(err!=cudaSuccess)
{
std::string error_type = cudaGetErrorString(err);
std::string text = "[CUDA ERROR]: " + error_type + " ("+ std::to_string(err) + ") raised in " + std::string(msg) + " | (check_cuda)";
throw std::runtime_error(text);
}
}
void gpu_set_device(int device)
{
cudaSetDevice(device);
}
void gpu_init(int device)
{
int nDevices;
cudaGetDeviceCount(&nDevices);
if (device>nDevices)
{
std::string text = "GPU " + std::to_string(device) + " not available. Number of available GPUs is " + std::to_string(nDevices) + ". Further information running nvidia-smi | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"Selecting GPU device %d\n",device);
cudaSetDevice(device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,device);
fprintf(stderr,"EDDLL is running on GPU device %d, %s\n",device,prop.name);
/// CUBLAS
bstatus=cublasCreate(&(hcublas[device]));
// try to init cublas several times
int i=0;
while ((bstatus!= CUBLAS_STATUS_SUCCESS)&&(i<10)) {
bstatus=cublasCreate(&(hcublas[device]));
i++;
fprintf(stderr,".\n");
}
if ( bstatus!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas create (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuBlas initialized on GPU device %d, %s\n",device,prop.name);
bstatus = cublasSetAtomicsMode(hcublas[device],CUBLAS_ATOMICS_NOT_ALLOWED);
if ( bstatus!= CUBLAS_STATUS_SUCCESS)
{
std::string text = "problem in cublas execution getting: NOT IMPLEMENTED | (gpu_init)";
throw std::runtime_error(text);
}
// CURAND
rstatus=curandCreateGenerator(&(random_generator[device]),CURAND_RNG_PSEUDO_MRG32K3A);
if (rstatus != CURAND_STATUS_SUCCESS)
{
std::string text = "error creating random numbers on gpu | (gpu_init)";
throw std::runtime_error(text);
}
rstatus=curandSetPseudoRandomGeneratorSeed(random_generator[device],1234);
if (rstatus != CURAND_STATUS_SUCCESS) {
std::string text = "error setting the seed for program | (gpu_init)";
throw std::runtime_error(text);
}
fprintf(stderr,"CuRand initialized on GPU device %d, %s\n",device,prop.name);
}
float* gpu_create_tensor(int dev,int size)
{
float* devicePointer;
cudaSetDevice(dev);
check_cuda(cudaMalloc((void**)&devicePointer,size*sizeof(float)),"create_tensor");
return devicePointer;
}
void gpu_delete_tensor(int dev, float* p)
{
cudaSetDevice(dev);
check_cuda(cudaFree(p),"delete_tensor");
}
void gpu_delete_tensor_int(int dev, int* p)
{
cudaSetDevice(dev);
check_cuda(cudaFree(p),"delete_tensor_int");
}
int gpu_devices()
{
int nDevices;
cudaGetDeviceCount(&nDevices);
return nDevices;
}
|
2460982c12fb368ac3c924cecac5e0ae1257ff84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nonseparable.h"
#include "common.h"
// outer product of arrays "a", "b" of length "len"
DTYPE* w_outer(DTYPE* a, DTYPE* b, int len) {
DTYPE* res = (DTYPE*) calloc(len*len, sizeof(DTYPE));
for (int i = 0; i < len; i++) {
for (int j = 0; j < len; j++) {
res[i*len+j] = a[i]*b[j];
}
}
return res;
}
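// Worked example (illustrative, assuming the usual orthonormal Haar pair up to sign):
// with l = {1/sqrt(2), 1/sqrt(2)} and h = {1/sqrt(2), -1/sqrt(2)},
// w_outer(l, l) = {{0.5, 0.5}, {0.5, 0.5}} (the LL filter) and
// w_outer(l, h) = {{0.5, -0.5}, {0.5, -0.5}},
// i.e. res[i*len+j] = a[i]*b[j]: the first argument varies down the rows,
// the second across the columns.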
/// Compute the four filters A, H, V, D from a family name.
/// These filters are separable, i.e computed from 1D filters.
/// wname: name of the filter ("haar", "db3", "sym4", ...)
/// direction: 1 for forward transform, -1 for inverse transform
/// Returns : the filter width "hlen" if success ; a negative value otherwise.
int w_compute_filters(const char* wname, int direction, int do_swt) {
if (direction == 0) {
puts("ERROR: w_compute_filters(): please specify a direction for second argument : +1 for forward, -1 for inverse)");
return -1;
}
int hlen = 0;
DTYPE* f1_l; // 1D lowpass
DTYPE* f1_h; // 1D highpass
DTYPE* f2_a, *f2_h, *f2_v, *f2_d; // 2D filters
// Haar filters has specific kernels
if (!do_swt) {
if ((!strcasecmp(wname, "haar")) || (!strcasecmp(wname, "db1")) || (!strcasecmp(wname, "bior1.1")) || (!strcasecmp(wname, "rbior1.1"))) {
return 2;
}
}
// Browse available filters (see filters.h)
int i;
for (i = 0; i < 72; i++) {
if (!strcasecmp(wname, all_filters[i].wname)) {
hlen = all_filters[i].hlen;
if (direction > 0) {
f1_l = all_filters[i].f_l;
f1_h = all_filters[i].f_h;
}
else {
f1_l = all_filters[i].i_l;
f1_h = all_filters[i].i_h;
}
break;
}
}
if (hlen == 0) {
printf("ERROR: w_compute_filters(): unknown filter %s\n", wname);
return -2;
}
// Create the separable 2D filters
f2_a = w_outer(f1_l, f1_l, hlen);
f2_h = w_outer(f1_l, f1_h, hlen); // CHECKME
f2_v = w_outer(f1_h, f1_l, hlen);
f2_d = w_outer(f1_h, f1_h, hlen);
// Copy the filters to device constant memory
hipMemcpyToSymbol(c_kern_LL, f2_a, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_kern_LH, f2_h, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice); // CHECKME
hipMemcpyToSymbol(c_kern_HL, f2_v, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(c_kern_HH, f2_d, hlen*hlen*sizeof(DTYPE), 0, hipMemcpyHostToDevice);
return hlen;
}
// must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input image
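// Each thread computes one coefficient of each of the four subbands: it convolves the
// hlen x hlen non-separable filters A/H/V/D with the neighbourhood centred on input pixel
// (2*gidy, 2*gidx), wrapping indices periodically at the borders (decimated transform,
// hence the Nc/2 x Nr/2 grid above).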
__global__ void w_kern_forward(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr2 = Nr/2, Nc2 = Nc/2;
if (gidy < Nr2 && gidx < Nc2) {
int c, hL, hR;
if (hlen & 1) { // odd kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even kernel size : center is shifted to the left
c = hlen/2 - 1;
hL = c;
hR = c+1;
}
int jy1 = c - 2*gidy;
int jy2 = Nr - 1 - 2*gidy + c;
int jx1 = c - 2*gidx;
int jx2 = Nc - 1 - 2*gidx + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
DTYPE img_val;
// Convolution with periodic boundaries extension.
// The following can be sped-up by splitting into 3*3 loops, but it would be a nightmare for readability
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy*2 - c + jy;
if (jy < jy1) idx_y += Nr;
if (jy > jy2) idx_y -= Nr ;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx*2 - c + jx;
if (jx < jx1) idx_x += Nc;
if (jx > jx2) idx_x -= Nc ;
img_val = img[idx_y*Nc + idx_x];
res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
}
}
c_a[(gidy)* Nc2 + (gidx)] = res_a;
c_h[(gidy)* Nc2 + (gidx)] = res_h;
c_v[(gidy)* Nc2 + (gidx)] = res_v;
c_d[(gidy)* Nc2 + (gidx)] = res_d;
}
}
// must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input
__global__ void w_kern_inverse(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr2 = Nr*2, Nc2 = Nc*2;
//~ if ((gidy < Nr2-10 && gidx < Nc2-10) && (gidx > 10 && gidy > 10)) {
if (gidy < Nr2 && gidx < Nc2) {
int c, hL, hR;
int hlen2 = hlen/2; // Convolutions with even/odd indices of the kernels
if (hlen2 & 1) { // odd half-kernel size
c = hlen2/2;
hL = c;
hR = c;
}
else { // even half-kernel size : center is shifted to the RIGHT for reconstruction.
c = hlen2/2 - 0;
hL = c;
hR = c-1;
// virtual id for shift
// TODO : for the very first convolution (on the edges), this is not exactly accurate (?)
gidx += 1;
gidy += 1;
}
int jy1 = c - gidy/2;
int jy2 = Nr - 1 - gidy/2 + c;
int jx1 = c - gidx/2;
int jx2 = Nc - 1 - gidx/2 + c;
// There are 4 threads/coeff index. Each thread will do a convolution with the even/odd indices of the kernels along each dimension.
int offset_x = 1-(gidx & 1);
int offset_y = 1-(gidy & 1);
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy/2 - c + jy;
if (jy < jy1) idx_y += Nr;
if (jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx/2 - c + jx;
if (jx < jx1) idx_x += Nc;
if (jx > jx2) idx_x -= Nc;
res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
}
}
if ((hlen2 & 1) == 1) img[gidy * Nc2 + gidx] = res_a + res_h + res_v + res_d;
else img[(gidy-1) * Nc2 + (gidx-1)] = res_a + res_h + res_v + res_d;
}
}
int w_forward(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
int tpb = 16; // TODO : tune for max perfs.
int Nc2 = Nc/2, Nr2 = Nr/2;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
hipLaunchKernelGGL(( w_kern_forward), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr2*2, Nc2*2, hlen);
for (int i=1; i < levels; i++) {
Nc2 /= 2;
Nr2 /= 2;
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
hipLaunchKernelGGL(( w_kern_forward), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2*2, Nc2*2, hlen);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, Nr2*Nc2*sizeof(DTYPE), hipMemcpyDeviceToDevice);
return 0;
}
int w_inverse(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
int Nr0 = Nr, Nc0 = Nc;
Nr /= w_ipow2(levels);
Nc /= w_ipow2(levels);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(Nc*2, tpb), w_iDivUp(Nr*2, tpb), 1);
hipLaunchKernelGGL(( w_kern_inverse), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen);
Nr *= 2;
Nc *= 2;
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, (Nr0/2)*(Nc0/2)*sizeof(DTYPE), hipMemcpyDeviceToDevice);
//~ CUDACHECK;
// First level
n_blocks = dim3(w_iDivUp(Nc*2, tpb), w_iDivUp(Nr*2, tpb), 1);
hipLaunchKernelGGL(( w_kern_inverse), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen);
return 0;
}
/// ----------------------------------------------------------------------------
/// ------------------------- Undecimated DWT --------------------------------
/// ----------------------------------------------------------------------------
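// Undecimated (stationary) transform: no subsampling, every subband keeps the full Nr x Nc size.
// At level L the filters are applied "a trous": taps are spaced by factor = 2^(L-1) instead of
// being contiguous. The /4 in the inverse kernel compensates for the redundancy of this scheme
// (all filter taps contribute, rather than one polyphase component per band as in w_kern_inverse).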
// must be run with grid size = (Nc, Nr) where Nr = numrows of input image
__global__ void w_kern_forward_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc) {
int factor = 1 << (level - 1);
int c, hL, hR;
if (hlen & 1) { // odd kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even kernel size : center is shifted to the left
c = hlen/2 - 1;
hL = c;
hR = c+1;
}
c *= factor;
int jx1 = c - gidx;
int jx2 = Nc - 1 - gidx + c;
int jy1 = c - gidy;
int jy2 = Nr - 1 - gidy + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
DTYPE img_val;
// Convolution with periodic boundaries extension.
// The filters are 2-upsampled at each level : [h0, h1, h2, h3] --> [h0, 0, h1, 0, h2, 0, h3, 0]
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy - c + factor*jy;
if (factor*jy < jy1) idx_y += Nr;
if (factor*jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx + jx*factor - c;
if (factor*jx < jx1) idx_x += Nc;
if (factor*jx > jx2) idx_x -= Nc;
img_val = img[idx_y*Nc + idx_x];
res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
}
}
c_a[gidy* Nc + gidx] = res_a;
c_h[gidy* Nc + gidx] = res_h;
c_v[gidy* Nc + gidx] = res_v;
c_d[gidy* Nc + gidx] = res_d;
}
}
// must be run with grid size = (Nc, Nr) ; Nr = numrows of input (undecimated transform: all subbands keep the full image size)
__global__ void w_kern_inverse_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc) {
int factor = 1 << (level - 1);
int c, hL, hR;
if (hlen & 1) { // odd half-kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even half-kernel size : center is shifted to the RIGHT for reconstruction.
c = hlen/2 - 0;
hL = c;
hR = c-1;
}
c *= factor;
int jy1 = c - gidy;
int jy2 = Nr - 1 - gidy + c;
int jx1 = c - gidx;
int jx2 = Nc - 1 - gidx + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy - c + jy*factor;
if (factor*jy < jy1) idx_y += Nr;
if (factor*jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx - c + jx*factor;
if (factor*jx < jx1) idx_x += Nc;
if (factor*jx > jx2) idx_x -= Nc;
res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
}
}
img[gidy * Nc + gidx] = res_a + res_h + res_v + res_d;
}
}
int w_forward_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
int tpb = 16; // TODO : tune for max perfs.
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
hipLaunchKernelGGL(( w_kern_forward_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1);
for (int i=1; i < levels; i++) {
hipLaunchKernelGGL(( w_kern_forward_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
return 0;
}
int w_inverse_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
int tpb = 16; // TODO : tune for max perfs.
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
for (int i = levels-1; i >= 1; i--) {
hipLaunchKernelGGL(( w_kern_inverse_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) hipMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), hipMemcpyDeviceToDevice);
// First scale
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
hipLaunchKernelGGL(( w_kern_inverse_swt), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1);
return 0;
}
| 2460982c12fb368ac3c924cecac5e0ae1257ff84.cu | #include "nonseparable.h"
#include "common.h"
// outer product of arrays "a", "b" of length "len"
DTYPE* w_outer(DTYPE* a, DTYPE* b, int len) {
DTYPE* res = (DTYPE*) calloc(len*len, sizeof(DTYPE));
for (int i = 0; i < len; i++) {
for (int j = 0; j < len; j++) {
res[i*len+j] = a[i]*b[j];
}
}
return res;
}
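// Illustration only (Haar itself is special-cased below and handled by dedicated kernels):
// for a length-2 low-pass l = {a, a} and high-pass h = {a, -a} with a = 1/sqrt(2),
// w_outer(l, l) = a*a * {1, 1, 1, 1} (the 2x2 LL kernel, row-major) and
// w_outer(l, h) = a*a * {1, -1, 1, -1}; the four such products form the LL/LH/HL/HH
// analysis kernels copied to constant memory in w_compute_filters().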
/// Compute the four filters A, H, V, D from a family name.
/// These filters are separable, i.e computed from 1D filters.
/// wname: name of the filter ("haar", "db3", "sym4", ...)
/// direction: 1 for forward transform, -1 for inverse transform
/// Returns : the filter width "hlen" if success ; a negative value otherwise.
int w_compute_filters(const char* wname, int direction, int do_swt) {
if (direction == 0) {
puts("ERROR: w_compute_filters(): please specify a direction for second argument : +1 for forward, -1 for inverse)");
return -1;
}
int hlen = 0;
DTYPE* f1_l; // 1D lowpass
DTYPE* f1_h; // 1D highpass
DTYPE* f2_a, *f2_h, *f2_v, *f2_d; // 2D filters
// Haar filters have specific kernels
if (!do_swt) {
if ((!strcasecmp(wname, "haar")) || (!strcasecmp(wname, "db1")) || (!strcasecmp(wname, "bior1.1")) || (!strcasecmp(wname, "rbior1.1"))) {
return 2;
}
}
// Browse available filters (see filters.h)
int i;
for (i = 0; i < 72; i++) {
if (!strcasecmp(wname, all_filters[i].wname)) {
hlen = all_filters[i].hlen;
if (direction > 0) {
f1_l = all_filters[i].f_l;
f1_h = all_filters[i].f_h;
}
else {
f1_l = all_filters[i].i_l;
f1_h = all_filters[i].i_h;
}
break;
}
}
if (hlen == 0) {
printf("ERROR: w_compute_filters(): unknown filter %s\n", wname);
return -2;
}
// Create the separable 2D filters
f2_a = w_outer(f1_l, f1_l, hlen);
f2_h = w_outer(f1_l, f1_h, hlen); // CHECKME
f2_v = w_outer(f1_h, f1_l, hlen);
f2_d = w_outer(f1_h, f1_h, hlen);
// Copy the filters to device constant memory
cudaMemcpyToSymbol(c_kern_LL, f2_a, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_kern_LH, f2_h, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice); // CHECKME
cudaMemcpyToSymbol(c_kern_HL, f2_v, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_kern_HH, f2_d, hlen*hlen*sizeof(DTYPE), 0, cudaMemcpyHostToDevice);
return hlen;
}
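// Forward transform: one thread per output coefficient (gidx, gidy) on the half-resolution grid.
// Each thread convolves the hlen x hlen analysis kernels with the input window centred at
// (2*gidx, 2*gidy); the jx1/jx2 and jy1/jy2 tests wrap the window indices periodically, i.e. the
// effective input index is (2*gid - c + j) mod N in each dimension.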
// must be run with grid size = (Nc/2, Nr/2) where Nr = numrows of input image
__global__ void w_kern_forward(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr2 = Nr/2, Nc2 = Nc/2;
if (gidy < Nr2 && gidx < Nc2) {
int c, hL, hR;
if (hlen & 1) { // odd kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even kernel size : center is shifted to the left
c = hlen/2 - 1;
hL = c;
hR = c+1;
}
int jy1 = c - 2*gidy;
int jy2 = Nr - 1 - 2*gidy + c;
int jx1 = c - 2*gidx;
int jx2 = Nc - 1 - 2*gidx + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
DTYPE img_val;
// Convolution with periodic boundaries extension.
// The following can be sped-up by splitting into 3*3 loops, but it would be a nightmare for readability
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy*2 - c + jy;
if (jy < jy1) idx_y += Nr;
if (jy > jy2) idx_y -= Nr ;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx*2 - c + jx;
if (jx < jx1) idx_x += Nc;
if (jx > jx2) idx_x -= Nc ;
img_val = img[idx_y*Nc + idx_x];
res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
}
}
c_a[(gidy)* Nc2 + (gidx)] = res_a;
c_h[(gidy)* Nc2 + (gidx)] = res_h;
c_v[(gidy)* Nc2 + (gidx)] = res_v;
c_d[(gidy)* Nc2 + (gidx)] = res_d;
}
}
// must be run with grid size = (2*Nr, 2*Nc) ; Nr = numrows of input
__global__ void w_kern_inverse(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
int Nr2 = Nr*2, Nc2 = Nc*2;
//~ if ((gidy < Nr2-10 && gidx < Nc2-10) && (gidx > 10 && gidy > 10)) {
if (gidy < Nr2 && gidx < Nc2) {
int c, hL, hR;
int hlen2 = hlen/2; // Convolutions with even/odd indices of the kernels
if (hlen2 & 1) { // odd half-kernel size
c = hlen2/2;
hL = c;
hR = c;
}
else { // even half-kernel size : center is shifted to the RIGHT for reconstruction.
c = hlen2/2 - 0;
hL = c;
hR = c-1;
// virtual id for shift
// TODO : for the very first convolution (on the edges), this is not exactly accurate (?)
gidx += 1;
gidy += 1;
}
int jy1 = c - gidy/2;
int jy2 = Nr - 1 - gidy/2 + c;
int jx1 = c - gidx/2;
int jx2 = Nc - 1 - gidx/2 + c;
// There are 4 threads per coefficient index. Each thread does a convolution with the even/odd indices of the kernels along each dimension.
int offset_x = 1-(gidx & 1);
int offset_y = 1-(gidy & 1);
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy/2 - c + jy;
if (jy < jy1) idx_y += Nr;
if (jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx/2 - c + jx;
if (jx < jx1) idx_x += Nc;
if (jx > jx2) idx_x -= Nc;
res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1- (2*jy + offset_y))*hlen + (hlen-1 - (2*jx + offset_x))];
}
}
if ((hlen2 & 1) == 1) img[gidy * Nc2 + gidx] = res_a + res_h + res_v + res_d;
else img[(gidy-1) * Nc2 + (gidx-1)] = res_a + res_h + res_v + res_d;
}
}
int w_forward(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
int tpb = 16; // TODO : tune for max perfs.
int Nc2 = Nc/2, Nr2 = Nr/2;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
dim3 n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
w_kern_forward<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr2*2, Nc2*2, hlen);
for (int i=1; i < levels; i++) {
Nc2 /= 2;
Nr2 /= 2;
n_blocks = dim3(w_iDivUp(Nc2, tpb), w_iDivUp(Nr2, tpb), 1);
w_kern_forward<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr2*2, Nc2*2, hlen);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, Nr2*Nc2*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
return 0;
}
int w_inverse(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
int Nr0 = Nr, Nc0 = Nc;
Nr /= w_ipow2(levels);
Nc /= w_ipow2(levels);
int tpb = 16; // TODO : tune for max perfs.
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks;
for (int i = levels-1; i >= 1; i--) {
n_blocks = dim3(w_iDivUp(Nc*2, tpb), w_iDivUp(Nr*2, tpb), 1);
w_kern_inverse<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen);
Nr *= 2;
Nc *= 2;
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, (Nr0/2)*(Nc0/2)*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
//~ CUDACHECK;
// First level
n_blocks = dim3(w_iDivUp(Nc*2, tpb), w_iDivUp(Nr*2, tpb), 1);
w_kern_inverse<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen);
return 0;
}
/// ----------------------------------------------------------------------------
/// ------------------------- Undecimated DWT --------------------------------
/// ----------------------------------------------------------------------------
// must be run with grid size = (Nc, Nr) where Nr = numrows of input image
__global__ void w_kern_forward_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc) {
int factor = 1 << (level - 1);
int c, hL, hR;
if (hlen & 1) { // odd kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even kernel size : center is shifted to the left
c = hlen/2 - 1;
hL = c;
hR = c+1;
}
c *= factor;
int jx1 = c - gidx;
int jx2 = Nc - 1 - gidx + c;
int jy1 = c - gidy;
int jy2 = Nr - 1 - gidy + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
DTYPE img_val;
// Convolution with periodic boundaries extension.
// The filters are 2-upsampled at each level : [h0, h1, h2, h3] --> [h0, 0, h1, 0, h2, 0, h3, 0]
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy - c + factor*jy;
if (factor*jy < jy1) idx_y += Nr;
if (factor*jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx + jx*factor - c;
if (factor*jx < jx1) idx_x += Nc;
if (factor*jx > jx2) idx_x -= Nc;
img_val = img[idx_y*Nc + idx_x];
res_a += img_val * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_h += img_val * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_v += img_val * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)];
res_d += img_val * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)];
}
}
c_a[gidy* Nc + gidx] = res_a;
c_h[gidy* Nc + gidx] = res_h;
c_v[gidy* Nc + gidx] = res_v;
c_d[gidy* Nc + gidx] = res_d;
}
}
// must be run with grid size = (Nc, Nr) ; Nr = numrows of input (undecimated transform: all subbands keep the full image size)
__global__ void w_kern_inverse_swt(DTYPE* img, DTYPE* c_a, DTYPE* c_h, DTYPE* c_v, DTYPE* c_d, int Nr, int Nc, int hlen, int level) {
int gidx = threadIdx.x + blockIdx.x*blockDim.x;
int gidy = threadIdx.y + blockIdx.y*blockDim.y;
if (gidy < Nr && gidx < Nc) {
int factor = 1 << (level - 1);
int c, hL, hR;
if (hlen & 1) { // odd half-kernel size
c = hlen/2;
hL = c;
hR = c;
}
else { // even half-kernel size : center is shifted to the RIGHT for reconstruction.
c = hlen/2 - 0;
hL = c;
hR = c-1;
}
c *= factor;
int jy1 = c - gidy;
int jy2 = Nr - 1 - gidy + c;
int jx1 = c - gidx;
int jx2 = Nc - 1 - gidx + c;
DTYPE res_a = 0, res_h = 0, res_v = 0, res_d = 0;
for (int jy = 0; jy <= hR+hL; jy++) {
int idx_y = gidy - c + jy*factor;
if (factor*jy < jy1) idx_y += Nr;
if (factor*jy > jy2) idx_y -= Nr;
for (int jx = 0; jx <= hR+hL; jx++) {
int idx_x = gidx - c + jx*factor;
if (factor*jx < jx1) idx_x += Nc;
if (factor*jx > jx2) idx_x -= Nc;
res_a += c_a[idx_y*Nc + idx_x] * c_kern_LL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_h += c_h[idx_y*Nc + idx_x] * c_kern_LH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_v += c_v[idx_y*Nc + idx_x] * c_kern_HL[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
res_d += c_d[idx_y*Nc + idx_x] * c_kern_HH[(hlen-1-jy)*hlen + (hlen-1 - jx)]/4;
}
}
img[gidy * Nc + gidx] = res_a + res_h + res_v + res_d;
}
}
int w_forward_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
// First level
int tpb = 16; // TODO : tune for max perfs.
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
w_kern_forward_swt<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1);
for (int i=1; i < levels; i++) {
w_kern_forward_swt<<<n_blocks, n_threads_per_block>>>(d_tmp1, d_tmp2, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
return 0;
}
int w_inverse_swt(DTYPE* d_image, DTYPE** d_coeffs, DTYPE* d_tmp, w_info winfos) {
int Nr = winfos.Nr, Nc = winfos.Nc, levels = winfos.nlevels, hlen = winfos.hlen;
DTYPE* d_tmp1, *d_tmp2;
d_tmp1 = d_coeffs[0];
d_tmp2 = d_tmp;
int tpb = 16; // TODO : tune for max perfs.
dim3 n_threads_per_block = dim3(tpb, tpb, 1);
dim3 n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
for (int i = levels-1; i >= 1; i--) {
w_kern_inverse_swt<<<n_blocks, n_threads_per_block>>>(d_tmp2, d_tmp1, d_coeffs[3*i+1], d_coeffs[3*i+2], d_coeffs[3*i+3], Nr, Nc, hlen, i+1);
w_swap_ptr(&d_tmp1, &d_tmp2);
}
if ((levels & 1) == 0) cudaMemcpy(d_coeffs[0], d_tmp, Nr*Nc*sizeof(DTYPE), cudaMemcpyDeviceToDevice);
// First scale
n_blocks = dim3(w_iDivUp(Nc, tpb), w_iDivUp(Nr, tpb), 1);
w_kern_inverse_swt<<<n_blocks, n_threads_per_block>>>(d_image, d_coeffs[0], d_coeffs[1], d_coeffs[2], d_coeffs[3], Nr, Nc, hlen, 1);
return 0;
}
|
30d253a7728ffb2383c4f37218b994ba5873f895.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _RADIXSORT_KERNEL_H_
#define _RADIXSORT_KERNEL_H_
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/generate.h>
#include <thrust/detail/type_traits.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <algorithm>
#include <time.h>
#include <limits.h>
#include "solver_implement.h"
#define TILE_DIM 16
const float visc = 0.009f;
float *f0_data, *f1_data, *f2_data, *f3_data, *f4_data, *f5_data, *f6_data, *f7_data, *f8_data;
float *density_data, *injection_data, *velocity_data;
texture<float, hipTextureType2D> f1_tex, f2_tex, f3_tex, f4_tex,
f5_tex, f6_tex, f7_tex, f8_tex;
hipArray *f1_array, *f2_array, *f3_array, *f4_array,
*f5_array, *f6_array, *f7_array, *f8_array;
hipArray *density_array;
texture<float, hipTextureType2D> densityTex;
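// f0..f8 are the nine particle distributions of a D2Q9 lattice-Boltzmann model: f0 is the rest
// population, f1..f4 the axis-aligned directions, f5..f8 the diagonals (weights 4/9, 1/9 and 1/36,
// see reset_kernel / collide_kernel). One call to advanceSolver performs, in order: inject_energy
// (user stirring), boundary_condition_kernel (bounce-back at solid cells), collide_kernel (BGK
// relaxation), stream_kernel (pull streaming through the f*_tex textures) and advect_density
// (semi-Lagrangian transport of the displayed smoke field).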
__global__ void
show_scalar(float *id, unsigned char*mask, unsigned char *od, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(mask[i] == 0) {
od[i*3] = od[i*3+1] = od[i*3+2] = 0;
}
else {
float luminance = id[i] * 255;
if(luminance > 255.f)
luminance = 255.f;
od[i*3] = od[i*3+1] = od[i*3+2] = (unsigned char)luminance;
}
}
__global__ void
show_velocity(float *id, unsigned char*mask, unsigned char *od, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(mask[i] == 0) {
od[i*3] = od[i*3+1] = od[i*3+2] = 0;
}
else {
float r = id[i*2] * 128 + 127;
if(r > 255.f)
r = 255.f;
float g = id[i*2 + 1] * 128 + 127;
if(g > 255.f)
g = 255.f;
od[i*3] = r;
od[i*3+1] = g;
od[i*3+2] = 127;
}
}
__global__ void
advect_density(float *result, float *u, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
float tx = (float)x + 0.5 - u[i * 2];
float ty = (float)y + 0.5 - u[i * 2 + 1];
result[i] = tex2D(densityTex, tx, ty);
if(result[i] < 0.f) result[i] = 0.f;
if(result[i] > 1.5f) result[i] = 1.25f;
}
__global__ void
inject_energy(float *d,
float *f1_data, float *f2_data, float *f3_data, float *f4_data,
float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *stir, unsigned char * obstacle, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(obstacle[i] > 0) {
float ux = stir[i * 2];
float uy = stir[i * 2 + 1];
float e = ux * ux + uy * uy;
d[i] += e;
if(d[i] > 1.15f) d[i] = 1.15f;
f1_data[i] += e * uy/9.f;
f2_data[i] += e * ux/9.f;
f3_data[i] += e * (-uy)/9.f;
f4_data[i] += e * (-ux)/9.f;
f5_data[i] += e * (ux + uy)/36.f;
f6_data[i] += e * (ux - uy)/36.f;
f7_data[i] += e * (-ux - uy)/36.f;
f8_data[i] += e * (-ux + uy)/36.f;
}
stir[i * 2] *= 0.99f;
stir[i * 2 + 1] *= 0.99f;
}
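// Full bounce-back (no-slip) boundary: in solid cells (mask value 0) every distribution is swapped
// with the one pointing in the opposite direction (1<->3, 2<->4, 5<->7, 6<->8), so particles that
// streamed into a wall are reflected back where they came from.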
__global__ void
boundary_condition_kernel(float *f1_data, float *f2_data, float *f3_data, float *f4_data,
float *f5_data, float *f6_data, float *f7_data, float *f8_data,
unsigned char * obstacle, int w)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
float tmp;
if(obstacle[i] < 1) {
tmp = f2_data[i];
f2_data[i] = f4_data[i];
f4_data[i] = tmp;
tmp = f1_data[i];
f1_data[i] = f3_data[i];
f3_data[i] = tmp;
tmp = f8_data[i];
f8_data[i] = f6_data[i];
f6_data[i] = tmp;
tmp = f7_data[i];
f7_data[i] = f5_data[i];
f5_data[i] = tmp;
}
}
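// Pull-style streaming: each cell reads the post-collision distribution of its upstream neighbour
// through the f*_tex textures, which advanceSolver binds to copies of the current state; clamp
// addressing handles reads just outside the domain border.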
__global__ void stream_kernel (float *f1_data, float *f2_data,
float *f3_data, float *f4_data, float *f5_data,
float *f6_data, float *f7_data, float *f8_data,
int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
f1_data[i] = tex2D(f1_tex, (float) x , (float) (y-1));
f2_data[i] = tex2D(f2_tex, (float) (x-1) , (float) y);
f3_data[i] = tex2D(f3_tex, (float) x , (float) (y+1));
f4_data[i] = tex2D(f4_tex, (float) (x+1) , (float) y);
f5_data[i] = tex2D(f5_tex, (float) (x-1) , (float) (y-1));
f6_data[i] = tex2D(f6_tex, (float) (x-1) , (float) (y+1));
f7_data[i] = tex2D(f7_tex, (float) (x+1) , (float) (y+1));
f8_data[i] = tex2D(f8_tex, (float) (x+1) , (float) (y-1));
}
__global__ void reset_kernel (float *f0_data, float *f1_data, float *f2_data, float *f3_data,
float *f4_data, float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *density, float * injection, float * velocity, int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
float faceq1 = 4.f/9.f;
float faceq2 = 1.f/9.f;
float faceq3 = 1.f/36.f;
f0_data[i] = faceq1;
f1_data[i] = faceq2;
f2_data[i] = faceq2;
f3_data[i] = faceq2;
f4_data[i] = faceq2;
f5_data[i] = faceq3;
f6_data[i] = faceq3;
f7_data[i] = faceq3;
f8_data[i] = faceq3;
density[i] = 0.f;
injection[i*2] = injection[i*2+1] = 0.f;
velocity[i*2] = velocity[i*2+1] = 0.f;
}
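// BGK collision step. Density ro and velocity (v_x, v_y) are recovered from the distributions,
// a body force along -y proportional to the advected density is added (the 0.0005f term, a simple
// gravity/buoyancy-like coupling), speeds are clamped for stability, and every f_i is relaxed
// towards the standard D2Q9 equilibrium
//   feq_i = w_i * ro * (1 + 3*(e_i . u) + 4.5*(e_i . u)^2 - 1.5*|u|^2)
// (written out in expanded form below) at rate 1/tau.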
__global__ void collide_kernel (float *f0_data, float *f1_data, float *f2_data, float *f3_data,
float *f4_data, float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *velocity, float * density, int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
float ro, v_x, v_y;
float tau = (5.f*visc + 1.0f)/2.0f;
float f0now, f1now, f2now, f3now, f4now, f5now, f6now, f7now, f8now;
float f0eq, f1eq, f2eq, f3eq, f4eq, f5eq, f6eq, f7eq, f8eq;
// Read all f's and store in registers
f0now = f0_data[i];
f1now = f1_data[i];
f2now = f2_data[i];
f3now = f3_data[i];
f4now = f4_data[i];
f5now = f5_data[i];
f6now = f6_data[i];
f7now = f7_data[i];
f8now = f8_data[i];
float faceq1 = 4.f/9.f;
float faceq2 = 1.f/9.f;
float faceq3 = 1.f/36.f;
// Macroscopic flow props:
ro = f0now + f1now + f2now + f3now + f4now + f5now + f6now + f7now + f8now;
v_x = (f2now + f5now + f6now - f4now - f7now - f8now)/ro;
v_y = (f1now + f5now + f8now - f3now - f6now - f7now)/ro;
v_y -= density[i] * 0.0005f;
float speedcap = 0.22f;
if (v_x < -speedcap) v_x = -speedcap;
if (v_x > speedcap) v_x = speedcap;
if (v_y < -speedcap) v_y = -speedcap;
if (v_y > speedcap) v_y = speedcap;
velocity[i*2] = v_x;
velocity[i*2 +1] = v_y;
float uu = v_x * v_x;
float vv = v_y * v_y;
float uv = v_x * v_y;
// Calculate equilibrium f's
f0eq = ro * faceq1 * (1.0f - 1.5f * (uu + vv));
f1eq = ro * faceq2 * (1.0f + 3.0f * v_y + 3.f * vv - 1.5f * uu);
f2eq = ro * faceq2 * (1.0f + 3.0f * v_x + 3.f * uu - 1.5f * vv);
f3eq = ro * faceq2 * (1.0f - 3.0f * v_y + 3.f * vv - 1.5f * uu);
f4eq = ro * faceq2 * (1.0f - 3.0f * v_x + 3.f * uu - 1.5f * vv);
f5eq = ro * faceq3 * (1.0f + 3.0f * v_x + 3.f * v_y + 3.f * uu + 3.f * vv + 9.f * uv);
f6eq = ro * faceq3 * (1.0f + 3.0f * v_x - 3.f * v_y + 3.f * uu + 3.f * vv - 9.f * uv);
f7eq = ro * faceq3 * (1.0f - 3.0f * v_x - 3.f * v_y + 3.f * uu + 3.f * vv + 9.f * uv);
f8eq = ro * faceq3 * (1.0f - 3.0f * v_x + 3.f * v_y + 3.f * uu + 3.f * vv - 9.f * uv);
// Do collisions
f0_data[i] += (f0eq - f0_data[i]) / tau;
f1_data[i] += (f1eq - f1_data[i]) / tau;
f2_data[i] += (f2eq - f2_data[i]) / tau;
f3_data[i] += (f3eq - f3_data[i]) / tau;
f4_data[i] += (f4eq - f4_data[i]) / tau;
f5_data[i] += (f5eq - f5_data[i]) / tau;
f6_data[i] += (f6eq - f6_data[i]) / tau;
f7_data[i] += (f7eq - f7_data[i]) / tau;
f8_data[i] += (f8eq - f8_data[i]) / tau;
}
extern "C" void initializeSolverData(int width, int height)
{
const int size = width * height * sizeof(float);
cutilSafeCall( hipMalloc((void **)&density_data, size) );
cutilSafeCall( hipMalloc((void **)&injection_data, size * 2) );
cutilSafeCall( hipMalloc((void **)&velocity_data, size * 2) );
cutilSafeCall( hipMalloc((void **)&f0_data, size) );
cutilSafeCall( hipMalloc((void **)&f1_data, size) );
cutilSafeCall( hipMalloc((void **)&f2_data, size) );
cutilSafeCall( hipMalloc((void **)&f3_data, size) );
cutilSafeCall( hipMalloc((void **)&f4_data, size) );
cutilSafeCall( hipMalloc((void **)&f5_data, size) );
cutilSafeCall( hipMalloc((void **)&f6_data, size) );
cutilSafeCall( hipMalloc((void **)&f7_data, size) );
cutilSafeCall( hipMalloc((void **)&f8_data, size) );
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
cutilSafeCall( hipMallocArray( &density_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f1_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f2_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f3_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f4_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f5_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f6_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f7_array, &channelDesc, width, height ));
cutilSafeCall( hipMallocArray( &f8_array, &channelDesc, width, height ));
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( reset_kernel), dim3(grid), dim3(block), 0, 0, f0_data, f1_data, f2_data, f3_data,
f4_data, f5_data, f6_data, f7_data, f8_data,
density_data, injection_data, velocity_data, width);
}
extern "C"
void destroySolverData()
{
printf("reset");
cutilSafeCall( hipFree(density_data) );
cutilSafeCall( hipFree(injection_data) );
cutilSafeCall( hipFree(velocity_data) );
cutilSafeCall( hipFree(f0_data) );
cutilSafeCall( hipFree(f1_data) );
cutilSafeCall( hipFree(f2_data) );
cutilSafeCall( hipFree(f3_data) );
cutilSafeCall( hipFree(f4_data) );
cutilSafeCall( hipFree(f5_data) );
cutilSafeCall( hipFree(f6_data) );
cutilSafeCall( hipFree(f7_data) );
cutilSafeCall( hipFree(f8_data) );
cutilSafeCall(hipFreeArray(f1_array));
cutilSafeCall(hipFreeArray(f2_array));
cutilSafeCall(hipFreeArray(f3_array));
cutilSafeCall(hipFreeArray(f4_array));
cutilSafeCall(hipFreeArray(f5_array));
cutilSafeCall(hipFreeArray(f6_array));
cutilSafeCall(hipFreeArray(f7_array));
cutilSafeCall(hipFreeArray(f8_array));
// density_array is allocated in initializeSolverData as well; free it explicitly for symmetry
// (hipDeviceReset below would also reclaim it).
cutilSafeCall(hipFreeArray(density_array));
hipDeviceReset();
}
extern "C"
void getDisplayField(int width, int height, unsigned char * obstable, unsigned char *outImage)
{
const int obstacleLength = width * height * sizeof(unsigned char);
unsigned char *d_obstacle;
cutilSafeCall( hipMalloc((void **)&d_obstacle, obstacleLength) );
cutilSafeCall( hipMemcpy(d_obstacle, obstable, obstacleLength, hipMemcpyHostToDevice) );
const int size = width * height * sizeof(unsigned char) * 3;
unsigned char *d_Out;
cutilSafeCall( hipMalloc((void **)&d_Out, size) );
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( show_scalar), dim3(grid), dim3(block), 0, 0, density_data, d_obstacle, d_Out, width, height);
cutilSafeCall( hipMemcpy( outImage, d_Out, size, hipMemcpyDeviceToHost));
cutilSafeCall( hipFree(d_Out));
cutilSafeCall( hipFree(d_obstacle));
}
extern "C"
void advanceSolver(int width, int height, float *impulse, unsigned char * obstable)
{
const int obstacleLength = width * height * sizeof(unsigned char);
unsigned char *d_obstacle;
cutilSafeCall( hipMalloc((void **)&d_obstacle, obstacleLength) );
cutilSafeCall( hipMemcpy(d_obstacle, obstable, obstacleLength, hipMemcpyHostToDevice) );
const int size = width * height * sizeof(float);
cutilSafeCall( hipMemcpy(injection_data, impulse, size * 2, hipMemcpyHostToDevice) );
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
hipLaunchKernelGGL(( inject_energy), dim3(grid), dim3(block), 0, 0, density_data,
f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
injection_data, d_obstacle, width, height);
cutilSafeCall( hipMemcpy(impulse, injection_data, size * 2, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( boundary_condition_kernel), dim3(grid), dim3(block), 0, 0, f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
d_obstacle, width);
hipLaunchKernelGGL(( collide_kernel), dim3(grid), dim3(block), 0, 0, f0_data, f1_data, f2_data, f3_data,
f4_data, f5_data, f6_data, f7_data, f8_data,
velocity_data, density_data,
width);
cutilSafeCall( hipMemcpyToArray( f1_array, 0, 0, f1_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f2_array, 0, 0, f2_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f3_array, 0, 0, f3_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f4_array, 0, 0, f4_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f5_array, 0, 0, f5_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f6_array, 0, 0, f6_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f7_array, 0, 0, f7_data, size, hipMemcpyDeviceToDevice));
cutilSafeCall( hipMemcpyToArray( f8_array, 0, 0, f8_data, size, hipMemcpyDeviceToDevice));
f1_tex.filterMode = hipFilterModePoint;
f1_tex.addressMode[0] = hipAddressModeClamp;
f1_tex.addressMode[1] = hipAddressModeClamp;
f1_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f1_tex, f1_array));
f2_tex.filterMode = hipFilterModePoint;
f2_tex.addressMode[0] = hipAddressModeClamp;
f2_tex.addressMode[1] = hipAddressModeClamp;
f2_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f2_tex, f2_array));
f3_tex.filterMode = hipFilterModePoint;
f3_tex.addressMode[0] = hipAddressModeClamp;
f3_tex.addressMode[1] = hipAddressModeClamp;
f3_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f3_tex, f3_array));
f4_tex.filterMode = hipFilterModePoint;
f4_tex.addressMode[0] = hipAddressModeClamp;
f4_tex.addressMode[1] = hipAddressModeClamp;
f4_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f4_tex, f4_array));
f5_tex.filterMode = hipFilterModePoint;
f5_tex.addressMode[0] = hipAddressModeClamp;
f5_tex.addressMode[1] = hipAddressModeClamp;
f5_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f5_tex, f5_array));
f6_tex.filterMode = hipFilterModePoint;
f6_tex.addressMode[0] = hipAddressModeClamp;
f6_tex.addressMode[1] = hipAddressModeClamp;
f6_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f6_tex, f6_array));
f7_tex.filterMode = hipFilterModePoint;
f7_tex.addressMode[0] = hipAddressModeClamp;
f7_tex.addressMode[1] = hipAddressModeClamp;
f7_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f7_tex, f7_array));
f8_tex.filterMode = hipFilterModePoint;
f8_tex.addressMode[0] = hipAddressModeClamp;
f8_tex.addressMode[1] = hipAddressModeClamp;
f8_tex.normalized = false;
CUDA_SAFE_CALL(hipBindTextureToArray(f8_tex, f8_array));
hipLaunchKernelGGL(( stream_kernel), dim3(grid), dim3(block), 0, 0, f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
width);
CUDA_SAFE_CALL(hipUnbindTexture(f1_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f2_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f3_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f4_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f5_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f6_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f7_tex));
CUDA_SAFE_CALL(hipUnbindTexture(f8_tex));
cutilSafeCall( hipMemcpyToArray( density_array, 0, 0, density_data, size, hipMemcpyDeviceToDevice));
densityTex.addressMode[0] = hipAddressModeClamp;
densityTex.addressMode[1] = hipAddressModeClamp;
densityTex.filterMode = hipFilterModeLinear;
densityTex.normalized = false;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
cutilSafeCall( hipBindTextureToArray(densityTex, density_array, channelDesc));
hipLaunchKernelGGL(( advect_density), dim3(grid), dim3(block), 0, 0, density_data, velocity_data, width, height);
cutilSafeCall(hipUnbindTexture(densityTex));
cutilSafeCall( hipFree(d_obstacle));
}
#endif | 30d253a7728ffb2383c4f37218b994ba5873f895.cu | #ifndef _RADIXSORT_KERNEL_H_
#define _RADIXSORT_KERNEL_H_
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <thrust/generate.h>
#include <thrust/detail/type_traits.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <algorithm>
#include <time.h>
#include <limits.h>
#include "solver_implement.h"
#define TILE_DIM 16
const float visc = 0.009f;
float *f0_data, *f1_data, *f2_data, *f3_data, *f4_data, *f5_data, *f6_data, *f7_data, *f8_data;
float *density_data, *injection_data, *velocity_data;
texture<float, cudaTextureType2D> f1_tex, f2_tex, f3_tex, f4_tex,
f5_tex, f6_tex, f7_tex, f8_tex;
cudaArray *f1_array, *f2_array, *f3_array, *f4_array,
*f5_array, *f6_array, *f7_array, *f8_array;
cudaArray *density_array;
texture<float, cudaTextureType2D> densityTex;
__global__ void
show_scalar(float *id, unsigned char*mask, unsigned char *od, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(mask[i] == 0) {
od[i*3] = od[i*3+1] = od[i*3+2] = 0;
}
else {
float luminance = id[i] * 255;
if(luminance > 255.f)
luminance = 255.f;
od[i*3] = od[i*3+1] = od[i*3+2] = (unsigned char)luminance;
}
}
__global__ void
show_velocity(float *id, unsigned char*mask, unsigned char *od, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(mask[i] == 0) {
od[i*3] = od[i*3+1] = od[i*3+2] = 0;
}
else {
float r = id[i*2] * 128 + 127;
if(r > 255.f)
r = 255.f;
float g = id[i*2 + 1] * 128 + 127;
if(g > 255.f)
g = 255.f;
od[i*3] = r;
od[i*3+1] = g;
od[i*3+2] = 127;
}
}
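// Semi-Lagrangian advection of the smoke density: each cell traces back along its own velocity,
// (tx, ty) = (x + 0.5 - u, y + 0.5 - v), and samples the previous density field through the
// bilinear texture densityTex; the result is clamped (negative values to 0, values above 1.5
// are reset to 1.25).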
__global__ void
advect_density(float *result, float *u, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
float tx = (float)x + 0.5 - u[i * 2];
float ty = (float)y + 0.5 - u[i * 2 + 1];
result[i] = tex2D(densityTex, tx, ty);
if(result[i] < 0.f) result[i] = 0.f;
if(result[i] > 1.5f) result[i] = 1.25f;
}
__global__ void
inject_energy(float *d,
float *f1_data, float *f2_data, float *f3_data, float *f4_data,
float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *stir, unsigned char * obstacle, int w, int h)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
if(obstacle[i] > 0) {
float ux = stir[i * 2];
float uy = stir[i * 2 + 1];
float e = ux * ux + uy * uy;
d[i] += e;
if(d[i] > 1.15f) d[i] = 1.15f;
f1_data[i] += e * uy/9.f;
f2_data[i] += e * ux/9.f;
f3_data[i] += e * (-uy)/9.f;
f4_data[i] += e * (-ux)/9.f;
f5_data[i] += e * (ux + uy)/36.f;
f6_data[i] += e * (ux - uy)/36.f;
f7_data[i] += e * (-ux - uy)/36.f;
f8_data[i] += e * (-ux + uy)/36.f;
}
stir[i * 2] *= 0.99f;
stir[i * 2 + 1] *= 0.99f;
}
__global__ void
boundary_condition_kernel(float *f1_data, float *f2_data, float *f3_data, float *f4_data,
float *f5_data, float *f6_data, float *f7_data, float *f8_data,
unsigned char * obstacle, int w)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * w + x;
float tmp;
if(obstacle[i] < 1) {
tmp = f2_data[i];
f2_data[i] = f4_data[i];
f4_data[i] = tmp;
tmp = f1_data[i];
f1_data[i] = f3_data[i];
f3_data[i] = tmp;
tmp = f8_data[i];
f8_data[i] = f6_data[i];
f6_data[i] = tmp;
tmp = f7_data[i];
f7_data[i] = f5_data[i];
f5_data[i] = tmp;
}
}
__global__ void stream_kernel (float *f1_data, float *f2_data,
float *f3_data, float *f4_data, float *f5_data,
float *f6_data, float *f7_data, float *f8_data,
int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
f1_data[i] = tex2D(f1_tex, (float) x , (float) (y-1));
f2_data[i] = tex2D(f2_tex, (float) (x-1) , (float) y);
f3_data[i] = tex2D(f3_tex, (float) x , (float) (y+1));
f4_data[i] = tex2D(f4_tex, (float) (x+1) , (float) y);
f5_data[i] = tex2D(f5_tex, (float) (x-1) , (float) (y-1));
f6_data[i] = tex2D(f6_tex, (float) (x-1) , (float) (y+1));
f7_data[i] = tex2D(f7_tex, (float) (x+1) , (float) (y+1));
f8_data[i] = tex2D(f8_tex, (float) (x+1) , (float) (y-1));
}
__global__ void reset_kernel (float *f0_data, float *f1_data, float *f2_data, float *f3_data,
float *f4_data, float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *density, float * injection, float * velocity, int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
float faceq1 = 4.f/9.f;
float faceq2 = 1.f/9.f;
float faceq3 = 1.f/36.f;
f0_data[i] = faceq1;
f1_data[i] = faceq2;
f2_data[i] = faceq2;
f3_data[i] = faceq2;
f4_data[i] = faceq2;
f5_data[i] = faceq3;
f6_data[i] = faceq3;
f7_data[i] = faceq3;
f8_data[i] = faceq3;
density[i] = 0.f;
injection[i*2] = injection[i*2+1] = 0.f;
velocity[i*2] = velocity[i*2+1] = 0.f;
}
__global__ void collide_kernel (float *f0_data, float *f1_data, float *f2_data, float *f3_data,
float *f4_data, float *f5_data, float *f6_data, float *f7_data, float *f8_data,
float *velocity, float * density, int width)
{
uint x = blockIdx.x*TILE_DIM + threadIdx.x;
uint y = blockIdx.y*TILE_DIM + threadIdx.y;
uint i = y * width + x;
float ro, v_x, v_y;
float tau = (5.f*visc + 1.0f)/2.0f;
float f0now, f1now, f2now, f3now, f4now, f5now, f6now, f7now, f8now;
float f0eq, f1eq, f2eq, f3eq, f4eq, f5eq, f6eq, f7eq, f8eq;
// Read all f's and store in registers
f0now = f0_data[i];
f1now = f1_data[i];
f2now = f2_data[i];
f3now = f3_data[i];
f4now = f4_data[i];
f5now = f5_data[i];
f6now = f6_data[i];
f7now = f7_data[i];
f8now = f8_data[i];
float faceq1 = 4.f/9.f;
float faceq2 = 1.f/9.f;
float faceq3 = 1.f/36.f;
// Macroscopic flow props:
ro = f0now + f1now + f2now + f3now + f4now + f5now + f6now + f7now + f8now;
v_x = (f2now + f5now + f6now - f4now - f7now - f8now)/ro;
v_y = (f1now + f5now + f8now - f3now - f6now - f7now)/ro;
v_y -= density[i] * 0.0005f;
float speedcap = 0.22f;
if (v_x < -speedcap) v_x = -speedcap;
if (v_x > speedcap) v_x = speedcap;
if (v_y < -speedcap) v_y = -speedcap;
if (v_y > speedcap) v_y = speedcap;
velocity[i*2] = v_x;
velocity[i*2 +1] = v_y;
float uu = v_x * v_x;
float vv = v_y * v_y;
float uv = v_x * v_y;
// Calculate equilibrium f's
f0eq = ro * faceq1 * (1.0f - 1.5f * (uu + vv));
f1eq = ro * faceq2 * (1.0f + 3.0f * v_y + 3.f * vv - 1.5f * uu);
f2eq = ro * faceq2 * (1.0f + 3.0f * v_x + 3.f * uu - 1.5f * vv);
f3eq = ro * faceq2 * (1.0f - 3.0f * v_y + 3.f * vv - 1.5f * uu);
f4eq = ro * faceq2 * (1.0f - 3.0f * v_x + 3.f * uu - 1.5f * vv);
f5eq = ro * faceq3 * (1.0f + 3.0f * v_x + 3.f * v_y + 3.f * uu + 3.f * vv + 9.f * uv);
f6eq = ro * faceq3 * (1.0f + 3.0f * v_x - 3.f * v_y + 3.f * uu + 3.f * vv - 9.f * uv);
f7eq = ro * faceq3 * (1.0f - 3.0f * v_x - 3.f * v_y + 3.f * uu + 3.f * vv + 9.f * uv);
f8eq = ro * faceq3 * (1.0f - 3.0f * v_x + 3.f * v_y + 3.f * uu + 3.f * vv - 9.f * uv);
// Do collisions
f0_data[i] += (f0eq - f0_data[i]) / tau;
f1_data[i] += (f1eq - f1_data[i]) / tau;
f2_data[i] += (f2eq - f2_data[i]) / tau;
f3_data[i] += (f3eq - f3_data[i]) / tau;
f4_data[i] += (f4eq - f4_data[i]) / tau;
f5_data[i] += (f5eq - f5_data[i]) / tau;
f6_data[i] += (f6eq - f6_data[i]) / tau;
f7_data[i] += (f7eq - f7_data[i]) / tau;
f8_data[i] += (f8eq - f8_data[i]) / tau;
}
extern "C" void initializeSolverData(int width, int height)
{
const int size = width * height * sizeof(float);
cutilSafeCall( cudaMalloc((void **)&density_data, size) );
cutilSafeCall( cudaMalloc((void **)&injection_data, size * 2) );
cutilSafeCall( cudaMalloc((void **)&velocity_data, size * 2) );
cutilSafeCall( cudaMalloc((void **)&f0_data, size) );
cutilSafeCall( cudaMalloc((void **)&f1_data, size) );
cutilSafeCall( cudaMalloc((void **)&f2_data, size) );
cutilSafeCall( cudaMalloc((void **)&f3_data, size) );
cutilSafeCall( cudaMalloc((void **)&f4_data, size) );
cutilSafeCall( cudaMalloc((void **)&f5_data, size) );
cutilSafeCall( cudaMalloc((void **)&f6_data, size) );
cutilSafeCall( cudaMalloc((void **)&f7_data, size) );
cutilSafeCall( cudaMalloc((void **)&f8_data, size) );
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cutilSafeCall( cudaMallocArray( &density_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f1_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f2_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f3_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f4_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f5_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f6_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f7_array, &channelDesc, width, height ));
cutilSafeCall( cudaMallocArray( &f8_array, &channelDesc, width, height ));
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
reset_kernel<<<grid, block>>>( f0_data, f1_data, f2_data, f3_data,
f4_data, f5_data, f6_data, f7_data, f8_data,
density_data, injection_data, velocity_data, width);
}
extern "C"
void destroySolverData()
{
printf("reset");
cutilSafeCall( cudaFree(density_data) );
cutilSafeCall( cudaFree(injection_data) );
cutilSafeCall( cudaFree(velocity_data) );
cutilSafeCall( cudaFree(f0_data) );
cutilSafeCall( cudaFree(f1_data) );
cutilSafeCall( cudaFree(f2_data) );
cutilSafeCall( cudaFree(f3_data) );
cutilSafeCall( cudaFree(f4_data) );
cutilSafeCall( cudaFree(f5_data) );
cutilSafeCall( cudaFree(f6_data) );
cutilSafeCall( cudaFree(f7_data) );
cutilSafeCall( cudaFree(f8_data) );
cutilSafeCall(cudaFreeArray(f1_array));
cutilSafeCall(cudaFreeArray(f2_array));
cutilSafeCall(cudaFreeArray(f3_array));
cutilSafeCall(cudaFreeArray(f4_array));
cutilSafeCall(cudaFreeArray(f5_array));
cutilSafeCall(cudaFreeArray(f6_array));
cutilSafeCall(cudaFreeArray(f7_array));
cutilSafeCall(cudaFreeArray(f8_array));
// density_array is allocated in initializeSolverData as well; free it explicitly for symmetry
// (cudaDeviceReset below would also reclaim it).
cutilSafeCall(cudaFreeArray(density_array));
cudaDeviceReset();
}
extern "C"
void getDisplayField(int width, int height, unsigned char * obstable, unsigned char *outImage)
{
const int obstacleLength = width * height * sizeof(unsigned char);
unsigned char *d_obstacle;
cutilSafeCall( cudaMalloc((void **)&d_obstacle, obstacleLength) );
cutilSafeCall( cudaMemcpy(d_obstacle, obstable, obstacleLength, cudaMemcpyHostToDevice) );
const int size = width * height * sizeof(unsigned char) * 3;
unsigned char *d_Out;
cutilSafeCall( cudaMalloc((void **)&d_Out, size) );
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
show_scalar<<< grid, block>>>(density_data, d_obstacle, d_Out, width, height);
cutilSafeCall( cudaMemcpy( outImage, d_Out, size, cudaMemcpyDeviceToHost));
cutilSafeCall( cudaFree(d_Out));
cutilSafeCall( cudaFree(d_obstacle));
}
extern "C"
void advanceSolver(int width, int height, float *impulse, unsigned char * obstable)
{
const int obstacleLength = width * height * sizeof(unsigned char);
unsigned char *d_obstacle;
cutilSafeCall( cudaMalloc((void **)&d_obstacle, obstacleLength) );
cutilSafeCall( cudaMemcpy(d_obstacle, obstable, obstacleLength, cudaMemcpyHostToDevice) );
const int size = width * height * sizeof(float);
cutilSafeCall( cudaMemcpy(injection_data, impulse, size * 2, cudaMemcpyHostToDevice) );
dim3 grid = dim3(width/TILE_DIM, height/TILE_DIM);
dim3 block = dim3(TILE_DIM, TILE_DIM);
inject_energy<<<grid, block>>>(density_data,
f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
injection_data, d_obstacle, width, height);
cutilSafeCall( cudaMemcpy(impulse, injection_data, size * 2, cudaMemcpyDeviceToHost));
boundary_condition_kernel<<<grid, block>>>(f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
d_obstacle, width);
collide_kernel<<<grid, block>>>( f0_data, f1_data, f2_data, f3_data,
f4_data, f5_data, f6_data, f7_data, f8_data,
velocity_data, density_data,
width);
cutilSafeCall( cudaMemcpyToArray( f1_array, 0, 0, f1_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f2_array, 0, 0, f2_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f3_array, 0, 0, f3_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f4_array, 0, 0, f4_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f5_array, 0, 0, f5_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f6_array, 0, 0, f6_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f7_array, 0, 0, f7_data, size, cudaMemcpyDeviceToDevice));
cutilSafeCall( cudaMemcpyToArray( f8_array, 0, 0, f8_data, size, cudaMemcpyDeviceToDevice));
f1_tex.filterMode = cudaFilterModePoint;
f1_tex.addressMode[0] = cudaAddressModeClamp;
f1_tex.addressMode[1] = cudaAddressModeClamp;
f1_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f1_tex, f1_array));
f2_tex.filterMode = cudaFilterModePoint;
f2_tex.addressMode[0] = cudaAddressModeClamp;
f2_tex.addressMode[1] = cudaAddressModeClamp;
f2_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f2_tex, f2_array));
f3_tex.filterMode = cudaFilterModePoint;
f3_tex.addressMode[0] = cudaAddressModeClamp;
f3_tex.addressMode[1] = cudaAddressModeClamp;
f3_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f3_tex, f3_array));
f4_tex.filterMode = cudaFilterModePoint;
f4_tex.addressMode[0] = cudaAddressModeClamp;
f4_tex.addressMode[1] = cudaAddressModeClamp;
f4_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f4_tex, f4_array));
f5_tex.filterMode = cudaFilterModePoint;
f5_tex.addressMode[0] = cudaAddressModeClamp;
f5_tex.addressMode[1] = cudaAddressModeClamp;
f5_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f5_tex, f5_array));
f6_tex.filterMode = cudaFilterModePoint;
f6_tex.addressMode[0] = cudaAddressModeClamp;
f6_tex.addressMode[1] = cudaAddressModeClamp;
f6_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f6_tex, f6_array));
f7_tex.filterMode = cudaFilterModePoint;
f7_tex.addressMode[0] = cudaAddressModeClamp;
f7_tex.addressMode[1] = cudaAddressModeClamp;
f7_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f7_tex, f7_array));
f8_tex.filterMode = cudaFilterModePoint;
f8_tex.addressMode[0] = cudaAddressModeClamp;
f8_tex.addressMode[1] = cudaAddressModeClamp;
f8_tex.normalized = false;
CUDA_SAFE_CALL(cudaBindTextureToArray(f8_tex, f8_array));
stream_kernel<<<grid, block>>>(f1_data, f2_data, f3_data, f4_data,
f5_data, f6_data, f7_data, f8_data,
width);
CUDA_SAFE_CALL(cudaUnbindTexture(f1_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f2_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f3_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f4_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f5_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f6_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f7_tex));
CUDA_SAFE_CALL(cudaUnbindTexture(f8_tex));
cutilSafeCall( cudaMemcpyToArray( density_array, 0, 0, density_data, size, cudaMemcpyDeviceToDevice));
densityTex.addressMode[0] = cudaAddressModeClamp;
densityTex.addressMode[1] = cudaAddressModeClamp;
densityTex.filterMode = cudaFilterModeLinear;
densityTex.normalized = false;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cutilSafeCall( cudaBindTextureToArray(densityTex, density_array, channelDesc));
advect_density<<<grid, block>>>(density_data, velocity_data, width, height);
cutilSafeCall(cudaUnbindTexture(densityTex));
cutilSafeCall( cudaFree(d_obstacle));
}
#endif |
f6a2efd24f6e39ab0b4462045c7c17b8f1a750c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "adt_calc.h"
// CUDA kernel function
__global__ void op_cuda_adt_calc(
double *ind_arg0, int *ind_arg0_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
double *arg4,
double *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelem; n+=blockDim.x) {
// user-supplied kernel call
adt_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg0_s+arg2_maps[n+offset_b]*2,
ind_arg0_s+arg3_maps[n+offset_b]*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1 );
}
}
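// The host stub below uses an OP2 execution plan: op_plan_get partitions the set into mini-blocks
// and colours them so that blocks of the same colour can run concurrently without conflicting
// indirect accesses; the kernel is launched once per colour. Inside each block the indirectly
// accessed dataset (node coordinates, 2 doubles per node) is first staged into dynamic shared
// memory (ind_arg0_s) and then addressed through the block-local argN_maps.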
// host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(double *)arg0.data_d, Plan->ind_maps[0],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
(double *)arg4.data_d,
(double *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_adt_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
| f6a2efd24f6e39ab0b4462045c7c17b8f1a750c2.cu | //
// auto-generated by op2.m on 25-Oct-2011 14:51:27
//
// user function
__device__
#include "adt_calc.h"
// CUDA kernel function
__global__ void op_cuda_adt_calc(
double *ind_arg0, int *ind_arg0_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
double *arg4,
double *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ double *ind_arg0_s;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*1];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*1];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (double *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelem; n+=blockDim.x) {
// user-supplied kernel call
adt_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg0_s+arg2_maps[n+offset_b]*2,
ind_arg0_s+arg3_maps[n+offset_b]*2,
arg4+(n+offset_b)*4,
arg5+(n+offset_b)*1 );
}
}
// host stub function
void op_par_loop_adt_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 1;
int inds[6] = {0,0,0,0,-1,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: adt_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_1
int part_size = OP_PART_SIZE_1;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_1
int nthread = OP_BLOCK_SIZE_1;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
op_cuda_adt_calc<<<nblocks,nthread,nshared>>>(
(double *)arg0.data_d, Plan->ind_maps[0],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
(double *)arg4.data_d,
(double *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_adt_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(1);
OP_kernels[1].name = name;
OP_kernels[1].count += 1;
OP_kernels[1].time += wall_t2 - wall_t1;
OP_kernels[1].transfer += Plan->transfer;
OP_kernels[1].transfer2 += Plan->transfer2;
}
|
282b1e7d80f21787d0e0c8faacf1e83456019251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void GatherOps_forward_kernel(double *out, const double *v, const long long *ii, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p < n){
out[p] = v[ii[p]-1];
}
}
void Gpu_GatherOps_forward(double *out, const double *v, const long long *ii, int n){
hipLaunchKernelGGL(( GatherOps_forward_kernel), dim3((n-1)/64 + 1), dim3(64) , 0, 0, out, v, ii, n);
}
__global__ void GatherOps_backward_kernel(double *grad_v,
const double *grad_out,
const double *out, const double *v, const long long *ii, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p < n){
grad_v[ii[p]-1] = grad_out[p];
}
}
extern __global__ void setzero_kernel(double *out, int n);
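// backward: grad_v is zeroed over all N entries first, then grad_out is scattered into the
// gathered slots; plain stores are used, so repeated indices in ii overwrite rather than accumulate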
void Gpu_GatherOps_backward(
double *grad_v,
const double *grad_out,
const double *out, const double *v, const long long *ii, int n, int N
){
hipLaunchKernelGGL(( setzero_kernel), dim3((N - 1)/64 + 1), dim3(64) , 0, 0, grad_v, N);
hipLaunchKernelGGL(( GatherOps_backward_kernel), dim3((n-1)/64 + 1), dim3(64) , 0, 0, grad_v, grad_out, out, v, ii, n);
}
| 282b1e7d80f21787d0e0c8faacf1e83456019251.cu | #include "cuda.h"
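// forward gather: out[p] = v[ii[p]] where ii holds 1-based indices (hence the -1 below),
// e.g. ii = {3,1} with v = {10,20,30} gives out = {30,10}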
__global__ void GatherOps_forward_kernel(double *out, const double *v, const long long *ii, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p < n){
out[p] = v[ii[p]-1];
}
}
void Gpu_GatherOps_forward(double *out, const double *v, const long long *ii, int n){
GatherOps_forward_kernel<<< (n-1)/64 + 1, 64 >>>(out, v, ii, n);
}
__global__ void GatherOps_backward_kernel(double *grad_v,
const double *grad_out,
const double *out, const double *v, const long long *ii, int n){
int p = blockIdx.x *blockDim.x + threadIdx.x;
if (p < n){
grad_v[ii[p]-1] = grad_out[p];
}
}
extern __global__ void setzero_kernel(double *out, int n);
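// backward: grad_v is zeroed over all N entries first, then grad_out is scattered into the
// gathered slots; plain stores are used, so repeated indices in ii overwrite rather than accumulate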
void Gpu_GatherOps_backward(
double *grad_v,
const double *grad_out,
const double *out, const double *v, const long long *ii, int n, int N
){
setzero_kernel<<< (N - 1)/64 + 1, 64 >>>(grad_v, N);
GatherOps_backward_kernel<<< (n-1)/64 + 1, 64 >>>(grad_v, grad_out, out, v, ii, n);
}
|
79b3ed8fd922bc9c2da73a0a67c2ab6a0dd3f2b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=256 --blockDim=256
__global__ void rngSetupStates(
hiprandState_t *rngState,
unsigned long long seed,
unsigned long long offset)
{
// determine global thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread gets the same seed, a different
// sequence number. A different offset is used for
// each device.
hiprand_init(seed, tid, offset, &rngState[tid]);
}
| 79b3ed8fd922bc9c2da73a0a67c2ab6a0dd3f2b5.cu | //pass
//--gridDim=256 --blockDim=256
__global__ void rngSetupStates(
curandState *rngState,
unsigned long long seed,
unsigned long long offset)
{
// determine global thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread gets the same seed, a different
// sequence number. A different offset is used for
// each device.
curand_init(seed, tid, offset, &rngState[tid]);
}
|
ef276dfdef14262b8c42b551d85cf3ec3eb7a7cc.hip | // !!! This is a file automatically generated by hipify!!!
#define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
// #include <hip/hip_runtime_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/sssp.hpp"
#include "graphblas/algorithm/common.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main(int argc, char** argv) {
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int source;
int seed;
char* dat_name;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs(argc, argv, &vm);
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
source = vm["source" ].as<int>();
seed = vm["seed" ].as<int>();
/*!
* This is an imperfect solution, because this should happen in
* desc.loadArgs(vm) instead of application code!
* TODO(@ctcyang): fix this
*/
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, directed, mtxinfo, &dat_name);
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK(desc.loadArgs(vm));
if (transpose)
CHECK(desc.toggle(graphblas::GrB_INP1));
// Matrix A
graphblas::Matrix<float> a(nrows, ncols);
CHECK(a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
dat_name));
CHECK(a.nrows(&nrows));
CHECK(a.ncols(&ncols));
CHECK(a.nvals(&nvals));
if (debug) CHECK(a.print());
// For SSSP, do uniform random distance between 1 and 64
// Set seed
setEnv("GRB_SEED", seed);
setEnv("GRB_UNIFORM_START", 1);
setEnv("GRB_UNIFORM_END", 64);
desc.set(GrB_BACKEND, GrB_SEQUENTIAL);
graphblas::apply<float, float, float>(&a, GrB_NULL, GrB_NULL, set_uniform_random<float>(), &a, &desc);
desc.set(GrB_BACKEND, GrB_CUDA);
if (debug) CHECK(a.print());
// Vector v
graphblas::Vector<float> v(nrows);
  // Cpu SSSP
CpuTimer sssp_cpu;
float* h_sssp_cpu = reinterpret_cast<float*>(malloc(nrows*sizeof(float)));
int depth = 10000;
sssp_cpu.Start();
int d = graphblas::algorithm::ssspCpu(source, &a, h_sssp_cpu, depth,
transpose);
sssp_cpu.Stop();
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::algorithm::sssp(&v, &a, source, &desc);
warmup.Stop();
std::vector<float> h_sssp_gpu;
CHECK(v.extractTuples(&h_sssp_gpu, &nrows));
VERIFY_LIST_FLOAT(h_sssp_cpu, h_sssp_gpu, nrows);
// Benchmark
graphblas::Vector<float> y(nrows);
CpuTimer vxm_gpu;
// hipProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
float val;
for (int i = 0; i < niter; i++) {
val = graphblas::algorithm::sssp(&y, &a, source, &desc);
tight += val;
}
// hipProfilerStop();
vxm_gpu.Stop();
float flop = 0;
std::cout << "cpu, " << sssp_cpu.ElapsedMillis() << ", \n";
std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = vxm_gpu.ElapsedMillis();
std::cout << "tight, " << tight/niter << "\n";
std::cout << "vxm, " << elapsed_vxm/niter << "\n";
if (niter) {
std::vector<float> h_sssp_gpu2;
CHECK(y.extractTuples(&h_sssp_gpu2, &nrows));
VERIFY_LIST_FLOAT(h_sssp_cpu, h_sssp_gpu2, nrows);
}
return 0;
}
| ef276dfdef14262b8c42b551d85cf3ec3eb7a7cc.cu | #define GRB_USE_CUDA
#define private public
#include <iostream>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
// #include <cuda_profiler_api.h>
#include <boost/program_options.hpp>
#include "graphblas/graphblas.hpp"
#include "graphblas/algorithm/sssp.hpp"
#include "graphblas/algorithm/common.hpp"
#include "test/test.hpp"
bool debug_;
bool memory_;
int main(int argc, char** argv) {
std::vector<graphblas::Index> row_indices;
std::vector<graphblas::Index> col_indices;
std::vector<float> values;
graphblas::Index nrows, ncols, nvals;
// Parse arguments
bool debug;
bool transpose;
bool mtxinfo;
int directed;
int niter;
int source;
int seed;
char* dat_name;
po::variables_map vm;
// Read in sparse matrix
if (argc < 2) {
fprintf(stderr, "Usage: %s [matrix-market-filename]\n", argv[0]);
exit(1);
} else {
parseArgs(argc, argv, &vm);
debug = vm["debug" ].as<bool>();
transpose = vm["transpose"].as<bool>();
mtxinfo = vm["mtxinfo" ].as<bool>();
directed = vm["directed" ].as<int>();
niter = vm["niter" ].as<int>();
source = vm["source" ].as<int>();
seed = vm["seed" ].as<int>();
/*!
* This is an imperfect solution, because this should happen in
* desc.loadArgs(vm) instead of application code!
* TODO(@ctcyang): fix this
*/
readMtx(argv[argc-1], &row_indices, &col_indices, &values, &nrows, &ncols,
&nvals, directed, mtxinfo, &dat_name);
}
// Descriptor desc
graphblas::Descriptor desc;
CHECK(desc.loadArgs(vm));
if (transpose)
CHECK(desc.toggle(graphblas::GrB_INP1));
// Matrix A
graphblas::Matrix<float> a(nrows, ncols);
CHECK(a.build(&row_indices, &col_indices, &values, nvals, GrB_NULL,
dat_name));
CHECK(a.nrows(&nrows));
CHECK(a.ncols(&ncols));
CHECK(a.nvals(&nvals));
if (debug) CHECK(a.print());
// For SSSP, do uniform random distance between 1 and 64
// Set seed
setEnv("GRB_SEED", seed);
setEnv("GRB_UNIFORM_START", 1);
setEnv("GRB_UNIFORM_END", 64);
desc.set(GrB_BACKEND, GrB_SEQUENTIAL);
graphblas::apply<float, float, float>(&a, GrB_NULL, GrB_NULL, set_uniform_random<float>(), &a, &desc);
desc.set(GrB_BACKEND, GrB_CUDA);
if (debug) CHECK(a.print());
// Vector v
graphblas::Vector<float> v(nrows);
  // Cpu SSSP
CpuTimer sssp_cpu;
float* h_sssp_cpu = reinterpret_cast<float*>(malloc(nrows*sizeof(float)));
int depth = 10000;
sssp_cpu.Start();
int d = graphblas::algorithm::ssspCpu(source, &a, h_sssp_cpu, depth,
transpose);
sssp_cpu.Stop();
// Warmup
CpuTimer warmup;
warmup.Start();
graphblas::algorithm::sssp(&v, &a, source, &desc);
warmup.Stop();
std::vector<float> h_sssp_gpu;
CHECK(v.extractTuples(&h_sssp_gpu, &nrows));
VERIFY_LIST_FLOAT(h_sssp_cpu, h_sssp_gpu, nrows);
// Benchmark
graphblas::Vector<float> y(nrows);
CpuTimer vxm_gpu;
// cudaProfilerStart();
vxm_gpu.Start();
float tight = 0.f;
float val;
for (int i = 0; i < niter; i++) {
val = graphblas::algorithm::sssp(&y, &a, source, &desc);
tight += val;
}
// cudaProfilerStop();
vxm_gpu.Stop();
float flop = 0;
std::cout << "cpu, " << sssp_cpu.ElapsedMillis() << ", \n";
std::cout << "warmup, " << warmup.ElapsedMillis() << ", " <<
flop/warmup.ElapsedMillis()/1000000.0 << "\n";
float elapsed_vxm = vxm_gpu.ElapsedMillis();
std::cout << "tight, " << tight/niter << "\n";
std::cout << "vxm, " << elapsed_vxm/niter << "\n";
if (niter) {
std::vector<float> h_sssp_gpu2;
CHECK(y.extractTuples(&h_sssp_gpu2, &nrows));
VERIFY_LIST_FLOAT(h_sssp_cpu, h_sssp_gpu2, nrows);
}
return 0;
}
|
c51e6b47834e10fec474831f6190c8966c906779.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_plus_4_front [3][2];
static int dims_update_halo_kernel3_plus_4_front_h [3][2] = {0};
//user function
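// copies the plane four cells inward (offset -4 in the third index) onto this boundary plane,
// for whichever flux fields are flagged in `fields`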
__device__
inline void update_halo_kernel3_plus_4_front_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,0,-4);
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,0,-4);
}
__global__ void ops_update_halo_kernel3_plus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_front[0][0] * dims_update_halo_kernel3_plus_4_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_front[1][0] * dims_update_halo_kernel3_plus_4_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_plus_4_front[0][0], dims_update_halo_kernel3_plus_4_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_plus_4_front[1][0], dims_update_halo_kernel3_plus_4_front[1][1], arg1);
update_halo_kernel3_plus_4_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_4_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,69)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_4_front");
OPS_kernels[69].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
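  // the __constant__ dims array is only refreshed when a dat's extents change, so the
  // copy-to-symbol is not paid on every call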
if (xdim0 != dims_update_halo_kernel3_plus_4_front_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_4_front_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_4_front_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_4_front_h[1][1]) {
dims_update_halo_kernel3_plus_4_front_h[0][0] = xdim0;
dims_update_halo_kernel3_plus_4_front_h[0][1] = ydim0;
dims_update_halo_kernel3_plus_4_front_h[1][0] = xdim1;
dims_update_halo_kernel3_plus_4_front_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel3_plus_4_front, dims_update_halo_kernel3_plus_4_front_h, sizeof(dims_update_halo_kernel3_plus_4_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[69].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 69;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 69;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_plus_4_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_4_front");
}
ops_enqueue_kernel(desc);
}
#endif
| c51e6b47834e10fec474831f6190c8966c906779.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel3_plus_4_front [3][2];
static int dims_update_halo_kernel3_plus_4_front_h [3][2] = {0};
//user function
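// copies the plane four cells inward (offset -4 in the third index) onto this boundary plane,
// for whichever flux fields are flagged in `fields`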
__device__
inline void update_halo_kernel3_plus_4_front_gpu(ACC<double> &vol_flux_x,
ACC<double> &mass_flux_x,
const int* fields) {
if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,0,-4);
if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,0,-4);
}
__global__ void ops_update_halo_kernel3_plus_4_front(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_front[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_front[0][0] * dims_update_halo_kernel3_plus_4_front[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_front[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_front[1][0] * dims_update_halo_kernel3_plus_4_front[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel3_plus_4_front[0][0], dims_update_halo_kernel3_plus_4_front[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel3_plus_4_front[1][0], dims_update_halo_kernel3_plus_4_front[1][1], arg1);
update_halo_kernel3_plus_4_front_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel3_plus_4_front_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,69)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_4_front");
OPS_kernels[69].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
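  // the __constant__ dims array is only refreshed when a dat's extents change, so the
  // copy-to-symbol is not paid on every call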
if (xdim0 != dims_update_halo_kernel3_plus_4_front_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_4_front_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_4_front_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_4_front_h[1][1]) {
dims_update_halo_kernel3_plus_4_front_h[0][0] = xdim0;
dims_update_halo_kernel3_plus_4_front_h[0][1] = ydim0;
dims_update_halo_kernel3_plus_4_front_h[1][0] = xdim1;
dims_update_halo_kernel3_plus_4_front_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel3_plus_4_front, dims_update_halo_kernel3_plus_4_front_h, sizeof(dims_update_halo_kernel3_plus_4_front)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel3_plus_4_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[69].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[69].mpi_time += t2-t1;
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[69].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 69;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 69;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel3_plus_4_front_execute;
if (OPS_diags > 1) {
ops_timing_realloc(69,"update_halo_kernel3_plus_4_front");
}
ops_enqueue_kernel(desc);
}
#endif
|
df9f0e7adbb80bda018b28b16735acb2064aed44.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <stdio.h>
#define MAXNUMRESOURCES 100
# define SAFE_CALL(call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
sprintf(errMsg, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString(err) ); \
return 1; \
} }
//Global vars
struct cudaGraphicsResource *cuda_pbo[MAXNUMRESOURCES];
int next_resource_idx=0;
int INVERTED=0;
//also works if we skip the call to initCuda ???
extern "C" int initCuda(char *errMsg)
{
int device;
SAFE_CALL(hipGetDevice(&device));
SAFE_CALL(hipGLSetGLDevice(device));
//hipSetDeviceFlags(hipDeviceMapHost);
return 0;
}
//also works if we skip the call to initCuda ???
extern "C" int makeCurrent(char *errMsg)
{
int device;
SAFE_CALL(hipGetDevice(&device));
SAFE_CALL(hipGLSetGLDevice(device));
//hipSetDeviceFlags(hipDeviceMapHost);
return 0;
}
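// expected usage (a sketch of the typical GL-interop flow, not enforced here):
// regbuf() once per PBO to register it, then glmap()/glunmap() bracketing each batch of device writes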
extern "C" int regbuf(GLuint pbo, int *resource_idx, char *errMsg){
if (next_resource_idx >= MAXNUMRESOURCES){
sprintf(errMsg, "Maximum number of resources exceeded");
return 1;
}
*resource_idx=next_resource_idx;
next_resource_idx+=1;
SAFE_CALL(hipGraphicsGLRegisterBuffer(&cuda_pbo[*resource_idx], pbo, hipGraphicsMapFlagsNone));
return 0;
}
extern "C" int glmap(GLuint pbo, int resource_idx, void **d_ptr, char *errMsg)
{
size_t num_bytes;
SAFE_CALL(hipGraphicsMapResources(1, &cuda_pbo[resource_idx], 0));
SAFE_CALL(hipGraphicsResourceGetMappedPointer(d_ptr, &num_bytes, cuda_pbo[resource_idx]));
return 0;
}
extern "C" int glunmap(GLuint pbo, int resource_idx, char *errMsg)
{
SAFE_CALL(hipGraphicsUnmapResources(1, &cuda_pbo[resource_idx], 0));
return 0;
}
| df9f0e7adbb80bda018b28b16735acb2064aed44.cu | #include <cuda.h>
#include <cuda_gl_interop.h>
#include <stdio.h>
#define MAXNUMRESOURCES 100
# define SAFE_CALL(call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
sprintf(errMsg, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err) ); \
return 1; \
} }
//Global vars
struct cudaGraphicsResource *cuda_pbo[MAXNUMRESOURCES];
int next_resource_idx=0;
int INVERTED=0;
//also works if we skip the call to initCuda ???
extern "C" int initCuda(char *errMsg)
{
int device;
SAFE_CALL(cudaGetDevice(&device));
SAFE_CALL(cudaGLSetGLDevice(device));
//cudaSetDeviceFlags(cudaDeviceMapHost);
return 0;
}
//also works if we skip the call to initCuda ???
extern "C" int makeCurrent(char *errMsg)
{
int device;
SAFE_CALL(cudaGetDevice(&device));
SAFE_CALL(cudaGLSetGLDevice(device));
//cudaSetDeviceFlags(cudaDeviceMapHost);
return 0;
}
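// expected usage (a sketch of the typical GL-interop flow, not enforced here):
// regbuf() once per PBO to register it, then glmap()/glunmap() bracketing each batch of device writes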
extern "C" int regbuf(GLuint pbo, int *resource_idx, char *errMsg){
if (next_resource_idx >= MAXNUMRESOURCES){
sprintf(errMsg, "Maximum number of resources exceeded");
return 1;
}
*resource_idx=next_resource_idx;
next_resource_idx+=1;
SAFE_CALL(cudaGraphicsGLRegisterBuffer(&cuda_pbo[*resource_idx], pbo, cudaGraphicsMapFlagsNone));
return 0;
}
extern "C" int glmap(GLuint pbo, int resource_idx, void **d_ptr, char *errMsg)
{
size_t num_bytes;
SAFE_CALL(cudaGraphicsMapResources(1, &cuda_pbo[resource_idx], 0));
SAFE_CALL(cudaGraphicsResourceGetMappedPointer(d_ptr, &num_bytes, cuda_pbo[resource_idx]));
return 0;
}
extern "C" int glunmap(GLuint pbo, int resource_idx, char *errMsg)
{
SAFE_CALL(cudaGraphicsUnmapResources(1, &cuda_pbo[resource_idx], 0));
return 0;
}
|
57d1aa5307739ee18d3bef55bba6bea9158e347e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kWriteRowsMult(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset, float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
} | 57d1aa5307739ee18d3bef55bba6bea9158e347e.cu | #include "includes.h"
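// writes one batch of module rows into the full target buffer as target = alpha*target + beta*data,
// with blockIdx.x indexing the source module and blockIdx.y the channel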
__global__ void kWriteRowsMult(float* data, float* target, int num_images, int num_modules, int num_modules_batch, int module_id_offset, float alpha, float beta) {
int c = blockIdx.y;
int src_module_id = blockIdx.x;
int dst_module_id = module_id_offset + blockIdx.x;
data += num_images * (src_module_id + c * num_modules_batch);
target += num_images * (dst_module_id + c * num_modules);
for (int im = threadIdx.x; im < num_images; im += blockDim.x) {
target[im] = alpha * target[im] + beta * data[im];
}
} |
00ffd4f6650164afe9681ad35132a59ff30728af.hip | // !!! This is a file automatically generated by hipify!!!
#include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <color_spinor_field_order.h>
//#define QUAD_SUM
#ifdef QUAD_SUM
#include <dbldbl.h>
#endif
#include <cub_helper.cuh>
template<typename> struct ScalarType { };
template<> struct ScalarType<double> { typedef double type; };
template<> struct ScalarType<double2> { typedef double type; };
template<> struct ScalarType<double3> { typedef double type; };
template<typename> struct Vec2Type { };
template<> struct Vec2Type<double> { typedef double2 type; };
#ifdef QUAD_SUM
#define QudaSumFloat doubledouble
#define QudaSumFloat2 doubledouble2
#define QudaSumFloat3 doubledouble3
template<> struct ScalarType<doubledouble> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble2> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble3> { typedef doubledouble type; };
template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; };
#else
#define QudaSumFloat double
#define QudaSumFloat2 double2
#define QudaSumFloat3 double3
#endif
#define REDUCE_MAX_BLOCKS 65536
void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b) {
if (a.Precision() != b.Precision())
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision());
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
void checkLength(const ColorSpinorField &a, ColorSpinorField &b) { \
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
static struct {
const char *vol_str;
const char *aux_str;
char aux_tmp[quda::TuneKey::aux_n];
} blasStrings;
// These are used for reduction kernels
static QudaSumFloat *d_reduce=0;
static QudaSumFloat *h_reduce=0;
static QudaSumFloat *hd_reduce=0;
static hipEvent_t reduceEnd;
namespace quda {
namespace blas {
hipStream_t* getStream();
void* getDeviceReduceBuffer() { return d_reduce; }
void* getMappedHostReduceBuffer() { return hd_reduce; }
void* getHostReduceBuffer() { return h_reduce; }
void initReduce()
{
const int MaxReduce = 12;
// reduction buffer size
size_t bytes = MaxReduce*3*REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat); // Factor of N for composite reductions
if (!d_reduce) d_reduce = (QudaSumFloat *) device_malloc(bytes);
// these arrays are actually oversized currently (they only need to be QudaSumFloat3)
// if the device supports host-mapped memory then use a host-mapped array for the reduction
if (!h_reduce) {
// only use zero copy reductions when using 64-bit
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
if(deviceProp.canMapHostMemory) {
h_reduce = (QudaSumFloat *) mapped_malloc(bytes);
hipHostGetDevicePointer(&hd_reduce, h_reduce, 0); // set the matching device pointer
} else
#endif
{
h_reduce = (QudaSumFloat *) pinned_malloc(bytes);
hd_reduce = d_reduce;
}
memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is uninitialised
}
hipEventCreateWithFlags(&reduceEnd, hipEventDisableTiming);
checkCudaError();
}
void endReduce(void)
{
if (d_reduce) {
device_free(d_reduce);
d_reduce = 0;
}
if (h_reduce) {
host_free(h_reduce);
h_reduce = 0;
}
hd_reduce = 0;
hipEventDestroy(reduceEnd);
}
namespace reduce {
#include <texture.h>
#include <reduce_core.cuh>
#include <reduce_core.h>
#include <reduce_mixed_core.h>
#include <multi_reduce_core.h>
} // namespace reduce
/**
Base class from which all reduction functors should derive.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct ReduceFunctor {
//! pre-computation routine called before the "M-loop"
virtual __device__ __host__ void pre() { ; }
//! where the reduction is usually computed and any auxiliary operations
virtual __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
FloatN &z, FloatN &w, FloatN &v) = 0;
//! post-computation routine called after the "M-loop"
virtual __device__ __host__ void post(ReduceType &sum) { ; }
};
/**
Return the L1 norm of x
*/
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const double2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float4 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y) + (ReduceType)fabs(a.z) + (ReduceType)fabs(a.w);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm1 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm1(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ sum += norm1_<ReduceType>(x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm1(const ColorSpinorField &x) {
#ifdef HOST_DEBUG
ColorSpinorField &y = const_cast<ColorSpinorField&>(x); // FIXME
return reduce::reduceCuda<double,QudaSumFloat,Norm1,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
#else
errorQuda("L1 norm kernel only built when HOST_DEBUG is enabled");
return 0.0;
#endif
}
/**
Return the L2 norm of x
*/
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const double2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float4 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
sum += (ReduceType)a.z*(ReduceType)a.z;
sum += (ReduceType)a.w*(ReduceType)a.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ norm2_<ReduceType>(sum,x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm2(const ColorSpinorField &x) {
ColorSpinorField &y = const_cast<ColorSpinorField&>(x);
return reduce::reduceCuda<double,QudaSumFloat,Norm2,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
}
/**
Return the real dot product of x and y
*/
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
sum += (ReduceType)a.z*(ReduceType)b.z;
sum += (ReduceType)a.w*(ReduceType)b.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Dot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Dot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ dot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double reDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,Dot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
void reDotProduct(double* result, std::vector<cudaColorSpinorField*>& x, std::vector<cudaColorSpinorField*>& y){
#ifndef SSTEP
errorQuda("S-step code not built\n");
#else
switch(x.size()){
case 1:
reduce::multiReduceCuda<1,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 2:
reduce::multiReduceCuda<2,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 3:
reduce::multiReduceCuda<3,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 4:
reduce::multiReduceCuda<4,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 5:
reduce::multiReduceCuda<5,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 6:
reduce::multiReduceCuda<6,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 7:
reduce::multiReduceCuda<7,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 8:
reduce::multiReduceCuda<8,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 9:
reduce::multiReduceCuda<9,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 10:
reduce::multiReduceCuda<10,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 11:
reduce::multiReduceCuda<11,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 12:
reduce::multiReduceCuda<12,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 13:
reduce::multiReduceCuda<13,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 14:
reduce::multiReduceCuda<14,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 15:
reduce::multiReduceCuda<15,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 16:
reduce::multiReduceCuda<16,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
default:
errorQuda("Unsupported vector size");
break;
}
#endif // SSTEP
}
/**
* Returns the real component of the dot product of a and b and
* the norm of a
*/
template<typename ReduceType, typename InputType>
__device__ __host__ ReduceType dotNormA_(const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
ReduceType c;
dot_<scalar>(c.x,a,b);
norm2_<scalar>(c.y,a);
return c;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct DotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
DotNormA(const Float2 &a, const Float2 &b){}
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{sum += dotNormA_<ReduceType,FloatN>(x,y);}
static int streams() { return 2; }
static int flops() { return 4; }
};
double2 reDotProductNormA(ColorSpinorField &x,ColorSpinorField &y){
return reduce::reduceCuda<double2,QudaSumFloat2,DotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,axpyNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return real dot product (x,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct AxpyReDot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
AxpyReDot(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; dot_<ReduceType>(sum,x,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyReDot(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,AxpyReDot,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] = x[i] - y[i]
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xmyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
xmyNorm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y = x - y; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
double xmyNorm(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,xmyNorm2,0,1,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ __host__ void Caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
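    // float4 variant: (x,y) and (z,w) each hold one complex number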
__device__ __host__ void Caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
/**
First performs the operation y[i] = a*x[i] + y[i] (complex-valued)
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
Caxpy_(a, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double caxpyNorm(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyNorm2,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
double caxpyXmayNormCuda(float a, float *x, float *y, n){}
First performs the operation y[i] = a*x[i] + y[i]
Second performs the operator x[i] -= a*z[i]
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyxmaznormx : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyxmaznormx(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); Caxpy_(-a,z,x); norm2_<ReduceType>(sum,x); }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double caxpyXmazNormX(const Complex &a, ColorSpinorField &x,
ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyxmaznormx,1,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
}
/**
double cabxpyAxNorm(float a, complex b, float *x, float *y, n){}
First performs the operation y[i] += a*b*x[i]
Second performs x[i] *= a
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct cabxpyaxnorm : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
cabxpyaxnorm(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ x *= a.x; Caxpy_(b, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double cabxpyAxNorm(const double &a, const Complex &b,
ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,cabxpyaxnorm,1,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(REAL(b), IMAG(b)), x, y, x, x, x);
}
/**
Returns complex-valued dot product of x and y
*/
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.x += (scalar)a.z*(scalar)b.z;
sum.x += (scalar)a.w*(scalar)b.w;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
sum.y += (scalar)a.z*(scalar)b.w;
sum.y -= (scalar)a.w*(scalar)b.z;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Cdot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Cdot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
Complex cDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cdot.x, cdot.y);
}
void cDotProduct(Complex* result, std::vector<cudaColorSpinorField*>& x, std::vector<cudaColorSpinorField*>& y){
double2* cdot = new double2[x.size()];
switch(x.size()){
case 1:
reduce::multiReduceCuda<1,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 2:
reduce::multiReduceCuda<2,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 3:
reduce::multiReduceCuda<3,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 4:
reduce::multiReduceCuda<4,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 5:
reduce::multiReduceCuda<5,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
default:
errorQuda("Unsupported vector size\n");
break;
}
for (unsigned int i=0; i<x.size(); ++i) result[i] = Complex(cdot[i].x,cdot[i].y);
delete[] cdot;
}
/**
double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
First performs the operation y = x + a*y
Second returns cdot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpaycdotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
xpaycdotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ y = x + a.x*y; cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
Complex xpaycDotzy(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,xpaycdotzy,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){}
First performs the operation y[i] = a*x[i] + y[i]
Second returns the dot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpydotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpydotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
Complex caxpyDotzy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,caxpydotzy,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
First returns the dot product (x,y)
Returns the norm of x
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormA_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,a);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormA(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormA_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormA(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First returns the dot product (x,y)
Returns the norm of y
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormB_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,b);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormB : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormB(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormB_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormB(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormB,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
This convoluted kernel does the following:
z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpbypzYmbwcDotProductUYNormY_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
caxpbypzYmbwcDotProductUYNormY_(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) { Caxpy_(a, x, z); Caxpy_(b, y, z); Caxpy_(-b, w, y); cdotNormB_<ReduceType>(sum,v,y); }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 18; } //! flops per element
};
double3 caxpbypzYmbwcDotProductUYNormY(const Complex &a, ColorSpinorField &x,
const Complex &b, ColorSpinorField &y,
ColorSpinorField &z, ColorSpinorField &w,
ColorSpinorField &u) {
if (x.Precision() != z.Precision()) {
return reduce::mixed::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
} else {
return reduce::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
}
}
/**
Specialized kernel for the modified CG norm computation for
computing beta. Computes y = y + a*x and returns norm(y) and
dot(y, delta(y)) where delta(y) is the difference between the
input and output y vectors.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyCGNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyCGNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
FloatN y_new = y + a.x*x;
norm2_<scalar>(sum.x,y_new);
dot_<scalar>(sum.y,y_new,y_new-y);
y = y_new;
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per real element
};
Complex axpyCGNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
double2 cg_norm = reduce::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cg_norm.x, cg_norm.y);
}
/**
This kernel returns (x, x) and (r,r) and also returns the so-called
heavy quark norm as used by MILC: 1 / N * \sum_i (r, r)_i / (x, x)_i, where
i is site index and N is the number of sites.
When this kernel is launched, we must enforce that the parameter M
in the launcher corresponds to the number of FloatN fields used to
represent the spinor, e.g., M=6 for Wilson and M=3 for staggered.
This is only the case for half-precision kernels by default. To
enable this, the siteUnroll template parameter must be set true
when reduceCuda is instantiated.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct HeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
HeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x); norm2_<real>(aux.y,y);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! undercounts since it excludes the per-site division
};
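  // In the launcher below, sum.x and sum.y accumulate ||x||^2 and ||r||^2, while sum.z
  // accumulates the per-site ratio (r,r)_i / (x,x)_i added in post(); dividing sum.z by
  // Volume()*comm_size() then gives the MILC-style heavy-quark residual norm.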
double3 HeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,HeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, r, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
Variant of the HeavyQuarkResidualNorm kernel: this takes three
arguments, the first two are summed together to form the
solution, with the third being the residual vector. This removes
the need for an additional xpy call in the solvers, improving
performance.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpyHeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
xpyHeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x + y); norm2_<real>(aux.y,z);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 5; }
};
double3 xpyHeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &y,
ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,xpyHeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
double3 tripleCGReduction(V x, V y, V z){}
First performs the operation norm2(x)
Second performs the operation norm2(y)
Third performs the operation dotProduct(y,z)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct tripleCGReduction_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
tripleCGReduction_(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
norm2_<scalar>(sum.x,x); norm2_<scalar>(sum.y,y); dot_<scalar>(sum.z,y,z);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
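  // The launcher below therefore returns x = ||x||^2, y = ||y||^2 and z = Re(y,z),
  // packed into a double3.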
double3 tripleCGReduction(ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double3,QudaSumFloat3,tripleCGReduction_,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
}
} // namespace blas
} // namespace quda
| 00ffd4f6650164afe9681ad35132a59ff30728af.cu | #include <blas_quda.h>
#include <tune_quda.h>
#include <float_vector.h>
#include <color_spinor_field_order.h>
//#define QUAD_SUM
#ifdef QUAD_SUM
#include <dbldbl.h>
#endif
#include <cub_helper.cuh>
template<typename> struct ScalarType { };
template<> struct ScalarType<double> { typedef double type; };
template<> struct ScalarType<double2> { typedef double type; };
template<> struct ScalarType<double3> { typedef double type; };
template<typename> struct Vec2Type { };
template<> struct Vec2Type<double> { typedef double2 type; };
#ifdef QUAD_SUM
#define QudaSumFloat doubledouble
#define QudaSumFloat2 doubledouble2
#define QudaSumFloat3 doubledouble3
template<> struct ScalarType<doubledouble> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble2> { typedef doubledouble type; };
template<> struct ScalarType<doubledouble3> { typedef doubledouble type; };
template<> struct Vec2Type<doubledouble> { typedef doubledouble2 type; };
#else
#define QudaSumFloat double
#define QudaSumFloat2 double2
#define QudaSumFloat3 double3
#endif
#define REDUCE_MAX_BLOCKS 65536
void checkSpinor(const ColorSpinorField &a, const ColorSpinorField &b) {
if (a.Precision() != b.Precision())
errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision());
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
void checkLength(const ColorSpinorField &a, ColorSpinorField &b) {
if (a.Length() != b.Length())
errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length());
if (a.Stride() != b.Stride())
errorQuda("strides do not match: %d %d", a.Stride(), b.Stride());
}
static struct {
const char *vol_str;
const char *aux_str;
char aux_tmp[quda::TuneKey::aux_n];
} blasStrings;
// These are used for reduction kernels
static QudaSumFloat *d_reduce=0;
static QudaSumFloat *h_reduce=0;
static QudaSumFloat *hd_reduce=0;
static cudaEvent_t reduceEnd;
namespace quda {
namespace blas {
cudaStream_t* getStream();
void* getDeviceReduceBuffer() { return d_reduce; }
void* getMappedHostReduceBuffer() { return hd_reduce; }
void* getHostReduceBuffer() { return h_reduce; }
void initReduce()
{
const int MaxReduce = 12;
// reduction buffer size
size_t bytes = MaxReduce*3*REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat); // Factor of N for composite reductions
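    // One partial result per block, with headroom for multi-reductions (factor
    // MaxReduce) and three-component results such as QudaSumFloat3 (factor 3).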
if (!d_reduce) d_reduce = (QudaSumFloat *) device_malloc(bytes);
// these arrays are actually oversized currently (only needs to be QudaSumFloat3)
// if the device supports host-mapped memory then use a host-mapped array for the reduction
if (!h_reduce) {
// only use zero copy reductions when using 64-bit
#if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__)
if(deviceProp.canMapHostMemory) {
h_reduce = (QudaSumFloat *) mapped_malloc(bytes);
cudaHostGetDevicePointer(&hd_reduce, h_reduce, 0); // set the matching device pointer
} else
#endif
{
h_reduce = (QudaSumFloat *) pinned_malloc(bytes);
hd_reduce = d_reduce;
}
memset(h_reduce, 0, bytes); // added to ensure that valgrind doesn't report h_reduce is uninitialised
}
cudaEventCreateWithFlags(&reduceEnd, cudaEventDisableTiming);
checkCudaError();
}
void endReduce(void)
{
if (d_reduce) {
device_free(d_reduce);
d_reduce = 0;
}
if (h_reduce) {
host_free(h_reduce);
h_reduce = 0;
}
hd_reduce = 0;
cudaEventDestroy(reduceEnd);
}
namespace reduce {
#include <texture.h>
#include <reduce_core.cuh>
#include <reduce_core.h>
#include <reduce_mixed_core.h>
#include <multi_reduce_core.h>
} // namespace reduce
/**
Base class from which all reduction functors should derive.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct ReduceFunctor {
//! pre-computation routine called before the "M-loop"
virtual __device__ __host__ void pre() { ; }
//! where the reduction is usually computed and any auxiliary operations
virtual __device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y,
FloatN &z, FloatN &w, FloatN &v) = 0;
//! post-computation routine called after the "M-loop"
virtual __device__ __host__ void post(ReduceType &sum) { ; }
};
/**
Return the L1 norm of x
*/
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const double2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float2 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y);
}
template<typename ReduceType> __device__ __host__ ReduceType norm1_(const float4 &a) {
return (ReduceType)fabs(a.x) + (ReduceType)fabs(a.y) + (ReduceType)fabs(a.z) + (ReduceType)fabs(a.w);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm1 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm1(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ sum += norm1_<ReduceType>(x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm1(const ColorSpinorField &x) {
#ifdef HOST_DEBUG
ColorSpinorField &y = const_cast<ColorSpinorField&>(x); // FIXME
return reduce::reduceCuda<double,QudaSumFloat,Norm1,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
#else
errorQuda("L1 norm kernel only built when HOST_DEBUG is enabled");
return 0.0;
#endif
}
/**
Return the L2 norm of x
*/
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const double2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float2 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
}
template<typename ReduceType> __device__ __host__ void norm2_(ReduceType &sum, const float4 &a) {
sum += (ReduceType)a.x*(ReduceType)a.x;
sum += (ReduceType)a.y*(ReduceType)a.y;
sum += (ReduceType)a.z*(ReduceType)a.z;
sum += (ReduceType)a.w*(ReduceType)a.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Norm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Norm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z,FloatN &w, FloatN &v)
{ norm2_<ReduceType>(sum,x); }
static int streams() { return 1; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double norm2(const ColorSpinorField &x) {
ColorSpinorField &y = const_cast<ColorSpinorField&>(x);
return reduce::reduceCuda<double,QudaSumFloat,Norm2,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), y, y, y, y, y);
}
/**
Return the real dot product of x and y
*/
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const double2 &a, const double2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float2 &a, const float2 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
}
template<typename ReduceType> __device__ __host__ void dot_(ReduceType &sum, const float4 &a, const float4 &b) {
sum += (ReduceType)a.x*(ReduceType)b.x;
sum += (ReduceType)a.y*(ReduceType)b.y;
sum += (ReduceType)a.z*(ReduceType)b.z;
sum += (ReduceType)a.w*(ReduceType)b.w;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Dot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Dot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ dot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 2; } //! flops per element
};
double reDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,Dot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
void reDotProduct(double* result, std::vector<cudaColorSpinorField*>& x, std::vector<cudaColorSpinorField*>& y){
#ifndef SSTEP
errorQuda("S-step code not built\n");
#else
switch(x.size()){
case 1:
reduce::multiReduceCuda<1,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 2:
reduce::multiReduceCuda<2,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 3:
reduce::multiReduceCuda<3,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 4:
reduce::multiReduceCuda<4,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 5:
reduce::multiReduceCuda<5,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 6:
reduce::multiReduceCuda<6,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 7:
reduce::multiReduceCuda<7,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 8:
reduce::multiReduceCuda<8,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 9:
reduce::multiReduceCuda<9,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 10:
reduce::multiReduceCuda<10,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 11:
reduce::multiReduceCuda<11,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 12:
reduce::multiReduceCuda<12,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 13:
reduce::multiReduceCuda<13,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 14:
reduce::multiReduceCuda<14,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 15:
reduce::multiReduceCuda<15,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 16:
reduce::multiReduceCuda<16,double,QudaSumFloat,Dot,0,0,0,0,0,false>
(result, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
default:
errorQuda("Unsupported vector size");
break;
}
#endif // SSTEP
}
/**
* Returns the real component of the dot product of a and b and
* the norm of a
*/
template<typename ReduceType, typename InputType>
__device__ __host__ ReduceType dotNormA_(const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
ReduceType c;
dot_<scalar>(c.x,a,b);
norm2_<scalar>(c.y,a);
return c;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct DotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
DotNormA(const Float2 &a, const Float2 &b){}
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{sum += dotNormA_<ReduceType,FloatN>(x,y);}
static int streams() { return 2; }
static int flops() { return 4; }
};
double2 reDotProductNormA(ColorSpinorField &x,ColorSpinorField &y){
return reduce::reduceCuda<double2,QudaSumFloat2,DotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,axpyNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] += a*x[i]
Return real dot product (x,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct AxpyReDot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
AxpyReDot(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y += a.x*x; dot_<ReduceType>(sum,x,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
double axpyReDot(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,AxpyReDot,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First performs the operation y[i] = x[i] - y[i]
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xmyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
xmyNorm2(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
y = x - y; norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 3; } //! flops per element
};
double xmyNorm(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,xmyNorm2,0,1,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
Functor to perform the operation y += a * x (complex-valued)
*/
__device__ __host__ void Caxpy_(const double2 &a, const double2 &x, double2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
__device__ __host__ void Caxpy_(const float2 &a, const float2 &x, float2 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
}
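  // The float4 overload below handles two complex numbers packed per register
  // ((x,y) and (z,w)), applying the same complex multiply-add to each pair.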
__device__ __host__ void Caxpy_(const float2 &a, const float4 &x, float4 &y) {
y.x += a.x*x.x; y.x -= a.y*x.y;
y.y += a.y*x.x; y.y += a.x*x.y;
y.z += a.x*x.z; y.z -= a.y*x.w;
y.w += a.y*x.z; y.w += a.x*x.w;
}
/**
First performs the operation y[i] = a*x[i] + y[i] (complex-valued)
Second returns the norm of y
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
Caxpy_(a, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double caxpyNorm(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyNorm2,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
double caxpyXmazNormX(Complex a, V x, V y, V z){}
First performs the operation y[i] = a*x[i] + y[i]
Second performs the operation x[i] -= a*z[i]
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpyxmaznormx : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpyxmaznormx(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); Caxpy_(-a,z,x); norm2_<ReduceType>(sum,x); }
static int streams() { return 5; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double caxpyXmazNormX(const Complex &a, ColorSpinorField &x,
ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double,QudaSumFloat,caxpyxmaznormx,1,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
}
/**
double cabxpyAxNorm(float a, complex b, float *x, float *y, n){}
First performs the operation y[i] += a*b*x[i]
Second performs x[i] *= a
Third returns the norm of x
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct cabxpyaxnorm : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
cabxpyaxnorm(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ x *= a.x; Caxpy_(b, x, y); norm2_<ReduceType>(sum,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 10; } //! flops per element
};
double cabxpyAxNorm(const double &a, const Complex &b,
ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double,QudaSumFloat,cabxpyaxnorm,1,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(REAL(b), IMAG(b)), x, y, x, x, x);
}
/**
Returns complex-valued dot product of x and y
*/
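  // Sign convention: these overloads accumulate conj(a)*b, i.e. the first argument is
  // conjugated; the float4 version treats (x,y) and (z,w) as two packed complex numbers.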
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const double2 &a, const double2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float2 &a, const float2 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
}
template<typename ReduceType>
__device__ __host__ void cdot_(ReduceType &sum, const float4 &a, const float4 &b) {
typedef typename ScalarType<ReduceType>::type scalar;
sum.x += (scalar)a.x*(scalar)b.x;
sum.x += (scalar)a.y*(scalar)b.y;
sum.x += (scalar)a.z*(scalar)b.z;
sum.x += (scalar)a.w*(scalar)b.w;
sum.y += (scalar)a.x*(scalar)b.y;
sum.y -= (scalar)a.y*(scalar)b.x;
sum.y += (scalar)a.z*(scalar)b.w;
sum.y -= (scalar)a.w*(scalar)b.z;
}
template <typename ReduceType, typename Float2, typename FloatN>
struct Cdot : public ReduceFunctor<ReduceType, Float2, FloatN> {
Cdot(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdot_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! flops per element
};
Complex cDotProduct(ColorSpinorField &x, ColorSpinorField &y) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cdot.x, cdot.y);
}
void cDotProduct(Complex* result, std::vector<cudaColorSpinorField*>& x, std::vector<cudaColorSpinorField*>& y){
double2* cdot = new double2[x.size()];
switch(x.size()){
case 1:
reduce::multiReduceCuda<1,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 2:
reduce::multiReduceCuda<2,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 3:
reduce::multiReduceCuda<3,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 4:
reduce::multiReduceCuda<4,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
case 5:
reduce::multiReduceCuda<5,double2,QudaSumFloat2,Cdot,0,0,0,0,0,false>
(cdot, make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
break;
default:
errorQuda("Unsupported vector size\n");
break;
}
for (unsigned int i=0; i<x.size(); ++i) result[i] = Complex(cdot[i].x,cdot[i].y);
delete[] cdot;
}
/**
double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {}
First performs the operation y = x + a*y
Second returns cdot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpaycdotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
xpaycdotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ y = x + a.x*y; cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
Complex xpaycDotzy(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,xpaycdotzy,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){}
First performs the operation y[i] = a*x[i] + y[i]
Second returns the dot product (z,y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpydotzy : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
caxpydotzy(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ Caxpy_(a, x, y); cdot_<ReduceType>(sum,z,y); }
static int streams() { return 4; } //! total number of input and output streams
static int flops() { return 8; } //! flops per element
};
Complex caxpyDotzy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
double2 cdot = reduce::reduceCuda<double2,QudaSumFloat2,caxpydotzy,0,1,0,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), x, y, z, x, x);
return Complex(cdot.x, cdot.y);
}
/**
First returns the dot product (x,y)
Returns the norm of x
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormA_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,a);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormA : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormA(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormA_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormA(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormA,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
First returns the dot product (x,y)
Returns the norm of y
*/
template<typename ReduceType, typename InputType>
__device__ __host__ void cdotNormB_(ReduceType &sum, const InputType &a, const InputType &b) {
typedef typename ScalarType<ReduceType>::type scalar;
typedef typename Vec2Type<scalar>::type vec2;
cdot_<ReduceType>(sum,a,b);
norm2_<scalar>(sum.z,b);
}
template <typename ReduceType, typename Float2, typename FloatN>
struct CdotNormB : public ReduceFunctor<ReduceType, Float2, FloatN> {
CdotNormB(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v)
{ cdotNormB_<ReduceType>(sum,x,y); }
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 cDotProductNormB(ColorSpinorField &x, ColorSpinorField &y) {
return reduce::reduceCuda<double3,QudaSumFloat3,CdotNormB,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
}
/**
This convoluted kernel does the following:
z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct caxpbypzYmbwcDotProductUYNormY_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
Float2 b;
caxpbypzYmbwcDotProductUYNormY_(const Float2 &a, const Float2 &b) : a(a), b(b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) { Caxpy_(a, x, z); Caxpy_(b, y, z); Caxpy_(-b, w, y); cdotNormB_<ReduceType>(sum,v,y); }
static int streams() { return 7; } //! total number of input and output streams
static int flops() { return 18; } //! flops per element
};
double3 caxpbypzYmbwcDotProductUYNormY(const Complex &a, ColorSpinorField &x,
const Complex &b, ColorSpinorField &y,
ColorSpinorField &z, ColorSpinorField &w,
ColorSpinorField &u) {
if (x.Precision() != z.Precision()) {
return reduce::mixed::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
} else {
return reduce::reduceCuda<double3,QudaSumFloat3,caxpbypzYmbwcDotProductUYNormY_,0,1,1,0,0,false>
(make_double2(REAL(a), IMAG(a)), make_double2(REAL(b), IMAG(b)), x, y, z, w, u);
}
}
/**
Specialized kernel for the modified CG norm computation for
computing beta. Computes y = y + a*x and returns norm(y) and
dot(y, delta(y)) where delta(y) is the difference between the
input and output y vector.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct axpyCGNorm2 : public ReduceFunctor<ReduceType, Float2, FloatN> {
Float2 a;
axpyCGNorm2(const Float2 &a, const Float2 &b) : a(a) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
FloatN y_new = y + a.x*x;
norm2_<scalar>(sum.x,y_new);
dot_<scalar>(sum.y,y_new,y_new-y);
y = y_new;
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per real element
};
Complex axpyCGNorm(const double &a, ColorSpinorField &x, ColorSpinorField &y) {
double2 cg_norm = reduce::reduceCuda<double2,QudaSumFloat2,axpyCGNorm2,0,1,0,0,0,false>
(make_double2(a, 0.0), make_double2(0.0, 0.0), x, y, x, x, x);
return Complex(cg_norm.x, cg_norm.y);
}
/**
This kernel returns (x, x) and (r,r) and also returns the so-called
heavy quark norm as used by MILC: 1 / N * \sum_i (r, r)_i / (x, x)_i, where
i is site index and N is the number of sites.
When this kernel is launched, we must enforce that the parameter M
in the launcher corresponds to the number of FloatN fields used to
represent the spinor, e.g., M=6 for Wilson and M=3 for staggered.
This is only the case for half-precision kernels by default. To
enable this, the siteUnroll template parameter must be set true
when reduceCuda is instantiated.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct HeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
HeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x); norm2_<real>(aux.y,y);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 2; } //! total number of input and output streams
static int flops() { return 4; } //! undercounts since it excludes the per-site division
};
double3 HeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,HeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, r, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
Variant of the HeavyQuarkResidualNorm kernel: this takes three
arguments, the first two are summed together to form the
solution, with the third being the residual vector. This removes
the need for an additional xpy call in the solvers, improving
performance.
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct xpyHeavyQuarkResidualNorm_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
typedef typename scalar<ReduceType>::type real;
Float2 a;
Float2 b;
ReduceType aux;
xpyHeavyQuarkResidualNorm_(const Float2 &a, const Float2 &b) : a(a), b(b), aux{ } { ; }
__device__ __host__ void pre() { aux.x = 0; aux.y = 0; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
norm2_<real>(aux.x,x + y); norm2_<real>(aux.y,z);
}
//! sum the solution and residual norms, and compute the heavy-quark norm
__device__ __host__ void post(ReduceType &sum)
{
sum.x += aux.x; sum.y += aux.y; sum.z += (aux.x > 0.0) ? (aux.y / aux.x) : static_cast<real>(1.0);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 5; }
};
double3 xpyHeavyQuarkResidualNorm(ColorSpinorField &x, ColorSpinorField &y,
ColorSpinorField &r) {
double3 rtn = reduce::reduceCuda<double3,QudaSumFloat3,xpyHeavyQuarkResidualNorm_,0,0,0,0,0,true>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, r, r, r);
rtn.z /= (x.Volume()*comm_size());
return rtn;
}
/**
double3 tripleCGReduction(V x, V y, V z){}
First performs the operation norm2(x)
Second performs the operation norm2(y)
Third performs the operation dotProduct(y,z)
*/
template <typename ReduceType, typename Float2, typename FloatN>
struct tripleCGReduction_ : public ReduceFunctor<ReduceType, Float2, FloatN> {
tripleCGReduction_(const Float2 &a, const Float2 &b) { ; }
__device__ __host__ void operator()(ReduceType &sum, FloatN &x, FloatN &y, FloatN &z, FloatN &w, FloatN &v) {
typedef typename ScalarType<ReduceType>::type scalar;
norm2_<scalar>(sum.x,x); norm2_<scalar>(sum.y,y); dot_<scalar>(sum.z,y,z);
}
static int streams() { return 3; } //! total number of input and output streams
static int flops() { return 6; } //! flops per element
};
double3 tripleCGReduction(ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) {
return reduce::reduceCuda<double3,QudaSumFloat3,tripleCGReduction_,0,0,0,0,0,false>
(make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x, x);
}
} // namespace blas
} // namespace quda
|
30dba12bd83eae7ccc2c14a7a3d1d70f125b059f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, framework::DataLayout layout>
static __global__ void BNForwardInference(
const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias, const int C, const int N, const int HxW,
const double epsilon, T *y) {
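  // Each thread handles a strided range of elements and applies
  //   y = scale[c] * (x - mean[c]) / sqrt(variance[c] + epsilon) + bias[c],
  // where the channel c is recovered from the flat index for NCHW or NHWC layout.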
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int num = N * C * HxW;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> x_sub_mean =
static_cast<BatchNormParamType<T>>(x[i]) - mean[c];
BatchNormParamType<T> inv_var = 1 / sqrt(variance[c] + epsilon);
y[i] = static_cast<T>(scale[c] * x_sub_mean * inv_var + bias[c]);
}
}
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
const T *x, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias, const int C, const int N, const int HxW,
const double epsilon, double exponentialAverageFactor, T *y,
BatchNormParamType<T> *mean, BatchNormParamType<T> *variance,
BatchNormParamType<T> *save_mean,
BatchNormParamType<T> *save_inv_variance) {
int outer_size = C;
int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> variance_val;
__shared__ BatchNormParamType<T> inv_var_val;
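  // One block per channel: threads accumulate sum(x) and sum(x*x), a block-wide
  // reduction gives the batch mean and variance (E[x^2] - E[x]^2), thread 0 stores
  // the saved mean / inverse std and updates the running statistics, and a second
  // pass over the same elements writes the normalized output.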
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
variance_val = x_square_sum / inner_size - mean_val * mean_val;
inv_var_val = 1 / sqrt(variance_val + epsilon);
if (save_mean && save_inv_variance) {
save_mean[i] = mean_val;
save_inv_variance[i] = inv_var_val;
}
mean[i] = (1 - exponentialAverageFactor) * mean_val +
exponentialAverageFactor * mean[i];
variance[i] = (1 - exponentialAverageFactor) * variance_val +
exponentialAverageFactor * variance[i];
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_sub_mean =
static_cast<BatchNormParamType<T>>(x[index]) - mean_val;
y[index] = scale[i] * x_sub_mean * inv_var_val + bias[i];
}
}
}
template <typename T>
class BatchNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
const bool is_test = ctx.Attr<bool>("is_test");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool trainable_stats = ctx.Attr<bool>("trainable_statistics");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_stats);
// Get the size for each dimension.
// NCHW [batch_size, in_channels, in_height, in_width]
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5, true,
platform::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5"
"But received: the size of input's dimensions is [%d]",
x_dims.size()));
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm =
test_mode ||
(dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent);
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
Tensor transformed_x(x->type());
Tensor transformed_y(y->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y,
&transformed_y);
} else {
transformed_x.ShareDataWith(*x);
transformed_y.ShareDataWith(*y);
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif // CUDNN_VERSION_MIN(7, 0, 1)
VLOG(3) << "Setting descriptors.";
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * D * C, 1, W * D * C, D * C, C};
}
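    // For NCHW these strides describe a densely packed tensor; for NHWC the dims stay
    // in NCHW order but the strides place the channel dimension innermost (stride 1).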
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// Note: PERSISTENT not implemented for inference
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(
// bn_param_desc_, data_desc_, test_mode ? miopenBNSpatial : mode_));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
// Note: PERSISTENT not implemented for inference
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_,
test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_));
#endif
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Now, depending on whether we are running test or not, we have two paths.
// It is training mode when it's not inference AND not using a pre-trained
// model.
bool training = !test_mode && !use_global_stats;
if (!training) {
// only when test we use input to do computation.
const auto *est_mean = ctx.Input<Tensor>("Mean");
const auto *est_var = ctx.Input<Tensor>("Variance");
// Run inference mode.
PADDLE_ENFORCE_EQ(
est_mean->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of mean's dimensions must equal to 1."
"But received: the size of mean's dimensions mean is [%d],"
"the dimensions of mean is [%s].",
est_mean->dims().size(), est_mean->dims()));
PADDLE_ENFORCE_EQ(
est_var->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of variance's dimensions must equal to 1."
"But received: the size of variance's dimensions is [%d],"
"the dimensions of variance is [%s].",
est_var->dims().size(), est_var->dims()));
PADDLE_ENFORCE_EQ(
est_mean->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of mean must equal to the number of "
"Channels, which is [%d]. But received: the first dimension"
"of mean is [%d], the dimensions of mean is [%s].",
C, est_mean->dims()[0], est_mean->dims()));
PADDLE_ENFORCE_EQ(
est_var->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of variance must equal to the number"
"of Channels, which is [%d]. But received: the first dimension of"
"variance is [%d], the dimensions of variance is [%s].",
C, est_var->dims()[0], est_var->dims()));
#ifdef PADDLE_WITH_HIP
const int block_size = 256;
const int grid_size = (N * C * H * W * D + block_size - 1) / block_size;
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNForwardInference<
T,
DataLayout::kNCHW>), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
transformed_x.template data<T>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, transformed_y.template data<T>());
} else {
hipLaunchKernelGGL(( BNForwardInference<
T,
DataLayout::kNHWC>), dim3(grid_size), dim3(block_size), 0, dev_ctx.stream(),
transformed_x.template data<T>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, transformed_y.template data<T>());
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationForwardInference(
// handle, miopenBNSpatial,
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kOne())),
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kZero())),
// data_desc_,
// static_cast<const void *>(transformed_x.template data<T>()),
// data_desc_,
// static_cast<void *>(
// transformed_y.template mutable_data<T>(ctx.GetPlace())),
// bn_param_desc_,
// const_cast<void *>(static_cast<const void *>(
// scale->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// bias->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// est_mean->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// est_var->template data<BatchNormParamType<T>>())),
// epsilon));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardInference(
handle,
// Note: PERSISTENT not implemented for inference
CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(), epsilon));
#endif
} else {
// if MomentumTensor is set, use MomentumTensor value, momentum
// is only used in this training branch
if (ctx.HasInput("MomentumTensor")) {
const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
Tensor mom_cpu;
TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu);
momentum = mom_cpu.data<float>()[0];
}
// Run training mode.
// obtain running mean and running inv var, and see if we need to
// initialize them.
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0));
if ((N * H * W * D) == 1) {
// Only 1 element in normalization dimension,
// skip the batch norm calculation, let y = x.
framework::TensorCopy(*x, ctx.GetPlace(), y);
} else {
double this_factor = 1. - momentum;
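        // this_factor = 1 - momentum is the exponential-averaging factor passed below
        // when blending the batch statistics into the running mean and variance.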
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
// Create reserve space and workspace for batch norm.
// Create tensor for each batchnorm op, it will be used in the
// backward. Thus this tensor shouldn't be temp.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*zDesc=*/nullptr,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*activationDesc=*/nullptr,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(
ctx.GetPlace(), transformed_x.type(), reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), nullptr, nullptr, data_desc_,
transformed_y.template data<T>(), bn_param_desc_,
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
nullptr, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
const int num = transformed_x.numel();
const int block = 256;
const int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
const int grid = ::min(C, max_blocks);
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNForwardTraining<
T, block,
DataLayout::kNCHW>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, this_factor, transformed_y.template data<T>(),
mean_out->template data<BatchNormParamType<T>>(),
variance_out->template data<BatchNormParamType<T>>(),
saved_mean->template data<BatchNormParamType<T>>(),
saved_variance->template data<BatchNormParamType<T>>());
} else {
hipLaunchKernelGGL(( BNForwardTraining<
T, block,
DataLayout::kNHWC>), dim3(grid), dim3(block), 0, dev_ctx.stream(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, this_factor, transformed_y.template data<T>(),
mean_out->template data<BatchNormParamType<T>>(),
variance_out->template data<BatchNormParamType<T>>(),
saved_mean->template data<BatchNormParamType<T>>(),
saved_variance->template data<BatchNormParamType<T>>());
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationForwardTraining(
// handle, mode_, const_cast<void *>(static_cast<const void *>(
// CudnnDataType<T>::kOne())),
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kZero())),
// data_desc_,
// static_cast<const void *>(transformed_x.template data<T>()),
// data_desc_,
// static_cast<void *>(
// transformed_y.template mutable_data<T>(ctx.GetPlace())),
// bn_param_desc_,
// const_cast<void *>(static_cast<const void *>(
// scale->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// bias->template data<BatchNormParamType<T>>())),
// this_factor,
// static_cast<void *>(
// mean_out->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace())),
// static_cast<void *>(variance_out->template mutable_data<
// BatchNormParamType<T>>(ctx.GetPlace())),
// epsilon,
// static_cast<void *>(
// saved_mean->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace())),
// static_cast<void *>(saved_variance->template mutable_data<
// BatchNormParamType<T>>(ctx.GetPlace()))));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTraining(
handle, mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace())));
#endif
}
}
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_y, y);
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy, const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const double epsilon, const int N,
const int C, const int HxW, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
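  // Per channel c: dscale[c] = sum(dy * (x - mean[c])) * inv_std[c] and
  // dbias[c] = sum(dy), each computed with a block-wide reduction.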
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
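// KeBNBackwardData: element-wise data gradient for the use_global_stats path.
// Since the statistics are constants with respect to x, only the direct term
// remains:
//   dx[i] = dy[i] * scale[c] / sqrt(variance[c] + epsilon)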
template <typename T, framework::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon, const int C,
const int HxW, const int num, T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
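// KeBNRestoreData: used by the inplace path, where only Y is available.
// It reconstructs X by inverting the batch_norm affine transform:
//   x[i] = (y[i] - bias[c]) / scale[c] / variance[c] + mean[c]
// (the training branch passes the saved inverse std as `variance`, so the
// division undoes the normalization).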
template <typename T>
static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon, int C, int M,
const int num, const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, double epsilon, int C,
int M, const int num, const T *y, int grid2, const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream,
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
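// BNBackward: full training backward, one thread block per channel. If the
// saved statistics are not provided, the batch mean and inverse std are
// recomputed from x with a block reduction. The gradients follow the standard
// batch_norm backward formulas:
//   dbias  = sum(dy)
//   dscale = sum(dy * (x - mean)) * inv_std
//   dx     = scale * inv_std * (dy - mean(dy)
//            - (x - mean) * inv_std^2 * mean(dy * (x - mean)))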
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy, const T *x, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance, const int C, const int N,
const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
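// BNBackwardData: computes only the data gradient with the same formula as
// BNBackward. The `variance` argument is read directly as the inverse std
// (the caller passes the saved inverse variance from the forward pass).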
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean, const T *x,
const BatchNormParamType<T> *variance, const int C, const int N,
const int HxW, T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
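// Backward kernel: with training statistics it prefers the cuDNN/MIOpen
// descriptor path (cudnnBatchNormalizationBackwardEx with a reserve space on
// cuDNN >= 7.4.1, plain cudnnBatchNormalizationBackward otherwise, or the
// BNBackward kernel on HIP); with use_global_stats/is_test it falls back to
// the KeBNBackwardData / KeBNBackwardScaleBias kernels driven by the running
// statistics.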
template <typename T>
class BatchNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
    // batch_norm with inplace == false takes X as the gradient input, which
    // matches the cuDNN batch_norm backward calculation. batch_norm with
    // inplace == true only takes Y as input, and X has to be reconstructed
    // by applying the inverse batch_norm transform to Y.
const Tensor *x;
bool is_inplace;
if (ctx.HasInput("Y")) {
x = ctx.Input<Tensor>("Y");
is_inplace = true;
PADDLE_ENFORCE_EQ(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD not inplace in inplace mode"));
} else {
x = ctx.Input<Tensor>("X");
is_inplace = false;
PADDLE_ENFORCE_NE(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD inplaced in non-inplace mode"));
}
const bool is_test = ctx.Attr<bool>("is_test");
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5, true,
platform::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(), x_dims));
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
d_x->mutable_data<T>(ctx.GetPlace());
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(
scale->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale->dims().size(), scale->dims()));
PADDLE_ENFORCE_EQ(
scale->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C, scale->dims()[0]));
auto dtype = platform::CudnnDataType<T>::type;
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
#ifdef PADDLE_WITH_HIP
auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
    // HIP does not support the NHWC compute format
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm =
dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent &&
reserve_space != nullptr;
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
Tensor transformed_x(x->type());
Tensor transformed_d_y(d_y->type());
Tensor transformed_d_x(d_x->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x,
&transformed_d_x);
} else {
transformed_x.ShareDataWith(*x);
transformed_d_y.ShareDataWith(*d_y);
transformed_d_x.ShareDataWith(*d_x);
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const int num = transformed_x.numel();
#ifdef HIPCC
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = ::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = ::min(C, max_blocks);
auto stream = dev_ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
#endif
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format, transformed_x.data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
saved_mean_data, saved_var_data, epsilon, C, H * W * D,
num, transformed_x.data<T>(), grid2, block, stream);
}
// This branch calls CUDNN APIs
if (d_scale && d_bias) {
bool called = false;
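        // cuDNN >= 7.4.1 provides the fused BackwardEx API, which reuses the
        // reserve space produced by the forward pass; otherwise the code
        // falls back to cudnnBatchNormalizationBackward (or BNBackward on
        // HIP) below.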
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/transformed_d_x.template mutable_data<T>(
ctx.GetPlace()),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNBackward<
T, block,
DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, dev_ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(), saved_mean_data,
saved_var_data, C, N, H * W * D, epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
} else {
hipLaunchKernelGGL(( BNBackward<
T, block,
DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, dev_ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(), saved_mean_data,
saved_var_data, C, N, H * W * D, epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackward(
dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_d_y.template data<T>(), data_desc_,
transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean_data, saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_d_x, d_x);
}
} else {
        // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNCHW>),
dim3(grid2), dim3(block), 0, dev_ctx.stream(),
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNHWC>),
dim3(grid2), dim3(block), 0, dev_ctx.stream(),
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
} else {
const auto *running_mean = ctx.Input<Tensor>("Mean");
const auto *running_var = ctx.Input<Tensor>("Variance");
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = *x;
inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
running_mean_data, running_var_data, epsilon, C,
H * W * D, num, x->data<T>(), grid2, block, stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<
T, framework::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<
T, framework::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
};
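// Double-grad kernel: the computation is delegated to NormDoubleGradFunctor
// (see norm_utils.cu.h).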
template <typename T>
class BatchNormDoubleGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *X = ctx.Input<Tensor>("X");
const auto *Scale = ctx.Input<Tensor>("Scale");
const auto *dY = ctx.Input<Tensor>("DY");
const auto *Saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *Saved_variance = ctx.Input<Tensor>("SavedVariance");
const double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool is_test = ctx.Attr<bool>("is_test");
PADDLE_ENFORCE_EQ(
is_test, false,
platform::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *ddX = ctx.Input<Tensor>("DDX");
const auto *ddScale = ctx.Input<Tensor>("DDScale");
const auto *ddBias = ctx.Input<Tensor>("DDBias");
auto *dX = ctx.Output<Tensor>("DX");
auto *dScale = ctx.Output<Tensor>("DScale");
auto *ddY = ctx.Output<Tensor>("DDY");
NormDoubleGradFunctor<platform::CUDADeviceContext, T>(
ctx, data_layout, X, Scale, dY, Saved_mean, Saved_variance, epsilon,
use_global_stats, ddX, ddScale, ddBias, dX, dScale, ddY);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad_grad,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, double>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, double>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad_grad,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, double>);
#endif
| 30dba12bd83eae7ccc2c14a7a3d1d70f125b059f.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/fluid/platform/float16.h"
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
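// BNForwardInference: element-wise inference forward using the estimated
// (running) statistics:
//   y[i] = scale[c] * (x[i] - mean[c]) / sqrt(variance[c] + epsilon) + bias[c]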
template <typename T, framework::DataLayout layout>
static __global__ void BNForwardInference(
const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias, const int C, const int N, const int HxW,
const double epsilon, T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int num = N * C * HxW;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> x_sub_mean =
static_cast<BatchNormParamType<T>>(x[i]) - mean[c];
BatchNormParamType<T> inv_var = 1 / sqrt(variance[c] + epsilon);
y[i] = static_cast<T>(scale[c] * x_sub_mean * inv_var + bias[c]);
}
}
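// BNForwardTraining: one thread block per channel. It block-reduces sum(x)
// and sum(x^2) to obtain the batch mean/variance, updates the running
// statistics as an exponential moving average controlled by
// exponentialAverageFactor, stores the saved mean / saved inverse std for the
// backward pass, and then writes the normalized output.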
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNForwardTraining(
const T *x, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias, const int C, const int N, const int HxW,
const double epsilon, double exponentialAverageFactor, T *y,
BatchNormParamType<T> *mean, BatchNormParamType<T> *variance,
BatchNormParamType<T> *save_mean,
BatchNormParamType<T> *save_inv_variance) {
int outer_size = C;
int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> variance_val;
__shared__ BatchNormParamType<T> inv_var_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
variance_val = x_square_sum / inner_size - mean_val * mean_val;
inv_var_val = 1 / sqrt(variance_val + epsilon);
if (save_mean && save_inv_variance) {
save_mean[i] = mean_val;
save_inv_variance[i] = inv_var_val;
}
mean[i] = (1 - exponentialAverageFactor) * mean_val +
exponentialAverageFactor * mean[i];
variance[i] = (1 - exponentialAverageFactor) * variance_val +
exponentialAverageFactor * variance[i];
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_sub_mean =
static_cast<BatchNormParamType<T>>(x[index]) - mean_val;
y[index] = scale[i] * x_sub_mean * inv_var_val + bias[i];
}
}
}
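// Forward kernel: sets up cuDNN tensor descriptors and dispatches to
// cudnnBatchNormalizationForwardInference / ForwardTraining(Ex) on CUDA, or
// to the BNForwardInference / BNForwardTraining kernels above on HIP. For
// FP16 with FLAGS_cudnn_batchnorm_spatial_persistent the NHWC layout is kept
// (CUDNN_BATCHNORM_SPATIAL_PERSISTENT path); otherwise the input is
// transposed to NCHW.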
template <typename T>
class BatchNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
float momentum = ctx.Attr<float>("momentum");
const bool is_test = ctx.Attr<bool>("is_test");
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool trainable_stats = ctx.Attr<bool>("trainable_statistics");
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
bool test_mode = is_test && (!trainable_stats);
// Get the size for each dimension.
// NCHW [batch_size, in_channels, in_height, in_width]
const auto *x = ctx.Input<Tensor>("X");
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5, true,
platform::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5"
"But received: the size of input's dimensions is [%d]",
x_dims.size()));
auto *y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
auto dtype = platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm =
test_mode ||
(dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent);
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
Tensor transformed_x(x->type());
Tensor transformed_y(y->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y,
&transformed_y);
} else {
transformed_x.ShareDataWith(*x);
transformed_y.ShareDataWith(*y);
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif // CUDNN_VERSION_MIN(7, 0, 1)
VLOG(3) << "Setting descriptors.";
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * D * C, 1, W * D * C, D * C, C};
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// Note: PERSISTENT not implemented for inference
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(
// bn_param_desc_, data_desc_, test_mode ? miopenBNSpatial : mode_));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
// Note: PERSISTENT not implemented for inference
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_,
test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_));
#endif
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Now, depending on whether we are running test or not, we have two paths.
    // It is training mode when it is not in test mode AND not using
    // pre-trained (global) statistics.
bool training = !test_mode && !use_global_stats;
if (!training) {
      // Only in test mode do we use the input estimates (Mean/Variance) for
      // the computation.
const auto *est_mean = ctx.Input<Tensor>("Mean");
const auto *est_var = ctx.Input<Tensor>("Variance");
// Run inference mode.
PADDLE_ENFORCE_EQ(
est_mean->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of mean's dimensions must equal to 1."
"But received: the size of mean's dimensions mean is [%d],"
"the dimensions of mean is [%s].",
est_mean->dims().size(), est_mean->dims()));
PADDLE_ENFORCE_EQ(
est_var->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of variance's dimensions must equal to 1."
"But received: the size of variance's dimensions is [%d],"
"the dimensions of variance is [%s].",
est_var->dims().size(), est_var->dims()));
PADDLE_ENFORCE_EQ(
est_mean->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of mean must equal to the number of "
"Channels, which is [%d]. But received: the first dimension"
"of mean is [%d], the dimensions of mean is [%s].",
C, est_mean->dims()[0], est_mean->dims()));
PADDLE_ENFORCE_EQ(
est_var->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of variance must equal to the number"
"of Channels, which is [%d]. But received: the first dimension of"
"variance is [%d], the dimensions of variance is [%s].",
C, est_var->dims()[0], est_var->dims()));
#ifdef PADDLE_WITH_HIP
const int block_size = 256;
const int grid_size = (N * C * H * W * D + block_size - 1) / block_size;
if (compute_format == DataLayout::kNCHW) {
BNForwardInference<
T,
DataLayout::kNCHW><<<grid_size, block_size, 0, dev_ctx.stream()>>>(
transformed_x.template data<T>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, transformed_y.template data<T>());
} else {
BNForwardInference<
T,
DataLayout::kNHWC><<<grid_size, block_size, 0, dev_ctx.stream()>>>(
transformed_x.template data<T>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, transformed_y.template data<T>());
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationForwardInference(
// handle, miopenBNSpatial,
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kOne())),
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kZero())),
// data_desc_,
// static_cast<const void *>(transformed_x.template data<T>()),
// data_desc_,
// static_cast<void *>(
// transformed_y.template mutable_data<T>(ctx.GetPlace())),
// bn_param_desc_,
// const_cast<void *>(static_cast<const void *>(
// scale->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// bias->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// est_mean->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// est_var->template data<BatchNormParamType<T>>())),
// epsilon));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardInference(
handle,
// Note: PERSISTENT not implemented for inference
CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
est_mean->template data<BatchNormParamType<T>>(),
est_var->template data<BatchNormParamType<T>>(), epsilon));
#endif
} else {
      // If MomentumTensor is set, use its value; momentum is only used in
      // this training branch.
if (ctx.HasInput("MomentumTensor")) {
const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
Tensor mom_cpu;
TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu);
momentum = mom_cpu.data<float>()[0];
}
// Run training mode.
// obtain running mean and running inv var, and see if we need to
// initialize them.
auto *mean_out = ctx.Output<Tensor>("MeanOut");
auto *variance_out = ctx.Output<Tensor>("VarianceOut");
mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
auto *saved_mean = ctx.Output<Tensor>("SavedMean");
auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0));
if ((N * H * W * D) == 1) {
// Only 1 element in normalization dimension,
// skip the batch norm calculation, let y = x.
framework::TensorCopy(*x, ctx.GetPlace(), y);
} else {
double this_factor = 1. - momentum;
bool called = false;
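        // With cuDNN >= 7.4.1 the fused ForwardTrainingEx API is used; it
        // requires a workspace plus a reserve space that is kept alive for
        // the backward pass (the ReserveSpace output). Otherwise the plain
        // ForwardTraining call (or the BNForwardTraining kernel on HIP) below
        // is used.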
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
size_t reserve_space_size = 0;
void *reserve_space_ptr = nullptr;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
        // Create reserve space and workspace for batch norm.
        // A reserve-space tensor is created for each batch_norm op; it is
        // used again in the backward pass, so it must not be a temporary.
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace");
PADDLE_ENFORCE_NOT_NULL(
reserve_space,
platform::errors::NotFound(
"The argument ReserveSpace of batch_norm op is not found."));
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*zDesc=*/nullptr,
/*yDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
// -------------- cudnn batchnorm reserve space --------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationTrainingExReserveSpaceSize(
/*handle=*/handle,
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*activationDesc=*/nullptr,
/*xDesc=*/data_desc_,
/*sizeInBytes=*/&reserve_space_size));
reserve_space_ptr = reserve_space->mutable_data(
ctx.GetPlace(), transformed_x.type(), reserve_space_size);
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTrainingEx(
handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), nullptr, nullptr, data_desc_,
transformed_y.template data<T>(), bn_param_desc_,
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
nullptr, workspace_ptr, workspace_size, reserve_space_ptr,
reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
const int num = transformed_x.numel();
const int block = 256;
const int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
const int grid = std::min(C, max_blocks);
if (compute_format == DataLayout::kNCHW) {
BNForwardTraining<
T, block,
DataLayout::kNCHW><<<grid, block, 0, dev_ctx.stream()>>>(
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, this_factor, transformed_y.template data<T>(),
mean_out->template data<BatchNormParamType<T>>(),
variance_out->template data<BatchNormParamType<T>>(),
saved_mean->template data<BatchNormParamType<T>>(),
saved_variance->template data<BatchNormParamType<T>>());
} else {
BNForwardTraining<
T, block,
DataLayout::kNHWC><<<grid, block, 0, dev_ctx.stream()>>>(
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), C, N, H * W * D,
epsilon, this_factor, transformed_y.template data<T>(),
mean_out->template data<BatchNormParamType<T>>(),
variance_out->template data<BatchNormParamType<T>>(),
saved_mean->template data<BatchNormParamType<T>>(),
saved_variance->template data<BatchNormParamType<T>>());
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationForwardTraining(
// handle, mode_, const_cast<void *>(static_cast<const void *>(
// CudnnDataType<T>::kOne())),
// const_cast<void *>(
// static_cast<const void *>(CudnnDataType<T>::kZero())),
// data_desc_,
// static_cast<const void *>(transformed_x.template data<T>()),
// data_desc_,
// static_cast<void *>(
// transformed_y.template mutable_data<T>(ctx.GetPlace())),
// bn_param_desc_,
// const_cast<void *>(static_cast<const void *>(
// scale->template data<BatchNormParamType<T>>())),
// const_cast<void *>(static_cast<const void *>(
// bias->template data<BatchNormParamType<T>>())),
// this_factor,
// static_cast<void *>(
// mean_out->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace())),
// static_cast<void *>(variance_out->template mutable_data<
// BatchNormParamType<T>>(ctx.GetPlace())),
// epsilon,
// static_cast<void *>(
// saved_mean->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace())),
// static_cast<void *>(saved_variance->template mutable_data<
// BatchNormParamType<T>>(ctx.GetPlace()))));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationForwardTraining(
handle, mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_y.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(), this_factor,
mean_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
variance_out->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon,
saved_mean->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
saved_variance->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace())));
#endif
}
}
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW && x_dims.size() > 2) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_y, y);
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy, const T *x, const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, const double epsilon, const int N,
const int C, const int HxW, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename T, framework::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon, const int C,
const int HxW, const int num, T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
template <typename T>
static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon, int C, int M,
const int num, const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == framework::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const framework::DataLayout layout, T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance, double epsilon, int C,
int M, const int num, const T *y, int grid2, const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy, const T *x, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance, const int C, const int N,
const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
__shared__ typename BlockReduce::TempStorage variance_storeage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy, const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean, const T *x,
const BatchNormParamType<T> *variance, const int C, const int N,
const int HxW, T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, cub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == framework::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T>
class BatchNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
PADDLE_ENFORCE_EQ(
platform::is_gpu_place(ctx.GetPlace()), true,
platform::errors::InvalidArgument("It must use CUDAPlace."));
double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto *scale = ctx.Input<Tensor>("Scale");
const auto *bias = ctx.Input<Tensor>("Bias");
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
    // batch_norm with inplace == false takes X as the gradient input, which
    // matches the cuDNN batch_norm backward calculation. batch_norm with
    // inplace == true only takes Y as input, and X has to be reconstructed
    // by applying the inverse batch_norm transform to Y.
const Tensor *x;
bool is_inplace;
if (ctx.HasInput("Y")) {
x = ctx.Input<Tensor>("Y");
is_inplace = true;
PADDLE_ENFORCE_EQ(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD not inplace in inplace mode"));
} else {
x = ctx.Input<Tensor>("X");
is_inplace = false;
PADDLE_ENFORCE_NE(d_x, d_y,
platform::errors::InvalidArgument(
"X@GRAD and Y@GRAD inplaced in non-inplace mode"));
}
const bool is_test = ctx.Attr<bool>("is_test");
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x->dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5, true,
platform::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(), x_dims));
int N, C, H, W, D;
ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
d_x->mutable_data<T>(ctx.GetPlace());
if (d_scale && d_bias) {
d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
}
PADDLE_ENFORCE_EQ(
scale->dims().size(), 1UL,
platform::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale->dims().size(), scale->dims()));
PADDLE_ENFORCE_EQ(
scale->dims()[0], C,
platform::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C, scale->dims()[0]));
auto dtype = platform::CudnnDataType<T>::type;
const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace");
#ifdef PADDLE_WITH_HIP
auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC
: DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
    // HIP does not support the NHWC compute format
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm =
dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent &&
reserve_space != nullptr;
auto compute_format =
fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
Tensor transformed_x(x->type());
Tensor transformed_d_y(d_y->type());
Tensor transformed_d_x(d_x->type());
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x,
&transformed_x);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y,
&transformed_d_y);
ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x,
&transformed_d_x);
} else {
transformed_x.ShareDataWith(*x);
transformed_d_y.ShareDataWith(*d_y);
transformed_d_x.ShareDataWith(*d_x);
}
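    // Describe the (possibly transformed) tensor to cuDNN as a 5-D tensor in
    // either NCHW or NHWC order; for 4-D inputs ExtractNCWHD sets D = 1, so
    // the strides below collapse to the usual 4-D layout.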
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
const int num = transformed_x.numel();
#ifdef HIPCC
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
auto stream = dev_ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
functor;
functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_, CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_,
data_desc_, mode_));
#endif
const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
const auto *saved_mean_data =
saved_mean->template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format, transformed_x.data<T>(),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
saved_mean_data, saved_var_data, epsilon, C, H * W * D,
num, transformed_x.data<T>(), grid2, block, stream);
}
// This branch calls CUDNN APIs
if (d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
Tensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_ptr = workspace_tensor.mutable_data(
ctx.GetPlace(), transformed_x.type(), workspace_size);
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/dev_ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/transformed_d_x.template mutable_data<T>(
ctx.GetPlace()),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale->template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/d_scale
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*dBnBiasData=*/d_bias
->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<T *>(
reserve_space->template data<T>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
BNBackward<
T, block,
DataLayout::kNCHW><<<grid2, block, 0, dev_ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(), saved_mean_data,
saved_var_data, C, N, H * W * D, epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
} else {
BNBackward<
T, block,
DataLayout::kNHWC><<<grid2, block, 0, dev_ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale->template data<BatchNormParamType<T>>(), saved_mean_data,
saved_var_data, C, N, H * W * D, epsilon,
transformed_d_x.template data<T>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnBatchNormalizationBackward(
dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(), data_desc_,
transformed_x.template data<T>(), data_desc_,
transformed_d_y.template data<T>(), data_desc_,
transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
d_scale->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
d_bias->template mutable_data<BatchNormParamType<T>>(
ctx.GetPlace()),
epsilon, saved_mean_data, saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<paddle::platform::CUDADeviceContext, T>(
ctx, &transformed_d_x, d_x);
}
} else {
      // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
BNBackwardData<T, block, framework::DataLayout::kNCHW><<<
grid2, block, 0, dev_ctx.stream()>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
} else {
if (d_x) {
BNBackwardData<T, block, framework::DataLayout::kNHWC><<<
grid2, block, 0, dev_ctx.stream()>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D,
d_x->data<T>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_CUDA_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
#endif
} else {
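      // use_global_stats (or is_test) path: gradients are computed from the
      // running mean/variance inputs instead of the per-batch saved
      // statistics, using the plain KeBNBackward* CUDA kernels below rather
      // than cuDNN.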
const auto *running_mean = ctx.Input<Tensor>("Mean");
const auto *running_var = ctx.Input<Tensor>("Variance");
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = *x;
inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()),
scale->template data<BatchNormParamType<T>>(),
bias->template data<BatchNormParamType<T>>(),
running_mean_data, running_var_data, epsilon, C,
H * W * D, num, x->data<T>(), grid2, block, stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
KeBNBackwardData<
T, framework::DataLayout::kNCHW><<<grid1, block, 0, stream>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
KeBNBackwardData<
T, framework::DataLayout::kNHWC><<<grid1, block, 0, stream>>>(
d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
running_var_data, epsilon, C, H * W, num, d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T, block,
framework::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
};
template <typename T>
class BatchNormDoubleGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const auto *X = ctx.Input<Tensor>("X");
const auto *Scale = ctx.Input<Tensor>("Scale");
const auto *dY = ctx.Input<Tensor>("DY");
const auto *Saved_mean = ctx.Input<Tensor>("SavedMean");
const auto *Saved_variance = ctx.Input<Tensor>("SavedVariance");
const double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
const bool is_test = ctx.Attr<bool>("is_test");
PADDLE_ENFORCE_EQ(
is_test, false,
platform::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
const DataLayout data_layout =
framework::StringToDataLayout(data_layout_str);
const auto *ddX = ctx.Input<Tensor>("DDX");
const auto *ddScale = ctx.Input<Tensor>("DDScale");
const auto *ddBias = ctx.Input<Tensor>("DDBias");
auto *dX = ctx.Output<Tensor>("DX");
auto *dScale = ctx.Output<Tensor>("DScale");
auto *ddY = ctx.Output<Tensor>("DDY");
NormDoubleGradFunctor<platform::CUDADeviceContext, T>(
ctx, data_layout, X, Scale, dY, Saved_mean, Saved_variance, epsilon,
use_global_stats, ddX, ddScale, ddBias, dX, dScale, ddY);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
#ifdef PADDLE_WITH_HIP
// MIOPEN does not support double
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad_grad,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>);
#else
REGISTER_OP_CUDA_KERNEL(
batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
ops::BatchNormKernel<plat::CUDADeviceContext, double>,
ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, double>,
ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
batch_norm_grad_grad,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, double>);
#endif
|
4e69499497c5206ad25325a4433cd0ab9550d821.hip | // !!! This is a file automatically generated by hipify!!!
// Summation of two matrices
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cmath>
void init_mat(int size, float*& A) {
A = new float[size * size];
srand(time(NULL));
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
A[i*size + j] = rand() % 1000;
}
}
}
/*
B) Write a kernel that has each thread produce one output matrix
element. Fill in the execution configuration parameters for this design.
*/
__global__ void element_per_thread(float* c_output, float* in_1, float* in_2, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n) {
c_output[i * n + j] = in_1[i * n + j] + in_2[i * n + j];
}
}
/*
C) Write a kernel that has each thread produce one output matrix row.
Fill in the execution configuration parameters for the design.
*/
__global__ void thread_per_row(float* c_output, float* in_1, float* in_2, int n) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n) {
for (int j = 0; j < n; j++) {
c_output[n * row + j] = in_1[n * row + j] + in_2[n * row + j];
}
}
}
/*
D) Write a kernel that has each thread produce one output matrix column.
Fill in the execution configuration parameters for the design.
*/
__global__ void thread_per_col(float* c_output, float* in_1, float* in_2, int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < n) {
for (int i = 0; i < n; i++) {
c_output[i * n + col] = in_1[i * n + col] + in_2[i * n + col];
}
}
}
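/*
Performance note (general expectation, not measured by this file): with
row-major storage, thread_per_col lets adjacent threads touch adjacent
addresses on every iteration (coalesced accesses), while thread_per_row makes
adjacent threads stride by n elements, so the per-row variant is usually the
slowest of the three kernels on large matrices.
*/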
/*
A) Write the host stub function: allocate memory for the input and
output matrices, transfer the input data to the device, launch the kernel,
transfer the output data back to the host, and free the device memory for
the input and output data. Leave the execution configuration parameters
open for this step.
*/
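/*
Worked example of the configuration used below (illustration only): for
n = 1000 and the per-element kernel, dimBlock = (32, 32) gives 1024 threads
per block and dimGrid = (ceil(1000/32.0), ceil(1000/32.0)) = (32, 32), i.e.
1024 blocks * 1024 threads = 1,048,576 threads covering the 1,000,000 matrix
elements; the bounds check inside the kernel masks the surplus threads.
*/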
void host_func(float* output, float* in_1, float* in_2, int n, int kernel_func=0) {
float* c_output, * c_in1, * c_in2;
int n2 = n * n;
hipMalloc((void**)&c_output, n2 * sizeof(float));
hipMalloc((void**)&c_in1, n2 * sizeof(float));
hipMalloc((void**)&c_in2, n2 * sizeof(float));
hipMemcpy(c_in1, in_1, n2 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(c_in2, in_2, n2 * sizeof(float), hipMemcpyHostToDevice);
/*
kernel call
*/
if (kernel_func == 0) {
// thread per element
dim3 dimGrid(ceil(n / 32.0), ceil(n / 32.0), 1);
dim3 dimBlock(32, 32, 1);
element_per_thread << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
else if (kernel_func == 1) {
// thread per row
dim3 dimGrid(ceil(n / 1024.0), 1, 1);
dim3 dimBlock(1024, 1, 1);
thread_per_row << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
else if (kernel_func == 2) {
// thread per col
dim3 dimGrid(ceil(n / 1024.0), 1, 1);
dim3 dimBlock(1024, 1, 1);
thread_per_col << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
hipMemcpy(output, c_output, n2 * sizeof(float), hipMemcpyDeviceToHost);
hipFree(c_output);
hipFree(c_in1);
hipFree(c_in2);
}
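// Note: the events in main() bracket the whole host_func() call, so the
// reported times include device allocation and host<->device copies, not just
// the kernel execution.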
int main()
{
int sizes[] = { 1000, 2500, 5000, 10000 };
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 3; i++) {
std::cout << "Size: " << sizes[j] << std::endl;
float* A, * B;
init_mat(sizes[j], A);
init_mat(sizes[j], B);
float* output = new float[sizes[j] * sizes[j]];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
host_func(output, A, B, sizes[j], i);
hipEventRecord(stop);
hipEventSynchronize(stop);
float elapsed_time = 0;
hipEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << i << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
}
return 0;
} | 4e69499497c5206ad25325a4433cd0ab9550d821.cu | // Summation of two matrices
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cmath>
void init_mat(int size, float*& A) {
A = new float[size * size];
srand(time(NULL));
for (int i = 0; i < size; i++) {
for (int j = 0; j < size; j++) {
A[i*size + j] = rand() % 1000;
}
}
}
/*
B) Write a kernel that has each thread produce one output matrix
element. Fill in the execution configuration parameters for this design.
*/
__global__ void element_per_thread(float* c_output, float* in_1, float* in_2, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n && j < n) {
c_output[i * n + j] = in_1[i * n + j] + in_2[i * n + j];
}
}
/*
C) Write a kernel that has each thread produce one output matrix row.
Fill in the execution configuration parameters for the design.
*/
__global__ void thread_per_row(float* c_output, float* in_1, float* in_2, int n) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n) {
for (int j = 0; j < n; j++) {
c_output[n * row + j] = in_1[n * row + j] + in_2[n * row + j];
}
}
}
/*
D) Write a kernel that has each thread produce one output matrix column.
Fill in the execution configuration parameters for the design.
*/
__global__ void thread_per_col(float* c_output, float* in_1, float* in_2, int n) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < n) {
for (int i = 0; i < n; i++) {
c_output[i * n + col] = in_1[i * n + col] + in_2[i * n + col];
}
}
}
/*
A) Write the host stub function: allocate memory for the input and
output matrices, transfer the input data to the device, launch the kernel,
transfer the output data back to the host, and free the device memory for
the input and output data. Leave the execution configuration parameters
open for this step.
*/
void host_func(float* output, float* in_1, float* in_2, int n, int kernel_func=0) {
float* c_output, * c_in1, * c_in2;
int n2 = n * n;
cudaMalloc((void**)&c_output, n2 * sizeof(float));
cudaMalloc((void**)&c_in1, n2 * sizeof(float));
cudaMalloc((void**)&c_in2, n2 * sizeof(float));
cudaMemcpy(c_in1, in_1, n2 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(c_in2, in_2, n2 * sizeof(float), cudaMemcpyHostToDevice);
/*
kernel call
*/
if (kernel_func == 0) {
// thread per element
dim3 dimGrid(ceil(n / 32.0), ceil(n / 32.0), 1);
dim3 dimBlock(32, 32, 1);
element_per_thread << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
else if (kernel_func == 1) {
// thread per row
dim3 dimGrid(ceil(n / 1024.0), 1, 1);
dim3 dimBlock(1024, 1, 1);
thread_per_row << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
else if (kernel_func == 2) {
// thread per col
dim3 dimGrid(ceil(n / 1024.0), 1, 1);
dim3 dimBlock(1024, 1, 1);
thread_per_col << < dimGrid, dimBlock >> > (c_output, c_in1, c_in2, n);
}
cudaMemcpy(output, c_output, n2 * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(c_output);
cudaFree(c_in1);
cudaFree(c_in2);
}
int main()
{
int sizes[] = { 1000, 2500, 5000, 10000 };
for (int j = 0; j < 4; j++) {
for (int i = 0; i < 3; i++) {
std::cout << "Size: " << sizes[j] << std::endl;
float* A, * B;
init_mat(sizes[j], A);
init_mat(sizes[j], B);
float* output = new float[sizes[j] * sizes[j]];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
host_func(output, A, B, sizes[j], i);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed_time = 0;
cudaEventElapsedTime(&elapsed_time, start, stop);
std::cout << "Operation " << i << ", time: " << elapsed_time << std::endl;
delete[] A;
delete[] B;
delete[] output;
}
}
return 0;
} |
e272fd523ae08e63c00ada3a2e4479c58b4b0b5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[10,40] --blockDim=[32,6]
texture<float, 2, hipReadModeElementType> texSource;
texture<float, 2, hipReadModeElementType> texTarget;
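// The kernel below estimates the spatial image derivatives with the
// fourth-order central-difference stencil (1, -8, 8, -1) / 12 applied to both
// the source and target textures and averaged, and the temporal derivative Iz
// as the plain difference target - source (as used in optical-flow data terms).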
__global__ void ComputeDerivativesKernel(int width, int height, int stride,
float *Ix, float *Iy, float *Iz)
{
__requires(width == 320);
__requires(height == 240);
__requires(stride == 320);
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0, t1;
// x derivative
t0 = tex2D(texSource, x - 2.0f * dx, y);
t0 -= tex2D(texSource, x - 1.0f * dx, y) * 8.0f;
t0 += tex2D(texSource, x + 1.0f * dx, y) * 8.0f;
t0 -= tex2D(texSource, x + 2.0f * dx, y);
t0 /= 12.0f;
t1 = tex2D(texTarget, x - 2.0f * dx, y);
t1 -= tex2D(texTarget, x - 1.0f * dx, y) * 8.0f;
t1 += tex2D(texTarget, x + 1.0f * dx, y) * 8.0f;
t1 -= tex2D(texTarget, x + 2.0f * dx, y);
t1 /= 12.0f;
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = tex2D(texTarget, x, y) - tex2D(texSource, x, y);
// y derivative
t0 = tex2D(texSource, x, y - 2.0f * dy);
t0 -= tex2D(texSource, x, y - 1.0f * dy) * 8.0f;
t0 += tex2D(texSource, x, y + 1.0f * dy) * 8.0f;
t0 -= tex2D(texSource, x, y + 2.0f * dy);
t0 /= 12.0f;
t1 = tex2D(texTarget, x, y - 2.0f * dy);
t1 -= tex2D(texTarget, x, y - 1.0f * dy) * 8.0f;
t1 += tex2D(texTarget, x, y + 1.0f * dy) * 8.0f;
t1 -= tex2D(texTarget, x, y + 2.0f * dy);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
| e272fd523ae08e63c00ada3a2e4479c58b4b0b5f.cu | //pass
//--gridDim=[10,40] --blockDim=[32,6]
texture<float, 2, cudaReadModeElementType> texSource;
texture<float, 2, cudaReadModeElementType> texTarget;
__global__ void ComputeDerivativesKernel(int width, int height, int stride,
float *Ix, float *Iy, float *Iz)
{
__requires(width == 320);
__requires(height == 240);
__requires(stride == 320);
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float dx = 1.0f / (float)width;
float dy = 1.0f / (float)height;
float x = ((float)ix + 0.5f) * dx;
float y = ((float)iy + 0.5f) * dy;
float t0, t1;
// x derivative
t0 = tex2D(texSource, x - 2.0f * dx, y);
t0 -= tex2D(texSource, x - 1.0f * dx, y) * 8.0f;
t0 += tex2D(texSource, x + 1.0f * dx, y) * 8.0f;
t0 -= tex2D(texSource, x + 2.0f * dx, y);
t0 /= 12.0f;
t1 = tex2D(texTarget, x - 2.0f * dx, y);
t1 -= tex2D(texTarget, x - 1.0f * dx, y) * 8.0f;
t1 += tex2D(texTarget, x + 1.0f * dx, y) * 8.0f;
t1 -= tex2D(texTarget, x + 2.0f * dx, y);
t1 /= 12.0f;
Ix[pos] = (t0 + t1) * 0.5f;
// t derivative
Iz[pos] = tex2D(texTarget, x, y) - tex2D(texSource, x, y);
// y derivative
t0 = tex2D(texSource, x, y - 2.0f * dy);
t0 -= tex2D(texSource, x, y - 1.0f * dy) * 8.0f;
t0 += tex2D(texSource, x, y + 1.0f * dy) * 8.0f;
t0 -= tex2D(texSource, x, y + 2.0f * dy);
t0 /= 12.0f;
t1 = tex2D(texTarget, x, y - 2.0f * dy);
t1 -= tex2D(texTarget, x, y - 1.0f * dy) * 8.0f;
t1 += tex2D(texTarget, x, y + 1.0f * dy) * 8.0f;
t1 -= tex2D(texTarget, x, y + 2.0f * dy);
t1 /= 12.0f;
Iy[pos] = (t0 + t1) * 0.5f;
}
|
1c2a486dff280cc85fc0da750ebb74de0e9fbc18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//taken and modified from https://github.com/sp2823/caffe
//see also: https://github.com/BVLC/caffe/pull/5665/commits
#include <vector>
#include "caffe/layers/conv_dw_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
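// Depthwise convolution: each channel is filtered only with its own
// kernel_h * kernel_w filter slice. In the kernels below the linear thread
// index is decoded into (n, c, h, w) assuming NCHW storage, and each thread
// produces one output element (forward / bottom-backward) or one partial
// product (weight/bias backward, later reduced with caffe_gpu_gemv).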
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseWeightForward(const int nthreads,
const Btype* const bottom_data, const Ftype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Ftype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / top_height / top_width;
const int c = (index / top_height / top_width) % channels;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const Ftype* weight = weight_data + c * kernel_h * kernel_w;
Ftype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
value += (*weight) * bottom_data[offset];
}
++weight;
}
}
top_data[index] = value;
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBiasForward(const int nthreads,
const Ftype* const bias_data, const int num, const int channels,
const int top_height, const int top_width, Ftype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = (index / top_height / top_width) % channels;
top_data[index] += bias_data[c];
}
}
template<typename Ftype, typename Btype>
void ConvolutionDepthwiseLayer<Ftype,Btype>::Forward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
this->Quantize_gpu(bottom, top);
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const Ftype* weight_data = this->blobs_[0]->template gpu_data<Ftype>();
const int count = top[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(HIP_KERNEL_NAME(ConvolutionDepthwiseWeightForward<Ftype,Btype>),
      dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, weight_data, num, channels,
      top_height, top_width, bottom_height, bottom_width,
      kernel_h_, kernel_w_, stride_h_, stride_w_,
      pad_h_, pad_w_, dilation_h_, dilation_w_, top_data);
if (this->layer_param_.convolution_param().bias_term()) {
const Ftype* bias_data = this->blobs_[1]->template gpu_data<Ftype>();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ConvolutionDepthwiseBiasForward<Ftype,Btype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bias_data, num, channels,
        top_height, top_width, top_data);
}
this->Quantize_gpu(bottom, top);
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseWeightBackward(const int nthreads,
const Ftype* const top_diff, const Btype* const bottom_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Btype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int kh = (index / kernel_w / num / top_height / top_width)
% kernel_h;
const int kw = (index / num / top_height / top_width) % kernel_w;
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int c = index / kernel_h / kernel_w / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int top_offset = ((n * channels + c) * top_height + h)
* top_width + w;
const int bottom_offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
buffer_data[index] = top_diff[top_offset] * bottom_data[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBottomBackward(const int nthreads,
const Ftype* const top_diff, const Btype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Btype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / bottom_height / bottom_width;
const int c = (index / bottom_height / bottom_width) % channels;
const int h = (index / bottom_width) % bottom_height;
const int w = index % bottom_width;
const Btype* weight = weight_data + c * kernel_h * kernel_w;
Btype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_out_s = h + pad_h - kh * dilation_h;
const int w_out_s = w + pad_w - kw * dilation_w;
if (((h_out_s % stride_h) == 0) && ((w_out_s % stride_w) == 0)) {
const int h_out = h_out_s / stride_h;
const int w_out = w_out_s / stride_w;
if ((h_out >= 0) && (h_out < top_height)
&& (w_out >= 0) && (w_out < top_width)) {
const int offset = ((n * channels + c) * top_height + h_out)
* top_width + w_out;
value += (*weight) * top_diff[offset];
}
}
++weight;
}
}
bottom_diff[index] += value;
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBiasBackward(const int nthreads,
const Ftype* const top_diff, const int num, const int channels,
const int top_height, const int top_width, Btype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int offset = ((n * channels + c) * top_height + h) * top_width + w;
buffer_data[index] = top_diff[offset];
}
}
template<typename Ftype, typename Btype>
void ConvolutionDepthwiseLayer<Ftype,Btype>::Backward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
const Ftype* top_diff = top[0]->gpu_diff<Ftype>();
const int bottom_count = bottom[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
const int length = num * top_height * top_width;
caffe_gpu_set(bottom_count, Btype(0), bottom[0]->mutable_gpu_diff<Btype>());
if (this->layer_param_.convolution_param().bias_term()
&& this->param_propagate_down_[1]) {
const int bias_buffer_count = bias_buffer_.count();
Btype* bias_buffer_mutable_data = bias_buffer_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ConvolutionDepthwiseBiasBackward<Ftype,Btype>),
        dim3(CAFFE_GET_BLOCKS(bias_buffer_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        bias_buffer_count, top_diff, num, channels,
        top_height, top_width, bias_buffer_mutable_data);
const int bias_count = this->blobs_[1]->count();
const Btype* bias_buffer_data = bias_buffer_.gpu_data();
Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
const Btype* bias_multiplier_data = bias_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, bias_count, length, Btype(1),
bias_buffer_data, bias_multiplier_data, Btype(1), bias_diff);
}
if (this->param_propagate_down_[0]) {
const int weight_buffer_count = weight_buffer_.count();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* weight_buffer_mutable_data = weight_buffer_.mutable_gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ConvolutionDepthwiseWeightBackward<Ftype,Btype>),
        dim3(CAFFE_GET_BLOCKS(weight_buffer_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        weight_buffer_count, top_diff, bottom_data, num, channels,
        top_height, top_width, bottom_height, bottom_width,
        kernel_h_, kernel_w_, stride_h_, stride_w_,
        pad_h_, pad_w_, dilation_h_, dilation_w_, weight_buffer_mutable_data);
const int weight_count = this->blobs_[0]->count();
const Btype* weight_buffer_data = weight_buffer_.gpu_data();
Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();
const Btype* weight_multiplier_data = weight_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, weight_count, length, Btype(1),
weight_buffer_data, weight_multiplier_data, Btype(1), weight_diff);
}
if (propagate_down[0]) {
const Btype* weight_data = this->blobs_[0]->template gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(ConvolutionDepthwiseBottomBackward<Ftype,Btype>),
        dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        bottom_count, top_diff, weight_data, num, channels,
        top_height, top_width, bottom_height, bottom_width,
        kernel_h_, kernel_w_, stride_h_, stride_w_,
        pad_h_, pad_w_, dilation_h_, dilation_w_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(ConvolutionDepthwiseLayer);
} // namespace caffe
| 1c2a486dff280cc85fc0da750ebb74de0e9fbc18.cu | //taken and modified from https://github.com/sp2823/caffe
//see also: https://github.com/BVLC/caffe/pull/5665/commits
#include <vector>
#include "caffe/layers/conv_dw_layer.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseWeightForward(const int nthreads,
const Btype* const bottom_data, const Ftype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Ftype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / top_height / top_width;
const int c = (index / top_height / top_width) % channels;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const Ftype* weight = weight_data + c * kernel_h * kernel_w;
Ftype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
value += (*weight) * bottom_data[offset];
}
++weight;
}
}
top_data[index] = value;
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBiasForward(const int nthreads,
const Ftype* const bias_data, const int num, const int channels,
const int top_height, const int top_width, Ftype* const top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = (index / top_height / top_width) % channels;
top_data[index] += bias_data[c];
}
}
template<typename Ftype, typename Btype>
void ConvolutionDepthwiseLayer<Ftype,Btype>::Forward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
this->Quantize_gpu(bottom, top);
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Ftype* top_data = top[0]->mutable_gpu_data<Ftype>();
const Ftype* weight_data = this->blobs_[0]->template gpu_data<Ftype>();
const int count = top[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
ConvolutionDepthwiseWeightForward<Ftype,Btype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, weight_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, top_data);
if (this->layer_param_.convolution_param().bias_term()) {
const Ftype* bias_data = this->blobs_[1]->template gpu_data<Ftype>();
ConvolutionDepthwiseBiasForward<Ftype,Btype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bias_data, num, channels,
top_height, top_width, top_data);
}
this->Quantize_gpu(bottom, top);
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseWeightBackward(const int nthreads,
const Ftype* const top_diff, const Btype* const bottom_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Btype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int kh = (index / kernel_w / num / top_height / top_width)
% kernel_h;
const int kw = (index / num / top_height / top_width) % kernel_w;
const int h_in = -pad_h + h * stride_h + kh * dilation_h;
const int w_in = -pad_w + w * stride_w + kw * dilation_w;
if ((h_in >= 0) && (h_in < bottom_height)
&& (w_in >= 0) && (w_in < bottom_width)) {
const int c = index / kernel_h / kernel_w / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int top_offset = ((n * channels + c) * top_height + h)
* top_width + w;
const int bottom_offset = ((n * channels + c) * bottom_height + h_in)
* bottom_width + w_in;
buffer_data[index] = top_diff[top_offset] * bottom_data[bottom_offset];
} else {
buffer_data[index] = 0;
}
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBottomBackward(const int nthreads,
const Ftype* const top_diff, const Btype* const weight_data,
const int num, const int channels, const int top_height,
const int top_width, const int bottom_height, const int bottom_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
const int dilation_h, const int dilation_w, Btype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / channels / bottom_height / bottom_width;
const int c = (index / bottom_height / bottom_width) % channels;
const int h = (index / bottom_width) % bottom_height;
const int w = index % bottom_width;
const Btype* weight = weight_data + c * kernel_h * kernel_w;
Btype value = 0;
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
const int h_out_s = h + pad_h - kh * dilation_h;
const int w_out_s = w + pad_w - kw * dilation_w;
if (((h_out_s % stride_h) == 0) && ((w_out_s % stride_w) == 0)) {
const int h_out = h_out_s / stride_h;
const int w_out = w_out_s / stride_w;
if ((h_out >= 0) && (h_out < top_height)
&& (w_out >= 0) && (w_out < top_width)) {
const int offset = ((n * channels + c) * top_height + h_out)
* top_width + w_out;
value += (*weight) * top_diff[offset];
}
}
++weight;
}
}
bottom_diff[index] += value;
}
}
template<typename Ftype, typename Btype>
__global__ void ConvolutionDepthwiseBiasBackward(const int nthreads,
const Ftype* const top_diff, const int num, const int channels,
const int top_height, const int top_width, Btype* const buffer_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int c = index / num / top_height / top_width;
const int n = (index / top_height / top_width) % num;
const int h = (index / top_width) % top_height;
const int w = index % top_width;
const int offset = ((n * channels + c) * top_height + h) * top_width + w;
buffer_data[index] = top_diff[offset];
}
}
template<typename Ftype, typename Btype>
void ConvolutionDepthwiseLayer<Ftype,Btype>::Backward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
const Ftype* top_diff = top[0]->gpu_diff<Ftype>();
const int bottom_count = bottom[0]->count();
const int num = top[0]->num();
const int channels = top[0]->channels();
const int top_height = top[0]->height();
const int top_width = top[0]->width();
const int bottom_height = bottom[0]->height();
const int bottom_width = bottom[0]->width();
const int length = num * top_height * top_width;
caffe_gpu_set(bottom_count, Btype(0), bottom[0]->mutable_gpu_diff<Btype>());
if (this->layer_param_.convolution_param().bias_term()
&& this->param_propagate_down_[1]) {
const int bias_buffer_count = bias_buffer_.count();
Btype* bias_buffer_mutable_data = bias_buffer_.mutable_gpu_data();
ConvolutionDepthwiseBiasBackward<Ftype,Btype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(bias_buffer_count), CAFFE_CUDA_NUM_THREADS>>>(
bias_buffer_count, top_diff, num, channels,
top_height, top_width, bias_buffer_mutable_data);
const int bias_count = this->blobs_[1]->count();
const Btype* bias_buffer_data = bias_buffer_.gpu_data();
Btype* bias_diff = this->blobs_[1]->template mutable_gpu_diff<Btype>();
const Btype* bias_multiplier_data = bias_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, bias_count, length, Btype(1),
bias_buffer_data, bias_multiplier_data, Btype(1), bias_diff);
}
if (this->param_propagate_down_[0]) {
const int weight_buffer_count = weight_buffer_.count();
const Btype* bottom_data = bottom[0]->gpu_data<Btype>();
Btype* weight_buffer_mutable_data = weight_buffer_.mutable_gpu_data();
ConvolutionDepthwiseWeightBackward<Ftype,Btype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(weight_buffer_count), CAFFE_CUDA_NUM_THREADS>>>(
weight_buffer_count, top_diff, bottom_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, weight_buffer_mutable_data);
const int weight_count = this->blobs_[0]->count();
const Btype* weight_buffer_data = weight_buffer_.gpu_data();
Btype* weight_diff = this->blobs_[0]->template mutable_gpu_diff<Btype>();
const Btype* weight_multiplier_data = weight_multiplier_.gpu_data();
caffe_gpu_gemv(CblasNoTrans, weight_count, length, Btype(1),
weight_buffer_data, weight_multiplier_data, Btype(1), weight_diff);
}
if (propagate_down[0]) {
const Btype* weight_data = this->blobs_[0]->template gpu_data<Btype>();
Btype* bottom_diff = bottom[0]->mutable_gpu_diff<Btype>();
ConvolutionDepthwiseBottomBackward<Ftype,Btype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>(
bottom_count, top_diff, weight_data, num, channels,
top_height, top_width, bottom_height, bottom_width,
kernel_h_, kernel_w_, stride_h_, stride_w_,
pad_h_, pad_w_, dilation_h_, dilation_w_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS_FB(ConvolutionDepthwiseLayer);
} // namespace caffe
|
7453407d0648afe0e0c13884158de07588c568f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Compile
nvcc -m64 -arch=compute_52 -code=sm_52 -O3 poisson_3d_deltax.cu -lcufft -lcudart
*/
#include <complex>
#include <cstdio>
#include <hipfft.h>
#include <math.h>
#include <string.h>
using namespace std;
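// Solves the 3-D Poisson equation for a unit point charge at the origin via
// FFT: a forward real-to-complex transform of the source, division of every
// Fourier mode by k^2 = kx^2 + ky^2 + kz^2 in getPhi (the k = 0 mode is left
// untouched), and an inverse complex-to-real transform. cuFFT transforms are
// unnormalized, so the host code divides the result by N = Nx*Ny*Nz when
// printing. A 4*pi factor appears only in commented-out lines in getPhi.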
__global__ void getPhi(double *d_phi_k, int Nx, int Ny, int Nz_half, double dx){
// The result is written in place into d_phi_k
int N = Nx * Ny * Nz_half;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int nx, ny;
double kx2, ky2, kz2;
while(index < N) {
int i = index / (Ny * Nz_half);
int j = (index / Nz_half) % Ny;
int k = index % Nz_half;
if(2 * i < Nx){
nx = i;
}
else{
nx = Nx - i;
}
if(2 * j < Ny){
ny = j;
}
else{
ny = Ny - j;
}
kx2 = pow(2.0 * M_PI * (double)nx / (Nx * dx), 2);
ky2 = pow(2.0 * M_PI * (double)ny / (Nx * dx), 2);
kz2 = pow(2.0 * M_PI * (double)k / (Nx * dx), 2);
if(index != 0){
// d_phi_k[2*index] = 4.0 * M_PI * d_phi_k[2*index] / (kx2 + ky2 + kz2);
// d_phi_k[2*index+1] = 4.0 * M_PI * d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
d_phi_k[2*index] = d_phi_k[2*index] / (kx2 + ky2 + kz2);
d_phi_k[2*index+1] = d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
}
index = index + blockDim.x * gridDim.x;
}
}
int main ()
{
// Set GPU Device
int gid;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n", gid);
hipSetDevice(gid);
int Nx, Ny, Nz, N;
printf("Enter the sample points of the cube in each side: ");
scanf("%d", &Nx);
printf("Each side sample points = %d\n", Nx);
Ny = Nx;
Nz = Nx;
N = pow(Nx, 3);
double dx;
double L;
printf("Enter the length of the cube ( > 1.0 ): ");
scanf("%lf", &L);
printf("Length = %lf\n", L);
dx = L / (double) Nx;
printf("dx = %lf\n", dx);
// Find phi(1.0) point index as the base point
int datum = (int)(1.0 / dx);
int io;
printf("Print the data (0/1) ? ");
scanf("%d",&io);
printf("%d\n", io);
/*
Initialize
*/
double *lo;
complex<double> *lo_k;
lo = (double*) malloc(sizeof(double) * N);
lo_k = (complex<double> *) malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
memset(lo, 0.0, sizeof(double) * N);
// point charge at the origin
lo[0] = 1.0;
/*
Poisson Eq with FFT method
*/
// FFT lo -> lo_k
hipfftHandle plan;
hipfftDoubleReal *dataIn;
hipfftDoubleComplex *dataOut;
hipMalloc((void**)&dataIn, sizeof(hipfftDoubleReal) * N);
hipMalloc((void**)&dataOut, sizeof(hipfftDoubleComplex) * N);
hipMemcpy(dataIn, lo, sizeof(hipfftDoubleReal) * N, hipMemcpyHostToDevice);
if(hipfftPlan3d(&plan, Nx, Ny, Nz, HIPFFT_D2Z) != HIPFFT_SUCCESS){
printf("CUFFT error: hipfftPlan3d creation failed.\n");
exit(1);
}
if(hipfftExecD2Z(plan, dataIn, dataOut) != HIPFFT_SUCCESS){
printf("CUFFT error: hipfftExecD2Z forward failed.\n");
exit(1);
}
// Copy only the non redundant data
hipMemcpy(lo_k, dataOut, sizeof(hipfftDoubleComplex) * Nx * Ny * (Nz/2+1), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipfftDestroy(plan);
hipFree(dataIn);
hipFree(dataOut);
free(lo);
// Print the data of lo_k
// for(int i = 0; i < Nx * Ny * (Nz/2+1); i = i+1){
// printf("%.3lf + i * %.3lf\n", real(lo_k[i]), imag(lo_k[i]));
// }
// Calculate lo_k / k**2 = phi_k
complex<double> *phi_k;
double *d_phi_k;
phi_k = (complex<double> *)malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
hipMalloc((void**)&d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1));
hipMemcpy(d_phi_k, lo_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+ 1), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( getPhi) , dim3(64), dim3(64), 0, 0, d_phi_k, Nx, Ny, Nz/2+1, dx);
hipMemcpy(phi_k, d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1), hipMemcpyDeviceToHost);
hipFree(d_phi_k);
free(lo_k);
// IFFT phi_k -> phi
double *phi;
phi = (double*) malloc(sizeof(double) * N);
hipMalloc((void**)&dataIn, sizeof(hipfftDoubleReal) * N);
hipMalloc((void**)&dataOut, sizeof(hipfftDoubleComplex) * Nx * Ny * (Nz/2+1));
hipMemcpy(dataOut, phi_k, sizeof(hipfftDoubleComplex) * Nx * Ny * (Nz/2+1), hipMemcpyHostToDevice);
if(hipfftPlan3d(&plan, Nx, Ny, Nz, HIPFFT_Z2D) != HIPFFT_SUCCESS){
printf("CUFFT error: hipfftPlan3d creation failed.\n");
exit(1);
}
if(hipfftExecZ2D(plan, dataOut, dataIn) != HIPFFT_SUCCESS){
printf("CUFFT error: hipfftExecZ2D forward failed.\n");
exit(1);
}
hipMemcpy(phi, dataIn, sizeof(hipfftDoubleReal) * N, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipFree(dataIn);
hipFree(dataOut);
free(phi_k);
// Print out on screen
if(io == 1){
printf("phi-X r phi-D r\n");
for(int i = 0; i < Nx; i = i+1){
printf("%.5lf %.5lf ", (phi[i] - phi[datum]) / (double)N, (double)i * dx);
printf("%.5lf %.5lf\n", (phi[i*Ny*Nz + i*Ny + i] - phi[datum]) / (double)N, sqrt(3.0 * pow((double)i * dx,2)));
}
}
// Print phi(1)
printf("phi(1) = %lf\n", phi[datum]);
hipfftDestroy(plan);
hipDeviceReset();
return 0;
}
| 7453407d0648afe0e0c13884158de07588c568f7.cu | /*
Compile
nvcc -m64 -arch=compute_52 -code=sm_52 -O3 poisson_3d_deltax.cu -lcufft -lcudart
*/
#include <complex>
#include <cstdio>
#include <cufft.h>
#include <math.h>
#include <string.h>
using namespace std;
__global__ void getPhi(double *d_phi_k, int Nx, int Ny, int Nz_half, double dx){
// Change made inside d_phi_k
int N = Nx * Ny * Nz_half;
int index = blockDim.x * blockIdx.x + threadIdx.x;
int nx, ny;
double kx2, ky2, kz2;
while(index < N) {
int i = index / (Ny * Nz_half);
int j = (index / Nz_half) % Ny;
int k = index % Nz_half;
if(2 * i < Nx){
nx = i;
}
else{
nx = Nx - i;
}
if(2 * j < Ny){
ny = j;
}
else{
ny = Ny - j;
}
kx2 = pow(2.0 * M_PI * (double)nx / (Nx * dx), 2);
ky2 = pow(2.0 * M_PI * (double)ny / (Nx * dx), 2);
kz2 = pow(2.0 * M_PI * (double)k / (Nx * dx), 2);
if(index != 0){
// d_phi_k[2*index] = 4.0 * M_PI * d_phi_k[2*index] / (kx2 + ky2 + kz2);
// d_phi_k[2*index+1] = 4.0 * M_PI * d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
d_phi_k[2*index] = d_phi_k[2*index] / (kx2 + ky2 + kz2);
d_phi_k[2*index+1] = d_phi_k[2*index+1] / (kx2 + ky2 + kz2);
}
index = index + blockDim.x * gridDim.x;
}
}
int main ()
{
// Set GPU Device
int gid;
printf("Enter the GPU ID (0/1): ");
scanf("%d",&gid);
printf("%d\n", gid);
cudaSetDevice(gid);
int Nx, Ny, Nz, N;
printf("Enter the sample points of the cube in each side: ");
scanf("%d", &Nx);
printf("Each side sample points = %d\n", Nx);
Ny = Nx;
Nz = Nx;
N = pow(Nx, 3);
double dx;
double L;
printf("Enter the length of the cube ( > 1.0 ): ");
scanf("%lf", &L);
printf("Length = %lf\n", L);
dx = L / (double) Nx;
printf("dx = %lf\n", dx);
// Find phi(1.0) point index as the base point
int datum = (int)(1.0 / dx);
int io;
printf("Print the data (0/1) ? ");
scanf("%d",&io);
printf("%d\n", io);
/*
Initialize
*/
double *lo;
complex<double> *lo_k;
lo = (double*) malloc(sizeof(double) * N);
lo_k = (complex<double> *) malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
memset(lo, 0.0, sizeof(double) * N);
// point charge at the origin
lo[0] = 1.0;
/*
Poisson Eq with FFT method
*/
// FFT lo -> lo_k
cufftHandle plan;
cufftDoubleReal *dataIn;
cufftDoubleComplex *dataOut;
cudaMalloc((void**)&dataIn, sizeof(cufftDoubleReal) * N);
cudaMalloc((void**)&dataOut, sizeof(cufftDoubleComplex) * N);
cudaMemcpy(dataIn, lo, sizeof(cufftDoubleReal) * N, cudaMemcpyHostToDevice);
if(cufftPlan3d(&plan, Nx, Ny, Nz, CUFFT_D2Z) != CUFFT_SUCCESS){
printf("CUFFT error: cufftPlan3d creation failed.\n");
exit(1);
}
if(cufftExecD2Z(plan, dataIn, dataOut) != CUFFT_SUCCESS){
printf("CUFFT error: cufftExecD2Z forward failed.\n");
exit(1);
}
// Copy only the non redundant data
cudaMemcpy(lo_k, dataOut, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cufftDestroy(plan);
cudaFree(dataIn);
cudaFree(dataOut);
free(lo);
// Print the data of lo_k
// for(int i = 0; i < Nx * Ny * (Nz/2+1); i = i+1){
// printf("%.3lf + i * %.3lf\n", real(lo_k[i]), imag(lo_k[i]));
// }
// Calculate lo_k / k**2 = phi_k
complex<double> *phi_k;
double *d_phi_k;
phi_k = (complex<double> *)malloc(sizeof(complex<double>) * Nx * Ny * (Nz/2+1));
cudaMalloc((void**)&d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1));
cudaMemcpy(d_phi_k, lo_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+ 1), cudaMemcpyHostToDevice);
getPhi <<<64, 64>>> (d_phi_k, Nx, Ny, Nz/2+1, dx);
cudaMemcpy(phi_k, d_phi_k, sizeof(double) * 2 * Nx * Ny * (Nz/2+1), cudaMemcpyDeviceToHost);
cudaFree(d_phi_k);
free(lo_k);
// IFFT phi_k -> phi
double *phi;
phi = (double*) malloc(sizeof(double) * N);
cudaMalloc((void**)&dataIn, sizeof(cufftDoubleReal) * N);
cudaMalloc((void**)&dataOut, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1));
cudaMemcpy(dataOut, phi_k, sizeof(cufftDoubleComplex) * Nx * Ny * (Nz/2+1), cudaMemcpyHostToDevice);
if(cufftPlan3d(&plan, Nx, Ny, Nz, CUFFT_Z2D) != CUFFT_SUCCESS){
printf("CUFFT error: cufftPlan3d creation failed.\n");
exit(1);
}
if(cufftExecZ2D(plan, dataOut, dataIn) != CUFFT_SUCCESS){
printf("CUFFT error: cufftExecZ2D forward failed.\n");
exit(1);
}
cudaMemcpy(phi, dataIn, sizeof(cufftDoubleReal) * N, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaFree(dataIn);
cudaFree(dataOut);
free(phi_k);
// Print out on screen
if(io == 1){
printf("phi-X r phi-D r\n");
for(int i = 0; i < Nx; i = i+1){
printf("%.5lf %.5lf ", (phi[i] - phi[datum]) / (double)N, (double)i * dx);
printf("%.5lf %.5lf\n", (phi[i*Ny*Nz + i*Ny + i] - phi[datum]) / (double)N, sqrt(3.0 * pow((double)i * dx,2)));
}
}
// Print phi(1)
printf("phi(1) = %lf\n", phi[datum]);
cufftDestroy(plan);
cudaDeviceReset();
return 0;
}
|
2fd9fc59700e3ec51658611f2b6b866304f50f62.hip | // !!! This is a file automatically generated by hipify!!!
/**
* main.cpp: This file is part of the gpumembench micro-benchmark suite.
*
* Contact: Elias Konstantinidis <[email protected]>
**/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#define VECTOR_SIZE 1024
// Initialize vector data
template <class T>
__device__ T init_vector(int v){
return 0;
}
template <>
__device__ int init_vector(int v){
return v;
}
template <>
__device__ int2 init_vector(int v){
return make_int2(v, v);
}
template <>
__device__ int4 init_vector(int v){
return make_int4(v, v, v, v);
}
// Sum up vector data
template <class T>
__device__ int reduce_vector(T v){
return 0;
}
template <>
__device__ int reduce_vector(int v){
return v;
}
template <>
__device__ int reduce_vector(int2 v){
return v.x + v.y;
}
template <>
__device__ int reduce_vector(int4 v){
return v.x + v.y + v.z + v.w;
}
template <class T>
__device__ void add_vector(T &target, const T &v){
}
__device__ void add_vector(int &target, const int &v) {
target += v;
}
__device__ void add_vector(int2 &target, const int2 &v) {
target.x += v.x;
target.y += v.y;
}
__device__ void add_vector(int4 &target, const int4 &v) {
target.x += v.x;
target.y += v.y;
target.z += v.z;
target.w += v.w;
}
__device__ __constant__ int constant_data[VECTOR_SIZE];
template <class T>
__global__
void benchmark_constant(int *output, int repeat)
{
T* constant_data_p = (T*)constant_data;
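// Reinterpret the int constant buffer as vector type T (int, int2 or int4) to vary the memory access width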
T sum = init_vector<T>(0);
for(int i=0; i<4; i++){
for(int j=0; j<VECTOR_SIZE/(sizeof(T)/sizeof(int)); j+=4){
add_vector(sum, constant_data_p[j+i]);
}
}
if( threadIdx.x==0 && blockIdx.x==0 ) {
*output = reduce_vector(sum);
}
}
template<typename T>
int test_case (int *a, long gridsize, int repeat) {
const int BLOCK_SIZE = 256;
const int TOTAL_BLOCKS = gridsize/(BLOCK_SIZE);
hipMemcpyToSymbol(constant_data, a, VECTOR_SIZE*sizeof(int));
int *cd, c;
hipMalloc((void**)&cd, sizeof(int));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(TOTAL_BLOCKS);
// warm up
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( benchmark_constant<T>), dim3(dimGrid), dim3(dimBlock) , 0, 0, cd, repeat);
}
hipMemset(cd, 0, sizeof(int));
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( benchmark_constant<T>), dim3(dimGrid), dim3(dimBlock) , 0, 0, cd, repeat);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// verify
hipMemcpy(&c, cd, sizeof(int), hipMemcpyDeviceToHost);
printf("%s\t", (c == VECTOR_SIZE) ? "PASS" : "FAIL");
hipFree(cd);
printf("Average kernel execution time (memory access width = %zu bytes): %f ms\n",
sizeof(T), time * 1e-6f / repeat);
return 0;
}
void constbenchGPU(int *a, long gridsize, int repeat) {
test_case< int>(a, gridsize, repeat);
test_case<int2>(a, gridsize, repeat);
test_case<int4>(a, gridsize, repeat);
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Constant memory bandwidth microbenchmark\n");
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
unsigned int datasize = VECTOR_SIZE * sizeof(int);
int *v = (int*)malloc(datasize);
for(int i = 0; i < VECTOR_SIZE; i++) v[i] = 1;
constbenchGPU(v, 4096*VECTOR_SIZE, repeat);
free(v);
return 0;
}
| 2fd9fc59700e3ec51658611f2b6b866304f50f62.cu | /**
* main.cpp: This file is part of the gpumembench micro-benchmark suite.
*
* Contact: Elias Konstantinidis <[email protected]>
**/
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#define VECTOR_SIZE 1024
// Initialize vector data
template <class T>
__device__ T init_vector(int v){
return 0;
}
template <>
__device__ int init_vector(int v){
return v;
}
template <>
__device__ int2 init_vector(int v){
return make_int2(v, v);
}
template <>
__device__ int4 init_vector(int v){
return make_int4(v, v, v, v);
}
// Sum up vector data
template <class T>
__device__ int reduce_vector(T v){
return 0;
}
template <>
__device__ int reduce_vector(int v){
return v;
}
template <>
__device__ int reduce_vector(int2 v){
return v.x + v.y;
}
template <>
__device__ int reduce_vector(int4 v){
return v.x + v.y + v.z + v.w;
}
template <class T>
__device__ void add_vector(T &target, const T &v){
}
__device__ void add_vector(int &target, const int &v) {
target += v;
}
__device__ void add_vector(int2 &target, const int2 &v) {
target.x += v.x;
target.y += v.y;
}
__device__ void add_vector(int4 &target, const int4 &v) {
target.x += v.x;
target.y += v.y;
target.z += v.z;
target.w += v.w;
}
__device__ __constant__ int constant_data[VECTOR_SIZE];
template <class T>
__global__
void benchmark_constant(int *output, int repeat)
{
T* constant_data_p = (T*)constant_data;
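// Reinterpret the int constant buffer as vector type T (int, int2 or int4) to vary the memory access width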
T sum = init_vector<T>(0);
for(int i=0; i<4; i++){
for(int j=0; j<VECTOR_SIZE/(sizeof(T)/sizeof(int)); j+=4){
add_vector(sum, constant_data_p[j+i]);
}
}
if( threadIdx.x==0 && blockIdx.x==0 ) {
*output = reduce_vector(sum);
}
}
template<typename T>
int test_case (int *a, long gridsize, int repeat) {
const int BLOCK_SIZE = 256;
const int TOTAL_BLOCKS = gridsize/(BLOCK_SIZE);
cudaMemcpyToSymbol(constant_data, a, VECTOR_SIZE*sizeof(int));
int *cd, c;
cudaMalloc((void**)&cd, sizeof(int));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(TOTAL_BLOCKS);
// warm up
for (int i = 0; i < repeat; i++) {
benchmark_constant<T><<< dimGrid, dimBlock >>>(cd, repeat);
}
cudaMemset(cd, 0, sizeof(int));
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
benchmark_constant<T><<< dimGrid, dimBlock >>>(cd, repeat);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
// verify
cudaMemcpy(&c, cd, sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\t", (c == VECTOR_SIZE) ? "PASS" : "FAIL");
cudaFree(cd);
printf("Average kernel execution time (memory access width = %zu bytes): %f ms\n",
sizeof(T), time * 1e-6f / repeat);
return 0;
}
void constbenchGPU(int *a, long gridsize, int repeat) {
test_case< int>(a, gridsize, repeat);
test_case<int2>(a, gridsize, repeat);
test_case<int4>(a, gridsize, repeat);
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Constant memory bandwidth microbenchmark\n");
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
unsigned int datasize = VECTOR_SIZE * sizeof(int);
int *v = (int*)malloc(datasize);
for(int i = 0; i < VECTOR_SIZE; i++) v[i] = 1;
constbenchGPU(v, 4096*VECTOR_SIZE, repeat);
free(v);
return 0;
}
|
e331ec3105c0ad177c8e0463cf212fadd849a2bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
hipError_t addWithCuda(int *c, int *a, int *b, unsigned int size);
__global__ void addKernel(int *c, int *a, int *b)
{
__shared__ int temp[1024];
int i = threadIdx.x;
temp[i] = a[i] + b[i];
__syncthreads();
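// Thread 0 serially reduces the per-thread sums held in shared memory into a single value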
if (threadIdx.x == 0) {
int sum = 0;
for (int i = 0; i< 1024; i++) sum += temp[i];
*c = sum;
}
}
int main()
{
const int arraySize = 1024;
int a[arraySize];
int b[arraySize];
for (int number = 0; number < arraySize; number++) {
a[number] = number;
b[number] = number * 10;
};
int c[arraySize] = { 0 };
// Add vectors in parallel.
int device =0;
struct hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
std::cout << "using " << properties.multiProcessorCount << " multiprocessors" << std::endl;
std::cout << "max threads per processor: " << properties.maxThreadsPerMultiProcessor << std::endl;
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, int *a, int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| e331ec3105c0ad177c8e0463cf212fadd849a2bb.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size);
__global__ void addKernel(int *c, int *a, int *b)
{
__shared__ int temp[1024];
int i = threadIdx.x;
temp[i] = a[i] + b[i];
__syncthreads();
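// Thread 0 serially reduces the per-thread sums held in shared memory into a single value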
if (threadIdx.x == 0) {
int sum = 0;
for (int i = 0; i< 1024; i++) sum += temp[i];
*c = sum;
}
}
int main()
{
const int arraySize = 1024;
int a[arraySize];
int b[arraySize];
for (int number = 0; number < arraySize; number++) {
a[number] = number;
b[number] = number * 10;
};
int c[arraySize] = { 0 };
// Add vectors in parallel.
int device =0;
struct cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
std::cout << "using " << properties.multiProcessorCount << " multiprocessors" << std::endl;
std::cout << "max threads per processor: " << properties.maxThreadsPerMultiProcessor << std::endl;
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
74bac25bc623b836b004f81bcf910e205dfcbcea.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>
namespace cuspatial
{
/**
* @brief read uint32_t (unsigned integer with 32 bit fixed length) data from file as column
* see soa_readers.hpp
*/
gdf_column read_uint32_soa(const char *filename)
{
gdf_column values;
memset(&values,0,sizeof(gdf_column));
uint32_t *data=nullptr;
size_t num_l=read_field<uint32_t>(filename,data);
if(data==nullptr)
return values;
uint32_t* temp_val{nullptr};
RMM_TRY( RMM_ALLOC(&temp_val, num_l * sizeof(uint32_t), 0) );
hipStream_t stream{0};
CUDA_TRY( hipMemcpyAsync(temp_val, data,
num_l * sizeof(uint32_t) ,
hipMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&values, temp_val, nullptr, num_l,
GDF_INT32, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "id");
return values;
}//read_uint32_soa
}//cuspatial
| 74bac25bc623b836b004f81bcf910e205dfcbcea.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <utilities/error_utils.hpp>
#include <rmm/rmm.h>
#include <cudf/types.h>
#include <cudf/legacy/column.hpp>
#include <cuspatial/soa_readers.hpp>
#include <utility/utility.hpp>
namespace cuspatial
{
/**
* @brief read uint32_t (unsigned integer with 32 bit fixed length) data from file as column
* see soa_readers.hpp
*/
gdf_column read_uint32_soa(const char *filename)
{
gdf_column values;
memset(&values,0,sizeof(gdf_column));
uint32_t *data=nullptr;
size_t num_l=read_field<uint32_t>(filename,data);
if(data==nullptr)
return values;
uint32_t* temp_val{nullptr};
RMM_TRY( RMM_ALLOC(&temp_val, num_l * sizeof(uint32_t), 0) );
cudaStream_t stream{0};
CUDA_TRY( cudaMemcpyAsync(temp_val, data,
num_l * sizeof(uint32_t) ,
cudaMemcpyHostToDevice,stream) );
gdf_column_view_augmented(&values, temp_val, nullptr, num_l,
GDF_INT32, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "id");
return values;
}//read_uint32_soa
}//cuspatial
|
f199b6f59f3878812960948819fae7792675a711.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/image_augmentation.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
#include <hiprand/hiprand_kernel.h>
namespace nbla {
__global__ void kernel_prepare_curand(const int num, hiprandStateXORWOW_t *state,
const int seed) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { hiprand_init(seed, idx, 0, &state[idx]); }
}
template <typename T>
__global__ void IAKernel(const T *x, const int w_in, const int h_in,
const float x0_in, const float y0_in, T *y,
const int w_out, const int h_out, const float x_ax,
const float y_ax, const float x_ay, const float y_ay,
const float distortion, const float brightness,
const float contrast, const float contrast_center,
hiprandStateXORWOW_t *state, const float noise) {
const int x_out = blockDim.x * blockIdx.x + threadIdx.x;
const int y_out = blockDim.y * blockIdx.y + threadIdx.y;
if (x_out < w_out && y_out < h_out) {
const int out_offset = w_out * y_out + x_out;
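// Map the output pixel to normalized coordinates and apply radial (lens-style) distortion before sampling the input image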
const float w_out_half = w_out * 0.5f;
const float h_out_half = h_out * 0.5f;
float dist_x = (x_out - w_out_half) / w_out_half;
float dist_y = (y_out - h_out_half) / h_out_half;
const float r = sqrt(dist_x * dist_x + dist_y * dist_y);
const float r2 = r * r;
const float dist_scale = 1.0f / (1.0f + distortion);
dist_x = (dist_x + dist_x * distortion * r2) * w_out_half * dist_scale +
w_out_half;
dist_y = (dist_y + dist_y * distortion * r2) * h_out_half * dist_scale +
h_out_half;
float x_in = x0_in + dist_x * x_ax + dist_y * y_ax;
float y_in = y0_in + dist_x * x_ay + dist_y * y_ay;
if (x_in < 0) {
x_in = 0.0;
} else if (x_in > w_in - 1) {
x_in = w_in - 1;
}
if (y_in < 0) {
y_in = 0.0;
} else if (y_in > h_in - 1) {
y_in = h_in - 1;
}
// Prepare linear interpolation
const int intx = (int)x_in;
const int inty = (int)y_in;
const float fmodx = x_in - intx;
const float fmody = y_in - inty;
const int intx_plus1 = intx < w_in - 1 ? intx + 1 : intx;
const int inty_plus1 = inty < h_in - 1 ? inty + 1 : inty;
// Top left
const int pos0 = intx + inty * w_in;
const T pos0_gain = (1 - fmodx) * (1 - fmody);
// Top right
const int pos1 = intx_plus1 + inty * w_in;
const T pos1_gain = fmodx * (1 - fmody);
// Bottom left
const int pos2 = intx + inty_plus1 * w_in;
const T pos2_gain = (1 - fmodx) * fmody;
// Bottom right
const int pos3 = intx_plus1 + inty_plus1 * w_in;
const T pos3_gain = fmodx * fmody;
// Linear interpolation
T result = x[pos0] * pos0_gain + x[pos1] * pos1_gain + x[pos2] * pos2_gain +
x[pos3] * pos3_gain;
result = (result + brightness) * contrast + contrast_center;
if (state) {
result += hiprand_normal(&state[out_offset]) * noise;
}
y[out_offset] = result;
}
}
template <typename T>
void ImageAugmentationCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
ImageAugmentation<T>::setup_impl(inputs, outputs);
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
int curand_state_len = 0;
if (this->noise_ > 0.0) {
const int data_size = w_in * h_in;
if (data_size > curand_state_len) {
curand_state_len = data_size;
}
}
if (curand_state_len) {
int curand_state_size =
(sizeof(hiprandStateXORWOW_t) - 1) / sizeof(T) + sizeof(int);
// prepare hiprand state
Shape_t state_shape;
state_shape.push_back(curand_state_len * curand_state_size);
curand_state_.reshape(state_shape, true);
int *state = curand_state_.cast_data_and_get_pointer<int>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_prepare_curand, curand_state_len,
(hiprandStateXORWOW_t *)state, this->seed_);
}
}
template <typename T>
void ImageAugmentationCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int w_in_pad = w_in + this->pad_[1] * 2;
const int h_in_pad = h_in + this->pad_[0] * 2;
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
// std::cout << "shape_in : w=" << w_in << ", h=" << h_in << ", ch=" << num_ch
// << ", num=" << num_image << "\n";
Shape_t shape_out = outputs[0]->shape();
const int w_out = shape_out[shape_out.size() - 1];
const int h_out = shape_out[shape_out.size() - 2];
// std::cout << "shape_out : w=" << w_out << ", h=" << h_out << "\n";
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
const int ch_size_in = h_in * w_in;
const int ch_size_out = h_out * w_out;
vector<float> channel_brightness(num_ch);
vector<float> channel_contrast(num_ch);
int *state =
this->noise_ > 0.0
? curand_state_.cast_data_and_get_pointer<int>(this->ctx_, false)
: nullptr;
for (int iim = 0; iim < num_image; ++iim) {
// Define augmentation settings
// std::cout << "* image " << iim << "\n";
const int im_offset_in = iim * w_in * h_in * num_ch;
const Tc *x_im = x + im_offset_in;
int im_offset_out = iim * w_out * h_out * num_ch;
Tc *y_im = y + im_offset_out;
// std::cout << "offset : in=" << im_offset_in << ", out=" << im_offset_out
// << "\n";
const float scale =
this->min_scale_ *
::exp(
(this->rgen_() % 1001) * 0.001f *
::log(this->max_scale_ /
this->min_scale_)); // [this->min_scale_, this->max_scale_]
const float scale_x = ::exp(-::log(this->aspect_ratio_) * 0.5 +
(this->rgen_() % 1001) * 0.001f *
::log(this->aspect_ratio_));
const float scale_y = 1.0 / scale_x;
const float i_scale_x = 1.0f / (scale * scale_x);
const float i_scale_y = 1.0f / (scale * scale_y);
// std::cout << "scale : min=" << min_scale_ << ", max=" << max_scale_ << ",
// v=" << scale << ", inv=" << i_scale << "\n";
const float angle = -this->angle_ +
((this->rgen_() % 1001) * 0.001f) * this->angle_ *
2; // [-angle_, angle_]
// std::cout << "angle : " << angle << "\n";
// Preparation
const float w_scaled = w_in_pad * scale * scale_x;
const float h_scaled = h_in_pad * scale * scale_y;
// std::cout << "shape_scaled : w=" << w_scaled << ", h=" << h_scaled <<
// "\n";
const float cx = (w_out - 1) * 0.5f;
const float cy = (h_out - 1) * 0.5f;
// std::cout << "center : x=" << cx << ", y=" << cy << "\n";
const float cx_scaled =
((this->rgen_() % 1001) * 0.001f) * (w_scaled - w_out) + cx;
const float cy_scaled =
((this->rgen_() % 1001) * 0.001f) * (h_scaled - h_out) + cy;
// std::cout << "center_scaled : x=" << cx_scaled << ", y=" << cy_scaled <<
// "\n";
const bool flip_lr = this->flip_lr_ & (this->rgen_() % 2);
const bool flip_ud = this->flip_ud_ & (this->rgen_() % 2);
const float global_brightness =
((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_;
// std::cout << "global_brightness : " << global_brightness << "\n";
const float global_contrast = ::exp((this->rgen_() % 1001) * 0.001f *
::log(this->contrast_) * 2.0f) /
this->contrast_;
// std::cout << "global_contrast : " << global_contrast << "\n";
for (int ic = 0; ic < num_ch; ++ic) {
const float ch_brightness =
this->brightness_each_
? ((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_
: global_brightness;
channel_brightness[ic] = ch_brightness - this->contrast_center_;
// std::cout << "channel_brightness - 0.5 : " << channel_brightness[ic] <<
// "\n";
const float ch_contrast =
this->contrast_each_
? ::exp((this->rgen_() % 1001) * 0.001f *
::log(this->contrast_) * 2.0f) /
this->contrast_
: global_contrast;
channel_contrast[ic] = ch_contrast;
// std::cout << "channel_contrast : " << channel_contrast[ic] << "\n";
}
const float distortion =
::exp(((this->rgen_() % 1001) * 0.001f * 2.0f * this->distortion_) -
this->distortion_) -
1.0f;
// std::cout << "distortion : " << distortion << "\n";
const float noise = (this->rgen_() % 1001) * 0.001f * this->noise_;
// std::cout << "noise : " << noise << "\n";
// Pixel loop
const float cos_theta = std::cos(angle);
const float sin_theta = std::sin(angle);
const float x_ax = (flip_lr ? -cos_theta : cos_theta) * i_scale_x;
const float y_ax = (flip_lr ? sin_theta : -sin_theta) * i_scale_y;
const float x_ay = (flip_ud ? -sin_theta : sin_theta) * i_scale_x;
const float y_ay = (flip_ud ? -cos_theta : cos_theta) * i_scale_y;
float x0_in =
(cx_scaled * i_scale_x) - (x_ax * cx + y_ax * cy) - this->pad_[1];
float y0_in =
(cy_scaled * i_scale_y) - (x_ay * cx + y_ay * cy) - this->pad_[0];
dim3 threads(32, 16);
dim3 blocks((w_out - 1) / threads.x + 1, (h_out - 1) / threads.y + 1);
for (int ic = 0; ic < num_ch; ++ic) {
hipLaunchKernelGGL(( IAKernel), dim3(blocks), dim3(threads), 0, 0,
x_im + ch_size_in * ic, w_in, h_in, x0_in, y0_in,
y_im + ch_size_out * ic, w_out, h_out, x_ax, y_ax, x_ay, y_ay,
distortion, channel_brightness[ic], channel_contrast[ic],
this->contrast_center_, (hiprandStateXORWOW_t *)state, noise);
NBLA_CUDA_KERNEL_CHECK();
}
}
}
template <typename T>
void ImageAugmentationCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
// Not supported
}
}
| f199b6f59f3878812960948819fae7792675a711.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/image_augmentation.hpp>
#include <nbla/cuda/math.hpp>
#include <nbla/variable.hpp>
#include <curand_kernel.h>
namespace nbla {
__global__ void kernel_prepare_curand(const int num, curandStateXORWOW_t *state,
const int seed) {
NBLA_CUDA_KERNEL_LOOP(idx, num) { curand_init(seed, idx, 0, &state[idx]); }
}
template <typename T>
__global__ void IAKernel(const T *x, const int w_in, const int h_in,
const float x0_in, const float y0_in, T *y,
const int w_out, const int h_out, const float x_ax,
const float y_ax, const float x_ay, const float y_ay,
const float distortion, const float brightness,
const float contrast, const float contrast_center,
curandStateXORWOW_t *state, const float noise) {
const int x_out = blockDim.x * blockIdx.x + threadIdx.x;
const int y_out = blockDim.y * blockIdx.y + threadIdx.y;
if (x_out < w_out && y_out < h_out) {
const int out_offset = w_out * y_out + x_out;
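// Map the output pixel to normalized coordinates and apply radial (lens-style) distortion before sampling the input image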
const float w_out_half = w_out * 0.5f;
const float h_out_half = h_out * 0.5f;
float dist_x = (x_out - w_out_half) / w_out_half;
float dist_y = (y_out - h_out_half) / h_out_half;
const float r = sqrt(dist_x * dist_x + dist_y * dist_y);
const float r2 = r * r;
const float dist_scale = 1.0f / (1.0f + distortion);
dist_x = (dist_x + dist_x * distortion * r2) * w_out_half * dist_scale +
w_out_half;
dist_y = (dist_y + dist_y * distortion * r2) * h_out_half * dist_scale +
h_out_half;
float x_in = x0_in + dist_x * x_ax + dist_y * y_ax;
float y_in = y0_in + dist_x * x_ay + dist_y * y_ay;
if (x_in < 0) {
x_in = 0.0;
} else if (x_in > w_in - 1) {
x_in = w_in - 1;
}
if (y_in < 0) {
y_in = 0.0;
} else if (y_in > h_in - 1) {
y_in = h_in - 1;
}
// Prepare linear interpolation
const int intx = (int)x_in;
const int inty = (int)y_in;
const float fmodx = x_in - intx;
const float fmody = y_in - inty;
const int intx_plus1 = intx < w_in - 1 ? intx + 1 : intx;
const int inty_plus1 = inty < h_in - 1 ? inty + 1 : inty;
// Top left
const int pos0 = intx + inty * w_in;
const T pos0_gain = (1 - fmodx) * (1 - fmody);
// Top right
const int pos1 = intx_plus1 + inty * w_in;
const T pos1_gain = fmodx * (1 - fmody);
// Bottom left
const int pos2 = intx + inty_plus1 * w_in;
const T pos2_gain = (1 - fmodx) * fmody;
// Bottom right
const int pos3 = intx_plus1 + inty_plus1 * w_in;
const T pos3_gain = fmodx * fmody;
// Linear interpolation
T result = x[pos0] * pos0_gain + x[pos1] * pos1_gain + x[pos2] * pos2_gain +
x[pos3] * pos3_gain;
result = (result + brightness) * contrast + contrast_center;
if (state) {
result += curand_normal(&state[out_offset]) * noise;
}
y[out_offset] = result;
}
}
template <typename T>
void ImageAugmentationCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
ImageAugmentation<T>::setup_impl(inputs, outputs);
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
int curand_state_len = 0;
if (this->noise_ > 0.0) {
const int data_size = w_in * h_in;
if (data_size > curand_state_len) {
curand_state_len = data_size;
}
}
if (curand_state_len) {
int curand_state_size =
(sizeof(curandStateXORWOW_t) - 1) / sizeof(T) + sizeof(int);
// prepare curand state
Shape_t state_shape;
state_shape.push_back(curand_state_len * curand_state_size);
curand_state_.reshape(state_shape, true);
int *state = curand_state_.cast_data_and_get_pointer<int>(this->ctx_, true);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_prepare_curand, curand_state_len,
(curandStateXORWOW_t *)state, this->seed_);
}
}
template <typename T>
void ImageAugmentationCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Shape_t shape_in = inputs[0]->shape();
const int w_in = shape_in[shape_in.size() - 1];
const int h_in = shape_in[shape_in.size() - 2];
const int w_in_pad = w_in + this->pad_[1] * 2;
const int h_in_pad = h_in + this->pad_[0] * 2;
const int num_ch = shape_in.size() >= 3 ? shape_in[shape_in.size() - 3] : 1;
const int num_image = inputs[0]->size() / (w_in * h_in * num_ch);
// std::cout << "shape_in : w=" << w_in << ", h=" << h_in << ", ch=" << num_ch
// << ", num=" << num_image << "\n";
Shape_t shape_out = outputs[0]->shape();
const int w_out = shape_out[shape_out.size() - 1];
const int h_out = shape_out[shape_out.size() - 2];
// std::cout << "shape_out : w=" << w_out << ", h=" << h_out << "\n";
const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_);
Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true);
const int ch_size_in = h_in * w_in;
const int ch_size_out = h_out * w_out;
vector<float> channel_brightness(num_ch);
vector<float> channel_contrast(num_ch);
int *state =
this->noise_ > 0.0
? curand_state_.cast_data_and_get_pointer<int>(this->ctx_, false)
: nullptr;
for (int iim = 0; iim < num_image; ++iim) {
// Define augmentation settings
// std::cout << "* image " << iim << "\n";
const int im_offset_in = iim * w_in * h_in * num_ch;
const Tc *x_im = x + im_offset_in;
int im_offset_out = iim * w_out * h_out * num_ch;
Tc *y_im = y + im_offset_out;
// std::cout << "offset : in=" << im_offset_in << ", out=" << im_offset_out
// << "\n";
const float scale =
this->min_scale_ *
std::exp(
(this->rgen_() % 1001) * 0.001f *
std::log(this->max_scale_ /
this->min_scale_)); // [this->min_scale_, this->max_scale_]
const float scale_x = std::exp(-std::log(this->aspect_ratio_) * 0.5 +
(this->rgen_() % 1001) * 0.001f *
std::log(this->aspect_ratio_));
const float scale_y = 1.0 / scale_x;
const float i_scale_x = 1.0f / (scale * scale_x);
const float i_scale_y = 1.0f / (scale * scale_y);
// std::cout << "scale : min=" << min_scale_ << ", max=" << max_scale_ << ",
// v=" << scale << ", inv=" << i_scale << "\n";
const float angle = -this->angle_ +
((this->rgen_() % 1001) * 0.001f) * this->angle_ *
2; // [-angle_, angle_]
// std::cout << "angle : " << angle << "\n";
// Preparation
const float w_scaled = w_in_pad * scale * scale_x;
const float h_scaled = h_in_pad * scale * scale_y;
// std::cout << "shape_scaled : w=" << w_scaled << ", h=" << h_scaled <<
// "\n";
const float cx = (w_out - 1) * 0.5f;
const float cy = (h_out - 1) * 0.5f;
// std::cout << "center : x=" << cx << ", y=" << cy << "\n";
const float cx_scaled =
((this->rgen_() % 1001) * 0.001f) * (w_scaled - w_out) + cx;
const float cy_scaled =
((this->rgen_() % 1001) * 0.001f) * (h_scaled - h_out) + cy;
// std::cout << "center_scaled : x=" << cx_scaled << ", y=" << cy_scaled <<
// "\n";
const bool flip_lr = this->flip_lr_ & (this->rgen_() % 2);
const bool flip_ud = this->flip_ud_ & (this->rgen_() % 2);
const float global_brightness =
((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_;
// std::cout << "global_brightness : " << global_brightness << "\n";
const float global_contrast = std::exp((this->rgen_() % 1001) * 0.001f *
std::log(this->contrast_) * 2.0f) /
this->contrast_;
// std::cout << "global_contrast : " << global_contrast << "\n";
for (int ic = 0; ic < num_ch; ++ic) {
const float ch_brightness =
this->brightness_each_
? ((this->rgen_() % 1001) * 0.001f * this->brightness_ * 2.0f) -
this->brightness_
: global_brightness;
channel_brightness[ic] = ch_brightness - this->contrast_center_;
// std::cout << "channel_brightness - 0.5 : " << channel_brightness[ic] <<
// "\n";
const float ch_contrast =
this->contrast_each_
? std::exp((this->rgen_() % 1001) * 0.001f *
std::log(this->contrast_) * 2.0f) /
this->contrast_
: global_contrast;
channel_contrast[ic] = ch_contrast;
// std::cout << "channel_contrast : " << channel_contrast[ic] << "\n";
}
const float distortion =
std::exp(((this->rgen_() % 1001) * 0.001f * 2.0f * this->distortion_) -
this->distortion_) -
1.0f;
// std::cout << "distortion : " << distortion << "\n";
const float noise = (this->rgen_() % 1001) * 0.001f * this->noise_;
// std::cout << "noise : " << noise << "\n";
// Pixel loop
const float cos_theta = std::cos(angle);
const float sin_theta = std::sin(angle);
const float x_ax = (flip_lr ? -cos_theta : cos_theta) * i_scale_x;
const float y_ax = (flip_lr ? sin_theta : -sin_theta) * i_scale_y;
const float x_ay = (flip_ud ? -sin_theta : sin_theta) * i_scale_x;
const float y_ay = (flip_ud ? -cos_theta : cos_theta) * i_scale_y;
float x0_in =
(cx_scaled * i_scale_x) - (x_ax * cx + y_ax * cy) - this->pad_[1];
float y0_in =
(cy_scaled * i_scale_y) - (x_ay * cx + y_ay * cy) - this->pad_[0];
dim3 threads(32, 16);
dim3 blocks((w_out - 1) / threads.x + 1, (h_out - 1) / threads.y + 1);
for (int ic = 0; ic < num_ch; ++ic) {
IAKernel<<<blocks, threads>>>(
x_im + ch_size_in * ic, w_in, h_in, x0_in, y0_in,
y_im + ch_size_out * ic, w_out, h_out, x_ax, y_ax, x_ay, y_ay,
distortion, channel_brightness[ic], channel_contrast[ic],
this->contrast_center_, (curandStateXORWOW_t *)state, noise);
NBLA_CUDA_KERNEL_CHECK();
}
}
}
template <typename T>
void ImageAugmentationCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
// Not supported
}
}
|
e20c8565cccf0ff80723fa44d845c46722d52397.hip | // !!! This is a file automatically generated by hipify!!!
// You need to write a simple program to perform computation with 1D array in CPU and GPU, then compare the result.
// includes, system
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void calculate1DKernel(int *d_a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d_a[idx] = 1000 * blockIdx.x + threadIdx.x;
printf("%u: \t %u = 1000 * %u + %u\n", idx, d_a[idx] , blockIdx.x, threadIdx.x);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t size = numBlocks * numThreadsPerBlock * sizeof(int);
hipHostMalloc((void **)&h_a, size);
hipMalloc((void **)&d_a, size);
// Part 2 of 5: launch kernel
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( calculate1DKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, d_a);
// block until the device has completed
hipDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
hipMemcpy(h_a, d_a, size, hipMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("hipMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
// i represents blockIdx.x
for(int i = 0; i < numBlocks; i++){
// j represents threadIdx.x
for(int j = 0; j < numThreadsPerBlock; j++){
int idx = i * numThreadsPerBlock + j;
printf("%u\n", idx);
assert(h_a[idx] == (1000 * i + j));
}
}
// free device memory
hipFree(d_a);
// free host memory
hipHostFree(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(-1);
}
}
| e20c8565cccf0ff80723fa44d845c46722d52397.cu | // You need to write a simple program to perform computation with 1D array in CPU and GPU, then compare the result.
// includes, system
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
__global__ void calculate1DKernel(int *d_a)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
d_a[idx] = 1000 * blockIdx.x + threadIdx.x;
printf("%u: \t %u = 1000 * %u + %u\n", idx, d_a[idx] , blockIdx.x, threadIdx.x);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
// pointer for host memory
int *h_a;
// pointer for device memory
int *d_a;
// define grid and block size
int numBlocks = 8;
int numThreadsPerBlock = 8;
// Part 1 of 5: allocate host and device memory
size_t size = numBlocks * numThreadsPerBlock * sizeof(int);
cudaMallocHost((void **)&h_a, size);
cudaMalloc((void **)&d_a, size);
// Part 2 of 5: launch kernel
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
calculate1DKernel<<<numBlocks, numThreadsPerBlock>>>(d_a);
// block until the device has completed
cudaThreadSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// Part 4 of 5: device to host copy
cudaMemcpy(h_a, d_a, size, cudaMemcpyDeviceToHost);
// Check for any CUDA errors
checkCUDAError("cudaMemcpy");
// Part 5 of 5: verify the data returned to the host is correct
// i represents blockIdx.x
for(int i = 0; i < numBlocks; i++){
// j represents threadIdx.x
for(int j = 0; j < numThreadsPerBlock; j++){
int idx = i * numThreadsPerBlock + j;
printf("%u\n", idx);
assert(h_a[idx] == (1000 * i + j));
}
}
// free device memory
cudaFree(d_a);
// free host memory
cudaFreeHost(h_a);
// If the program makes it this far, then the results are correct and
// there are no run-time errors. Good work!
printf("Correct!\n");
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(-1);
}
}
|
2dc0bea682a8b2ed0e485e77a2b319f3f3cb0909.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
extern "C" // this allows cubin file has same name as matrixMul
__global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 2dc0bea682a8b2ed0e485e77a2b319f3f3cb0909.cu | /* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
extern "C" // this allows cubin file has same name as matrixMul
__global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
e93280bdba2a64bbb89059bad8e21b4eec2c301c.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/DeviceSqrt.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <limits>
#include <tuple>
namespace at { namespace native {
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void sum_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
template <typename scalar_t>
void std_var_kernel_impl(TensorIterator& iter, bool unbiased, bool take_sqrt) {
gpu_reduce_kernel<scalar_t, scalar_t>(iter, WelfordOps<scalar_t, scalar_t> { unbiased, take_sqrt }, WelfordData<scalar_t> {});
}
template <>
void std_var_kernel_impl<at::Half>(TensorIterator& iter, bool unbiased, bool take_sqrt) {
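// For Half inputs, Welford accumulation is done in float (WelfordOps<at::Half, float>) to reduce precision loss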
gpu_reduce_kernel<at::Half, at::Half>(iter, WelfordOps<at::Half, float> { unbiased, take_sqrt }, WelfordData<float> {});
}
#ifdef __HIPCC__
template <>
void sum_kernel_impl<int16_t, int16_t>(TensorIterator& iter) {
// There is a Register Coalescing bug in LLVM causing the hcc
// compiler segfaults:
// https://bugs.llvm.org/show_bug.cgi?id=39602
// To work around it, use int32 as the accumulate type.
gpu_reduce_kernel<int16_t, int16_t>(iter, func_wrapper<int16_t> ([]GPU_LAMBDA(int32_t a, int32_t b) -> int32_t {
return a + b;
}));
}
#endif
template <typename scalar_t, typename acc_t=scalar_t>
void prod_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1);
}
static void std_var_kernel_cuda(TensorIterator& iter, bool unbiased, bool take_sqrt) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "std", [&]() {
std_var_kernel_impl<scalar_t>(iter, unbiased, take_sqrt);
});
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
float factor = float(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, float> {factor});
}
#ifdef __HIPCC__
template <>
void mean_kernel_impl<int16_t, int16_t, int16_t>(TensorIterator& iter) {
// There is a Register Coalescing bug in LLVM causing the hcc
// compiler segfaults:
// https://bugs.llvm.org/show_bug.cgi?id=39602
// To work around it, use int32 as the accumulate type.
float factor = float(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<int16_t, int16_t>(iter, MeanOps<int32_t, float> {factor});
}
#endif // __HIPCC__
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void norm_kernel_cuda_impl(TensorIterator& iter, Scalar val) {
float p;
if (val.isIntegral()) {
p = val.to<int64_t>();
} else if (val.isFloatingPoint()) {
p = val.to<acc_t>();
} else {
AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float");
}
if (p == static_cast<float>(0)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<acc_t>(), 0);
} else if (p == static_cast<float>(1)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<acc_t>(), 0);
} else if (p == static_cast<float>(INFINITY)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<acc_t>(), std::numeric_limits<acc_t>::min());
} else if (p == static_cast<float>(-INFINITY)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<acc_t>(), std::numeric_limits<acc_t>::max());
} else {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<acc_t>{ acc_t(p) }, 0);
}
}
static void sum_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return sum_kernel_impl<at::Half, float>(iter);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::Half, float, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "sum", [&]() {
sum_kernel_impl<scalar_t>(iter);
});
}
static void prod_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return prod_kernel_impl<at::Half, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "prod", [&]() {
prod_kernel_impl<scalar_t>(iter);
});
}
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return mean_kernel_impl<at::Half, float>(iter);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return mean_kernel_impl<at::Half, float, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "mean", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
static void norm_kernel_cuda(TensorIterator& iter, Scalar p) {
if (iter.type().scalarType() == kHalf) {
return norm_kernel_cuda_impl<at::Half, float>(iter, p);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return norm_kernel_cuda_impl<at::Half, float, float>(iter, p);
}
AT_DISPATCH_FLOATING_TYPES(iter.type(), "norm", [&]() {
norm_kernel_cuda_impl<scalar_t>(iter, p);
});
}
void and_kernel_cuda(TensorIterator& iter) {
gpu_reduce_kernel<uint8_t, uint8_t>(
iter, func_wrapper<uint8_t> ([]GPU_LAMBDA(uint8_t a, uint8_t b) -> uint8_t {
return a && b;
}), true);
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda);
REGISTER_DISPATCH(and_stub, &and_kernel_cuda);
}} // namespace at::native
| e93280bdba2a64bbb89059bad8e21b4eec2c301c.cu | #include <ATen/native/SharedReduceOps.h>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/DeviceSqrt.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/ReduceOps.h>
#include <limits>
#include <tuple>
namespace at { namespace native {
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void sum_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
template <typename scalar_t>
void std_var_kernel_impl(TensorIterator& iter, bool unbiased, bool take_sqrt) {
gpu_reduce_kernel<scalar_t, scalar_t>(iter, WelfordOps<scalar_t, scalar_t> { unbiased, take_sqrt }, WelfordData<scalar_t> {});
}
template <>
void std_var_kernel_impl<at::Half>(TensorIterator& iter, bool unbiased, bool take_sqrt) {
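// For Half inputs, Welford accumulation is done in float (WelfordOps<at::Half, float>) to reduce precision loss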
gpu_reduce_kernel<at::Half, at::Half>(iter, WelfordOps<at::Half, float> { unbiased, take_sqrt }, WelfordData<float> {});
}
#ifdef __HIPCC__
template <>
void sum_kernel_impl<int16_t, int16_t>(TensorIterator& iter) {
// There is a Register Coalescing bug in LLVM causing the hcc
// compiler segfaults:
// https://bugs.llvm.org/show_bug.cgi?id=39602
// To work around it, use int32 as the accumulate type.
gpu_reduce_kernel<int16_t, int16_t>(iter, func_wrapper<int16_t> ([]GPU_LAMBDA(int32_t a, int32_t b) -> int32_t {
return a + b;
}));
}
#endif
template <typename scalar_t, typename acc_t=scalar_t>
void prod_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, scalar_t>(iter, func_wrapper<scalar_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1);
}
static void std_var_kernel_cuda(TensorIterator& iter, bool unbiased, bool take_sqrt) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.type(), "std", [&]() {
std_var_kernel_impl<scalar_t>(iter, unbiased, take_sqrt);
});
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void mean_kernel_impl(TensorIterator& iter) {
float factor = float(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<scalar_t, out_t>(iter, MeanOps<acc_t, float> {factor});
}
#ifdef __HIPCC__
template <>
void mean_kernel_impl<int16_t, int16_t, int16_t>(TensorIterator& iter) {
// There is a Register Coalescing bug in LLVM causing the hcc
// compiler segfaults:
// https://bugs.llvm.org/show_bug.cgi?id=39602
// To work around it, use int32 as the accumulate type.
float factor = float(iter.num_output_elements()) / iter.numel();
gpu_reduce_kernel<int16_t, int16_t>(iter, MeanOps<int32_t, float> {factor});
}
#endif // __HIPCC__
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void norm_kernel_cuda_impl(TensorIterator& iter, Scalar val) {
float p;
if (val.isIntegral()) {
p = val.to<int64_t>();
} else if (val.isFloatingPoint()) {
p = val.to<acc_t>();
} else {
AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float");
}
if (p == static_cast<float>(0)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<acc_t>(), 0);
} else if (p == static_cast<float>(1)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<acc_t>(), 0);
} else if (p == static_cast<float>(INFINITY)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<acc_t>(), std::numeric_limits<acc_t>::min());
} else if (p == static_cast<float>(-INFINITY)) {
gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<acc_t>(), std::numeric_limits<acc_t>::max());
} else {
gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<acc_t>{ acc_t(p) }, 0);
}
}
static void sum_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return sum_kernel_impl<at::Half, float>(iter);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::Half, float, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "sum", [&]() {
sum_kernel_impl<scalar_t>(iter);
});
}
static void prod_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return prod_kernel_impl<at::Half, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "prod", [&]() {
prod_kernel_impl<scalar_t>(iter);
});
}
static void mean_kernel_cuda(TensorIterator& iter) {
if (iter.type().scalarType() == kHalf) {
return mean_kernel_impl<at::Half, float>(iter);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return mean_kernel_impl<at::Half, float, float>(iter);
}
AT_DISPATCH_ALL_TYPES(iter.type(), "mean", [&]() {
mean_kernel_impl<scalar_t>(iter);
});
}
static void norm_kernel_cuda(TensorIterator& iter, Scalar p) {
if (iter.type().scalarType() == kHalf) {
return norm_kernel_cuda_impl<at::Half, float>(iter, p);
} else if (iter.type(1).scalarType() == kHalf && iter.type().scalarType() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return norm_kernel_cuda_impl<at::Half, float, float>(iter, p);
}
AT_DISPATCH_FLOATING_TYPES(iter.type(), "norm", [&]() {
norm_kernel_cuda_impl<scalar_t>(iter, p);
});
}
void and_kernel_cuda(TensorIterator& iter) {
gpu_reduce_kernel<uint8_t, uint8_t>(
iter, func_wrapper<uint8_t> ([]GPU_LAMBDA(uint8_t a, uint8_t b) -> uint8_t {
return a && b;
}), true);
}
REGISTER_DISPATCH(std_var_stub, &std_var_kernel_cuda);
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
REGISTER_DISPATCH(mean_stub, &mean_kernel_cuda);
REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda);
REGISTER_DISPATCH(and_stub, &and_kernel_cuda);
}} // namespace at::native
|
48d9b73dab691ac583bf1792f54c97964ab7f74a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Assignment 3
Thomas Kistler
10/20/17
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/*k is the number of additions per thread: */
const int k = 100;  /* compile-time constant so the value is also usable from device code */
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int start = (blockDim.x * blockIdx.x + threadIdx.x) * k;  // each thread handles k consecutive elements
int end = start + k;
if (end > numElements) end = numElements;
    for(int i = start; i < end; i++)
{
C[i] = A[i] + B[i];
}
}
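/*
 * Illustrative launch arithmetic for the k-elements-per-thread scheme above,
 * using the values set in main() below: numElements = 100000000, k = 100,
 * threadsPerBlock = 256, so blocksPerGrid = ceil(100000000 / (256*100)) = 3907,
 * and each thread adds the k consecutive elements starting at (global id * k).
 */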
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 100000000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
printf("-With k = %d, (additions per thread)\n", k );
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + (threadsPerBlock * k) - 1) / (threadsPerBlock * k);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
printf("With k = %d additions per thread\n",k);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 48d9b73dab691ac583bf1792f54c97964ab7f74a.cu | /*
Assignment 3
Thomas Kistler
10/20/17
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/*k is the number of additions per thread: */
const int k = 100;  /* compile-time constant so the value is also usable from device code */
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int start = (blockDim.x * blockIdx.x + threadIdx.x) * k;  // each thread handles k consecutive elements
int end = start + k;
if (end > numElements) end = numElements;
    for(int i = start; i < end; i++)
{
C[i] = A[i] + B[i];
}
}
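/*
 * Illustrative launch arithmetic for the k-elements-per-thread scheme above,
 * using the values set in main() below: numElements = 100000000, k = 100,
 * threadsPerBlock = 256, so blocksPerGrid = ceil(100000000 / (256*100)) = 3907,
 * and each thread adds the k consecutive elements starting at (global id * k).
 */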
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 100000000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
printf("-With k = %d, (additions per thread)\n", k );
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + (threadsPerBlock * k) - 1) / (threadsPerBlock * k);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
printf("With k = %d additions per thread\n",k);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
eecf9128488884612412cc6ab2ea624534361775.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
#define max(a,b) (((a)>(b))?(a):(b))
#define min(a,b) (((a)<(b))?(a):(b))
#define MAX_BRIGHTNESS 255
#define CONVSIZE 324
#define BLOCKSIZE 256
// Use int instead `unsigned char' so that we can
// store negative values.
typedef int pixel_t;
// convolution of in image to out image using kernel of kn width
void convolution(const pixel_t *in, pixel_t *out, const float *kernel,
const int nx, const int ny, const int kn)
{
assert(kn % 2 == 1);
assert(nx > kn && ny > kn);
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++)
for (int i = -khalf; i <= khalf; i++) {
pixel += in[(n + j) * nx + m + i] * kernel[c];
c++;
}
out[n * nx + m] = (pixel_t)pixel;
}
}
// convolution of in image to out image using kernel of kn width
__global__ void CUDA_convolution_kernel(pixel_t *A, pixel_t *B, float *kirnel, int nx, int ny, int kn)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
int id = m + n * nx;
int khalf = kn/2;
if( (m < khalf) || (n < khalf) || (m >= nx - khalf) || (n >= ny - khalf) ){
return;
}
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++){
for (int i = -khalf; i <= khalf; i++) {
pixel += A[(n + j) * nx + m + i] * kirnel[c];
c++;
}
}
B[id] = (pixel_t)pixel;
}
void CUDA_convolution(pixel_t *in, pixel_t *out, float *kernel,
int nx, int ny, int kn)
{
//assert(kn % 2 == 1);
//assert(nx > kn && ny > kn);
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
float* kirnel;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&B, memsize);
hipMalloc((void**)&kirnel, kn*kn*sizeof(float));
hipMemcpy(A, in, memsize, hipMemcpyHostToDevice);
hipMemset(B, 0, memsize);
hipMemcpy(kirnel, kernel, kn*kn*sizeof(float), hipMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_convolution_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, kirnel, nx, ny, kn);
hipMemcpy(out, B, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(kirnel);
}
// determines min and max of in image
void min_max(const pixel_t *in, const int nx, const int ny, pixel_t *pmin, pixel_t *pmax)
{
int min = INT_MAX, max = -INT_MAX;
for (int m = 0; m < nx; m++)
for (int n = 0; n < ny ; n++) {
int pixel = in[n*nx + m];
if (pixel < min)
min = pixel;
if (pixel > max)
max = pixel;
}
*pmin = min; *pmax = max;
}
// normalizes inout image using min and max values
void normalize( pixel_t *inout,
const int nx, const int ny, const int kn,
const int min, const int max)
{
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
pixel_t pixel = MAX_BRIGHTNESS * ((int)inout[n * nx + m] -(float) min) / ((float)max - (float)min);
inout[n * nx + m] = pixel;
}
}
/*
* gaussianFilter:
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
* determine size of kernel (odd #)
* 0.0 <= sigma < 0.5 : 3
* 0.5 <= sigma < 1.0 : 5
* 1.0 <= sigma < 1.5 : 7
* 1.5 <= sigma < 2.0 : 9
* 2.0 <= sigma < 2.5 : 11
* 2.5 <= sigma < 3.0 : 13 ...
* kernelSize = 2 * int(2*sigma) + 3;
*/
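/*
 * Worked example of the kernel-size rule above (illustrative; sigma = 1.0 is the
 * default set in main()): n = 2 * int(2*1.0) + 3 = 7, matching the
 * "1.0 <= sigma < 1.5 : 7" row, and mean = floor(7/2) = 3 is the kernel centre.
 */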
void gaussian_filter(const pixel_t *in, pixel_t *out,
const int nx, const int ny, const float sigma)
{
const int n = 2 * (int)(2 * sigma) + 3;
const float mean = (float)floor(n / 2.0);
float kernel[n * n]; // variable length array
fprintf(stderr, "gaussian_filter: kernel size %d, sigma=%g\n",
n, sigma);
size_t c = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
kernel[c] = exp(-0.5 * (pow((i - mean) / sigma, 2.0) +
pow((j - mean) / sigma, 2.0)))
/ (2 * M_PI * sigma * sigma);
c++;
}
convolution(in, out, kernel, nx, ny, n);
pixel_t max, min;
min_max(out, nx, ny, &min, &max);
normalize(out, nx, ny, n, min, max);
}
// Canny non-maximum suppression
void non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms,
const int nx, const int ny)
{
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2(after_Gy[c],
after_Gx[c]) + M_PI,
M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
G[c] > G[ww]) || // 0 deg
((dir > 1 && dir <= 3) && G[c] > G[nw] &&
G[c] > G[se]) || // 45 deg
((dir > 3 && dir <= 5) && G[c] > G[nn] &&
G[c] > G[ss]) || // 90 deg
((dir > 5 && dir <= 7) && G[c] > G[ne] &&
G[c] > G[sw])) // 135 deg
nms[c] = G[c];
else
nms[c] = 0;
}
}
__global__ void CUDA_non_maximum_supression_kernel(pixel_t *A, pixel_t *B, pixel_t *C, pixel_t *out, int nx, int ny)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
const int c = m + nx * n; // id
if( (m < 1) || (n < 1) || (m >= nx - 1) || (n >= ny - 1) ){
return;
}
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2((double)B[c],(double)A[c]) + M_PI, M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && C[c] > C[ee] &&
C[c] > C[ww]) || // 0 deg
((dir > 1 && dir <= 3) && C[c] > C[nw] &&
C[c] > C[se]) || // 45 deg
((dir > 3 && dir <= 5) && C[c] > C[nn] &&
C[c] > C[ss]) || // 90 deg
((dir > 5 && dir <= 7) && C[c] > C[ne] &&
C[c] > C[sw])) // 135 deg
out[c] = C[c];
else
out[c] = 0;
}
void CUDA_non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms, const int nx, const int ny)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
pixel_t* C;
pixel_t* out;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&B, memsize);
hipMalloc((void**)&C, memsize);
hipMalloc((void**)&out, memsize);
hipMemcpy(A, after_Gx, memsize, hipMemcpyHostToDevice);
hipMemcpy(B, after_Gy, memsize, hipMemcpyHostToDevice);
hipMemset( out, 0, memsize);
hipMemcpy(C, G, memsize, hipMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_non_maximum_supression_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, B, C, out, nx, ny);
hipMemcpy(nms, out, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(out);
}
// edges found in first pass for nms > tmax
void first_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmax)
{
size_t c = 1;
for (int j = 1; j < ny - 1; j++) {
for (int i = 1; i < nx - 1; i++) {
if (nms[c] >= tmax) { // trace edges
reference[c] = MAX_BRIGHTNESS;
}
c++;
}
c+=2; // because borders are not considered
}
}
// texture<Type, Dim, ReadMode> texRef;
texture<pixel_t,1,hipReadModeElementType> nms_texture;
__global__ void CUDA_first_edges_kernel(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax){
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
// fetch linear texture
pixel_t nms_texture_fetched = tex1Dfetch(nms_texture,id+1);
if (nms_texture_fetched >= tmax) { // trace edges
reference[id+1] = MAX_BRIGHTNESS;
}
}
}
void CUDA_first_edges(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
hipMalloc((void**)&A, memsize);
hipMemcpy(A, nms, memsize, hipMemcpyHostToDevice);
hipBindTexture(0, nms_texture, A, memsize);
hipMalloc((void**)&out, memsize);
hipMemset(out, 0, memsize);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_first_edges_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, out, nx, ny, tmax);
hipUnbindTexture(nms_texture);
hipMemcpy(reference, out, memsize, hipMemcpyDeviceToHost);
hipFree(A);
hipFree(out);
}
__global__ void CUDA_hysteresis_edges_kernel(const pixel_t *nms, pixel_t *reference, const int nx, const int ny, const int tmin, bool *pchanged)
{
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
int nbs[8]; // neighbours
nbs[0] = id - nx; // nn
nbs[1] = id + nx; // ss
nbs[2] = id + 1; // ww
nbs[3] = id - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[id] >= tmin && reference[id] == 0) {
for(int k = 0; k < 8; k++){
if (reference[nbs[k]] != 0) {
reference[id] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
void CUDA_hysteresis_edges(const pixel_t *nms, pixel_t *reference, const int nx, const int ny, const int tmin, bool *pchanged)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
bool *pc;
hipMalloc((void**)&A, memsize);
hipMalloc((void**)&out, memsize);
hipMalloc((void**)&pc, sizeof(bool));
hipMemcpy(A, nms, memsize, hipMemcpyHostToDevice);
hipMemcpy(out, reference, memsize, hipMemcpyHostToDevice);
hipMemcpy(pc, pchanged, sizeof(bool), hipMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
hipLaunchKernelGGL(( CUDA_hysteresis_edges_kernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, A, out, nx, ny, tmin, pc);
hipMemcpy(reference, out, memsize, hipMemcpyDeviceToHost);
hipMemcpy(pchanged, pc, sizeof(bool), hipMemcpyDeviceToHost);
hipFree(A);
hipFree(out);
hipFree(pc);
}
// edges found in after first passes for nms > tmin && neighbor is edge
void hysteresis_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmin, bool *pchanged)
{
  // Tracing edges with hysteresis. Non-recursive implementation.
for (int i = 1; i < nx - 1; i++) {
for (int j = 1; j < ny - 1; j++) {
size_t t = i + j * nx;
int nbs[8]; // neighbours
nbs[0] = t - nx; // nn
nbs[1] = t + nx; // ss
nbs[2] = t + 1; // ww
nbs[3] = t - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[t] >= tmin && reference[t] == 0) {
for(int k = 0; k < 8; k++)
if (reference[nbs[k]] != 0) {
reference[t] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
/*
* Links:
* http://en.wikipedia.org/wiki/Canny_edge_detector
* http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
* http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
*
* Note: T1 and T2 are lower and upper thresholds.
*/
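/*
 * Illustrative mapping of the note above onto this code (defaults from main):
 * T1 = tmin = 45 and T2 = tmax = 50. first_edges() marks pixels with
 * nms >= tmax as edges; hysteresis_edges() then repeatedly promotes pixels with
 * nms >= tmin that touch an already-marked edge, until a pass makes no change.
 */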
//canny edge detector code to run on the host
void cannyHost( const int *h_idata, const int w, const int h,
const int tmin, // tmin canny parameter
const int tmax, // tmax canny parameter
const float sigma, // sigma canny parameter
int * reference)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || reference == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, reference, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
convolution(reference, after_Gx, Gx, nx, ny, 3);
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
convolution(reference, after_Gy, Gy, nx, ny, 3);
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(reference, 0, sizeof(pixel_t) * nx * ny);
first_edges(nms, reference, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, reference, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// canny edge detector code to run on the GPU
void cannyDevice( const int *h_idata, const int w, const int h,
const int tmin, const int tmax,
const float sigma,
int * h_odata)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || h_odata == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, h_odata, nx, ny, sigma);
//CUDA_gaussian_filter(h_idata, h_odata, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
//convolution(h_odata, after_Gx, Gx, nx, ny, 3);
CUDA_convolution(h_odata, after_Gx,(float*) Gx, nx, ny, 3); // convolution function in CUDA
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
//convolution(h_odata, after_Gy, Gy, nx, ny, 3);
CUDA_convolution(h_odata, after_Gy, (float *) Gy, nx, ny, 3); // convolution function in CUDA
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
//non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
CUDA_non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(h_odata, 0, sizeof(pixel_t) * nx * ny);
//first_edges(nms, h_odata, nx, ny, tmax);
CUDA_first_edges(nms, h_odata, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
//CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-i inputfile] [-o outputfile] [-r referenceFile] [-w windowsize] [-t threshold]\n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
char *fileIn=(char *)"lena.pgm",*fileOut=(char *)"lenaOut.pgm",*referenceOut=(char *)"reference.pgm";
int tmin = 45, tmax = 50;
float sigma=1.0f;
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:i:o:r:n:x:s:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'i': // input image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 'r': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'n': // tmin
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmin)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'x': // tmax
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmax)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 's': // sigma
if(strlen(optarg)==0 || sscanf(optarg,"%f",&sigma)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host canny detector time and device canny detector time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_idata=NULL;
unsigned int h,w;
//load pgm
if (cutLoadPGMi(fileIn, (unsigned int **)&h_idata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", fileIn);
exit(1);
}
// allocate mem for the result on host side
//int* h_odata = (int*) malloc( h*w*sizeof(unsigned int));
//int* reference = (int*) malloc( h*w*sizeof(unsigned int));
    int* h_odata = (int*) calloc( h*w, sizeof(unsigned int)); // use hipMalloc instead??
int* reference = (int*) calloc( h*w, sizeof(unsigned int));
// detect edges at host
hipEventRecord( startH, 0 );
cannyHost(h_idata, w, h, tmin, tmax, sigma, reference);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// detect edges at GPU
hipEventRecord( startD, 0 );
cannyDevice(h_idata, w, h, tmin, tmax, sigma, h_odata);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
    // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_idata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| eecf9128488884612412cc6ab2ea624534361775.cu |
// Based on CUDA SDK template from NVIDIA
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
// includes, project
#include <cutil_inline.h>
#define max(a,b) (((a)>(b))?(a):(b))
#define min(a,b) (((a)<(b))?(a):(b))
#define MAX_BRIGHTNESS 255
#define CONVSIZE 324
#define BLOCKSIZE 256
// Use int instead `unsigned char' so that we can
// store negative values.
typedef int pixel_t;
// convolution of in image to out image using kernel of kn width
void convolution(const pixel_t *in, pixel_t *out, const float *kernel,
const int nx, const int ny, const int kn)
{
assert(kn % 2 == 1);
assert(nx > kn && ny > kn);
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++)
for (int i = -khalf; i <= khalf; i++) {
pixel += in[(n + j) * nx + m + i] * kernel[c];
c++;
}
out[n * nx + m] = (pixel_t)pixel;
}
}
// convolution of in image to out image using kernel of kn width
__global__ void CUDA_convolution_kernel(pixel_t *A, pixel_t *B, float *kirnel, int nx, int ny, int kn)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
int id = m + n * nx;
int khalf = kn/2;
if( (m < khalf) || (n < khalf) || (m >= nx - khalf) || (n >= ny - khalf) ){
return;
}
float pixel = 0.0;
size_t c = 0;
for (int j = -khalf; j <= khalf; j++){
for (int i = -khalf; i <= khalf; i++) {
pixel += A[(n + j) * nx + m + i] * kirnel[c];
c++;
}
}
B[id] = (pixel_t)pixel;
}
void CUDA_convolution(pixel_t *in, pixel_t *out, float *kernel,
int nx, int ny, int kn)
{
//assert(kn % 2 == 1);
//assert(nx > kn && ny > kn);
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
float* kirnel;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&B, memsize);
cudaMalloc((void**)&kirnel, kn*kn*sizeof(float));
cudaMemcpy(A, in, memsize, cudaMemcpyHostToDevice);
cudaMemset(B, 0, memsize);
cudaMemcpy(kirnel, kernel, kn*kn*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_convolution_kernel <<<dimGrid, dimBlock>>> (A, B, kirnel, nx, ny, kn);
cudaMemcpy(out, B, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(kirnel);
}
// determines min and max of in image
void min_max(const pixel_t *in, const int nx, const int ny, pixel_t *pmin, pixel_t *pmax)
{
int min = INT_MAX, max = -INT_MAX;
for (int m = 0; m < nx; m++)
for (int n = 0; n < ny ; n++) {
int pixel = in[n*nx + m];
if (pixel < min)
min = pixel;
if (pixel > max)
max = pixel;
}
*pmin = min; *pmax = max;
}
// normalizes inout image using min and max values
void normalize( pixel_t *inout,
const int nx, const int ny, const int kn,
const int min, const int max)
{
const int khalf = kn / 2;
for (int m = khalf; m < nx - khalf; m++)
for (int n = khalf; n < ny - khalf; n++) {
pixel_t pixel = MAX_BRIGHTNESS * ((int)inout[n * nx + m] -(float) min) / ((float)max - (float)min);
inout[n * nx + m] = pixel;
}
}
/*
* gaussianFilter:
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
* determine size of kernel (odd #)
* 0.0 <= sigma < 0.5 : 3
* 0.5 <= sigma < 1.0 : 5
* 1.0 <= sigma < 1.5 : 7
* 1.5 <= sigma < 2.0 : 9
* 2.0 <= sigma < 2.5 : 11
* 2.5 <= sigma < 3.0 : 13 ...
* kernelSize = 2 * int(2*sigma) + 3;
*/
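/*
 * Worked example of the kernel-size rule above (illustrative; sigma = 1.0 is the
 * default set in main()): n = 2 * int(2*1.0) + 3 = 7, matching the
 * "1.0 <= sigma < 1.5 : 7" row, and mean = floor(7/2) = 3 is the kernel centre.
 */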
void gaussian_filter(const pixel_t *in, pixel_t *out,
const int nx, const int ny, const float sigma)
{
const int n = 2 * (int)(2 * sigma) + 3;
const float mean = (float)floor(n / 2.0);
float kernel[n * n]; // variable length array
fprintf(stderr, "gaussian_filter: kernel size %d, sigma=%g\n",
n, sigma);
size_t c = 0;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++) {
kernel[c] = exp(-0.5 * (pow((i - mean) / sigma, 2.0) +
pow((j - mean) / sigma, 2.0)))
/ (2 * M_PI * sigma * sigma);
c++;
}
convolution(in, out, kernel, nx, ny, n);
pixel_t max, min;
min_max(out, nx, ny, &min, &max);
normalize(out, nx, ny, n, min, max);
}
// Canny non-maximum suppression
void non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms,
const int nx, const int ny)
{
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2(after_Gy[c],
after_Gx[c]) + M_PI,
M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
G[c] > G[ww]) || // 0 deg
((dir > 1 && dir <= 3) && G[c] > G[nw] &&
G[c] > G[se]) || // 45 deg
((dir > 3 && dir <= 5) && G[c] > G[nn] &&
G[c] > G[ss]) || // 90 deg
((dir > 5 && dir <= 7) && G[c] > G[ne] &&
G[c] > G[sw])) // 135 deg
nms[c] = G[c];
else
nms[c] = 0;
}
}
__global__ void CUDA_non_maximum_supression_kernel(pixel_t *A, pixel_t *B, pixel_t *C, pixel_t *out, int nx, int ny)
{
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
const int c = m + nx * n; // id
if( (m < 1) || (n < 1) || (m >= nx - 1) || (n >= ny - 1) ){
return;
}
const int nn = c - nx;
const int ss = c + nx;
const int ww = c + 1;
const int ee = c - 1;
const int nw = nn + 1;
const int ne = nn - 1;
const int sw = ss + 1;
const int se = ss - 1;
const float dir = (float)(fmod(atan2((double)B[c],(double)A[c]) + M_PI, M_PI) / M_PI) * 8;
if (((dir <= 1 || dir > 7) && C[c] > C[ee] &&
C[c] > C[ww]) || // 0 deg
((dir > 1 && dir <= 3) && C[c] > C[nw] &&
C[c] > C[se]) || // 45 deg
((dir > 3 && dir <= 5) && C[c] > C[nn] &&
C[c] > C[ss]) || // 90 deg
((dir > 5 && dir <= 7) && C[c] > C[ne] &&
C[c] > C[sw])) // 135 deg
out[c] = C[c];
else
out[c] = 0;
}
void CUDA_non_maximum_supression(const pixel_t *after_Gx, const pixel_t * after_Gy, const pixel_t *G, pixel_t *nms, const int nx, const int ny)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* B;
pixel_t* C;
pixel_t* out;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&B, memsize);
cudaMalloc((void**)&C, memsize);
cudaMalloc((void**)&out, memsize);
cudaMemcpy(A, after_Gx, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(B, after_Gy, memsize, cudaMemcpyHostToDevice);
cudaMemset( out, 0, memsize);
cudaMemcpy(C, G, memsize, cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_non_maximum_supression_kernel <<<dimGrid, dimBlock>>> (A, B, C, out, nx, ny);
cudaMemcpy(nms, out, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(out);
}
// edges found in first pass for nms > tmax
void first_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmax)
{
size_t c = 1;
for (int j = 1; j < ny - 1; j++) {
for (int i = 1; i < nx - 1; i++) {
if (nms[c] >= tmax) { // trace edges
reference[c] = MAX_BRIGHTNESS;
}
c++;
}
c+=2; // because borders are not considered
}
}
// texture<Type, Dim, ReadMode> texRef;
texture<pixel_t,1,cudaReadModeElementType> nms_texture;
__global__ void CUDA_first_edges_kernel(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax){
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
// fetch linear texture
pixel_t nms_texture_fetched = tex1Dfetch(nms_texture,id+1);
if (nms_texture_fetched >= tmax) { // trace edges
reference[id+1] = MAX_BRIGHTNESS;
}
}
}
void CUDA_first_edges(pixel_t *nms, pixel_t *reference, int nx, int ny, int tmax)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
cudaMalloc((void**)&A, memsize);
cudaMemcpy(A, nms, memsize, cudaMemcpyHostToDevice);
cudaBindTexture(0, nms_texture, A, memsize);
cudaMalloc((void**)&out, memsize);
cudaMemset(out, 0, memsize);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_first_edges_kernel <<<dimGrid, dimBlock>>> (A, out, nx, ny, tmax);
cudaUnbindTexture(nms_texture);
cudaMemcpy(reference, out, memsize, cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(out);
}
__global__ void CUDA_hysteresis_edges_kernel(const pixel_t *nms, pixel_t *reference, const int nx, const int ny, const int tmin, bool *pchanged)
{
int xx = threadIdx.x + blockIdx.x * blockDim.x;
int yy = threadIdx.y + blockIdx.y * blockDim.y;
int id = xx + yy * nx;
if ((xx>0 && xx<nx-1) && (yy>0 && yy<ny-1))
{
int nbs[8]; // neighbours
nbs[0] = id - nx; // nn
nbs[1] = id + nx; // ss
nbs[2] = id + 1; // ww
nbs[3] = id - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[id] >= tmin && reference[id] == 0) {
for(int k = 0; k < 8; k++){
if (reference[nbs[k]] != 0) {
reference[id] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
void CUDA_hysteresis_edges(const pixel_t *nms, pixel_t *reference, const int nx, const int ny, const int tmin, bool *pchanged)
{
int memsize = nx * ny * sizeof(pixel_t);
pixel_t* A;
pixel_t* out;
bool *pc;
cudaMalloc((void**)&A, memsize);
cudaMalloc((void**)&out, memsize);
cudaMalloc((void**)&pc, sizeof(bool));
cudaMemcpy(A, nms, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(out, reference, memsize, cudaMemcpyHostToDevice);
cudaMemcpy(pc, pchanged, sizeof(bool), cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(float(nx)/16),ceil(float(ny)/16));
dim3 dimBlock(16,16);
CUDA_hysteresis_edges_kernel <<<dimGrid, dimBlock>>> (A, out, nx, ny, tmin, pc);
cudaMemcpy(reference, out, memsize, cudaMemcpyDeviceToHost);
cudaMemcpy(pchanged, pc, sizeof(bool), cudaMemcpyDeviceToHost);
cudaFree(A);
cudaFree(out);
cudaFree(pc);
}
// edges found in after first passes for nms > tmin && neighbor is edge
void hysteresis_edges(const pixel_t *nms, pixel_t *reference,
const int nx, const int ny, const int tmin, bool *pchanged)
{
  // Tracing edges with hysteresis. Non-recursive implementation.
for (int i = 1; i < nx - 1; i++) {
for (int j = 1; j < ny - 1; j++) {
size_t t = i + j * nx;
int nbs[8]; // neighbours
nbs[0] = t - nx; // nn
nbs[1] = t + nx; // ss
nbs[2] = t + 1; // ww
nbs[3] = t - 1; // ee
nbs[4] = nbs[0] + 1; // nw
nbs[5] = nbs[0] - 1; // ne
nbs[6] = nbs[1] + 1; // sw
nbs[7] = nbs[1] - 1; // se
if (nms[t] >= tmin && reference[t] == 0) {
for(int k = 0; k < 8; k++)
if (reference[nbs[k]] != 0) {
reference[t] = MAX_BRIGHTNESS;
*pchanged = true;
}
}
}
}
}
/*
* Links:
* http://en.wikipedia.org/wiki/Canny_edge_detector
* http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
* http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
* http://www.songho.ca/dsp/cannyedge/cannyedge.html
*
* Note: T1 and T2 are lower and upper thresholds.
*/
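/*
 * Illustrative mapping of the note above onto this code (defaults from main):
 * T1 = tmin = 45 and T2 = tmax = 50. first_edges() marks pixels with
 * nms >= tmax as edges; hysteresis_edges() then repeatedly promotes pixels with
 * nms >= tmin that touch an already-marked edge, until a pass makes no change.
 */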
//canny edge detector code to run on the host
void cannyHost( const int *h_idata, const int w, const int h,
const int tmin, // tmin canny parameter
const int tmax, // tmax canny parameter
const float sigma, // sigma canny parameter
int * reference)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || reference == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, reference, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
convolution(reference, after_Gx, Gx, nx, ny, 3);
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
convolution(reference, after_Gy, Gy, nx, ny, 3);
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(reference, 0, sizeof(pixel_t) * nx * ny);
first_edges(nms, reference, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
hysteresis_edges(nms, reference, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// canny edge detector code to run on the GPU
void cannyDevice( const int *h_idata, const int w, const int h,
const int tmin, const int tmax,
const float sigma,
int * h_odata)
{
const int nx = w;
const int ny = h;
pixel_t *G = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gx = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *after_Gy = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
pixel_t *nms = (pixel_t *) calloc(nx * ny, sizeof(pixel_t));
if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
nms == NULL || h_odata == NULL) {
fprintf(stderr, "canny_edge_detection:"
" Failed memory allocation(s).\n");
exit(1);
}
// Gaussian filter
gaussian_filter(h_idata, h_odata, nx, ny, sigma);
//CUDA_gaussian_filter(h_idata, h_odata, nx, ny, sigma);
const float Gx[] = {-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
// Gradient along x
//convolution(h_odata, after_Gx, Gx, nx, ny, 3);
CUDA_convolution(h_odata, after_Gx,(float*) Gx, nx, ny, 3); // convolution function in CUDA
const float Gy[] = { 1, 2, 1,
0, 0, 0,
-1,-2,-1};
// Gradient along y
//convolution(h_odata, after_Gy, Gy, nx, ny, 3);
CUDA_convolution(h_odata, after_Gy, (float *) Gy, nx, ny, 3); // convolution function in CUDA
// Merging gradients
for (int i = 1; i < nx - 1; i++)
for (int j = 1; j < ny - 1; j++) {
const int c = i + nx * j;
G[c] = (pixel_t)(hypot((double)(after_Gx[c]), (double)( after_Gy[c]) ));
}
// Non-maximum suppression, straightforward implementation.
//non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
CUDA_non_maximum_supression(after_Gx, after_Gy, G, nms, nx, ny);
// edges with nms >= tmax
memset(h_odata, 0, sizeof(pixel_t) * nx * ny);
//first_edges(nms, h_odata, nx, ny, tmax);
CUDA_first_edges(nms, h_odata, nx, ny, tmax);
// edges with nms >= tmin && neighbor is edge
bool changed;
do {
changed = false;
CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
//CUDA_hysteresis_edges(nms, h_odata, nx, ny, tmin, &changed);
} while (changed==true);
free(after_Gx);
free(after_Gy);
free(G);
free(nms);
}
// print command line format
void usage(char *command)
{
printf("Usage: %s [-h] [-d device] [-i inputfile] [-o outputfile] [-r referenceFile] [-w windowsize] [-t threshold]\n",command);
}
// main
int main( int argc, char** argv)
{
// default command line options
int deviceId = 0;
char *fileIn=(char *)"lena.pgm",*fileOut=(char *)"lenaOut.pgm",*referenceOut=(char *)"reference.pgm";
int tmin = 45, tmax = 50;
float sigma=1.0f;
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:i:o:r:n:x:s:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'i': // input image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 'r': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'n': // tmin
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmin)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'x': // tmax
if(strlen(optarg)==0 || sscanf(optarg,"%d",&tmax)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 's': // sigma
if(strlen(optarg)==0 || sscanf(optarg,"%f",&sigma)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host canny detector time and device canny detector time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_idata=NULL;
unsigned int h,w;
//load pgm
if (cutLoadPGMi(fileIn, (unsigned int **)&h_idata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", fileIn);
exit(1);
}
// allocate mem for the result on host side
//int* h_odata = (int*) malloc( h*w*sizeof(unsigned int));
//int* reference = (int*) malloc( h*w*sizeof(unsigned int));
    int* h_odata = (int*) calloc( h*w, sizeof(unsigned int)); // use cudaMalloc instead??
int* reference = (int*) calloc( h*w, sizeof(unsigned int));
// detect edges at host
cudaEventRecord( startH, 0 );
cannyHost(h_idata, w, h, tmin, tmax, sigma, reference);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// detect edges at GPU
cudaEventRecord( startD, 0 );
cannyDevice(h_idata, w, h, tmin, tmax, sigma, h_odata);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
    // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_idata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
3e82460ef2f8f4153a42636bf23872930e422376.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************/
// File : colors.cu
// Author : Zhihua Ban
// Contact : [email protected]
// Last Revised : 2017-1-19
/*****************************************************************************/
// Copyright 2017 Zhihua Ban. All rights reserved.
/*****************************************************************************/
// Desc : color manipulation
/*****************************************************************************/
#include "colors.cuh"
#include "cudart_util.h"
// not part of superpixel segmentation
namespace su{ namespace gpu{
__global__ void kernel_bgr2lab(PixI *lab, int lab_steps, PixI* bgr, int bgr_steps, int W, int H){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if (x >= W || y >= H){
return;
}
float R = normalizer__*(bgr[y*bgr_steps + x]).f2(); // R
float G = normalizer__*(bgr[y*bgr_steps + x]).f1(); // G
float B = normalizer__*(bgr[y*bgr_steps + x]).f0(); // B
if (R <= 0.04045f) R = R / 12.92f;
else R = ::pow((R + 0.055f) / 1.055f, 2.4f);
if (G <= 0.04045f) G = G / 12.92f;
else G = ::pow((G + 0.055f) / 1.055f, 2.4f);
if (B <= 0.04045f) B = B / 12.92f;
else B = ::pow((B + 0.055f) / 1.055f, 2.4f);
float X = R*0.412453f + G*0.357580f + B*0.180423f;
float Y = R*0.212671f + G*0.715160f + B*0.072169f;
float Z = R*0.019334f + G*0.119193f + B*0.950227f;
Y = Y * iYr__;
Z = Z * iZr__;
X = X * iXr__;
if (X > epsilon__) X = ::pow(X, 1.f / 3.f);
else X = (kappa__*X + 16.f) / 116.f;
if (Y > epsilon__) Y = ::pow(Y, 1.f / 3.f);
else Y = (kappa__*Y + 16.f) / 116.f;
if (Z > epsilon__) Z = ::pow(Z, 1.f / 3.f);
else Z = (kappa__*Z + 16.f) / 116.f;
#define CL (uint32_t)((116.f*Y - 16.f))
#define CA (uint32_t)(500.f*(X - Y) + 128.f + 0.5f)
#define CB (uint32_t)(200.f*(Y - Z) + 128.f + 0.5f)
PixI pix(0);
pix.set(CL, CA, CB);
lab[y*lab_steps + x] = pix;
#undef CL
#undef CA
#undef CB
}
void bgr2lab(PixI *lab, int lab_steps, PixI* bgr, int bgr_steps, int W, int H){
int _BX = 32;
int _BY = 4;
dim3 blocks(_BX, _BY);
dim3 grids;
grids.x = (W + blocks.x - 1) / blocks.x;
grids.y = (H + blocks.y - 1) / blocks.y;
hipLaunchKernelGGL(( kernel_bgr2lab) , dim3(grids), dim3(blocks), 0, 0, lab, lab_steps, bgr, bgr_steps, W, H);
CUDART_LAST_CHECK;
}
}} // end namespace
| 3e82460ef2f8f4153a42636bf23872930e422376.cu | /*****************************************************************************/
// File : colors.cu
// Author : Zhihua Ban
// Contact : [email protected]
// Last Revised : 2017-1-19
/*****************************************************************************/
// Copyright 2017 Zhihua Ban. All rights reserved.
/*****************************************************************************/
// Desc : color manipulation
/*****************************************************************************/
#include "colors.cuh"
#include "cudart_util.h"
// not part of superpixel segmentation
namespace su{ namespace gpu{
__global__ void kernel_bgr2lab(PixI *lab, int lab_steps, PixI* bgr, int bgr_steps, int W, int H){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
if (x >= W || y >= H){
return;
}
float R = normalizer__*(bgr[y*bgr_steps + x]).f2(); // R
float G = normalizer__*(bgr[y*bgr_steps + x]).f1(); // G
float B = normalizer__*(bgr[y*bgr_steps + x]).f0(); // B
if (R <= 0.04045f) R = R / 12.92f;
else R = std::pow((R + 0.055f) / 1.055f, 2.4f);
if (G <= 0.04045f) G = G / 12.92f;
else G = std::pow((G + 0.055f) / 1.055f, 2.4f);
if (B <= 0.04045f) B = B / 12.92f;
else B = std::pow((B + 0.055f) / 1.055f, 2.4f);
float X = R*0.412453f + G*0.357580f + B*0.180423f;
float Y = R*0.212671f + G*0.715160f + B*0.072169f;
float Z = R*0.019334f + G*0.119193f + B*0.950227f;
Y = Y * iYr__;
Z = Z * iZr__;
X = X * iXr__;
if (X > epsilon__) X = std::pow(X, 1.f / 3.f);
else X = (kappa__*X + 16.f) / 116.f;
if (Y > epsilon__) Y = std::pow(Y, 1.f / 3.f);
else Y = (kappa__*Y + 16.f) / 116.f;
if (Z > epsilon__) Z = std::pow(Z, 1.f / 3.f);
else Z = (kappa__*Z + 16.f) / 116.f;
#define CL (uint32_t)((116.f*Y - 16.f))
#define CA (uint32_t)(500.f*(X - Y) + 128.f + 0.5f)
#define CB (uint32_t)(200.f*(Y - Z) + 128.f + 0.5f)
PixI pix(0);
pix.set(CL, CA, CB);
lab[y*lab_steps + x] = pix;
#undef CL
#undef CA
#undef CB
}
void bgr2lab(PixI *lab, int lab_steps, PixI* bgr, int bgr_steps, int W, int H){
int _BX = 32;
int _BY = 4;
dim3 blocks(_BX, _BY);
dim3 grids;
grids.x = (W + blocks.x - 1) / blocks.x;
grids.y = (H + blocks.y - 1) / blocks.y;
kernel_bgr2lab <<<grids, blocks>>>(lab, lab_steps, bgr, bgr_steps, W, H);
CUDART_LAST_CHECK;
}
}} // end namespace
|
58d9c9064bf3ebcc03ccf6401b687a5077d806ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bp_gpu.cuh"
__global__ void _update_fc2_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC2_SIZE)
{
_fc2_delta[i]=_alpha*_C[i]*(_fc2_a[i]*(1.0-_fc2_a[i]));
_fc2_db[i]+=_fc2_delta[i];
}
}
void update_fc2_b_gpu()
{
dim3 block(32);
dim3 grid((FC2_SIZE-1)/block.x+1);
hipLaunchKernelGGL(( _update_fc2_b), dim3(block),dim3(grid), 0, 0, );
hipDeviceSynchronize();
}
__global__ void _update_fc2_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
_fc2_dw[i][j]+=_fc2_delta[i]*_fc1_a[j];
}
void update_fc2_w_gpu()
{
dim3 block(32,32);
dim3 grid((FC2_SIZE-1)/block.x+1,(FC1_SIZE-1)/block.x+1);
hipLaunchKernelGGL(( _update_fc2_w), dim3(block),dim3(grid), 0, 0, );
hipDeviceSynchronize();
}
__global__ void _update_fc1_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC1_SIZE)
{
float error=0;
for(int j=0;j<FC2_SIZE;j++)
error+=_fc2_delta[j]*_fc2_w[j][i];
_fc1_delta[i]=error*(_fc1_a[i]*(1.0-_fc1_a[i]));
_fc1_db[i]+=_fc1_delta[i];
}
}
void update_fc1_b_gpu()
{
dim3 block(32);
dim3 grid((FC1_SIZE-1)/block.x+1);
hipLaunchKernelGGL(( _update_fc1_b), dim3(block),dim3(grid), 0, 0, );
hipDeviceSynchronize();
}
__global__ void _update_fc1_w(int j)
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int k=threadIdx.y+blockDim.y*blockIdx.y;
int l=threadIdx.z+blockDim.z*blockIdx.z;
if(i<FC1_SIZE&&k<POOL_SIZE&&l<POOL_SIZE)
_fc1_dw[i][j][k][l]+=_fc1_delta[i]*_pool[j][k][l];
}
void update_fc1_w_gpu()
{
dim3 block(8,8,8);
dim3 grid((FC1_SIZE-1)/block.x+1,(POOL_SIZE-1)/block.y+1,(POOL_SIZE-1)/block.z+1);
// #pragma omp parallel for
for(int j=0;j<CONV_W_NUM;j++)
hipLaunchKernelGGL(( _update_fc1_w), dim3(block),dim3(grid), 0, 0, j);
hipDeviceSynchronize();
}
__global__ void _update_conv_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<CONV_W_NUM)
{
_conv_sigma_delta[i]=0;
for(int j=0;j<POOL_SIZE;j++)
for(int k=0;k<POOL_SIZE;k++)
{
float error=0;
_conv_delta[i][j][k]=0;
for(int l=0;l<FC1_SIZE;l++)
error+=_fc1_delta[l]*_fc1_w[l][i][j][k];
_conv_delta[i][j][k]=error*(_pool[i][j][k]*(1.0-_pool[i][j][k]));
_conv_sigma_delta[i]+=error*(_pool[i][j][k]*(1.0-_pool[i][j][k]));
}
_conv_db[i]+=_conv_sigma_delta[i];
}
}
void update_conv_b_gpu()
{
dim3 block(32);
dim3 grid((CONV_W_NUM-1)/block.x+1);
hipLaunchKernelGGL(( _update_conv_b), dim3(block),dim3(grid), 0, 0, );
hipDeviceSynchronize();
}
__global__ void _update_conv_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
int k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
float error=0;
for(int m=0;m<POOL_SIZE;m++)
for(int n=0;n<POOL_SIZE;n++)
{
int x=_pool_pos[i][m][n]/2;
int y=_pool_pos[i][m][n]%2;
error+=_conv_delta[i][m][n]*_input[2*m+j+x][2*n+k+y];
}
_conv_dw[i][j][k]+=error;
}
}
void update_conv_w_gpu()
{
dim3 block(8,8,8);
dim3 grid((CONV_W_NUM-1)/block.x+1,(CONV_W_SIZE-1)/block.y+1,(CONV_W_SIZE-1)/block.z+1);
hipLaunchKernelGGL(( _update_conv_w), dim3(block),dim3(grid), 0, 0, );
hipDeviceSynchronize();
}
__global__ void assign_fc2_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC2_SIZE)
{
_fc2_b[i]-=(_fc2_db[i]/_minibatch);
_fc2_db[i]=0;
}
}
__global__ void assign_fc2_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
{
_fc2_w[i][j]-=(_fc2_dw[i][j]/_minibatch);
_fc2_dw[i][j]=0;
}
}
__global__ void assign_fc1_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC1_SIZE)
{
_fc1_b[i]-=(_fc1_db[i]/_minibatch);
_fc1_db[i]=0;
}
}
__global__ void assign_fc1_w(int j)
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int k=threadIdx.y+blockDim.y*blockIdx.y;
int l=threadIdx.z+blockDim.z*blockIdx.z;
if(i<FC1_SIZE&&k<POOL_SIZE&&l<POOL_SIZE)
{
_fc1_w[i][j][k][l]-=(_fc1_dw[i][j][k][l]/_minibatch);
_fc1_dw[i][j][k][l]=0;
}
}
__global__ void assign_conv_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<CONV_W_NUM)
{
_conv_b[i]-=(_conv_db[i]/_minibatch);
_conv_db[i]=0;
}
}
__global__ void assign_conv_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int l=threadIdx.y+blockDim.y*blockIdx.y;
int m=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&l<CONV_W_SIZE&&m<CONV_W_SIZE)
{
_conv_w[i][l][m]-=(_conv_dw[i][l][m]/_minibatch);
_conv_dw[i][l][m]=0;
}
}
void assign_grads_gpu()
{
dim3 block1(32);
dim3 grid1((FC2_SIZE-1)/block1.x+1);
hipLaunchKernelGGL(( assign_fc2_b), dim3(block1),dim3(grid1), 0, 0, );
dim3 block2(32,32);
dim3 grid2((FC2_SIZE-1)/block2.x+1,(FC1_SIZE-1)/block2.y+1);
hipLaunchKernelGGL(( assign_fc2_w), dim3(block2),dim3(grid2), 0, 0, );
dim3 block3(32);
dim3 grid3((FC1_SIZE-1)/block3.x+1);
hipLaunchKernelGGL(( assign_fc1_b), dim3(block3),dim3(grid3), 0, 0, );
dim3 block4(8,8,8);
dim3 grid4((FC1_SIZE-1)/block4.x+1,(POOL_SIZE-1)/block4.y+1,(POOL_SIZE-1)/block4.z+1);
for(int j=0;j<CONV_W_NUM;j++)
hipLaunchKernelGGL(( assign_fc1_w), dim3(block4),dim3(grid4), 0, 0, j);
dim3 block5(32);
dim3 grid5((CONV_W_NUM-1)/block5.x+1);
hipLaunchKernelGGL(( assign_conv_b), dim3(block5),dim3(grid5), 0, 0, );
dim3 block6(8,8,8);
dim3 grid6((CONV_W_NUM-1)/block6.x+1,(CONV_W_SIZE-1)/block6.y+1,(CONV_W_SIZE-1)/block6.z+1);
hipLaunchKernelGGL(( assign_conv_w), dim3(block6),dim3(grid6), 0, 0, );
hipDeviceSynchronize();
} | 58d9c9064bf3ebcc03ccf6401b687a5077d806ab.cu | #include "bp_gpu.cuh"
__global__ void _update_fc2_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC2_SIZE)
{
_fc2_delta[i]=_alpha*_C[i]*(_fc2_a[i]*(1.0-_fc2_a[i]));
_fc2_db[i]+=_fc2_delta[i];
}
}
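// A short derivation sketch for the delta term above, assuming _fc2_a holds sigmoid
// activations and _C[i] is the cost derivative dE/da for output unit i:
//   a = sigmoid(z)  =>  da/dz = a * (1 - a)
//   delta_i = dE/dz_i = (dE/da_i) * (da_i/dz_i) = _C[i] * _fc2_a[i] * (1 - _fc2_a[i])
// The _alpha factor folds the learning rate into the accumulated gradient, so the
// assign_* kernels later in this file only have to divide by the minibatch size.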
void update_fc2_b_gpu()
{
dim3 block(32);
dim3 grid((FC2_SIZE-1)/block.x+1);
_update_fc2_b<<<block,grid>>>();
cudaDeviceSynchronize();
}
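// Launch-configuration note for the *_gpu wrappers in this file: the <<<...>>> arguments
// are passed as (block, grid), so the dim3 named "block" ends up as the grid dimension
// and vice versa. The total number of threads launched is still at least as large as the
// index range (e.g. 32 * ceil(FC2_SIZE/32) here) and every kernel guards its indices, so
// coverage is preserved, but the geometry is the reverse of what the variable names suggest.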
__global__ void _update_fc2_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
_fc2_dw[i][j]+=_fc2_delta[i]*_fc1_a[j];
}
void update_fc2_w_gpu()
{
dim3 block(32,32);
    dim3 grid((FC2_SIZE-1)/block.x+1,(FC1_SIZE-1)/block.y+1);
_update_fc2_w<<<block,grid>>>();
cudaDeviceSynchronize();
}
__global__ void _update_fc1_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC1_SIZE)
{
float error=0;
for(int j=0;j<FC2_SIZE;j++)
error+=_fc2_delta[j]*_fc2_w[j][i];
_fc1_delta[i]=error*(_fc1_a[i]*(1.0-_fc1_a[i]));
_fc1_db[i]+=_fc1_delta[i];
}
}
void update_fc1_b_gpu()
{
dim3 block(32);
dim3 grid((FC1_SIZE-1)/block.x+1);
_update_fc1_b<<<block,grid>>>();
cudaDeviceSynchronize();
}
__global__ void _update_fc1_w(int j)
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int k=threadIdx.y+blockDim.y*blockIdx.y;
int l=threadIdx.z+blockDim.z*blockIdx.z;
if(i<FC1_SIZE&&k<POOL_SIZE&&l<POOL_SIZE)
_fc1_dw[i][j][k][l]+=_fc1_delta[i]*_pool[j][k][l];
}
void update_fc1_w_gpu()
{
dim3 block(8,8,8);
dim3 grid((FC1_SIZE-1)/block.x+1,(POOL_SIZE-1)/block.y+1,(POOL_SIZE-1)/block.z+1);
// #pragma omp parallel for
for(int j=0;j<CONV_W_NUM;j++)
_update_fc1_w<<<block,grid>>>(j);
cudaDeviceSynchronize();
}
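// The fully-connected weight tensor updated above is 4-D (FC1_SIZE x CONV_W_NUM x
// POOL_SIZE x POOL_SIZE), while a single launch only offers three grid/block dimensions,
// hence the host-side loop over the feature-map index j. The per-j launches are
// independent, so the commented-out OpenMP pragma (or separate streams) could in
// principle overlap them, at the cost of issuing CONV_W_NUM small launches.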
__global__ void _update_conv_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<CONV_W_NUM)
{
_conv_sigma_delta[i]=0;
for(int j=0;j<POOL_SIZE;j++)
for(int k=0;k<POOL_SIZE;k++)
{
float error=0;
_conv_delta[i][j][k]=0;
for(int l=0;l<FC1_SIZE;l++)
error+=_fc1_delta[l]*_fc1_w[l][i][j][k];
_conv_delta[i][j][k]=error*(_pool[i][j][k]*(1.0-_pool[i][j][k]));
_conv_sigma_delta[i]+=error*(_pool[i][j][k]*(1.0-_pool[i][j][k]));
}
_conv_db[i]+=_conv_sigma_delta[i];
}
}
void update_conv_b_gpu()
{
dim3 block(32);
dim3 grid((CONV_W_NUM-1)/block.x+1);
_update_conv_b<<<block,grid>>>();
cudaDeviceSynchronize();
}
__global__ void _update_conv_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
int k=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&j<CONV_W_SIZE&&k<CONV_W_SIZE)
{
float error=0;
for(int m=0;m<POOL_SIZE;m++)
for(int n=0;n<POOL_SIZE;n++)
{
int x=_pool_pos[i][m][n]/2;
int y=_pool_pos[i][m][n]%2;
error+=_conv_delta[i][m][n]*_input[2*m+j+x][2*n+k+y];
}
_conv_dw[i][j][k]+=error;
}
}
void update_conv_w_gpu()
{
dim3 block(8,8,8);
dim3 grid((CONV_W_NUM-1)/block.x+1,(CONV_W_SIZE-1)/block.y+1,(CONV_W_SIZE-1)/block.z+1);
_update_conv_w<<<block,grid>>>();
cudaDeviceSynchronize();
}
__global__ void assign_fc2_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC2_SIZE)
{
_fc2_b[i]-=(_fc2_db[i]/_minibatch);
_fc2_db[i]=0;
}
}
__global__ void assign_fc2_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int j=threadIdx.y+blockDim.y*blockIdx.y;
if(i<FC2_SIZE&&j<FC1_SIZE)
{
_fc2_w[i][j]-=(_fc2_dw[i][j]/_minibatch);
_fc2_dw[i][j]=0;
}
}
__global__ void assign_fc1_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<FC1_SIZE)
{
_fc1_b[i]-=(_fc1_db[i]/_minibatch);
_fc1_db[i]=0;
}
}
__global__ void assign_fc1_w(int j)
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int k=threadIdx.y+blockDim.y*blockIdx.y;
int l=threadIdx.z+blockDim.z*blockIdx.z;
if(i<FC1_SIZE&&k<POOL_SIZE&&l<POOL_SIZE)
{
_fc1_w[i][j][k][l]-=(_fc1_dw[i][j][k][l]/_minibatch);
_fc1_dw[i][j][k][l]=0;
}
}
__global__ void assign_conv_b()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
if(i<CONV_W_NUM)
{
_conv_b[i]-=(_conv_db[i]/_minibatch);
_conv_db[i]=0;
}
}
__global__ void assign_conv_w()
{
int i=threadIdx.x+blockDim.x*blockIdx.x;
int l=threadIdx.y+blockDim.y*blockIdx.y;
int m=threadIdx.z+blockDim.z*blockIdx.z;
if(i<CONV_W_NUM&&l<CONV_W_SIZE&&m<CONV_W_SIZE)
{
_conv_w[i][l][m]-=(_conv_dw[i][l][m]/_minibatch);
_conv_dw[i][l][m]=0;
}
}
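// All of the assign_* kernels above apply the same minibatch step, sketched here for a
// generic parameter theta with accumulated gradient g over _minibatch samples:
//   theta <- theta - g / _minibatch,   then   g <- 0
// (the learning rate is already folded into g by the update_* kernels, via _alpha).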
void assign_grads_gpu()
{
dim3 block1(32);
dim3 grid1((FC2_SIZE-1)/block1.x+1);
assign_fc2_b<<<block1,grid1>>>();
dim3 block2(32,32);
dim3 grid2((FC2_SIZE-1)/block2.x+1,(FC1_SIZE-1)/block2.y+1);
assign_fc2_w<<<block2,grid2>>>();
dim3 block3(32);
dim3 grid3((FC1_SIZE-1)/block3.x+1);
assign_fc1_b<<<block3,grid3>>>();
dim3 block4(8,8,8);
dim3 grid4((FC1_SIZE-1)/block4.x+1,(POOL_SIZE-1)/block4.y+1,(POOL_SIZE-1)/block4.z+1);
for(int j=0;j<CONV_W_NUM;j++)
assign_fc1_w<<<block4,grid4>>>(j);
dim3 block5(32);
dim3 grid5((CONV_W_NUM-1)/block5.x+1);
assign_conv_b<<<block5,grid5>>>();
dim3 block6(8,8,8);
dim3 grid6((CONV_W_NUM-1)/block6.x+1,(CONV_W_SIZE-1)/block6.y+1,(CONV_W_SIZE-1)/block6.z+1);
assign_conv_w<<<block6,grid6>>>();
cudaDeviceSynchronize();
} |
29ba646e521f61dedde8ead09efe0529e078fae6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
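// Dispatch pattern shared by every instantiation in this generated file: the kernel body
// is compiled only when __CUDA_ARCH__ falls inside the architecture window this variant
// targets (here [sm70, sm75)); for any other architecture the guarded code collapses to
// the printf below, so launching a mismatched binary fails loudly at run time rather than
// silently running a kernel tuned for the wrong hardware.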
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| 29ba646e521f61dedde8ead09efe0529e078fae6.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
using namespace PyTorchMemEffAttention;
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 128, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
2f4b567d8b7d8e8c6651620ab46e0a2dabd0aa21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "cudaVector.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
static inline __device__ float clamp( float x ) { return fminf(fmaxf(x, 0.0f), 255.0f); }
// YUV2RGB
template<typename T>
static inline __device__ T YUV2RGB(const uint3& yuvi)
{
const float luma = float(yuvi.x);
const float u = float(yuvi.y) - 512.0f;
const float v = float(yuvi.z) - 512.0f;
const float s = 1.0f / 1024.0f * 255.0f; // TODO clamp for uchar output?
// R = Y + 1.140V
// G = Y - 0.395U - 0.581V
// B = Y + 2.032U
return make_vec<T>(clamp((luma + 1.140f * v) * s),
clamp((luma - 0.395f * u - 0.581f * v) * s),
clamp((luma + 2.032f * u) * s), 255);
}
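// Rough worked example for the conversion above (values are illustrative only):
// for a 10-bit sample yuvi = (512, 512, 1023), i.e. mid luma, neutral Cb, maximal Cr:
//   u = 0, v = 511, s = 255/1024 ~= 0.249
//   R ~= (512 + 1.140 * 511) * s ~= 273  -> clamped to 255
//   G ~= (512 - 0.581 * 511) * s ~=  54
//   B ~=  512 * s                ~= 128
// i.e. a strongly red pixel, as expected for a large positive V (Cr) component.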
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
	// Clamp final 8 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red) << 24) |
(((uint32_t)green) << 16) |
(((uint32_t)blue) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red >> 2) << 24) |
(((uint32_t)green >> 2) << 16) |
(((uint32_t)blue >> 2) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__global__ void Passthru(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]);
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
	// this step performs the color conversion
float luma[2];
luma[0] = (yuv101010Pel[0] & 0x00FF);
luma[1] = (yuv101010Pel[1] & 0x00FF);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], 255); // alpha=((uint32_t)0xff<< 24);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], 255);
}
// NV12ToRGBA
template<typename T>
__global__ void NV12ToRGBA(uint32_t* srcImage, size_t nSourcePitch,
T* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width )
return; //x = width - 1;
if( y >= height )
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
	// this step performs the color conversion
const uint3 yuvi_0 = make_uint3((yuv101010Pel[0] & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
const uint3 yuvi_1 = make_uint3((yuv101010Pel[1] & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
// YUV to RGB transformation conversion
dstImage[y * width + x] = YUV2RGB<T>(yuvi_0);
dstImage[y * width + x + 1] = YUV2RGB<T>(yuvi_1);
}
template<typename T>
hipError_t launchNV12ToRGBA( void* srcDev, T* dstDev, size_t width, size_t height )
{
if( !srcDev || !dstDev )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
const size_t srcPitch = width * sizeof(uint8_t);
const size_t dstPitch = width * sizeof(T);
const dim3 blockDim(32,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height, blockDim.y), 1);
hipLaunchKernelGGL(( NV12ToRGBA<T>), dim3(gridDim), dim3(blockDim), 0, 0, (uint32_t*)srcDev, srcPitch, dstDev, dstPitch, width, height );
return CUDA(hipGetLastError());
}
// cudaNV12ToRGB (uchar3)
hipError_t cudaNV12ToRGB( void* srcDev, uchar3* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<uchar3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGB (float3)
hipError_t cudaNV12ToRGB( void* srcDev, float3* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<float3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (uchar4)
hipError_t cudaNV12ToRGBA( void* srcDev, uchar4* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<uchar4>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (float4)
hipError_t cudaNV12ToRGBA( void* srcDev, float4* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<float4>(srcDev, destDev, width, height);
}
#if 0
// cudaNV12SetupColorspace
hipError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(hipMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return hipErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(hipMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return hipErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return hipSuccess;
}
#endif
| 2f4b567d8b7d8e8c6651620ab46e0a2dabd0aa21.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "cudaVector.h"
#define COLOR_COMPONENT_MASK 0x3FF
#define COLOR_COMPONENT_BIT_SIZE 10
#define FIXED_DECIMAL_POINT 24
#define FIXED_POINT_MULTIPLIER 1.0f
#define FIXED_COLOR_COMPONENT_MASK 0xffffffff
static inline __device__ float clamp( float x ) { return fminf(fmaxf(x, 0.0f), 255.0f); }
// YUV2RGB
template<typename T>
static inline __device__ T YUV2RGB(const uint3& yuvi)
{
const float luma = float(yuvi.x);
const float u = float(yuvi.y) - 512.0f;
const float v = float(yuvi.z) - 512.0f;
const float s = 1.0f / 1024.0f * 255.0f; // TODO clamp for uchar output?
// R = Y + 1.140V
// G = Y - 0.395U - 0.581V
// B = Y + 2.032U
return make_vec<T>(clamp((luma + 1.140f * v) * s),
clamp((luma - 0.395f * u - 0.581f * v) * s),
clamp((luma + 2.032f * u) * s), 255);
}
__device__ uint32_t RGBAPACK_8bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
	// Clamp final 8 bit results
red = min(max(red, 0.0f), 255.0f);
green = min(max(green, 0.0f), 255.0f);
blue = min(max(blue, 0.0f), 255.0f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red) << 24) |
(((uint32_t)green) << 16) |
(((uint32_t)blue) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__device__ uint32_t RGBAPACK_10bit(float red, float green, float blue, uint32_t alpha)
{
uint32_t ARGBpixel = 0;
// Clamp final 10 bit results
red = min(max(red, 0.0f), 1023.f);
green = min(max(green, 0.0f), 1023.f);
blue = min(max(blue, 0.0f), 1023.f);
// Convert to 8 bit unsigned integers per color component
ARGBpixel = ((((uint32_t)red >> 2) << 24) |
(((uint32_t)green >> 2) << 16) |
(((uint32_t)blue >> 2) << 8) | (uint32_t)alpha);
return ARGBpixel;
}
__global__ void Passthru(uint32_t *srcImage, size_t nSourcePitch,
uint32_t *dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint32_t dstImagePitch = nDestPitch >> 2;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= width)
return; //x = width - 1;
if (y >= height)
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]);
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]);
	// this step performs the color conversion
float luma[2];
luma[0] = (yuv101010Pel[0] & 0x00FF);
luma[1] = (yuv101010Pel[1] & 0x00FF);
// Clamp the results to RGBA
dstImage[y * dstImagePitch + x ] = RGBAPACK_8bit(luma[0], luma[0], luma[0], 255); // alpha=((uint32_t)0xff<< 24);
dstImage[y * dstImagePitch + x + 1 ] = RGBAPACK_8bit(luma[1], luma[1], luma[1], 255);
}
// NV12ToRGBA
template<typename T>
__global__ void NV12ToRGBA(uint32_t* srcImage, size_t nSourcePitch,
T* dstImage, size_t nDestPitch,
uint32_t width, uint32_t height)
{
int x, y;
uint32_t yuv101010Pel[2];
uint32_t processingPitch = ((width) + 63) & ~63;
uint8_t *srcImageU8 = (uint8_t *)srcImage;
processingPitch = nSourcePitch;
// Pad borders with duplicate pixels, and we multiply by 2 because we process 2 pixels per thread
x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= width )
return; //x = width - 1;
if( y >= height )
return; // y = height - 1;
// Read 2 Luma components at a time, so we don't waste processing since CbCr are decimated this way.
// if we move to texture we could read 4 luminance values
yuv101010Pel[0] = (srcImageU8[y * processingPitch + x ]) << 2;
yuv101010Pel[1] = (srcImageU8[y * processingPitch + x + 1]) << 2;
uint32_t chromaOffset = processingPitch * height;
int y_chroma = y >> 1;
if (y & 1) // odd scanline ?
{
uint32_t chromaCb;
uint32_t chromaCr;
chromaCb = srcImageU8[chromaOffset + y_chroma * processingPitch + x ];
chromaCr = srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1];
if (y_chroma < ((height >> 1) - 1)) // interpolate chroma vertically
{
chromaCb = (chromaCb + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x ] + 1) >> 1;
chromaCr = (chromaCr + srcImageU8[chromaOffset + (y_chroma + 1) * processingPitch + x + 1] + 1) >> 1;
}
yuv101010Pel[0] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= (chromaCb << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= (chromaCr << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
else
{
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[0] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x ] << (COLOR_COMPONENT_BIT_SIZE + 2));
yuv101010Pel[1] |= ((uint32_t)srcImageU8[chromaOffset + y_chroma * processingPitch + x + 1] << ((COLOR_COMPONENT_BIT_SIZE << 1) + 2));
}
	// this step performs the color conversion
const uint3 yuvi_0 = make_uint3((yuv101010Pel[0] & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[0] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
const uint3 yuvi_1 = make_uint3((yuv101010Pel[1] & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> COLOR_COMPONENT_BIT_SIZE) & COLOR_COMPONENT_MASK),
((yuv101010Pel[1] >> (COLOR_COMPONENT_BIT_SIZE << 1)) & COLOR_COMPONENT_MASK));
// YUV to RGB transformation conversion
dstImage[y * width + x] = YUV2RGB<T>(yuvi_0);
dstImage[y * width + x + 1] = YUV2RGB<T>(yuvi_1);
}
template<typename T>
cudaError_t launchNV12ToRGBA( void* srcDev, T* dstDev, size_t width, size_t height )
{
if( !srcDev || !dstDev )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
const size_t srcPitch = width * sizeof(uint8_t);
const size_t dstPitch = width * sizeof(T);
const dim3 blockDim(32,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height, blockDim.y), 1);
NV12ToRGBA<T><<<gridDim, blockDim>>>( (uint32_t*)srcDev, srcPitch, dstDev, dstPitch, width, height );
return CUDA(cudaGetLastError());
}
// cudaNV12ToRGB (uchar3)
cudaError_t cudaNV12ToRGB( void* srcDev, uchar3* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<uchar3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGB (float3)
cudaError_t cudaNV12ToRGB( void* srcDev, float3* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<float3>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (uchar4)
cudaError_t cudaNV12ToRGBA( void* srcDev, uchar4* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<uchar4>(srcDev, destDev, width, height);
}
// cudaNV12ToRGBA (float4)
cudaError_t cudaNV12ToRGBA( void* srcDev, float4* destDev, size_t width, size_t height )
{
return launchNV12ToRGBA<float4>(srcDev, destDev, width, height);
}
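// Minimal usage sketch (illustrative only; the buffer names below are assumptions and not
// part of this API). Both pointers must already be device allocations, with the NV12
// source laid out as width*height luma bytes followed by the interleaved half-resolution
// CbCr plane:
//
//   void*   nv12Dev;   // width * height * 3 / 2 bytes of NV12 data on the device
//   uchar4* rgbaDev;   // width * height RGBA pixels on the device
//   if( CUDA_FAILED(cudaNV12ToRGBA(nv12Dev, rgbaDev, width, height)) )
//       printf("NV12 -> RGBA conversion failed\n");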
#if 0
// cudaNV12SetupColorspace
cudaError_t cudaNV12SetupColorspace( float hue )
{
const float hueSin = sin(hue);
const float hueCos = cos(hue);
float hueCSC[9];
const bool itu601 = false;
if( itu601 /*CSC == ITU601*/)
{
//CCIR 601
hueCSC[0] = 1.1644f;
hueCSC[1] = hueSin * 1.5960f;
hueCSC[2] = hueCos * 1.5960f;
hueCSC[3] = 1.1644f;
hueCSC[4] = (hueCos * -0.3918f) - (hueSin * 0.8130f);
hueCSC[5] = (hueSin * 0.3918f) - (hueCos * 0.8130f);
hueCSC[6] = 1.1644f;
hueCSC[7] = hueCos * 2.0172f;
hueCSC[8] = hueSin * -2.0172f;
}
else /*if(CSC == ITU709)*/
{
//CCIR 709
hueCSC[0] = 1.0f;
hueCSC[1] = hueSin * 1.57480f;
hueCSC[2] = hueCos * 1.57480f;
hueCSC[3] = 1.0;
hueCSC[4] = (hueCos * -0.18732f) - (hueSin * 0.46812f);
hueCSC[5] = (hueSin * 0.18732f) - (hueCos * 0.46812f);
hueCSC[6] = 1.0f;
hueCSC[7] = hueCos * 1.85560f;
hueCSC[8] = hueSin * -1.85560f;
}
if( CUDA_FAILED(cudaMemcpyToSymbol(constHueColorSpaceMat, hueCSC, sizeof(float) * 9)) )
return cudaErrorInvalidSymbol;
uint32_t cudaAlpha = ((uint32_t)0xff<< 24);
if( CUDA_FAILED(cudaMemcpyToSymbol(constAlpha, &cudaAlpha, sizeof(uint32_t))) )
return cudaErrorInvalidSymbol;
nv12ColorspaceSetup = true;
return cudaSuccess;
}
#endif
|
9f8c879fb2df6a8703a5e3b26c59413e86a85895.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MaxpoolingLayerKernel.cu
*
* Created on: Jun 6, 2017
* Author: carol
*/
#include "cudaUtil.h"
#include "MaxpoolingLayer.h"
__device__ inline size_t get_out_index(size_t out_width, size_t out_height,
size_t out, size_t h_, size_t w_) {
return out * out_width * out_height + h_ / 2 * out_width + (w_ / 2);
}
__device__ inline Pair get_max_loc_pair(size_t first, size_t second) {
Pair ret;
ret.first = first;
ret.second = second;
return ret;
}
__device__ inline float max_in_(float_t *input_, Pair *max_loc,
size_t in_width_, size_t in_height_, size_t in_index, size_t h_,
size_t w_, size_t out_index) {
float_t max_pixel = 0;
size_t tmp;
#pragma unroll
for (size_t x = 0; x < MAXPOOL_SIZE; x++) {
#pragma unroll
for (size_t y = 0; y < MAXPOOL_SIZE; y++) {
tmp = (in_index * in_width_ * in_height_) + ((h_ + y) * in_width_)
+ (w_ + x);
if (max_pixel < input_[tmp]) {
max_pixel = input_[tmp];
max_loc[out_index] = get_max_loc_pair(out_index, tmp);
}
}
}
return max_pixel;
}
__global__ void forward_maxpool_layer_kernel(float_t *input_, Pair *max_loc,
float_t *output_, size_t out_width, size_t out_height,
size_t out_depth_, size_t in_height, size_t in_width) {
int h_ = blockIdx.y * blockDim.y + threadIdx.y;
int w_ = (blockIdx.x * blockDim.x + threadIdx.x) / out_depth_;
int out = (blockIdx.x * blockDim.x + threadIdx.x) % out_depth_;
if ((out < out_depth_) && (h_ < in_height) && (w_ < in_width) && !(h_ % 2)
&& !(w_ % 2)) {
size_t index = get_out_index(out_width, out_height, out, h_, w_);
output_[index] = max_in_(input_, max_loc, in_width, in_height, out, h_,
w_, index);
}
}
__global__ void backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next,
size_t max_size, size_t g_max_size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= max_size)
return;
Pair p = max_loc[x];
if (p.first != MAX && p.second != MAX && p.second < g_max_size && p.first < g_max_size) {
g_[p.second] = g_next[p.first];
}
}
__global__ void forward_maxpool_layer_kernel_darknet(int n, int in_h, int in_w,
int in_c, int stride, int size, int pad, float *input, float *output,
size_t *indexes) {
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w * (i + h * (k + c * b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i * stride + l;
int cur_w = w_offset + j * stride + m;
int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0
&& cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
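// Index bookkeeping for the kernel above: the flat thread id is decomposed as
// id = ((b * c + k) * h + i) * w + j, i.e. j is the output column, i the output row,
// k the channel and b the batch image, and out_index reassembles exactly the same linear
// offset, so consecutive threads write consecutive elements of output[] and indexes[]
// (coalesced stores).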
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride, int size, int pad, float *delta,
float *prev_delta, size_t *indexes) {
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int area = (size - 1) / stride;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
float d = 0;
int l, m;
for (l = -area; l < area + 1; ++l) {
for (m = -area; m < area + 1; ++m) {
int out_w = (j - w_offset) / stride + m;
int out_h = (i - h_offset) / stride + l;
int out_index = out_w + w * (out_h + h * (k + c * b));
int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
void MaxpoolingLayer::forward() {
// execute the code on the device
float_t *input = this->input_.d_data();
float_t *output = this->output_.d_data();
// Pair *max_loc_buf = this->max_loc.d_data();
size_t out_width = this->out_width_;
size_t out_height = this->out_height_;
size_t out_depth = this->out_depth_;
size_t in_height = this->in_height_;
size_t in_width = this->in_width_;
size_t *indexes = this->indexes.d_data();
size_t in_depth = this->in_depth_;
// call_forward_maxpool_layer_gpu(input, output,indexes, out_width,
// out_height, out_depth, in_height, in_width, in_depth, 2, 0, 2, this->batch);
// dim3 blocks, threads;
//
// cuda_gridsize(&threads, &blocks, in_width * out_depth, in_height);
//
// forward_maxpool_layer_kernel<<<blocks, threads>>>(input, max_loc, output,
// out_width, out_height, out_depth, in_height, in_width);
// CudaCheckError();
// Trying darknet approach
int h = out_height;
int w = out_width;
int c = out_depth;
size_t n = h * w * c * batch;
dim3 blocks = cuda_gridsize(n);
hipLaunchKernelGGL(( forward_maxpool_layer_kernel_darknet), dim3(blocks), dim3(BLOCK_SIZE_FULL), 0, 0, n, in_height,
in_width, in_depth, this->stride, this->size, this->pad,
input, output, indexes);
CudaCheckError();
}
void MaxpoolingLayer::back_prop() {
g_.clear();
g_.resize(this->in_width_ * this->in_height_ * this->in_depth_);
// Pair *max_loc = this->max_loc.d_data();
float *g_ = this->next->g_.d_data();
float *g_prev = this->g_.d_data();
// size_t max_size = this->max_loc.size();
size_t g_max_size = this->g_.size();
size_t *indexes = this->indexes.d_data();
//call_backpropagation_maxpool(max_loc, g_, g_next, max_size, g_max_size);
// dim3 blocks, threads;
// cuda_gridsize(&threads, &blocks, max_size);
//
// assert(g_max_size != 0);
// backpropagation_maxpool<<<blocks, threads>>>(max_loc, g_, g_next, max_size, g_max_size);
// CudaCheckError();
// size_t n = layer.h * layer.w * layer.c * layer.batch;
int h = this->in_height_, w = this->in_width_, c = this->in_depth_;
size_t n = h * w * c * this->batch;
dim3 blocks = cuda_gridsize(n);
// backward_maxpool_layer_kernel(int n, int in_h, int in_w,
// int in_c, int stride, int size, int pad, float *delta,
// float *prev_delta, size_t *indexes)
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK_SIZE_FULL), 0, 0, n, h, w, c, this->stride,
this->size, this->pad, g_, g_prev, indexes);
CudaCheckError();
}
//
//int main(){
// int N = 8;
// int N_OUT = 4;
// float *h, *h_out, *d, *d_out;
// int *indexes;
// h = (float*) malloc(sizeof(float) * N * N);
// h_out = (float*) malloc(sizeof(float) * N_OUT * N_OUT);
// hipMalloc(&d, N * N * sizeof(float));
// hipMalloc(&d_out, N_OUT * N_OUT * sizeof(float));
// hipMalloc(&indexes, N_OUT * N_OUT * sizeof(int));
//
//
// for(int i = 0; i < N; i++){
// for(int j = 0; j < N; j++){
// printf("%f ", float(i * N + j));
// h[i * N + j] = i * N + j;
// }
// printf("\n");
// }
// hipMemcpy(d, h, sizeof(float) * N * N, hipMemcpyHostToDevice);
//
// int n = N_OUT * N_OUT;
//
//// forward_maxpool_layer_kernel_darknet(int n, int in_h, int in_w,
//// int in_c, int stride, int size, int pad, float *input, float *output,
//// int *indexes);
// dim3 blocks = cuda_gridsize(n);
//hipLaunchKernelGGL(( forward_maxpool_layer_kernel_darknet), dim3(blocks), dim3(BLOCK_SIZE_FULL), 0, 0, n, N, N, 1, 2, 2, 0, d, d_out, indexes);
// hipError_t s = hipDeviceSynchronize();
// printf("\n%d\n", s);
//
// hipMemcpy(h_out, d_out, sizeof(float) * N_OUT * N_OUT, hipMemcpyDeviceToHost);
//
// for(int i = 0; i < N_OUT * N_OUT; i++)
// printf("%f ", h_out[i]);
// printf("\n");
//
// hipFree(d);
// hipFree(d_out);
// hipFree(indexes);
// free(h);
// free(h_out);
//
//
//}
| 9f8c879fb2df6a8703a5e3b26c59413e86a85895.cu | /*
* MaxpoolingLayerKernel.cu
*
* Created on: Jun 6, 2017
* Author: carol
*/
#include "cudaUtil.h"
#include "MaxpoolingLayer.h"
__device__ inline size_t get_out_index(size_t out_width, size_t out_height,
size_t out, size_t h_, size_t w_) {
return out * out_width * out_height + h_ / 2 * out_width + (w_ / 2);
}
__device__ inline Pair get_max_loc_pair(size_t first, size_t second) {
Pair ret;
ret.first = first;
ret.second = second;
return ret;
}
__device__ inline float max_in_(float_t *input_, Pair *max_loc,
size_t in_width_, size_t in_height_, size_t in_index, size_t h_,
size_t w_, size_t out_index) {
float_t max_pixel = 0;
size_t tmp;
#pragma unroll
for (size_t x = 0; x < MAXPOOL_SIZE; x++) {
#pragma unroll
for (size_t y = 0; y < MAXPOOL_SIZE; y++) {
tmp = (in_index * in_width_ * in_height_) + ((h_ + y) * in_width_)
+ (w_ + x);
if (max_pixel < input_[tmp]) {
max_pixel = input_[tmp];
max_loc[out_index] = get_max_loc_pair(out_index, tmp);
}
}
}
return max_pixel;
}
__global__ void forward_maxpool_layer_kernel(float_t *input_, Pair *max_loc,
float_t *output_, size_t out_width, size_t out_height,
size_t out_depth_, size_t in_height, size_t in_width) {
int h_ = blockIdx.y * blockDim.y + threadIdx.y;
int w_ = (blockIdx.x * blockDim.x + threadIdx.x) / out_depth_;
int out = (blockIdx.x * blockDim.x + threadIdx.x) % out_depth_;
if ((out < out_depth_) && (h_ < in_height) && (w_ < in_width) && !(h_ % 2)
&& !(w_ % 2)) {
size_t index = get_out_index(out_width, out_height, out, h_, w_);
output_[index] = max_in_(input_, max_loc, in_width, in_height, out, h_,
w_, index);
}
}
__global__ void backpropagation_maxpool(Pair *max_loc, float *g_, float *g_next,
size_t max_size, size_t g_max_size) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
	if (x >= max_size)
return;
Pair p = max_loc[x];
if (p.first != MAX && p.second != MAX && p.second < g_max_size && p.first < g_max_size) {
g_[p.second] = g_next[p.first];
}
}
__global__ void forward_maxpool_layer_kernel_darknet(int n, int in_h, int in_w,
int in_c, int stride, int size, int pad, float *input, float *output,
size_t *indexes) {
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
int out_index = j + w * (i + h * (k + c * b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i * stride + l;
int cur_w = w_offset + j * stride + m;
int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0
&& cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride, int size, int pad, float *delta,
float *prev_delta, size_t *indexes) {
int h = (in_h + 2 * pad) / stride;
int w = (in_w + 2 * pad) / stride;
int c = in_c;
int area = (size - 1) / stride;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad;
int h_offset = -pad;
float d = 0;
int l, m;
for (l = -area; l < area + 1; ++l) {
for (m = -area; m < area + 1; ++m) {
int out_w = (j - w_offset) / stride + m;
int out_h = (i - h_offset) / stride + l;
int out_index = out_w + w * (out_h + h * (k + c * b));
int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
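// The backward kernel above runs one thread per *input* element: each input pixel visits
// the small window of output positions whose pooling regions could contain it (bounded by
// area = (size - 1) / stride) and accumulates delta only from outputs whose recorded
// argmax, indexes[out_index], is exactly this input element, which routes the gradient
// back through the max operation.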
void MaxpoolingLayer::forward() {
// execute the code on the device
float_t *input = this->input_.d_data();
float_t *output = this->output_.d_data();
// Pair *max_loc_buf = this->max_loc.d_data();
size_t out_width = this->out_width_;
size_t out_height = this->out_height_;
size_t out_depth = this->out_depth_;
size_t in_height = this->in_height_;
size_t in_width = this->in_width_;
size_t *indexes = this->indexes.d_data();
size_t in_depth = this->in_depth_;
// call_forward_maxpool_layer_gpu(input, output,indexes, out_width,
// out_height, out_depth, in_height, in_width, in_depth, 2, 0, 2, this->batch);
// dim3 blocks, threads;
//
// cuda_gridsize(&threads, &blocks, in_width * out_depth, in_height);
//
// forward_maxpool_layer_kernel<<<blocks, threads>>>(input, max_loc, output,
// out_width, out_height, out_depth, in_height, in_width);
// CudaCheckError();
// Trying darknet approach
int h = out_height;
int w = out_width;
int c = out_depth;
size_t n = h * w * c * batch;
dim3 blocks = cuda_gridsize(n);
forward_maxpool_layer_kernel_darknet<<<blocks, BLOCK_SIZE_FULL>>>(n, in_height,
in_width, in_depth, this->stride, this->size, this->pad,
input, output, indexes);
CudaCheckError();
}
void MaxpoolingLayer::back_prop() {
g_.clear();
g_.resize(this->in_width_ * this->in_height_ * this->in_depth_);
// Pair *max_loc = this->max_loc.d_data();
float *g_ = this->next->g_.d_data();
float *g_prev = this->g_.d_data();
// size_t max_size = this->max_loc.size();
size_t g_max_size = this->g_.size();
size_t *indexes = this->indexes.d_data();
//call_backpropagation_maxpool(max_loc, g_, g_next, max_size, g_max_size);
// dim3 blocks, threads;
// cuda_gridsize(&threads, &blocks, max_size);
//
// assert(g_max_size != 0);
// backpropagation_maxpool<<<blocks, threads>>>(max_loc, g_, g_next, max_size, g_max_size);
// CudaCheckError();
// size_t n = layer.h * layer.w * layer.c * layer.batch;
int h = this->in_height_, w = this->in_width_, c = this->in_depth_;
size_t n = h * w * c * this->batch;
dim3 blocks = cuda_gridsize(n);
// backward_maxpool_layer_kernel(int n, int in_h, int in_w,
// int in_c, int stride, int size, int pad, float *delta,
// float *prev_delta, size_t *indexes)
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK_SIZE_FULL>>>(n, h, w, c, this->stride,
this->size, this->pad, g_, g_prev, indexes);
CudaCheckError();
}
//
//int main(){
// int N = 8;
// int N_OUT = 4;
// float *h, *h_out, *d, *d_out;
// int *indexes;
// h = (float*) malloc(sizeof(float) * N * N);
// h_out = (float*) malloc(sizeof(float) * N_OUT * N_OUT);
// cudaMalloc(&d, N * N * sizeof(float));
// cudaMalloc(&d_out, N_OUT * N_OUT * sizeof(float));
// cudaMalloc(&indexes, N_OUT * N_OUT * sizeof(int));
//
//
// for(int i = 0; i < N; i++){
// for(int j = 0; j < N; j++){
// printf("%f ", float(i * N + j));
// h[i * N + j] = i * N + j;
// }
// printf("\n");
// }
// cudaMemcpy(d, h, sizeof(float) * N * N, cudaMemcpyHostToDevice);
//
// int n = N_OUT * N_OUT;
//
//// forward_maxpool_layer_kernel_darknet(int n, int in_h, int in_w,
//// int in_c, int stride, int size, int pad, float *input, float *output,
//// int *indexes);
// dim3 blocks = cuda_gridsize(n);
// forward_maxpool_layer_kernel_darknet<<<blocks, BLOCK_SIZE_FULL>>>(n, N, N, 1, 2, 2, 0, d, d_out, indexes);
// cudaError_t s = cudaDeviceSynchronize();
// printf("\n%d\n", s);
//
// cudaMemcpy(h_out, d_out, sizeof(float) * N_OUT * N_OUT, cudaMemcpyDeviceToHost);
//
// for(int i = 0; i < N_OUT * N_OUT; i++)
// printf("%f ", h_out[i]);
// printf("\n");
//
// cudaFree(d);
// cudaFree(d_out);
// cudaFree(indexes);
// free(h);
// free(h_out);
//
//
//}
|
a4ff63d67a7549f56c32fec42ef241c39b54db16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// removeDups.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* The duplicate removal algorithm uses an approach based on the following
* "Efficient Stream Compaction on Wide SIMD Many-Core Architectures"
* by Markus Billeter, Ola Olsson, Ulf Assarsson
* http://www.cse.chalmers.se/~uffe/streamcompaction.pdf
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include <assert.h>
#include <helper_cuda.h>
#include <sm_30_intrinsics.h>
#include "Gpu.h"
#include "removeDups_common.h"
__device__ sint superKeyCompareFirstDimSmplA(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
sint diff = ap - bp;
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
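// Super-key comparison sketch: the leading dimension p is compared first (its two
// coordinates arrive already loaded as ap and bp), and ties are broken by cycling through
// the remaining dimensions p+1, p+2, ... modulo dim. For example, with dim = 3 and p = 1,
// two points that tie on y are next compared on z and then on x, so only points identical
// in every dimension compare equal, which is the property the duplicate-removal kernels
// below rely on.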
/*
 * Copy a segment of the reference and value arrays using all of the threads of a warp.
 *
 * calling parameters:
 *
 * refout, valout - the destination reference and value arrays
 * refin, valin - the source reference and value arrays
 * segSize - the number of elements in the segment to copy
 * numTuples - the total number of tuples
*/
__device__ void cuWarpCopyRefVal(refIdx_t refout[], KdCoord valout[], refIdx_t refin[], KdCoord valin[],
sint segSize, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
for (sint j = 0; j+thrdIdx < segSize; j += warpSize){
valout[j+thrdIdx] = valin[j+thrdIdx];
refout[j+thrdIdx] = refin[j+thrdIdx];
}
}
__device__ uint d_removeDupsCount; // This is where the number of tuples after the dups are removed is returned.
__device__ sint d_removeDupsError; // This is where an error is indicated.
__device__ sint d_removeDupsErrorAdr; // This is where the address at which an error was detected is returned.
__global__ void cuRemoveGaps(refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
uint segSizex, uint segLengths[], const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segStartOut = 0;
uint segStartIn = warpIndex * segSizex;
uint segSize = 0;
// Do the simple slow implementation first
// Get the segment start and segment size from the segLengths array written by the cuRemoveDups kernel.
if (thrdIdx == 0) {
for (uint i = 0; i<warpIndex; i++)
segStartOut += segLengths[i];
segSize = segLengths[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRefVal(refoutx+segStartOut, valoutx+segStartOut, refinx+segStartIn, valinx+segStartIn, segSize, numTuples);
// if this warp is processing the last segment, store the final size
if (thrdIdx == 0 && ((segStartIn + segSizex) >= numTuples))
d_removeDupsCount = segStartOut + segLengths[warpIndex];
}
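// The lane-0 loop above is just an exclusive prefix sum over segLengths. A minimal
// host-side equivalent (illustrative only; it assumes the segment lengths have been
// copied back to a host array) is:
static inline uint exclusiveScanSegLengths(const uint segLengths[], uint segStarts[], uint numSegments) {
uint running = 0;
for (uint i = 0; i < numSegments; i++) {
segStarts[i] = running; // where segment i begins in the packed output
running += segLengths[i]; // running total of surviving elements
}
return running; // total element count after duplicate removal
}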
__global__ void cuCopyRefVal(refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
sint segSizex, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
// uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segSize;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
cuWarpCopyRefVal(refoutx + segStart, valoutx + segStart, refinx + segStart, valinx + segStart, segSize, numTuples);
}
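// A minimal sketch of the ballot/popcount compaction idiom from the stream compaction
// paper referenced at the top of this file, which cuRemoveDups below builds on. It
// assumes the same legacy __ballot/__popc intrinsics and 32-bit lane masks already used
// in this file: each lane votes whether to keep its element, and the popcount of the
// votes from lower lanes gives that lane's compacted write position.
__device__ uint warpCompactionIndex(bool keep, uint laneId, uint &survivorCount) {
uint votes = __ballot(keep); // bit i is set if lane i keeps its element
survivorCount = __popc(votes); // number of elements kept by this warp
return __popc(votes & ((1u << laneId) - 1)); // survivors in lanes below this one
}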
__global__ void cuRemoveDups(KdCoord coords[], refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
KdCoord otherCoords[], refIdx_t *otherRef,
const int p, const int dim, uint segSizex, uint segLengths[], const sint numTuples)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
KdCoord val;
uint ref;
uint segSize;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
refIdx_t* refin = refinx + segStart;
KdCoord* valin = valinx + segStart;
refIdx_t* refout = refoutx + segStart;
KdCoord* valout = valoutx + segStart;
// Allocate the shared memory that will be used for coalescing of writes.
__shared__ KdCoord s_val[SHARED_SIZE_LIMIT];
__shared__ refIdx_t s_ref[SHARED_SIZE_LIMIT];
uint outCnt = 0;
uint oldOutCnt;
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// So the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (warpIndex % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
sint cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
uint shflMask = 0;
// First handle the special conditions for the initial 32 words
// This needs to be a loop to handle the case where the first warp's worth of data are all equal.
sint j;
for (j = 0; j < segSize && shflMask == 0; j += warpSize){
if (thrdIdx < segSize) { // only read and compare less than segsize
s_val[sharedBase + thrdIdx] = val = valin[thrdIdx];
s_ref[sharedBase + thrdIdx] = ref = refin[thrdIdx];
if (thrdIdx !=0 ) { // If not the first thread, do a normal compare with shared memory
cmp = superKeyCompareFirstDimSmplA(val, s_val[sharedBase + thrdIdx - 1], coords+ref*dim, coords+s_ref[sharedBase + thrdIdx - 1]*dim,
p, dim);
} else if (warpIndex != 0) { // If the first thread but not the first segment, compare with the last value of the previous segment.
cmp = superKeyCompareFirstDimSmplA(val, *(valin-1), coords+ref*dim, coords+(*(refin-1))*dim,
p, dim);
} else if (otherCoords != NULL) { // First thread of first segment of second GPU needs to compare itself with highest word of the other GPU.
cmp = superKeyCompareFirstDimSmplA(val, *(otherCoords+(*otherRef)*dim), coords+ref*dim, otherCoords+(*otherRef)*dim,
p, dim);
} else { // This handles the case of the very first data word.
cmp = 1; // Indicate the first value is greater so that it is always included
}
} else {
cmp = 0; // Use cmp == 0 in this case to exclude data outside the range
}
// First check for a compare failure, which means an earlier value is greater than the current one.
if (cmp<0) {
d_removeDupsError = -1;
atomicMin(&d_removeDupsErrorAdr, (valin - valinx) + thrdIdx);
}
valin += warpSize;
refin += warpSize;
// Check for duplicates; a 1 in shflMask indicates that this thread is not a dup, so keep it.
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of non-dups less than this thread.
uint wrtIdx = __popc(shflMask & maskGEme);
s_val[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = val;
s_ref[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = ref;
}
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCnt = outCnt;
outCnt += __popc(shflMask);
// If the first read filled one half of the double buffer, then write it out.
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
valout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
}
// OK, the first iteration is done. Now start the steady-state loop over the rest of the segment.
for (; j < segSize; j += warpSize){
if (j+thrdIdx < segSize) {
s_val[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = val = valin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref = refin[thrdIdx];
// Do the compare
cmp = superKeyCompareFirstDimSmplA(val, s_val[sharedBase + ((outCnt + thrdIdx - 1) & sharedAddrMask)],
coords+ref*dim, coords+s_ref[sharedBase + ((outCnt + thrdIdx - 1) & sharedAddrMask)]*dim,
p, dim);
} else {
cmp = 0;
}
// First check for a compare failure, which means an earlier value is greater than the current one.
if (cmp<0) {
d_removeDupsError = -1;
atomicMin(&d_removeDupsErrorAdr, (valin - valinx) + thrdIdx);
}
valin += warpSize;
refin += warpSize;
// Check for duplicates; a 1 in shflMask indicates that this thread is not a dup, so keep it.
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of non dups less than this thread.
uint wrtIdx = __popc(shflMask & maskGEme);
s_val[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = val;
s_ref[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCnt = outCnt;
outCnt += __popc(shflMask);
// If the writes spilled into the other half of the shared-memory double buffer, write out the half indicated by the old count.
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
valout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
}
}
// Write out the final buffer
if ((outCnt & (warpSize-1)) > thrdIdx) {
valout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (outCnt & warpSize) + thrdIdx];
refout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (outCnt & warpSize) + thrdIdx];
}
// And finally store the number of writes that were done by this warp
if (thrdIdx == 0 && segLengths != NULL) segLengths[warpIndex] = outCnt;
}
uint Gpu::copyRefVal(KdCoord valout[], refIdx_t refout[], KdCoord valin[], refIdx_t refin[], uint numTuples, sint numThreads) {
sint numBlocks;
sint numThrdPerBlk;
// This section just allows for single block execution for easier debug.
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
sint segSize = (numTuples + (numThreads/32) - 1) / (numThreads/32);
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuCopyRefVal), dim3(numBlocks), dim3(numThrdPerBlk), 0, stream, refout, valout, refin, valin, segSize, numTuples);
checkCudaErrors(hipGetLastError());
}
return 0;
}
uint Gpu::removeDups(KdCoord coords[], KdCoord val[], refIdx_t ref[], KdCoord valtmp[], refIdx_t reftmp[],
KdCoord valin[], refIdx_t refin[], KdCoord otherCoord[], refIdx_t *otherRef,
const sint p, const sint dim, const sint numTuples, sint numThreads) {
sint numBlocks;
sint numThrdPerBlk;
// This section just allows for single block execution for easier debug.
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
// Make sure that segSize * the number of segments is at least numTuples so that nothing gets missed.
sint segSize = (numTuples + (numThreads/32) - 1) / (numThreads/32);
uint* d_segLengths;
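// Two-phase removal: cuRemoveDups compacts each warp's segment from (refin, valin) into
// (reftmp, valtmp), leaving gaps between segments and recording each segment's surviving
// length in d_segLengths; cuRemoveGaps then packs the segments tightly into (ref, val)
// and publishes the total surviving count through d_removeDupsCount.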
//#define PRINT_TIME
#ifdef PRINT_TIME
float time;
hipEvent_t t_start, t_stop;
checkCudaErrors(hipEventCreate(&t_start));
checkCudaErrors(hipEventCreate(&t_stop));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_start));
#endif
// Clear the error flag and address
uint removeDupsError = 0;
uint removeDupsErrorAdr = 0x7FFFFFFF;
checkCudaErrors(hipMemcpyToSymbolAsync(d_removeDupsError, &removeDupsError, sizeof(d_removeDupsError), 0, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyToSymbolAsync(d_removeDupsErrorAdr, &removeDupsErrorAdr, sizeof(d_removeDupsError), 0, hipMemcpyHostToDevice, stream));
#pragma omp critical (launchLock)
{
setDevice();
checkCudaErrors(hipMalloc((void **)&d_segLengths, numThreads/32 * sizeof(uint)));
hipLaunchKernelGGL(( cuRemoveDups), dim3(numBlocks), dim3(numThrdPerBlk), 0, 0, coords, reftmp, valtmp, refin, valin, otherCoord, otherRef,
p, dim, segSize, d_segLengths, numTuples);
}
checkCudaErrors(hipGetLastError());
#pragma omp critical (launchLock)
{
setDevice();
hipLaunchKernelGGL(( cuRemoveGaps), dim3(numBlocks), dim3(numThrdPerBlk), 0, 0, ref, val, reftmp, valtmp, segSize, d_segLengths, numTuples);
checkCudaErrors(hipGetLastError());
}
#ifdef PRINT_TIME
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipEventRecord(t_stop));
checkCudaErrors(hipEventSynchronize(t_stop));
checkCudaErrors(hipEventElapsedTime(&time, t_start, t_stop));
printf ("removeDups took %f seconds\n",time/1000.0);
checkCudaErrors(hipEventDestroy(t_start));
checkCudaErrors(hipEventDestroy(t_stop));
#endif
// Check to see if any sort errors were detected
checkCudaErrors(hipMemcpyFromSymbolAsync(&removeDupsError, d_removeDupsError, sizeof(d_removeDupsError), 0, hipMemcpyDeviceToHost, stream));
if (removeDupsError != 0) {
cout << "Remove Duplicates found a sorting error on dimension " << p << endl;
checkCudaErrors(hipMemcpyFromSymbolAsync(&removeDupsErrorAdr, d_removeDupsErrorAdr, sizeof(d_removeDupsErrorAdr), 0, hipMemcpyDeviceToHost, stream));
cout << "at address " << removeDupsErrorAdr << endl;
return removeDupsError;
}
// If not, return the resulting count.
uint removeDupsCount;
checkCudaErrors(hipMemcpyFromSymbolAsync(&removeDupsCount, d_removeDupsCount, sizeof(d_removeDupsCount), 0, hipMemcpyDeviceToHost, stream));
return removeDupsCount;
}
| a4ff63d67a7549f56c32fec42ef241c39b54db16.cu | //
// removeDups.cu
//
// Created by John Robinson on 7/15/15.
// Copyright (c) 2015 John Robinson. All rights reserved.
/*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//
/*
* The duplicate removal algorithm uses an approach based on the following
* "Efficient Stream Compaction on Wide SIMD Many-Core Architectures"
* by Markus Billeter, Ola Olsson, Ulf Assarsson
* http://www.cse.chalmers.se/~uffe/streamcompaction.pdf
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
using std::setprecision;
using namespace std;
#include <assert.h>
#include <helper_cuda.h>
#include <sm_30_intrinsics.h>
#include "Gpu.h"
#include "removeDups_common.h"
__device__ sint superKeyCompareFirstDimSmplA(const KdCoord ap, const KdCoord bp, const KdCoord *a, const KdCoord *b, const sint p, const sint dim)
{
sint diff = ap - bp;
for (sint i = 1; diff == 0 && i < dim; i++) {
sint r = i + p;
r = (r < dim) ? r : r - dim;
diff = a[r] - b[r];
}
return diff;
}
/*
 * Copy a segment of the reference and value arrays, using all of the threads of a warp.
 *
 * calling parameters:
 *
 * refout, valout - the output reference and value arrays
 * refin, valin - the input reference and value arrays
 * segSize - the number of elements in the segment assigned to this warp
 * numTuples - the total number of tuples (not used by this function)
 */
__device__ void cuWarpCopyRefVal(refIdx_t refout[], KdCoord valout[], refIdx_t refin[], KdCoord valin[],
sint segSize, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
for (sint j = 0; j+thrdIdx < segSize; j += warpSize){
valout[j+thrdIdx] = valin[j+thrdIdx];
refout[j+thrdIdx] = refin[j+thrdIdx];
}
}
__device__ uint d_removeDupsCount; // This is where number of tuples after the dups are removed is returned.
__device__ sint d_removeDupsError; // This is where an error is indicated.
__device__ sint d_removeDupsErrorAdr; // This is where number of tuples after the dups are removed is returned.
__global__ void cuRemoveGaps(refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
uint segSizex, uint segLengths[], const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segStartOut = 0;
uint segStartIn = warpIndex * segSizex;
uint segSize = 0;
// Do the simple slow implementation first
// Get the segment start and segment size from the segLengths array written by the cuRemoveDups kernel.
if (thrdIdx == 0) {
for (uint i = 0; i<warpIndex; i++)
segStartOut += segLengths[i];
segSize = segLengths[warpIndex];
}
// Copy to the other threads in the warp.
segStartOut = __shfl(segStartOut, 0);
segSize = __shfl(segSize, 0);
// and do the copy.
cuWarpCopyRefVal(refoutx+segStartOut, valoutx+segStartOut, refinx+segStartIn, valinx+segStartIn, segSize, numTuples);
// if this warp is processing the last segment, store the final size
if (thrdIdx == 0 && ((segStartIn + segSizex) >= numTuples))
d_removeDupsCount = segStartOut + segLengths[warpIndex];
}
__global__ void cuCopyRefVal(refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
sint segSizex, const sint numTuples) {
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
// uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
uint segSize;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
cuWarpCopyRefVal(refoutx + segStart, valoutx + segStart, refinx + segStart, valinx + segStart, segSize, numTuples);
}
__global__ void cuRemoveDups(KdCoord coords[], refIdx_t refoutx[], KdCoord valoutx[], refIdx_t refinx[], KdCoord valinx[],
KdCoord otherCoords[], refIdx_t *otherRef,
const int p, const int dim, uint segSizex, uint segLengths[], const sint numTuples)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint thrdIdx = (pos & (warpSize-1));
uint warpsPerBlock = (SHARED_SIZE_LIMIT/(2*warpSize));
uint warpIndex = ((pos - thrdIdx)/warpSize);
KdCoord val;
uint ref;
uint segSize;
// Calculate the base addrs of the global memory input and output arrays.
uint segStart = warpIndex * segSizex;
if (segStart + segSizex > numTuples) {
segSize = numTuples - segStart;
} else segSize = segSizex;
refIdx_t* refin = refinx + segStart;
KdCoord* valin = valinx + segStart;
refIdx_t* refout = refoutx + segStart;
KdCoord* valout = valoutx + segStart;
// Allocate the shared memory that will be used for coalescing of writes.
__shared__ KdCoord s_val[SHARED_SIZE_LIMIT];
__shared__ refIdx_t s_ref[SHARED_SIZE_LIMIT];
uint outCnt = 0;
uint oldOutCnt;
// Calculate the base index for this warp in the shared memory array
// SHARED_SIZE_LIMIT/(2*warpSize) is the number of warps per block
// So the warp in block index is the mod of warpIndex by the num warps in block.
uint sharedBase = 2 * warpSize * (warpIndex % warpsPerBlock);
uint sharedAddrMask = (2*warpSize)-1;
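// Each warp owns a 2*warpSize-entry window of s_val/s_ref starting at sharedBase and uses
// it as a double buffer: indices are wrapped with sharedAddrMask, and whenever one
// warpSize-entry half fills up it is flushed to global memory so that the writes stay coalesced.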
sint cmp = 0;
uint maskGEme = ((1 << thrdIdx) - 1);
uint shflMask = 0;
// First handle the special conditions for the initial 32 words
// This needs to be a loop to handle the case where the first warp's worth of data are all equal.
sint j;
for (j = 0; j < segSize && shflMask == 0; j += warpSize){
if (thrdIdx < segSize) { // only read and compare less than segsize
s_val[sharedBase + thrdIdx] = val = valin[thrdIdx];
s_ref[sharedBase + thrdIdx] = ref = refin[thrdIdx];
if (thrdIdx !=0 ) { // If not the first thread, do a normal compare with shared memory
cmp = superKeyCompareFirstDimSmplA(val, s_val[sharedBase + thrdIdx - 1], coords+ref*dim, coords+s_ref[sharedBase + thrdIdx - 1]*dim,
p, dim);
} else if (warpIndex != 0) { // If the first thread but not the first segment, compare with the last value of the previous segment.
cmp = superKeyCompareFirstDimSmplA(val, *(valin-1), coords+ref*dim, coords+(*(refin-1))*dim,
p, dim);
} else if (otherCoords != NULL) { // First thread of first segment of second GPU needs to compare itself with highest word of the other GPU.
cmp = superKeyCompareFirstDimSmplA(val, *(otherCoords+(*otherRef)*dim), coords+ref*dim, otherCoords+(*otherRef)*dim,
p, dim);
} else { // This handles the case of the very first data word.
cmp = 1; // Indicate the first value is greater so that it is always included
}
} else {
cmp = 0; // Use cmp == 0 in this case to exclude data outside the range
}
// First check for a compare failure, which means an earlier value is greater than the current one.
if (cmp<0) {
d_removeDupsError = -1;
atomicMin(&d_removeDupsErrorAdr, (valin - valinx) + thrdIdx);
}
valin += warpSize;
refin += warpSize;
// Check for duplicates; a 1 in shflMask indicates that this thread is not a dup, so keep it.
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of non-dups less than this thread.
uint wrtIdx = __popc(shflMask & maskGEme);
s_val[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = val;
s_ref[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = ref;
}
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCnt = outCnt;
outCnt += __popc(shflMask);
// If the first read filled one half of the double buffer, then write it out.
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
valout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
}
// OK, the first iteration is done. Now start the steady-state loop over the rest of the segment.
for (; j < segSize; j += warpSize){
if (j+thrdIdx < segSize) {
s_val[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = val = valin[thrdIdx];
s_ref[sharedBase + ((outCnt + thrdIdx) & sharedAddrMask)] = ref = refin[thrdIdx];
// Do the compare
cmp = superKeyCompareFirstDimSmplA(val, s_val[sharedBase + ((outCnt + thrdIdx - 1) & sharedAddrMask)],
coords+ref*dim, coords+s_ref[sharedBase + ((outCnt + thrdIdx - 1) & sharedAddrMask)]*dim,
p, dim);
} else {
cmp = 0;
}
// First check for a compare failure, which means an earlier value is greater than the current one.
if (cmp<0) {
d_removeDupsError = -1;
atomicMin(&d_removeDupsErrorAdr, (valin - valinx) + thrdIdx);
}
valin += warpSize;
refin += warpSize;
// Check for duplicates; a 1 in shflMask indicates that this thread is not a dup, so keep it.
shflMask = __ballot(cmp>0);
if (cmp > 0) {
// Calculate the address which is determined by the number of non dups less than this thread.
uint wrtIdx = __popc(shflMask & maskGEme);
s_val[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = val;
s_ref[sharedBase + ((outCnt + wrtIdx) & sharedAddrMask)] = ref;
}
// Update the output counter but keep an old value so it's known where to write the output.
oldOutCnt = outCnt;
outCnt += __popc(shflMask);
// If the writes spilled into the other half of the shared-memory double buffer, write out the half indicated by the old count.
if (((oldOutCnt ^ outCnt) & warpSize) != 0) {
valout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
refout[(oldOutCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (oldOutCnt & warpSize) + thrdIdx];
}
}
// Write out the final buffer
if ((outCnt & (warpSize-1)) > thrdIdx) {
valout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_val[sharedBase + (outCnt & warpSize) + thrdIdx];
refout[(outCnt & ~(warpSize-1)) + thrdIdx] = s_ref[sharedBase + (outCnt & warpSize) + thrdIdx];
}
// And finally store the number of writes that were done by this warp
if (thrdIdx == 0 && segLengths != NULL) segLengths[warpIndex] = outCnt;
}
uint Gpu::copyRefVal(KdCoord valout[], refIdx_t refout[], KdCoord valin[], refIdx_t refin[], uint numTuples, sint numThreads) {
sint numBlocks;
sint numThrdPerBlk;
// This section just allows for single block execution for easier debug.
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
sint segSize = (numTuples + (numThreads/32) - 1) / (numThreads/32);
#pragma omp critical (launchLock)
{
setDevice();
cuCopyRefVal<<<numBlocks, numThrdPerBlk, 0, stream>>>(refout, valout, refin, valin, segSize, numTuples);
checkCudaErrors(cudaGetLastError());
}
return 0;
}
uint Gpu::removeDups(KdCoord coords[], KdCoord val[], refIdx_t ref[], KdCoord valtmp[], refIdx_t reftmp[],
KdCoord valin[], refIdx_t refin[], KdCoord otherCoord[], refIdx_t *otherRef,
const sint p, const sint dim, const sint numTuples, sint numThreads) {
sint numBlocks;
sint numThrdPerBlk;
// This section just allows for single block execution for easier debug.
if (numThreads >= SHARED_SIZE_LIMIT/2) {
numBlocks = numThreads/(SHARED_SIZE_LIMIT/2);
numThrdPerBlk = SHARED_SIZE_LIMIT/2;
} else {
numBlocks = 1;
numThrdPerBlk = numThreads;
}
// Make sure that segSize * the number of segments is at least numTuples so that nothing gets missed.
sint segSize = (numTuples + (numThreads/32) - 1) / (numThreads/32);
uint* d_segLengths;
//#define PRINT_TIME
#ifdef PRINT_TIME
float time;
cudaEvent_t t_start, t_stop;
checkCudaErrors(cudaEventCreate(&t_start));
checkCudaErrors(cudaEventCreate(&t_stop));
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_start));
#endif
// Clear the error flag and address
uint removeDupsError = 0;
uint removeDupsErrorAdr = 0x7FFFFFFF;
checkCudaErrors(cudaMemcpyToSymbolAsync(d_removeDupsError, &removeDupsError, sizeof(d_removeDupsError), 0, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyToSymbolAsync(d_removeDupsErrorAdr, &removeDupsErrorAdr, sizeof(d_removeDupsError), 0, cudaMemcpyHostToDevice, stream));
#pragma omp critical (launchLock)
{
setDevice();
checkCudaErrors(cudaMalloc((void **)&d_segLengths, numThreads/32 * sizeof(uint)));
cuRemoveDups<<<numBlocks, numThrdPerBlk>>>(coords, reftmp, valtmp, refin, valin, otherCoord, otherRef,
p, dim, segSize, d_segLengths, numTuples);
}
checkCudaErrors(cudaGetLastError());
#pragma omp critical (launchLock)
{
setDevice();
cuRemoveGaps<<<numBlocks, numThrdPerBlk>>>(ref, val, reftmp, valtmp, segSize, d_segLengths, numTuples);
checkCudaErrors(cudaGetLastError());
}
#ifdef PRINT_TIME
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaEventRecord(t_stop));
checkCudaErrors(cudaEventSynchronize(t_stop));
checkCudaErrors(cudaEventElapsedTime(&time, t_start, t_stop));
printf ("removeDups took %f seconds\n",time/1000.0);
checkCudaErrors(cudaEventDestroy(t_start));
checkCudaErrors(cudaEventDestroy(t_stop));
#endif
// Check to see if any sort errors were detected
checkCudaErrors(cudaMemcpyFromSymbolAsync(&removeDupsError, d_removeDupsError, sizeof(d_removeDupsError), 0, cudaMemcpyDeviceToHost, stream));
if (removeDupsError != 0) {
cout << "Remove Duplicates found a sorting error on dimension " << p << endl;
checkCudaErrors(cudaMemcpyFromSymbolAsync(&removeDupsErrorAdr, d_removeDupsErrorAdr, sizeof(d_removeDupsErrorAdr), 0, cudaMemcpyDeviceToHost, stream));
cout << "at address " << removeDupsErrorAdr << endl;
return removeDupsError;
}
// If not, return the resulting count.
uint removeDupsCount;
checkCudaErrors(cudaMemcpyFromSymbolAsync(&removeDupsCount, d_removeDupsCount, sizeof(d_removeDupsCount), 0, cudaMemcpyDeviceToHost, stream));
return removeDupsCount;
}
|
4324072c8902a35a6f50023532f59ae3c8cd4e75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <execution/LaunchContext.h>
#include <helpers/DebugHelper.h>
#include <loops/legacy_ops.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_KERNEL void simpleReduce(const void *x, const sd::LongType *outerXTadShapeInfo,
const sd::LongType *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z,
const sd::LongType *zShapeInfo) {
functions::reduce::ReduceLongFunction<X, Z>::template transformCudaXD<OpType>(
x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_DEVICE void reduceScalarGeneric(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, int *dimension, int dimensionLength,
void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) {
functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(
x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_KERNEL void simpleScalar(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength,
reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::aggregatePartials(void *vsPartials, sd::LongType tid, sd::LongType numItems,
void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
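// For example, with numItems = 48: floorPow2 becomes 32, the partials of threads 32..47
// are first folded into threads 0..15, and the halving loop below then reduces the
// remaining 32 partials.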
auto sPartials = reinterpret_cast<Z *>(vsPartials);
auto extraParams = reinterpret_cast<X *>(vextraParams);
sd::LongType floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::transformCudaXD(const void *vx, const sd::LongType *outerXTadShapeInfo,
const sd::LongType *innerXTadShapeInfo, void *vextraParams,
void *vreductionBuffer, void *vz,
const sd::LongType *zShapeInfo) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto extraParams = reinterpret_cast<X *>(vextraParams);
// shared memory space for storing intermediate results
__shared__ Z sPartials[SD_CUDA_BLOCK_SIZE];
__shared__ int tadLen, numTads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
tadLen = shape::length(innerXTadShapeInfo);
numTads = shape::length(outerXTadShapeInfo);
}
__syncthreads();
int coords[SD_MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
shape::index2coords(r, outerXTadShapeInfo, coords);
const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
const X *xTad = x + outerOffset;
sPartials[threadIdx.x] = OpType::startingValue(xTad);
for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
sPartials[threadIdx.x] =
OpType::update(sPartials[threadIdx.x],
OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
__syncthreads();
// aggregate. do NOT reduce for elements > tadLen
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLen), extraParams);
__syncthreads();
if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::execScalarCuda(const void *vx, const sd::LongType *xShapeInfo,
void *vextraParams, void *vz, const sd::LongType *zShapeInfo,
void *vreductionBuffer, const sd::LongType *tadOnlyShapeInfo) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto extraParams = reinterpret_cast<X *>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z *>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
// shared memory space for storing intermediate results
__shared__ Z sPartials[SD_CUDA_BLOCK_SIZE];
__shared__ sd::LongType xEws;
__shared__ sd::LongType len;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] =
OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(
sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, len), extraParams);
__syncthreads();
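// Multi-block path: each block stores its partial result in reductionBuffer, and the last
// block to finish (tracked by an atomic ticket counter kept in element 16384 of that
// buffer, reinterpreted as unsigned int) reduces the per-block partials into the final
// scalar z[0].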
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0]; // this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
} else {
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_HOST void ReduceLongFunction<X, Z>::intermediateXD(dim3 launchDims, hipStream_t *stream, const void *x,
const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo,
void *extraParams, void *vreductionBuffer, void *z,
const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo,
const int *dims) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo)) return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x)));
auto res = hipMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z),
hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo,
z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
} else {
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims + zRank, tadRank);
hipLaunchKernelGGL(( simpleReduce<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
x, reinterpret_cast<sd::LongType const *>(outerPack->special()),
reinterpret_cast<sd::LongType const *>(innerPack->special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_HOST void ReduceLongFunction<X, Z>::intermediateScalar(dim3 launchDims, hipStream_t *stream, const void *x,
const sd::LongType *xShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo,
const sd::LongType *hZShapeInfo, int *dimension,
int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo)) return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x)));
auto res = hipMemcpyAsync(z, &startingVal, sizeof(Z), hipMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar",
res);
} else {
hipLaunchKernelGGL(( simpleScalar<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
SD_HOST void ReduceLongFunction<X, Y>::execReduceScalar(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const sd::LongType *xShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo,
int *dimension, int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar,
PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo,
dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo),
OPS_A(REDUCE_LONG_OPS));
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
SD_HOST void ReduceLongFunction<X, Y>::execReduceXD(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const sd::LongType *dXShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams,
void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo,
const sd::LongType *hZShapeInfo, const int *dims) {
if (shape::length(hZShapeInfo) == 1) {
ReduceLongFunction<X, Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z,
dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
} else {
DISPATCH_BY_OPNUM_TT(intermediateXD,
PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z,
dZShapeInfo, hZShapeInfo, dims),
OPS_A(REDUCE_LONG_OPS));
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
SD_DEVICE void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *)*sPartials;
for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ReduceLongFunction, , SD_COMMON_TYPES, SD_LONG_TYPES);
} // namespace reduce
} // namespace functions
| 4324072c8902a35a6f50023532f59ae3c8cd4e75.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <exceptions/cuda_exception.h>
#include <execution/LaunchContext.h>
#include <helpers/DebugHelper.h>
#include <loops/legacy_ops.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <system/op_boilerplate.h>
#include <types/types.h>
using namespace simdOps;
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_KERNEL void simpleReduce(const void *x, const sd::LongType *outerXTadShapeInfo,
const sd::LongType *innerXTadShapeInfo, void *extraParams, void *vreductionBuffer, void *z,
const sd::LongType *zShapeInfo) {
functions::reduce::ReduceLongFunction<X, Z>::template transformCudaXD<OpType>(
x, outerXTadShapeInfo, innerXTadShapeInfo, extraParams, vreductionBuffer, z, zShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_DEVICE void reduceScalarGeneric(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, int *dimension, int dimensionLength,
void *reductionBuffer, const sd::LongType *tadOnlyShapeInfo) {
functions::reduce::ReduceLongFunction<X, Z>::template execScalarCuda<OpType>(
x, xShapeInfo, extraParams, z, zShapeInfo, reductionBuffer, tadOnlyShapeInfo);
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z, typename OpType>
SD_KERNEL void simpleScalar(const void *x, const sd::LongType *xShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, int *dimension, int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
reduceScalarGeneric<X, Z, OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength,
reductionBuffer, tadOnlyShapeInfo);
}
namespace functions {
namespace reduce {
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::aggregatePartials(void *vsPartials, sd::LongType tid, sd::LongType numItems,
void *vextraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
auto sPartials = reinterpret_cast<Z *>(vsPartials);
auto extraParams = reinterpret_cast<X *>(vextraParams);
sd::LongType floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) floorPow2 &= floorPow2 - 1;
if (tid >= floorPow2)
sPartials[tid - floorPow2] = OpType::update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
__syncthreads();
}
for (sd::LongType activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems)
sPartials[tid] = OpType::update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
__syncthreads();
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::transformCudaXD(const void *vx, const sd::LongType *outerXTadShapeInfo,
const sd::LongType *innerXTadShapeInfo, void *vextraParams,
void *vreductionBuffer, void *vz,
const sd::LongType *zShapeInfo) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto extraParams = reinterpret_cast<X *>(vextraParams);
// shared memory space for storing intermediate results
__shared__ Z sPartials[SD_CUDA_BLOCK_SIZE];
__shared__ int tadLen, numTads;
__shared__ bool sameOffsets;
if (threadIdx.x == 0) {
sameOffsets = shape::haveSameShapeAndStrides(zShapeInfo, outerXTadShapeInfo);
tadLen = shape::length(innerXTadShapeInfo);
numTads = shape::length(outerXTadShapeInfo);
}
__syncthreads();
int coords[SD_MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
shape::index2coords(r, outerXTadShapeInfo, coords);
const auto outerOffset = shape::getOffset(outerXTadShapeInfo, coords);
const auto zOffset = sameOffsets ? outerOffset : shape::getOffset(zShapeInfo, coords);
const X *xTad = x + outerOffset;
sPartials[threadIdx.x] = OpType::startingValue(xTad);
for (int i = threadIdx.x; i < tadLen; i += blockDim.x)
sPartials[threadIdx.x] =
OpType::update(sPartials[threadIdx.x],
OpType::op(xTad[shape::getIndexOffset(i, innerXTadShapeInfo)], extraParams), extraParams);
__syncthreads();
// aggregate. do NOT reduce for elements > tadLen
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, tadLen), extraParams);
__syncthreads();
if (threadIdx.x == 0) z[zOffset] = OpType::postProcess(sPartials[threadIdx.x], tadLen, extraParams);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_DEVICE void ReduceLongFunction<X, Z>::execScalarCuda(const void *vx, const sd::LongType *xShapeInfo,
void *vextraParams, void *vz, const sd::LongType *zShapeInfo,
void *vreductionBuffer, const sd::LongType *tadOnlyShapeInfo) {
auto x = reinterpret_cast<const X *>(vx);
auto z = reinterpret_cast<Z *>(vz);
auto extraParams = reinterpret_cast<X *>(vextraParams);
auto reductionBuffer = reinterpret_cast<Z *>(vreductionBuffer);
auto tid = blockDim.x * blockIdx.x + threadIdx.x;
// shared memory space for storing intermediate results
__shared__ Z sPartials[SD_CUDA_BLOCK_SIZE];
__shared__ sd::LongType xEws;
__shared__ sd::LongType len;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
len = shape::length(xShapeInfo);
}
__syncthreads();
sPartials[threadIdx.x] = OpType::startingValue(x);
if (xEws > 0)
for (int i = tid; i < len; i += (blockDim.x * gridDim.x))
sPartials[threadIdx.x] =
OpType::update(sPartials[threadIdx.x], OpType::op(x[i * xEws], extraParams), extraParams);
else
for (int i = tid; i < len; i += blockDim.x * gridDim.x)
sPartials[threadIdx.x] = OpType::update(
sPartials[threadIdx.x], OpType::op(x[shape::getIndexOffset(i, xShapeInfo)], extraParams), extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(blockDim.x, len), extraParams);
__syncthreads();
if (gridDim.x > 1) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
__shared__ bool amLast;
tid = threadIdx.x;
if (threadIdx.x == 0)
reductionBuffer[blockIdx.x] = sPartials[0]; // this->postProcess(sPartials[0],len,extraParams);
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
sPartials[threadIdx.x] = OpType::startingValue(x);
for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x)
sPartials[threadIdx.x] = OpType::update(sPartials[threadIdx.x], reductionBuffer[i], extraParams);
__syncthreads();
aggregatePartials<OpType>(sPartials, threadIdx.x, sd::math::sd_min<int>(gridDim.x, blockDim.x), extraParams);
__syncthreads();
if (threadIdx.x == 0) {
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
} else {
if (threadIdx.x == 0) {
auto tc = reinterpret_cast<unsigned int *>(reductionBuffer);
tc[16384] = 0;
z[0] = OpType::postProcess(sPartials[0], len, extraParams);
}
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_HOST void ReduceLongFunction<X, Z>::intermediateXD(dim3 launchDims, cudaStream_t *stream, const void *x,
const sd::LongType *dXShapeInfo, const sd::LongType *hXShapeInfo,
void *extraParams, void *vreductionBuffer, void *z,
const sd::LongType *dZShapeInfo, const sd::LongType *hZShapeInfo,
const int *dims) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo)) return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x)));
auto res = cudaMemcpyAsync(sd::LaunchContext::defaultContext()->getScalarPointer(), &startingVal, sizeof(Z),
cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateXD: failed to copy temporary scalar", res);
auto ptr = sd::LaunchContext::defaultContext()->getScalarPointer();
// scalar assign
functions::scalar::ScalarTransform<Z, Z, Z>::executeCudaShaped(launchDims, stream, 14, z, dZShapeInfo, hXShapeInfo,
z, dZShapeInfo, hZShapeInfo, ptr, nullptr);
} else {
const int zRank = shape::rank(hZShapeInfo);
const int tadRank = shape::rank(hXShapeInfo) - zRank;
auto outerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims, zRank);
auto innerPack = sd::ConstantShapeHelper::getInstance().createSubArrShapeInfo(hXShapeInfo, dims + zRank, tadRank);
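// The dims array is split here: its first zRank entries form the outer sub-array shape
// (one output element per tad), and the remaining tadRank entries form the inner
// sub-array shape that each block reduces over in transformCudaXD.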
simpleReduce<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
x, reinterpret_cast<sd::LongType const *>(outerPack->special()),
reinterpret_cast<sd::LongType const *>(innerPack->special()), extraParams, vreductionBuffer, z, dZShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Z>
template <typename OpType>
SD_HOST void ReduceLongFunction<X, Z>::intermediateScalar(dim3 launchDims, cudaStream_t *stream, const void *x,
const sd::LongType *xShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo,
const sd::LongType *hZShapeInfo, int *dimension,
int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
if (shape::isEmpty(hXShapeInfo)) {
if (shape::isEmpty(hZShapeInfo)) return;
const auto startingVal = static_cast<Z>(OpType::startingValue(reinterpret_cast<const X *>(x)));
auto res = cudaMemcpyAsync(z, &startingVal, sizeof(Z), cudaMemcpyHostToDevice, *stream);
if (res != 0)
throw sd::cuda_exception::build("ReduceLongFunction<X,Z>::intermediateScalar: failed to copy resulting scalar",
res);
} else {
simpleScalar<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
x, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo);
}
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
SD_HOST void ReduceLongFunction<X, Y>::execReduceScalar(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const sd::LongType *xShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams, void *z,
const sd::LongType *zShapeInfo, const sd::LongType *hZShapeInfo,
int *dimension, int dimensionLength, void *reductionBuffer,
const sd::LongType *tadOnlyShapeInfo) {
DISPATCH_BY_OPNUM_TT(intermediateScalar,
PARAMS(launchDims, stream, x, xShapeInfo, hXShapeInfo, extraParams, z, zShapeInfo, hZShapeInfo,
dimension, dimensionLength, reductionBuffer, tadOnlyShapeInfo),
OPS_A(REDUCE_LONG_OPS));
sd::DebugHelper::checkErrorCode(stream, "execReduceScalarFloat(...) failed");
}
////////////////////////////////////////////////////////////////////////
template <typename X, typename Y>
SD_HOST void ReduceLongFunction<X, Y>::execReduceXD(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const sd::LongType *dXShapeInfo,
const sd::LongType *hXShapeInfo, void *extraParams,
void *vreductionBuffer, void *z, const sd::LongType *dZShapeInfo,
const sd::LongType *hZShapeInfo, const int *dims) {
if (shape::length(hZShapeInfo) == 1) {
ReduceLongFunction<X, Y>::execReduceScalar(launchDims, stream, opNum, x, dXShapeInfo, hXShapeInfo, extraParams, z,
dZShapeInfo, hZShapeInfo, nullptr, 0, vreductionBuffer, nullptr);
} else {
DISPATCH_BY_OPNUM_TT(intermediateXD,
PARAMS(launchDims, stream, x, dXShapeInfo, hXShapeInfo, extraParams, vreductionBuffer, z,
dZShapeInfo, hZShapeInfo, dims),
OPS_A(REDUCE_LONG_OPS));
}
DEBUG_KERNEL(stream, opNum);
}
////////////////////////////////////////////////////////////////////////
template <typename X>
SD_DEVICE void initializeShared(X *extraParams, X **sPartials, int sMemSize) {
int sPartialsLength = sMemSize / sizeof(X);
X *sPartialsDeref = (X *)*sPartials;
for (int i = 0; i < sPartialsLength; i++) sPartialsDeref[i] = extraParams[0];
}
BUILD_DOUBLE_TEMPLATE(template class ReduceLongFunction, , SD_COMMON_TYPES, SD_LONG_TYPES);
} // namespace reduce
} // namespace functions
|
cb7fa4f865d7539d95c29acd047ca2153958c35a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <math.h>
#include <iostream>
const int N = 10000;
const int S = 12;
const int blocksize = 1024;
__global__ void shift_r(unsigned int *a, unsigned int *b, unsigned int n, int s) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
b[i] = a[(n+(i - s)) % n];
}
}
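// For example, with n = 10000 and s = 12 this is a circular right shift: b[12] = a[0] and
// b[0] = a[9988]; adding n before the modulo keeps the index non-negative when i < s.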
int main() {
thrust::host_vector<unsigned int> a_tab;
thrust::device_vector<unsigned int> ad_tab;
thrust::host_vector<unsigned int> b_tab;
thrust::device_vector<unsigned int> bd_tab;
for (int i = 1; i <= N; i++) {
a_tab.push_back(i);
b_tab.push_back(0);
}
ad_tab = a_tab;
bd_tab = b_tab;
dim3 dimBlock(blocksize);
unsigned int g = ceil((float)N / (float)blocksize);
dim3 dimGrid(g);
shift_r << < dimGrid, dimBlock >> >(ad_tab.data().get(), bd_tab.data().get(), N, S);
b_tab = bd_tab;
for (int i = 0; i < N; i++) {
std::cout << a_tab[i] << " : " << b_tab[i] << "\n";
}
return 0;
} | cb7fa4f865d7539d95c29acd047ca2153958c35a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <math.h>
#include <iostream>
const int N = 10000;
const int S = 12;
const int blocksize = 1024;
__global__ void shift_r(unsigned int *a, unsigned int *b, unsigned int n, int s) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
b[i] = a[(n+(i - s)) % n];
}
}
int main() {
thrust::host_vector<unsigned int> a_tab;
thrust::device_vector<unsigned int> ad_tab;
thrust::host_vector<unsigned int> b_tab;
thrust::device_vector<unsigned int> bd_tab;
for (int i = 1; i <= N; i++) {
a_tab.push_back(i);
b_tab.push_back(0);
}
ad_tab = a_tab;
bd_tab = b_tab;
dim3 dimBlock(blocksize);
unsigned int g = ceil((float)N / (float)blocksize);
dim3 dimGrid(g);
shift_r << < dimGrid, dimBlock >> >(ad_tab.data().get(), bd_tab.data().get(), N, S);
b_tab = bd_tab;
for (int i = 0; i < N; i++) {
std::cout << a_tab[i] << " : " << b_tab[i] << "\n";
}
return 0;
} |
46a3b4bccd2adfd1126d77bfb7521c22f23e9c9f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/DistributionTemplates.h>
#include <ATen/native/Distributions.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
void uniform_kernel(TensorIterator& iter, double from, double to, Generator gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
templates::cuda::uniform_kernel(iter, from, to, generator);
}
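// REGISTER_DISPATCH below registers uniform_kernel with ATen's uniform_stub dispatch
// table, so calls such as tensor.uniform_(from, to) on a GPU tensor are expected to
// reach this kernel through that stub.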
REGISTER_DISPATCH(uniform_stub, &uniform_kernel);
}} // namespace at::native
| 46a3b4bccd2adfd1126d77bfb7521c22f23e9c9f.cu | #include <ATen/Dispatch.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/DistributionTemplates.h>
#include <ATen/native/Distributions.h>
#include <ATen/native/TensorIterator.h>
namespace at { namespace native {
void uniform_kernel(TensorIterator& iter, double from, double to, Generator gen) {
auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
templates::cuda::uniform_kernel(iter, from, to, generator);
}
REGISTER_DISPATCH(uniform_stub, &uniform_kernel);
}} // namespace at::native
|
8059f302abf0ea39bcc193d42c7860152b236ffb.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
hipGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
| 8059f302abf0ea39bcc193d42c7860152b236ffb.cu | #include <wb.h>
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
cudaGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
|
ef7ff0f54b6e52b44528506b5745ed6a62b83380.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cublas_extensions.hpp"
#include <gauxc/util/cublas_util.hpp>
#include <gauxc/util/div_ceil.hpp>
#include <gauxc/exceptions/cublas_exception.hpp>
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace cuda {
namespace blas {
using namespace GauXC::cuda;
template <typename T>
__global__ void increment_kernel( const T* X, T* Y ) {
const auto tid = blockIdx.x;
if( tid < 1 ) (*Y) += (*X);
}
template <typename T>
void increment( const T* X, T* Y, hipStream_t stream ) {
hipLaunchKernelGGL(( increment_kernel), dim3(1),dim3(1),0,stream, X,Y);
}
template <>
void dot( hipblasHandle_t handle,
int N,
const double* X,
int INCX,
const double* Y,
int INCY,
double* RES ) {
auto stat = hipblasDdot( handle, N, X, INCX, Y, INCY, RES );
GAUXC_CUBLAS_ERROR("CUBLAS DDOT FAILED", stat );
}
template <typename T>
void gdot( hipblasHandle_t handle,
int N,
const T* X,
int INCX,
const T* Y,
int INCY,
T* SCR,
T* RES ) {
dot( handle, N, X, INCX, Y, INCY, SCR );
auto stream = util::get_stream(handle);
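// accumulate the partial result SCR into RES with a one-thread kernel on the same stream, so the value never leaves the device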
increment( SCR, RES, stream );
}
template
void gdot( hipblasHandle_t handle,
int N,
const double* X,
int INCX,
const double* Y,
int INCY,
double* SCR,
double* RES );
template <typename T>
void __global__ hadamard_product_kernel( int M,
int N,
const T* A,
int LDA,
T* B,
int LDB ) {
const int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
const int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
if( tid_x < M and tid_y < N ) {
B[ tid_x + tid_y*LDB ] *= A[ tid_x + tid_y*LDA ];
}
}
template <typename T>
void hadamard_product( hipblasHandle_t handle,
int M,
int N,
const T* A,
int LDA,
T* B,
int LDB ) {
auto stream = util::get_stream(handle);
dim3 threads(warp_size, max_warps_per_thread_block);
dim3 blocks( util::div_ceil( M, threads.x ),
util::div_ceil( N, threads.y ) );
hipLaunchKernelGGL(( hadamard_product_kernel), dim3(blocks), dim3(threads), 0, stream , M, N, A, LDA, B, LDB );
}
template
void hadamard_product( hipblasHandle_t handle,
int M,
int N,
const double* A,
int LDA,
double* B,
int LDB );
template <>
void gemm( hipblasHandle_t handle,
hipblasOperation_t TA, hipblasOperation_t TB,
int M, int N, int K, double ALPHA,
const double* A, int LDA, const double* B, int LDB,
double BETA, double* C, int LDC ) {
auto stat = hipblasDgemm( handle, TA, TB, M, N, K, &ALPHA, A, LDA,
B, LDB, &BETA, C, LDC );
GAUXC_CUBLAS_ERROR("CUBLAS DGEMM FAILED", stat);
}
template <>
void syr2k( hipblasHandle_t handle,
hipblasFillMode_t UPLO, hipblasOperation_t Trans,
int M, int K, double ALPHA,
const double* A, int LDA, const double* B, int LDB,
double BETA, double* C, int LDC ) {
auto stat = hipblasDsyr2k( handle, UPLO, Trans, M, K, &ALPHA, A, LDA, B, LDB,
&BETA, C, LDC );
GAUXC_CUBLAS_ERROR("CUBLAS DSYR2K FAILED", stat);
}
}
}
}
| ef7ff0f54b6e52b44528506b5745ed6a62b83380.cu | #include "cublas_extensions.hpp"
#include <gauxc/util/cublas_util.hpp>
#include <gauxc/util/div_ceil.hpp>
#include <gauxc/exceptions/cublas_exception.hpp>
#include "cuda_device_properties.hpp"
namespace GauXC {
namespace cuda {
namespace blas {
using namespace GauXC::cuda;
template <typename T>
__global__ void increment_kernel( const T* X, T* Y ) {
const auto tid = blockIdx.x;
if( tid < 1 ) (*Y) += (*X);
}
template <typename T>
void increment( const T* X, T* Y, cudaStream_t stream ) {
increment_kernel<<<1,1,0,stream>>>(X,Y);
}
template <>
void dot( cublasHandle_t handle,
int N,
const double* X,
int INCX,
const double* Y,
int INCY,
double* RES ) {
auto stat = cublasDdot( handle, N, X, INCX, Y, INCY, RES );
GAUXC_CUBLAS_ERROR("CUBLAS DDOT FAILED", stat );
}
template <typename T>
void gdot( cublasHandle_t handle,
int N,
const T* X,
int INCX,
const T* Y,
int INCY,
T* SCR,
T* RES ) {
dot( handle, N, X, INCX, Y, INCY, SCR );
auto stream = util::get_stream(handle);
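// accumulate the partial result SCR into RES with a one-thread kernel on the same stream, so the value never leaves the device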
increment( SCR, RES, stream );
}
template
void gdot( cublasHandle_t handle,
int N,
const double* X,
int INCX,
const double* Y,
int INCY,
double* SCR,
double* RES );
template <typename T>
void __global__ hadamard_product_kernel( int M,
int N,
const T* A,
int LDA,
T* B,
int LDB ) {
const int tid_x = blockIdx.x * blockDim.x + threadIdx.x;
const int tid_y = blockIdx.y * blockDim.y + threadIdx.y;
if( tid_x < M and tid_y < N ) {
B[ tid_x + tid_y*LDB ] *= A[ tid_x + tid_y*LDA ];
}
}
template <typename T>
void hadamard_product( cublasHandle_t handle,
int M,
int N,
const T* A,
int LDA,
T* B,
int LDB ) {
auto stream = util::get_stream(handle);
dim3 threads(warp_size, max_warps_per_thread_block);
dim3 blocks( util::div_ceil( M, threads.x ),
util::div_ceil( N, threads.y ) );
hadamard_product_kernel<<< blocks, threads, 0, stream >>>( M, N, A, LDA, B, LDB );
}
template
void hadamard_product( cublasHandle_t handle,
int M,
int N,
const double* A,
int LDA,
double* B,
int LDB );
template <>
void gemm( cublasHandle_t handle,
cublasOperation_t TA, cublasOperation_t TB,
int M, int N, int K, double ALPHA,
const double* A, int LDA, const double* B, int LDB,
double BETA, double* C, int LDC ) {
auto stat = cublasDgemm( handle, TA, TB, M, N, K, &ALPHA, A, LDA,
B, LDB, &BETA, C, LDC );
GAUXC_CUBLAS_ERROR("CUBLAS DGEMM FAILED", stat);
}
template <>
void syr2k( cublasHandle_t handle,
cublasFillMode_t UPLO, cublasOperation_t Trans,
int M, int K, double ALPHA,
const double* A, int LDA, const double* B, int LDB,
double BETA, double* C, int LDC ) {
auto stat = cublasDsyr2k( handle, UPLO, Trans, M, K, &ALPHA, A, LDA, B, LDB,
&BETA, C, LDC );
GAUXC_CUBLAS_ERROR("CUBLAS DSYR2K FAILED", stat);
}
}
}
}
|
84f8de0d83863339b6abbabe1cb2f74b91e6833c.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <fstream>
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/musket.cuh"
#include "../include/BPP_0.cuh"
#include "Randoms.cpp"
Randoms *randoms;
const int BETA = 1;
const double EVAPORATION = 0.5;
const int TAUMAX = 2;
const int Q = 32;
int itemtypes = 50;
int itemcount = 59;
auto bin_capacity = 1000;
bool PRINT = false;
bool PALMA = true;
struct Copybppitemsquantity_map_index_in_place_array_functor {
Copybppitemsquantity_map_index_in_place_array_functor(
const mkt::DArray<int> &_copy_bpp_items_quantity) :
copy_bpp_items_quantity(_copy_bpp_items_quantity) {
}
~Copybppitemsquantity_map_index_in_place_array_functor() {
}
__device__
auto operator()(int indexx, int valuee) {
return valuee;
}
void init(int device) {
copy_bpp_items_quantity.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemtypess;
int antss;
mkt::DeviceArray<int> copy_bpp_items_quantity;
};
struct Copybppitemsweight_map_index_in_place_array_functor {
Copybppitemsweight_map_index_in_place_array_functor(
const mkt::DArray<int> &_bpp_items_weight) :
bpp_items_weight(_bpp_items_weight) {
}
~Copybppitemsweight_map_index_in_place_array_functor() {
}
__device__
auto operator()(int indexx, int valuee) {
int new_index = ((indexx) % (antss));
return bpp_items_weight.get_global((new_index))/* TODO: For multiple GPUs*/;
}
void init(int device) {
bpp_items_weight.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int antss;
mkt::DeviceArray<int> bpp_items_weight;
};
struct Packing_kernel_map_index_in_place_array_functor {
Packing_kernel_map_index_in_place_array_functor(
const mkt::DArray<int> &_d_bins,
const mkt::DArray<int> &_copy_bpp_items_quantity,
const mkt::DArray<double> &_d_eta,
const mkt::DArray<double> &_d_tau,
const mkt::DArray<double> &_d_probabilities,
const mkt::DArray<int> &_copy_bpp_items_weight,
const mkt::DArray<double> &_d_phero,
hiprandState_t *_d_rand_states_ind) :
d_bins(_d_bins), copy_bpp_items_quantity(_copy_bpp_items_quantity), d_eta(
_d_eta), d_tau(_d_tau), d_probabilities(_d_probabilities), copy_bpp_items_weight(
_copy_bpp_items_weight), d_phero(_d_phero), d_rand_states_ind(
_d_rand_states_ind) {
}
~Packing_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, int y) {
int ant_index = (iindex);
int object_bin_index = ((ant_index) * (itemtypess));
int bins_used = 0;
int actual_bin_weight = 0;
int n_items_in_actual_bin = 0;
int possible_items_to_this_bin = 0;
int bpp_items_prefix = (static_cast<int>((ant_index)) * (itemtypess));
int object_index = 0;
int object_quantity = 0;
int new_object_weight = 0;
int object_weightmax = object_weight;
for (int i = 0; ((i) < (itemtypess)); i++) {
if (((copy_bpp_items_weight.get_global(i)) == (object_weightmax))) {
object_index = (i);
}
}
d_bins.set_global(((ant_index) * (itemtypess)), (object_index));
copy_bpp_items_quantity.set_global(((bpp_items_prefix) + (object_index)), (copy_bpp_items_quantity.get_global(((bpp_items_prefix) + (object_index))) - 1));
n_items_in_actual_bin = ((n_items_in_actual_bin) + 1);
actual_bin_weight += (object_weightmax);
bins_used = ((bins_used) + 1);
int weight_object_j;
int object_i;
int quantity_object_j;
for (int i = 0; ((i) < ((itemcountt) - 1)); i++) {
double eta_tau_sum = 0.0;
for (int j = 0; ((j) < (itemtypess)); j++) {
d_eta.set_global(((object_bin_index) + (j)), 0.0);
d_tau.set_global(((object_bin_index) + (j)), 0.0);
d_probabilities.set_global(((object_bin_index) + (j)), 0.0);
weight_object_j = copy_bpp_items_weight.get_global((j));
quantity_object_j = copy_bpp_items_quantity.get_global(bpp_items_prefix + j);
if (((quantity_object_j) > 0)) {
if (((weight_object_j) < ((bin_capacity2) - (actual_bin_weight)))) {
//TODO if this can be removed (It is never reached)
if (((actual_bin_weight) == 0)) {
d_eta.set_global(((object_bin_index) + (j)), 1.0);
} else {
for (int k = 0; ((k) < (n_items_in_actual_bin)); k++) {
object_i = d_bins.get_global((((object_bin_index) + (i)) - (k)));
d_eta.set_global(((object_bin_index) + (j)), d_phero.get_global( object_i * (int) itemtypess + j));
}
d_eta.set_global(((object_bin_index) + (j)),(d_eta.get_global(((object_bin_index) + (j)))/ (n_items_in_actual_bin)));
}
d_tau.set_global(((object_bin_index) + (j)), (double) pow(weight_object_j, BETA));
eta_tau_sum = eta_tau_sum + (d_eta.get_global(((object_bin_index) + (j))) * d_tau.get_global(((object_bin_index) + (j))));
possible_items_to_this_bin = ((possible_items_to_this_bin) + 1);
}
}
}
if (((possible_items_to_this_bin) > 0)) {
for (int j = 0; ((j) < (itemtypess)); j++) {
double tmp = d_eta.get_global(object_bin_index + j);
double tmp2 = d_tau.get_global(object_bin_index + j);
double thisthat = ((tmp * tmp2) / (eta_tau_sum));
//printf("%.2f;", (tmp * tmp2) / (eta_tau_sum));
d_probabilities.set_global((object_bin_index + j), thisthat);
d_eta.set_global(((object_bin_index) + (j)), 0.0);
d_tau.set_global(((object_bin_index) + (j)), 0.0);
}
eta_tau_sum = 0.0;
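// roulette-wheel selection: walk the cumulative probabilities until they exceed the uniform draw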
double random = hiprand_uniform(&d_rand_states_ind[ant_index]);
int object_j = 0;
double sum = d_probabilities.get_global((object_bin_index));
while (sum < random) {
object_j = ((object_j) + 1);
sum = (sum + d_probabilities.get_global(((object_bin_index) + (object_j))));
}
//printf("%d:", counter);
d_bins.set_global(ant_index * (int) itemtypess + i + 1, (object_j));
weight_object_j = copy_bpp_items_weight.get_global(object_j);
actual_bin_weight += (weight_object_j);
copy_bpp_items_quantity.set_global((bpp_items_prefix + object_j),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_j) - 1));
n_items_in_actual_bin = ((n_items_in_actual_bin) + 1);
possible_items_to_this_bin = 0;
} else {
possible_items_to_this_bin = 0;
actual_bin_weight = 0;
object_index = 0;
object_weightmax = 0;
for (int k = 0; ((k) < (itemtypess)); k++) {object_quantity = copy_bpp_items_quantity.get_global((bpp_items_prefix + k));
new_object_weight = copy_bpp_items_weight.get_global((k));
if (((object_quantity) > 0)) {
if (((new_object_weight) > (object_weightmax))) {
object_index = (k);
object_weightmax = (new_object_weight);
}
}
}
copy_bpp_items_quantity.set_global((bpp_items_prefix + object_index),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_index) - 1));
d_bins.set_global(((((ant_index) * static_cast<int>((itemtypess))) + (i)) + 1), (object_index));
n_items_in_actual_bin = (n_items_in_actual_bin) + 1;
actual_bin_weight += (object_weightmax);
bins_used = ((bins_used) + 1);
}
}
if (bins_used > itemcountt) {
bins_used = itemcountt;
}
return (bins_used);
}
void init(int device) {
d_bins.init(device);
copy_bpp_items_quantity.init(device);
d_eta.init(device);
d_tau.init(device);
d_probabilities.init(device);
copy_bpp_items_weight.init(device);
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int object_weight;
int itemtypess;
int itemcountt;
int BETA2;
int bin_capacity2;
hiprandState_t *d_rand_states_ind;
mkt::DeviceArray<int> d_bins;
mkt::DeviceArray<int> copy_bpp_items_quantity;
mkt::DeviceArray<double> d_eta;
mkt::DeviceArray<double> d_tau;
mkt::DeviceArray<double> d_probabilities;
mkt::DeviceArray<int> copy_bpp_items_weight;
mkt::DeviceArray<double> d_phero;
};
struct Evaporation_kernel_map_index_in_place_array_functor {
Evaporation_kernel_map_index_in_place_array_functor(
const mkt::DArray<double> &_d_phero) :
d_phero(_d_phero) {
}
~Evaporation_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, double y) {
double result = 0.0;
double RO = (EVAPORATION2);
if ((((iindex) % (itemtypess)) != 0)) {
result = ((1 - (RO)) * d_phero.get_global((iindex))/* TODO: For multiple GPUs*/);
}
return (result);
}
void init(int device) {
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemtypess;
double EVAPORATION2;
mkt::DeviceArray<double> d_phero;
};
struct Update_pheromones_kernel_map_index_in_place_array_functor {
Update_pheromones_kernel_map_index_in_place_array_functor(
const mkt::DArray<int> &_d_fitness, const mkt::DArray<int> &_d_bins,
const mkt::DArray<double> &_d_phero) :
d_fitness(_d_fitness), d_bins(_d_bins), d_phero(_d_phero) {
}
~Update_pheromones_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, int value) {
int ant_index = (iindex);
double ant_fitness = (d_fitness.get_global((ant_index)) * 1.0);
double actual_bin_weight = 0.0;
int actual_bin_object_index = 0;
int actual_bin_n_objects = 0;
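// walk this ant's packing sequence; each time a bin closes, deposit Q / fitness pheromone between every pair of items that shared it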
for (int i = 0; ((i) < (itemcountt)); i++) {
double object_weight =
static_cast<double>(d_bins.get_global((((ant_index) * (itemcountt)) + (i)))/* TODO: For multiple GPUs*/);
if ((((actual_bin_weight) + (object_weight)) < (bin_capacity2))) {
actual_bin_n_objects = ((actual_bin_n_objects) + 1);
actual_bin_weight = ((actual_bin_weight) + (object_weight));
} else {
for (int j = 0; ((j) < (actual_bin_n_objects)); j++) {
for (int k = ((j) + 1); ((k) < (actual_bin_n_objects)); k++) {
int object_i = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (j)))/* TODO: For multiple GPUs*/;
int object_j = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (k)))/* TODO: For multiple GPUs*/;
double delta_pheromone = ((Q)/ (d_fitness.get_global((ant_index)) * 1.0));
//TODO ITEM TYPE ? FOr pheromone updates
d_phero.set_global((((object_i) * (itemcountt)) + (object_j)),((delta_pheromone) + d_phero.get_global((((object_i) * (itemcountt)) + (object_j)))));
d_phero.set_global((((object_j) * (itemcountt)) + (object_i)),((delta_pheromone) + d_phero.get_global((((object_j) * (itemcountt)) + (object_i)))));
}
}
actual_bin_n_objects = 1;
actual_bin_weight = (object_weight);
actual_bin_object_index = (i);
}
}
//TODO ?
if (value > itemcountt) {
value = itemcountt;
}
//printf("%d;", value);
return (value);
}
void init(int device) {
d_fitness.init(device);
d_bins.init(device);
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemcountt;
int bin_capacity2;
mkt::DeviceArray<int> d_fitness;
mkt::DeviceArray<int> d_bins;
mkt::DeviceArray<double> d_phero;
};
template<>
int mkt::reduce_max<int>(mkt::DArray<int> &a) {
int local_result = std::numeric_limits<int>::lowest();
const int gpu_elements = a.get_size_gpu();
int threads = gpu_elements < 1024 ? gpu_elements : 1024; // nextPow2
int blocks = (gpu_elements + threads - 1) / threads;
hipSetDevice(0);
int *d_odata;
hipMalloc((void**) &d_odata, blocks * sizeof(int));
int *devptr = a.get_device_pointer(0);
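// step 1: block-level reduction of the device data into one partial maximum per block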
mkt::kernel::reduce_max_call(gpu_elements, devptr, d_odata, threads, blocks,
mkt::cuda_streams[0], 0);
// fold on gpus: step 2
while (blocks > 1) {
int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2
int blocks_2 = (blocks + threads_2 - 1) / threads_2;
mkt::kernel::reduce_max_call(blocks, d_odata, d_odata, threads_2,
blocks_2, mkt::cuda_streams[0], 0);
blocks = blocks_2;
}
// copy final sum from device to host
hipMemcpyAsync(&local_result, d_odata, sizeof(int), hipMemcpyDeviceToHost,
mkt::cuda_streams[0]);
mkt::sync_streams();
hipFree(d_odata);
return local_result;
}
template<>
int mkt::reduce_min<int>(mkt::DArray<int> &a) {
int local_result = std::numeric_limits<int>::max();
const int gpu_elements = a.get_size_gpu();
int threads = gpu_elements < 1024 ? gpu_elements : 1024; // nextPow2
int blocks = (gpu_elements + threads - 1) / threads;
//hipSetDevice(0);
int *d_odata;
hipMalloc((void**) &d_odata, blocks * sizeof(int));
int *devptr = a.get_device_pointer(0);
mkt::kernel::reduce_min_call(gpu_elements, devptr, d_odata, threads, blocks,
mkt::cuda_streams[0], 0);
// fold on gpus: step 2
while (blocks > 1) {
int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2
int blocks_2 = (blocks + threads_2 - 1) / threads_2;
mkt::kernel::reduce_min_call(blocks, d_odata, d_odata, threads_2,
blocks_2, mkt::cuda_streams[0], 0);
blocks = blocks_2;
}
// copy final sum from device to host
hipMemcpyAsync(&local_result, d_odata, sizeof(int), hipMemcpyDeviceToHost,
mkt::cuda_streams[0]);
mkt::sync_streams();
hipFree(d_odata);
return local_result;
}
__global__ void setup_rand_kernel(hiprandState_t *state, unsigned long seed) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(seed, id, 0, &state[id]);
// hiprand_init(1234, id, 0, &state[id]);
}
int main(int argc, char **argv) {
mkt::init();
/*int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Compute Capability (bits): %d. %d\n",
prop.major, prop.minor);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}*/
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
char *n_iterationschar = argv[1];
char *problemchar = argv[2];
char *antschar = argv[3];
int n_iterations = atoi(n_iterationschar);
int problem = atoi(problemchar);
int ants = atoi(antschar);
randoms = new Randoms(15);
std::ifstream fileReader;
//Problem Instances
std::string file_to_read = "";
//Problem Instances
//std::string f60 = "/home/n/n_herr03/BPP/BPP/source/bpp/Falkenauer_t60_00.txt";
//std::string p201 = "/home/n/n_herr03/BPP/BPP/source/bpp/201_2500_NR_0.txt";
//std::string p402 = "/home/n/n_herr03/BPP/BPP/source/bpp/402_10000_NR_0.txt";
//std::string p600 = "/home/n/n_herr03/BPP/BPP/source/bpp/600_20000_NR_0.txt";
//std::string p801 = "/home/n/n_herr03/BPP/BPP/source/bpp/801_40000_NR_0.txt";
//std::string p1002 = "/home/n/n_herr03/BPP/BPP/source/bpp/1002_80000_NR_0.txt";
//if(PALMA){
std::string f60 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/Falkenauer_t60_00.txt";
std::string p201 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/201_2500_NR_0.txt";
std::string p402 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/402_10000_NR_0.txt";
std::string p600 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/600_20000_NR_0.txt";
std::string p801 =
"/home/n/n_herr03/BPP//BPP/LowLevelProgram/source/bpp/801_40000_NR_0.txt";
std::string p1002 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/1002_80000_NR_0.txt";
//}
switch (problem) {
case 0:
fileReader.open(f60, std::ifstream::in);
break;
case 1:
fileReader.open(p201, std::ifstream::in);
break;
case 2:
fileReader.open(p402, std::ifstream::in);
break;
case 3:
fileReader.open(p600, std::ifstream::in);
break;
case 4:
fileReader.open(p801, std::ifstream::in);
break;
case 5:
fileReader.open(p1002, std::ifstream::in);
break;
default:
break;
}
if (fileReader.is_open()) {
fileReader >> itemtypes;
fileReader >> bin_capacity;
}
fileReader.close();
int pheromone_matrix_size = itemtypes * itemtypes;
mkt::DArray<double> d_phero(0, pheromone_matrix_size, pheromone_matrix_size, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> d_fitness(0, ants, ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_probabilities(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_eta(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_tau(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> bpp_items_quantity(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> copy_bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> copy_bpp_items_quantity(0, itemtypes * ants, itemtypes * ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> d_bins(0, ants * itemtypes, ants * itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
hiprandState_t *d_rand_states_ind;
hipMalloc(&d_rand_states_ind, ants * sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_rand_kernel), dim3(ants), dim3(1), 0, 0, d_rand_states_ind, time(NULL));
d_bins.update_devices();
d_fitness.update_devices();
d_probabilities.update_devices();
d_eta.update_devices();
d_tau.update_devices();
double randn;
//TODO FIX 1
for (int j = 0; j < itemtypes; j++) {
for (int k = 0; k < itemtypes; k++) {
// if (j != k) {
randn = randoms->Uniforme() * TAUMAX;
d_phero[(j * itemtypes) + k] = randn;
d_phero[(k * itemtypes) + j] = randn;
// } else {
// d_phero[(j * itemtypes) + k] = 0.0;
// d_phero[(k * itemtypes) + j] = 0.0;
// }
}
}
d_phero.update_devices();
Copybppitemsquantity_map_index_in_place_array_functor copybppitemsquantity_map_index_in_place_array_functor { bpp_items_quantity };
Copybppitemsweight_map_index_in_place_array_functor copybppitemsweight_map_index_in_place_array_functor { bpp_items_weight };
Packing_kernel_map_index_in_place_array_functor packing_kernel_map_index_in_place_array_functor
{ d_bins, copy_bpp_items_quantity, d_eta, d_tau, d_probabilities, copy_bpp_items_weight, d_phero, d_rand_states_ind };
Evaporation_kernel_map_index_in_place_array_functor evaporation_kernel_map_index_in_place_array_functor { d_phero };
Update_pheromones_kernel_map_index_in_place_array_functor update_pheromones_kernel_map_index_in_place_array_functor { d_fitness, d_bins, d_phero };
int lines = 0;
double total = 0.0;
switch (problem) {
case 0:
fileReader.open(f60, std::ifstream::in);
break;
case 1:
fileReader.open(p201, std::ifstream::in);
break;
case 2:
fileReader.open(p402, std::ifstream::in);
break;
case 3:
fileReader.open(p600, std::ifstream::in);
break;
case 4:
fileReader.open(p801, std::ifstream::in);
break;
case 5:
fileReader.open(p1002, std::ifstream::in);
break;
default:
break;
}
if (fileReader.is_open()) {
fileReader >> itemtypes;
fileReader >> bin_capacity;
while (lines < itemtypes && !fileReader.eof()) {
double weight;
double quantity;
fileReader >> weight;
fileReader >> quantity;
bpp_items_weight[lines] = weight;
bpp_items_quantity[lines] = quantity;
total += quantity;
lines++;
}
} else {
printf("\nFile not opened");
}
bpp_items_weight.update_devices();
bpp_items_quantity.update_devices();
itemcount = total;
PRINT ? printf("\nSetup Description", itemtypes) : printf("");
PRINT ? printf("\n\tObject Types: %d", itemtypes) : printf("");
PRINT ? printf("\n\tObject Total: %d", itemcount) : printf("");
PRINT ? printf("\n\tAnts: %d \n\tProblem %d:\n", ants, problem) : printf("");
fileReader.close();
PRINT ? printf("\t\t%d itemstypes \n\t\t%d items \n\t\t%d capacity\n\n", itemtypes, itemcount, (bin_capacity)) : printf("");
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now();
int best_fitness = 999999;
if (PALMA) {
printf("\n%d;%d;%d;%d;", ants, problem, itemtypes, itemcount);
}
PRINT ? printf("Run Debug:\n") : printf("");
// === ITERATIONS START================================================================
// =====================================================================================
// =====================================================================================
// =====================================================================================
// =====================================================================================
for (int iterate = 0; ((iterate) < (n_iterations)); iterate++) {
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now();
//TODO Is it copying files 2 times?
copy_bpp_items_quantity.update_self();
for (int o = 0; o < itemtypes; o++) {
for (int u = 0; u < ants; u++) {
copy_bpp_items_quantity[o * u] = bpp_items_quantity[o];
}
}
copy_bpp_items_quantity.update_devices();
if (!PALMA) {
const char *error = hipGetErrorString(hipPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = hipGetErrorString(hipDeviceSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
copybppitemsweight_map_index_in_place_array_functor.antss = (ants);
//TODO Is it copying files 2 times?
mkt::map_index_in_place<int, Copybppitemsweight_map_index_in_place_array_functor>(copy_bpp_items_weight, copybppitemsweight_map_index_in_place_array_functor);
int maxobject = 0;
if (PRINT) {
copy_bpp_items_quantity.update_self();
printf("\ncopy_bpp_items_quantity:[");
for (int o = 0; o < itemtypes; o++) {
printf("%d;", copy_bpp_items_quantity[o]);
}
}
maxobject = mkt::reduce_max<int>(bpp_items_weight);
for (int o = 0; o < itemtypes; o++) {
if (bpp_items_weight[o] > maxobject) {
maxobject = bpp_items_weight[o];
}
}
packing_kernel_map_index_in_place_array_functor.object_weight = (maxobject);
packing_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes);
packing_kernel_map_index_in_place_array_functor.itemcountt = (itemcount);
packing_kernel_map_index_in_place_array_functor.BETA2 = (BETA);
packing_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity);
mkt::map_index_in_place<int,Packing_kernel_map_index_in_place_array_functor>(d_fitness, packing_kernel_map_index_in_place_array_functor);
if (!PALMA) {
const char *error = hipGetErrorString(hipPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = hipGetErrorString(hipDeviceSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
evaporation_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes);
evaporation_kernel_map_index_in_place_array_functor.EVAPORATION2 = (EVAPORATION);
mkt::map_index_in_place<double, Evaporation_kernel_map_index_in_place_array_functor>(d_phero, evaporation_kernel_map_index_in_place_array_functor);
if (!PALMA) {
const char *error = hipGetErrorString(hipPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = hipGetErrorString(hipDeviceSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
if (PRINT) {
copy_bpp_items_quantity.update_self();
printf("\ncopy_bpp_items_quantity:[");
for (int o = 0; o < itemtypes; o++) {
printf("%d;", copy_bpp_items_quantity[o]);
}
printf("] \n");
}
}
best_fitness = mkt::reduce_min<int>(d_fitness);
if (!PALMA) {
const char *error = hipGetErrorString(hipPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = hipGetErrorString(hipDeviceSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
update_pheromones_kernel_map_index_in_place_array_functor.itemcountt = (itemtypes);
update_pheromones_kernel_map_index_in_place_array_functor.bin_capacity2 = (itemcount);
mkt::map_index_in_place<int, Update_pheromones_kernel_map_index_in_place_array_functor>(d_fitness, update_pheromones_kernel_map_index_in_place_array_functor);
mkt::sync_streams();
if (!PALMA) {
const char *error = hipGetErrorString(hipPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = hipGetErrorString(hipDeviceSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
mkt::sync_streams();
PRINT ? printf("\nBest Fitness (Number of bins used) Iteration %d: %d", iterate, best_fitness) : printf("");
std::chrono::high_resolution_clock::time_point timer_end = std::chrono::high_resolution_clock::now();
double seconds = std::chrono::duration<double>(timer_end - timer_start).count();
}
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_end = std::chrono::high_resolution_clock::now();
double seconds =std::chrono::duration<double>(timer_end - timer_start).count();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_end = std::chrono::high_resolution_clock::now();
double complete_seconds = std::chrono::duration<double>(complete_timer_end - complete_timer_start).count();
if (PRINT) {
printf("\nResults:", complete_seconds);
printf("\n\tSeconds: %.5f;", complete_seconds);
printf("\n\tFitness: %d;\n", best_fitness);
}
if (PALMA) {
printf("%.5f;", complete_seconds);
printf("%d;", best_fitness);
}
return EXIT_SUCCESS;
}
| 84f8de0d83863339b6abbabe1cb2f74b91e6833c.cu | #include <cuda.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
#include <array>
#include <vector>
#include <sstream>
#include <fstream>
#include <chrono>
#include <curand_kernel.h>
#include <limits>
#include <memory>
#include <cstddef>
#include <type_traits>
#include "../include/musket.cuh"
#include "../include/BPP_0.cuh"
#include "Randoms.cpp"
Randoms *randoms;
const int BETA = 1;
const double EVAPORATION = 0.5;
const int TAUMAX = 2;
const int Q = 32;
int itemtypes = 50;
int itemcount = 59;
auto bin_capacity = 1000;
bool PRINT = false;
bool PALMA = true;
struct Copybppitemsquantity_map_index_in_place_array_functor {
Copybppitemsquantity_map_index_in_place_array_functor(
const mkt::DArray<int> &_copy_bpp_items_quantity) :
copy_bpp_items_quantity(_copy_bpp_items_quantity) {
}
~Copybppitemsquantity_map_index_in_place_array_functor() {
}
__device__
auto operator()(int indexx, int valuee) {
return valuee;
}
void init(int device) {
copy_bpp_items_quantity.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemtypess;
int antss;
mkt::DeviceArray<int> copy_bpp_items_quantity;
};
struct Copybppitemsweight_map_index_in_place_array_functor {
Copybppitemsweight_map_index_in_place_array_functor(
const mkt::DArray<int> &_bpp_items_weight) :
bpp_items_weight(_bpp_items_weight) {
}
~Copybppitemsweight_map_index_in_place_array_functor() {
}
__device__
auto operator()(int indexx, int valuee) {
int new_index = ((indexx) % (antss));
return bpp_items_weight.get_global((new_index))/* TODO: For multiple GPUs*/;
}
void init(int device) {
bpp_items_weight.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int antss;
mkt::DeviceArray<int> bpp_items_weight;
};
struct Packing_kernel_map_index_in_place_array_functor {
Packing_kernel_map_index_in_place_array_functor(
const mkt::DArray<int> &_d_bins,
const mkt::DArray<int> &_copy_bpp_items_quantity,
const mkt::DArray<double> &_d_eta,
const mkt::DArray<double> &_d_tau,
const mkt::DArray<double> &_d_probabilities,
const mkt::DArray<int> &_copy_bpp_items_weight,
const mkt::DArray<double> &_d_phero,
curandState *_d_rand_states_ind) :
d_bins(_d_bins), copy_bpp_items_quantity(_copy_bpp_items_quantity), d_eta(
_d_eta), d_tau(_d_tau), d_probabilities(_d_probabilities), copy_bpp_items_weight(
_copy_bpp_items_weight), d_phero(_d_phero), d_rand_states_ind(
_d_rand_states_ind) {
}
~Packing_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, int y) {
int ant_index = (iindex);
int object_bin_index = ((ant_index) * (itemtypess));
int bins_used = 0;
int actual_bin_weight = 0;
int n_items_in_actual_bin = 0;
int possible_items_to_this_bin = 0;
int bpp_items_prefix = (static_cast<int>((ant_index)) * (itemtypess));
int object_index = 0;
int object_quantity = 0;
int new_object_weight = 0;
int object_weightmax = object_weight;
for (int i = 0; ((i) < (itemtypess)); i++) {
if (((copy_bpp_items_weight.get_global(i)) == (object_weightmax))) {
object_index = (i);
}
}
d_bins.set_global(((ant_index) * (itemtypess)), (object_index));
copy_bpp_items_quantity.set_global(((bpp_items_prefix) + (object_index)), (copy_bpp_items_quantity.get_global(((bpp_items_prefix) + (object_index))) - 1));
n_items_in_actual_bin = ((n_items_in_actual_bin) + 1);
actual_bin_weight += (object_weightmax);
bins_used = ((bins_used) + 1);
int weight_object_j;
int object_i;
int quantity_object_j;
for (int i = 0; ((i) < ((itemcountt) - 1)); i++) {
double eta_tau_sum = 0.0;
for (int j = 0; ((j) < (itemtypess)); j++) {
d_eta.set_global(((object_bin_index) + (j)), 0.0);
d_tau.set_global(((object_bin_index) + (j)), 0.0);
d_probabilities.set_global(((object_bin_index) + (j)), 0.0);
weight_object_j = copy_bpp_items_weight.get_global((j));
quantity_object_j = copy_bpp_items_quantity.get_global(bpp_items_prefix + j);
if (((quantity_object_j) > 0)) {
if (((weight_object_j) < ((bin_capacity2) - (actual_bin_weight)))) {
//TODO if this can be removed (It is never reached)
if (((actual_bin_weight) == 0)) {
d_eta.set_global(((object_bin_index) + (j)), 1.0);
} else {
for (int k = 0; ((k) < (n_items_in_actual_bin)); k++) {
object_i = d_bins.get_global((((object_bin_index) + (i)) - (k)));
d_eta.set_global(((object_bin_index) + (j)), d_phero.get_global( object_i * (int) itemtypess + j));
}
d_eta.set_global(((object_bin_index) + (j)),(d_eta.get_global(((object_bin_index) + (j)))/ (n_items_in_actual_bin)));
}
d_tau.set_global(((object_bin_index) + (j)), (double) pow(weight_object_j, BETA));
eta_tau_sum = eta_tau_sum + (d_eta.get_global(((object_bin_index) + (j))) * d_tau.get_global(((object_bin_index) + (j))));
possible_items_to_this_bin = ((possible_items_to_this_bin) + 1);
}
}
}
if (((possible_items_to_this_bin) > 0)) {
for (int j = 0; ((j) < (itemtypess)); j++) {
double tmp = d_eta.get_global(object_bin_index + j);
double tmp2 = d_tau.get_global(object_bin_index + j);
double thisthat = ((tmp * tmp2) / (eta_tau_sum));
//printf("%.2f;", (tmp * tmp2) / (eta_tau_sum));
d_probabilities.set_global((object_bin_index + j), thisthat);
d_eta.set_global(((object_bin_index) + (j)), 0.0);
d_tau.set_global(((object_bin_index) + (j)), 0.0);
}
eta_tau_sum = 0.0;
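// roulette-wheel selection: walk the cumulative probabilities until they exceed the uniform draw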
double random = curand_uniform(&d_rand_states_ind[ant_index]);
int object_j = 0;
double sum = d_probabilities.get_global((object_bin_index));
while (sum < random) {
object_j = ((object_j) + 1);
sum = (sum + d_probabilities.get_global(((object_bin_index) + (object_j))));
}
//printf("%d:", counter);
d_bins.set_global(ant_index * (int) itemtypess + i + 1, (object_j));
weight_object_j = copy_bpp_items_weight.get_global(object_j);
actual_bin_weight += (weight_object_j);
copy_bpp_items_quantity.set_global((bpp_items_prefix + object_j),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_j) - 1));
n_items_in_actual_bin = ((n_items_in_actual_bin) + 1);
possible_items_to_this_bin = 0;
} else {
possible_items_to_this_bin = 0;
actual_bin_weight = 0;
object_index = 0;
object_weightmax = 0;
for (int k = 0; ((k) < (itemtypess)); k++) {object_quantity = copy_bpp_items_quantity.get_global((bpp_items_prefix + k));
new_object_weight = copy_bpp_items_weight.get_global((k));
if (((object_quantity) > 0)) {
if (((new_object_weight) > (object_weightmax))) {
object_index = (k);
object_weightmax = (new_object_weight);
}
}
}
copy_bpp_items_quantity.set_global((bpp_items_prefix + object_index),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_index) - 1));
d_bins.set_global(((((ant_index) * static_cast<int>((itemtypess))) + (i)) + 1), (object_index));
n_items_in_actual_bin = (n_items_in_actual_bin) + 1;
actual_bin_weight += (object_weightmax);
bins_used = ((bins_used) + 1);
}
}
if (bins_used > itemcountt) {
bins_used = itemcountt;
}
return (bins_used);
}
void init(int device) {
d_bins.init(device);
copy_bpp_items_quantity.init(device);
d_eta.init(device);
d_tau.init(device);
d_probabilities.init(device);
copy_bpp_items_weight.init(device);
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int object_weight;
int itemtypess;
int itemcountt;
int BETA2;
int bin_capacity2;
curandState *d_rand_states_ind;
mkt::DeviceArray<int> d_bins;
mkt::DeviceArray<int> copy_bpp_items_quantity;
mkt::DeviceArray<double> d_eta;
mkt::DeviceArray<double> d_tau;
mkt::DeviceArray<double> d_probabilities;
mkt::DeviceArray<int> copy_bpp_items_weight;
mkt::DeviceArray<double> d_phero;
};
struct Evaporation_kernel_map_index_in_place_array_functor {
Evaporation_kernel_map_index_in_place_array_functor(
const mkt::DArray<double> &_d_phero) :
d_phero(_d_phero) {
}
~Evaporation_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, double y) {
double result = 0.0;
double RO = (EVAPORATION2);
if ((((iindex) % (itemtypess)) != 0)) {
result = ((1 - (RO)) * d_phero.get_global((iindex))/* TODO: For multiple GPUs*/);
}
return (result);
}
void init(int device) {
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemtypess;
double EVAPORATION2;
mkt::DeviceArray<double> d_phero;
};
struct Update_pheromones_kernel_map_index_in_place_array_functor {
Update_pheromones_kernel_map_index_in_place_array_functor(
const mkt::DArray<int> &_d_fitness, const mkt::DArray<int> &_d_bins,
const mkt::DArray<double> &_d_phero) :
d_fitness(_d_fitness), d_bins(_d_bins), d_phero(_d_phero) {
}
~Update_pheromones_kernel_map_index_in_place_array_functor() {
}
__device__
auto operator()(int iindex, int value) {
int ant_index = (iindex);
double ant_fitness = (d_fitness.get_global((ant_index)) * 1.0);
double actual_bin_weight = 0.0;
int actual_bin_object_index = 0;
int actual_bin_n_objects = 0;
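// walk this ant's packing sequence; each time a bin closes, deposit Q / fitness pheromone between every pair of items that shared it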
for (int i = 0; ((i) < (itemcountt)); i++) {
double object_weight =
static_cast<double>(d_bins.get_global((((ant_index) * (itemcountt)) + (i)))/* TODO: For multiple GPUs*/);
if ((((actual_bin_weight) + (object_weight)) < (bin_capacity2))) {
actual_bin_n_objects = ((actual_bin_n_objects) + 1);
actual_bin_weight = ((actual_bin_weight) + (object_weight));
} else {
for (int j = 0; ((j) < (actual_bin_n_objects)); j++) {
for (int k = ((j) + 1); ((k) < (actual_bin_n_objects)); k++) {
int object_i = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (j)))/* TODO: For multiple GPUs*/;
int object_j = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (k)))/* TODO: For multiple GPUs*/;
double delta_pheromone = ((Q)/ (d_fitness.get_global((ant_index)) * 1.0));
//TODO ITEM TYPE ? FOr pheromone updates
d_phero.set_global((((object_i) * (itemcountt)) + (object_j)),((delta_pheromone) + d_phero.get_global((((object_i) * (itemcountt)) + (object_j)))));
d_phero.set_global((((object_j) * (itemcountt)) + (object_i)),((delta_pheromone) + d_phero.get_global((((object_j) * (itemcountt)) + (object_i)))));
}
}
actual_bin_n_objects = 1;
actual_bin_weight = (object_weight);
actual_bin_object_index = (i);
}
}
//TODO ?
if (value > itemcountt) {
value = itemcountt;
}
//printf("%d;", value);
return (value);
}
void init(int device) {
d_fitness.init(device);
d_bins.init(device);
d_phero.init(device);
}
size_t get_smem_bytes() {
size_t result = 0;
return result;
}
int itemcountt;
int bin_capacity2;
mkt::DeviceArray<int> d_fitness;
mkt::DeviceArray<int> d_bins;
mkt::DeviceArray<double> d_phero;
};
template<>
int mkt::reduce_max<int>(mkt::DArray<int> &a) {
int local_result = std::numeric_limits<int>::lowest();
const int gpu_elements = a.get_size_gpu();
int threads = gpu_elements < 1024 ? gpu_elements : 1024; // nextPow2
int blocks = (gpu_elements + threads - 1) / threads;
cudaSetDevice(0);
int *d_odata;
cudaMalloc((void**) &d_odata, blocks * sizeof(int));
int *devptr = a.get_device_pointer(0);
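// step 1: block-level reduction of the device data into one partial maximum per block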
mkt::kernel::reduce_max_call(gpu_elements, devptr, d_odata, threads, blocks,
mkt::cuda_streams[0], 0);
// fold on gpus: step 2
while (blocks > 1) {
int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2
int blocks_2 = (blocks + threads_2 - 1) / threads_2;
mkt::kernel::reduce_max_call(blocks, d_odata, d_odata, threads_2,
blocks_2, mkt::cuda_streams[0], 0);
blocks = blocks_2;
}
// copy final sum from device to host
cudaMemcpyAsync(&local_result, d_odata, sizeof(int), cudaMemcpyDeviceToHost,
mkt::cuda_streams[0]);
mkt::sync_streams();
cudaFree(d_odata);
return local_result;
}
template<>
int mkt::reduce_min<int>(mkt::DArray<int> &a) {
int local_result = std::numeric_limits<int>::max();
const int gpu_elements = a.get_size_gpu();
int threads = gpu_elements < 1024 ? gpu_elements : 1024; // nextPow2
int blocks = (gpu_elements + threads - 1) / threads;
//cudaSetDevice(0);
int *d_odata;
cudaMalloc((void**) &d_odata, blocks * sizeof(int));
int *devptr = a.get_device_pointer(0);
mkt::kernel::reduce_min_call(gpu_elements, devptr, d_odata, threads, blocks,
mkt::cuda_streams[0], 0);
// fold on gpus: step 2
while (blocks > 1) {
int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2
int blocks_2 = (blocks + threads_2 - 1) / threads_2;
mkt::kernel::reduce_min_call(blocks, d_odata, d_odata, threads_2,
blocks_2, mkt::cuda_streams[0], 0);
blocks = blocks_2;
}
// copy final sum from device to host
cudaMemcpyAsync(&local_result, d_odata, sizeof(int), cudaMemcpyDeviceToHost,
mkt::cuda_streams[0]);
mkt::sync_streams();
cudaFree(d_odata);
return local_result;
}
__global__ void setup_rand_kernel(curandState *state, unsigned long seed) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(seed, id, 0, &state[id]);
// curand_init(1234, id, 0, &state[id]);
}
int main(int argc, char **argv) {
mkt::init();
/*int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Compute Capability (bits): %d. %d\n",
prop.major, prop.minor);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}*/
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now();
char *n_iterationschar = argv[1];
char *problemchar = argv[2];
char *antschar = argv[3];
int n_iterations = atoi(n_iterationschar);
int problem = atoi(problemchar);
int ants = atoi(antschar);
randoms = new Randoms(15);
std::ifstream fileReader;
//Problem Instances
std::string file_to_read = "";
//Problem Instances
//std::string f60 = "/home/n/n_herr03/BPP/BPP/source/bpp/Falkenauer_t60_00.txt";
//std::string p201 = "/home/n/n_herr03/BPP/BPP/source/bpp/201_2500_NR_0.txt";
//std::string p402 = "/home/n/n_herr03/BPP/BPP/source/bpp/402_10000_NR_0.txt";
//std::string p600 = "/home/n/n_herr03/BPP/BPP/source/bpp/600_20000_NR_0.txt";
//std::string p801 = "/home/n/n_herr03/BPP/BPP/source/bpp/801_40000_NR_0.txt";
//std::string p1002 = "/home/n/n_herr03/BPP/BPP/source/bpp/1002_80000_NR_0.txt";
//if(PALMA){
std::string f60 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/Falkenauer_t60_00.txt";
std::string p201 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/201_2500_NR_0.txt";
std::string p402 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/402_10000_NR_0.txt";
std::string p600 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/600_20000_NR_0.txt";
std::string p801 =
"/home/n/n_herr03/BPP//BPP/LowLevelProgram/source/bpp/801_40000_NR_0.txt";
std::string p1002 =
"/home/n/n_herr03/BPP/BPP/LowLevelProgram/source/bpp/1002_80000_NR_0.txt";
//}
switch (problem) {
case 0:
fileReader.open(f60, std::ifstream::in);
break;
case 1:
fileReader.open(p201, std::ifstream::in);
break;
case 2:
fileReader.open(p402, std::ifstream::in);
break;
case 3:
fileReader.open(p600, std::ifstream::in);
break;
case 4:
fileReader.open(p801, std::ifstream::in);
break;
case 5:
fileReader.open(p1002, std::ifstream::in);
break;
default:
break;
}
if (fileReader.is_open()) {
fileReader >> itemtypes;
fileReader >> bin_capacity;
}
fileReader.close();
int pheromone_matrix_size = itemtypes * itemtypes;
mkt::DArray<double> d_phero(0, pheromone_matrix_size, pheromone_matrix_size, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> d_fitness(0, ants, ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_probabilities(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_eta(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<double> d_tau(0, ants * itemtypes, ants * itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> bpp_items_quantity(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> copy_bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> copy_bpp_items_quantity(0, itemtypes * ants, itemtypes * ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
mkt::DArray<int> d_bins(0, ants * itemtypes, ants * itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY);
curandState *d_rand_states_ind;
cudaMalloc(&d_rand_states_ind, ants * sizeof(curandState));
setup_rand_kernel<<<ants, 1, 0>>>(d_rand_states_ind, time(NULL));
d_bins.update_devices();
d_fitness.update_devices();
d_probabilities.update_devices();
d_eta.update_devices();
d_tau.update_devices();
double randn;
//TODO FIX 1
for (int j = 0; j < itemtypes; j++) {
for (int k = 0; k < itemtypes; k++) {
// if (j != k) {
randn = randoms->Uniforme() * TAUMAX;
d_phero[(j * itemtypes) + k] = randn;
d_phero[(k * itemtypes) + j] = randn;
// } else {
// d_phero[(j * itemtypes) + k] = 0.0;
// d_phero[(k * itemtypes) + j] = 0.0;
// }
}
}
d_phero.update_devices();
Copybppitemsquantity_map_index_in_place_array_functor copybppitemsquantity_map_index_in_place_array_functor { bpp_items_quantity };
Copybppitemsweight_map_index_in_place_array_functor copybppitemsweight_map_index_in_place_array_functor { bpp_items_weight };
Packing_kernel_map_index_in_place_array_functor packing_kernel_map_index_in_place_array_functor
{ d_bins, copy_bpp_items_quantity, d_eta, d_tau, d_probabilities, copy_bpp_items_weight, d_phero, d_rand_states_ind };
Evaporation_kernel_map_index_in_place_array_functor evaporation_kernel_map_index_in_place_array_functor { d_phero };
Update_pheromones_kernel_map_index_in_place_array_functor update_pheromones_kernel_map_index_in_place_array_functor { d_fitness, d_bins, d_phero };
int lines = 0;
double total = 0.0;
switch (problem) {
case 0:
fileReader.open(f60, std::ifstream::in);
break;
case 1:
fileReader.open(p201, std::ifstream::in);
break;
case 2:
fileReader.open(p402, std::ifstream::in);
break;
case 3:
fileReader.open(p600, std::ifstream::in);
break;
case 4:
fileReader.open(p801, std::ifstream::in);
break;
case 5:
fileReader.open(p1002, std::ifstream::in);
break;
default:
break;
}
if (fileReader.is_open()) {
fileReader >> itemtypes;
fileReader >> bin_capacity;
while (lines < itemtypes && !fileReader.eof()) {
double weight;
double quantity;
fileReader >> weight;
fileReader >> quantity;
bpp_items_weight[lines] = weight;
bpp_items_quantity[lines] = quantity;
total += quantity;
lines++;
}
} else {
printf("\nFile not opened");
}
bpp_items_weight.update_devices();
bpp_items_quantity.update_devices();
itemcount = total;
PRINT ? printf("\nSetup Description", itemtypes) : printf("");
PRINT ? printf("\n\tObject Types: %d", itemtypes) : printf("");
PRINT ? printf("\n\tObject Total: %d", itemcount) : printf("");
PRINT ? printf("\n\tAnts: %d \n\tProblem %d:\n", ants, problem) : printf("");
fileReader.close();
PRINT ? printf("\t\t%d itemstypes \n\t\t%d items \n\t\t%d capacity\n\n", itemtypes, itemcount, (bin_capacity)) : printf("");
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now();
int best_fitness = 999999;
if (PALMA) {
printf("\n%d;%d;%d;%d;", ants, problem, itemtypes, itemcount);
}
PRINT ? printf("Run Debug:\n") : printf("");
// === ITERATIONS START================================================================
// =====================================================================================
// =====================================================================================
// =====================================================================================
// =====================================================================================
for (int iterate = 0; ((iterate) < (n_iterations)); iterate++) {
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now();
//TODO Is it copying files 2 times?
copy_bpp_items_quantity.update_self();
for (int o = 0; o < itemtypes; o++) {
for (int u = 0; u < ants; u++) {
copy_bpp_items_quantity[o * u] = bpp_items_quantity[o];
}
}
copy_bpp_items_quantity.update_devices();
if (!PALMA) {
const char *error = cudaGetErrorString(cudaPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = cudaGetErrorString(cudaThreadSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
copybppitemsweight_map_index_in_place_array_functor.antss = (ants);
//TODO Is it copying files 2 times?
mkt::map_index_in_place<int, Copybppitemsweight_map_index_in_place_array_functor>(copy_bpp_items_weight, copybppitemsweight_map_index_in_place_array_functor);
int maxobject = 0;
if (PRINT) {
copy_bpp_items_quantity.update_self();
printf("\ncopy_bpp_items_quantity:[");
for (int o = 0; o < itemtypes; o++) {
printf("%d;", copy_bpp_items_quantity[o]);
}
}
maxobject = mkt::reduce_max<int>(bpp_items_weight);
for (int o = 0; o < itemtypes; o++) {
if (bpp_items_weight[o] > maxobject) {
maxobject = bpp_items_weight[o];
}
}
packing_kernel_map_index_in_place_array_functor.object_weight = (maxobject);
packing_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes);
packing_kernel_map_index_in_place_array_functor.itemcountt = (itemcount);
packing_kernel_map_index_in_place_array_functor.BETA2 = (BETA);
packing_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity);
mkt::map_index_in_place<int,Packing_kernel_map_index_in_place_array_functor>(d_fitness, packing_kernel_map_index_in_place_array_functor);
if (!PALMA) {
const char *error = cudaGetErrorString(cudaPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = cudaGetErrorString(cudaThreadSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
evaporation_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes);
evaporation_kernel_map_index_in_place_array_functor.EVAPORATION2 = (EVAPORATION);
mkt::map_index_in_place<double, Evaporation_kernel_map_index_in_place_array_functor>(d_phero, evaporation_kernel_map_index_in_place_array_functor);
if (!PALMA) {
const char *error = cudaGetErrorString(cudaPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = cudaGetErrorString(cudaThreadSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
if (PRINT) {
copy_bpp_items_quantity.update_self();
printf("\ncopy_bpp_items_quantity:[");
for (int o = 0; o < itemtypes; o++) {
printf("%d;", copy_bpp_items_quantity[o]);
}
printf("] \n");
}
}
best_fitness = mkt::reduce_min<int>(d_fitness);
if (!PALMA) {
const char *error = cudaGetErrorString(cudaPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = cudaGetErrorString(cudaThreadSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
update_pheromones_kernel_map_index_in_place_array_functor.itemcountt = (itemtypes);
update_pheromones_kernel_map_index_in_place_array_functor.bin_capacity2 = (itemcount);
mkt::map_index_in_place<int, Update_pheromones_kernel_map_index_in_place_array_functor>(d_fitness, update_pheromones_kernel_map_index_in_place_array_functor);
mkt::sync_streams();
if (!PALMA) {
const char *error = cudaGetErrorString(cudaPeekAtLastError());
if (PRINT) {
printf("%s\n", error);
}
const char *error2 = cudaGetErrorString(cudaThreadSynchronize());
if (PRINT) {
printf("%s\n", error2);
}
}
mkt::sync_streams();
PRINT ? printf("\nBest Fitness (Number of bins used) Iteration %d: %d", iterate, best_fitness) : printf("");
std::chrono::high_resolution_clock::time_point timer_end = std::chrono::high_resolution_clock::now();
double seconds = std::chrono::duration<double>(timer_end - timer_start).count();
}
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point timer_end = std::chrono::high_resolution_clock::now();
double seconds =std::chrono::duration<double>(timer_end - timer_start).count();
mkt::sync_streams();
std::chrono::high_resolution_clock::time_point complete_timer_end = std::chrono::high_resolution_clock::now();
double complete_seconds = std::chrono::duration<double>(complete_timer_end - complete_timer_start).count();
if (PRINT) {
printf("\nResults:", complete_seconds);
printf("\n\tSeconds: %.5f;", complete_seconds);
printf("\n\tFitness: %d;\n", best_fitness);
}
if (PALMA) {
printf("%.5f;", complete_seconds);
printf("%d;", best_fitness);
}
return EXIT_SUCCESS;
}
|
743d3cfee938d458f927fe8646aa07d289efe12f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TAPSO.cuh"
#include "TAPSO_kernels.cu"
#include "Functors.cuh"
#define MUL_INI 2
#define MUL_FIM 0.5
template<typename T>
TAPSO<T>::TAPSO(int numParticles, int numIterations)
:
PSOBase(numParticles, numIterations, T::GetNumDimensions(), T::GetMinPosition(), T::GetMaxPosition()),
_d_positions(_positions.size()),
_d_velocities(_velocities.size()),
_d_minPositions(_minPositions),
_d_maxPositions(_maxPositions),
_d_bestPositions(_bestPositions.size()),
_d_bestFitness(_bestFitness.size()),
_d_state(numParticles)
{
if (_numDimensions > MAX_DIMENSIONS)
throw new exception("_numDimensions > MAX_DIMENSIONS");
CalculateGeometry();
_d_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_d_bestGlobalFitness.resize(_numBlocks);
_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_bestGlobalFitness.resize(_numBlocks);
hipMemcpyToSymbol(_c_TAPSOMinPosition, _minPositions.data(), _minPositions.size() * sizeof(float));
hipMemcpyToSymbol(_c_TAPSOMaxPosition, _maxPositions.data(), _maxPositions.size() * sizeof(float));
}
template<typename T>
void TAPSO<T>::Init()
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
hipLaunchKernelGGL(( k_TAPSOInit<T>), dim3(_numBlocks), dim3(threadNumber), 0, 0, _numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
hipDeviceSynchronize();
hipLaunchKernelGGL(( k_TAPSOMinimum), dim3(1), dim3(blockNumber), 0, 0, _numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
UpdateHost();
}
template<typename T>
void TAPSO<T>::Iterate()
{
float B = MUL_INI;
float A = (MUL_FIM - MUL_INI)/(float)(_numIterations-1);
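// k varies linearly from MUL_INI (2.0) on the first iteration to MUL_FIM (0.5) on the
// last one and is passed to the iterate kernel below (presumably a shrinking
// velocity/inertia coefficient; the kernel itself is defined in TAPSO_kernels.cu).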
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
for (int i = 0; i < _numIterations; ++i)
{
float k = A*i+B;
hipLaunchKernelGGL(( k_TAPSOIterateMultiBlock<T>), dim3(_numBlocks), dim3(threadNumber), 0, 0, _numParticles, _numDimensions, k,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
if (blockNumber > 1)
{
hipLaunchKernelGGL(( k_TAPSOMinimum), dim3(1), dim3(blockNumber), 0, 0, _numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
}
}
_iteration += _numIterations;
UpdateHost();
}
template<typename T>
void TAPSO<T>::UpdateHost()
{
_positions = _d_positions;
_velocities = _d_velocities;
_minPositions = _d_minPositions;
_maxPositions = _d_maxPositions;
_bestPositions = _d_bestPositions;
_bestFitness = _d_bestFitness;
_bestGlobalPosition = _d_bestGlobalPosition;
_bestGlobalFitness = _d_bestGlobalFitness;
}
template<typename T>
void TAPSO<T>::CalculateGeometry()
{
int numDevices;
hipGetDeviceCount(&numDevices);
if (numDevices < 1)
throw std::exception("No CUDA device found");
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
int maxThreads = devProp.maxThreadsPerBlock;
_numThreads = (_numParticles + 31 ) / 32 * 32;
_numThreads = ::min(((_numThreads + 31)/32)*32, maxThreads);
_numBlocks = (_numParticles + _numThreads - 1) / _numThreads;
}
| 743d3cfee938d458f927fe8646aa07d289efe12f.cu | #include "TAPSO.cuh"
#include "TAPSO_kernels.cu"
#include "Functors.cuh"
#define MUL_INI 2
#define MUL_FIM 0.5
template<typename T>
TAPSO<T>::TAPSO(int numParticles, int numIterations)
:
PSOBase(numParticles, numIterations, T::GetNumDimensions(), T::GetMinPosition(), T::GetMaxPosition()),
_d_positions(_positions.size()),
_d_velocities(_velocities.size()),
_d_minPositions(_minPositions),
_d_maxPositions(_maxPositions),
_d_bestPositions(_bestPositions.size()),
_d_bestFitness(_bestFitness.size()),
_d_state(numParticles)
{
if (_numDimensions > MAX_DIMENSIONS)
throw new exception("_numDimensions > MAX_DIMENSIONS");
CalculateGeometry();
_d_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_d_bestGlobalFitness.resize(_numBlocks);
_bestGlobalPosition.resize(_numDimensions * _numBlocks);
_bestGlobalFitness.resize(_numBlocks);
cudaMemcpyToSymbol(_c_TAPSOMinPosition, _minPositions.data(), _minPositions.size() * sizeof(float));
cudaMemcpyToSymbol(_c_TAPSOMaxPosition, _maxPositions.data(), _maxPositions.size() * sizeof(float));
}
template<typename T>
void TAPSO<T>::Init()
{
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
k_TAPSOInit<T><<<_numBlocks, threadNumber>>>(_numParticles, _numDimensions,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
cudaDeviceSynchronize();
k_TAPSOMinimum<<<1, blockNumber>>>(_numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
UpdateHost();
}
template<typename T>
void TAPSO<T>::Iterate()
{
float B = MUL_INI;
float A = (MUL_FIM - MUL_INI)/(float)(_numIterations-1);
int threadNumber = pow(2, ceil(log(_numThreads)/log(2)));
int blockNumber = pow(2, ceil(log(_numBlocks)/log(2)));
for (int i = 0; i < _numIterations; ++i)
{
float k = A*i+B;
k_TAPSOIterateMultiBlock<T><<<_numBlocks, threadNumber>>>(_numParticles, _numDimensions, k,
raw_pointer_cast(_d_positions.data()),
raw_pointer_cast(_d_velocities.data()),
raw_pointer_cast(_d_bestPositions.data()),
raw_pointer_cast(_d_bestFitness.data()),
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()),
raw_pointer_cast(_d_state.data()));
if (blockNumber > 1)
{
k_TAPSOMinimum<<<1, blockNumber>>>(_numBlocks, _numDimensions,
raw_pointer_cast(_d_bestGlobalPosition.data()),
raw_pointer_cast(_d_bestGlobalFitness.data()));
}
}
_iteration += _numIterations;
UpdateHost();
}
template<typename T>
void TAPSO<T>::UpdateHost()
{
_positions = _d_positions;
_velocities = _d_velocities;
_minPositions = _d_minPositions;
_maxPositions = _d_maxPositions;
_bestPositions = _d_bestPositions;
_bestFitness = _d_bestFitness;
_bestGlobalPosition = _d_bestGlobalPosition;
_bestGlobalFitness = _d_bestGlobalFitness;
}
template<typename T>
void TAPSO<T>::CalculateGeometry()
{
int numDevices;
cudaGetDeviceCount(&numDevices);
if (numDevices < 1)
throw std::exception("No CUDA device found");
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
int maxThreads = devProp.maxThreadsPerBlock;
_numThreads = (_numParticles + 31 ) / 32 * 32;
_numThreads = std::min(((_numThreads + 31)/32)*32, maxThreads);
_numBlocks = (_numParticles + _numThreads - 1) / _numThreads;
}
|
ff8f6a91af936d155aec0e14f3fae89d9a4fa51f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "cholesky_inverse_impl.cuh"
template <typename T>
__global__ void CopyUpToLow(const size_t size, const T *input, const int64_t rank, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int row = pos / rank;
int col = pos % rank;
output[pos] = row <= col ? input[pos] : input[col * rank + row];
}
return;
}
template <typename T>
__global__ void CopyLowToUp(const size_t size, const T *input, const int64_t rank, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int row = pos / rank;
int col = pos % rank;
output[pos] = col <= row ? input[pos] : input[col * rank + row];
}
return;
}
template <typename T>
void CalCopyUpToLow(const size_t size, T *input, const int64_t rank, T *output, const uint32_t &device_id,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( CopyUpToLow), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input, rank,
output);
return;
}
template <typename T>
void CalCopyLowToUp(const size_t size, T *input, const int64_t rank, T *output, const uint32_t &device_id,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( CopyLowToUp), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input, rank,
output);
return;
}
template
CUDA_LIB_EXPORT void CalCopyUpToLow<float>(const size_t size, float *input, const int64_t rank,
float *output, const uint32_t &device_id, hipStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyUpToLow<double>(const size_t size, double *input, const int64_t rank,
double *output, const uint32_t &device_id, hipStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyLowToUp<float>(const size_t size, float *input, const int64_t rank,
float *output, const uint32_t &device_id, hipStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyLowToUp<double>(const size_t size, double *input, const int64_t rank,
double *output, const uint32_t &device_id, hipStream_t cuda_stream);
| ff8f6a91af936d155aec0e14f3fae89d9a4fa51f.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "cholesky_inverse_impl.cuh"
template <typename T>
__global__ void CopyUpToLow(const size_t size, const T *input, const int64_t rank, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int row = pos / rank;
int col = pos % rank;
output[pos] = row <= col ? input[pos] : input[col * rank + row];
}
return;
}
template <typename T>
__global__ void CopyLowToUp(const size_t size, const T *input, const int64_t rank, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int row = pos / rank;
int col = pos % rank;
output[pos] = col <= row ? input[pos] : input[col * rank + row];
}
return;
}
template <typename T>
void CalCopyUpToLow(const size_t size, T *input, const int64_t rank, T *output, const uint32_t &device_id,
cudaStream_t cuda_stream) {
CopyUpToLow<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input, rank,
output);
return;
}
template <typename T>
void CalCopyLowToUp(const size_t size, T *input, const int64_t rank, T *output, const uint32_t &device_id,
cudaStream_t cuda_stream) {
CopyLowToUp<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input, rank,
output);
return;
}
template
CUDA_LIB_EXPORT void CalCopyUpToLow<float>(const size_t size, float *input, const int64_t rank,
float *output, const uint32_t &device_id, cudaStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyUpToLow<double>(const size_t size, double *input, const int64_t rank,
double *output, const uint32_t &device_id, cudaStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyLowToUp<float>(const size_t size, float *input, const int64_t rank,
float *output, const uint32_t &device_id, cudaStream_t cuda_stream);
template
CUDA_LIB_EXPORT void CalCopyLowToUp<double>(const size_t size, double *input, const int64_t rank,
double *output, const uint32_t &device_id, cudaStream_t cuda_stream);
|
decf5cdadd38e8bf2bdb30a4b10eb0a2df3b1ab1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "generate_binID.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
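// Timing harness: for each (matrix size, block shape) pair it pads the launch geometry
// up to a multiple of the block dimensions, performs 10 warm-up launches of
// generate_binID, then times 1000 launches with steady_clock and prints the elapsed
// microseconds together with the block and matrix configuration.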
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *dIn = NULL;
hipMalloc(&dIn, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
const int binNumber = 1;
const float lumMin = 1;
const float lumMax = 1;
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((generate_binID), dim3(gridBlock), dim3(threadBlock), 0, 0, dIn, out, binNumber, lumMin, lumMax, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((generate_binID), dim3(gridBlock), dim3(threadBlock), 0, 0, dIn, out, binNumber, lumMin, lumMax, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((generate_binID), dim3(gridBlock), dim3(threadBlock), 0, 0, dIn, out, binNumber, lumMin, lumMax, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | decf5cdadd38e8bf2bdb30a4b10eb0a2df3b1ab1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "generate_binID.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *dIn = NULL;
cudaMalloc(&dIn, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
const int binNumber = 1;
const float lumMin = 1;
const float lumMax = 1;
const int size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
generate_binID<<<gridBlock,threadBlock>>>(dIn,out,binNumber,lumMin,lumMax,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
generate_binID<<<gridBlock,threadBlock>>>(dIn,out,binNumber,lumMin,lumMax,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
generate_binID<<<gridBlock,threadBlock>>>(dIn,out,binNumber,lumMin,lumMax,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
60aa11adb7583affbbc491cec102e3b15b6fe9ec.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sumScore.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *score = NULL;
hipMalloc(&score, XSIZE*YSIZE);
int full_size = XSIZE*YSIZE;
int half_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sumScore), dim3(gridBlock), dim3(threadBlock), 0, 0, score, full_size, half_size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sumScore), dim3(gridBlock), dim3(threadBlock), 0, 0, score, full_size, half_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sumScore), dim3(gridBlock), dim3(threadBlock), 0, 0, score, full_size, half_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 60aa11adb7583affbbc491cec102e3b15b6fe9ec.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sumScore.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *score = NULL;
cudaMalloc(&score, XSIZE*YSIZE);
int full_size = XSIZE*YSIZE;
int half_size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sumScore<<<gridBlock,threadBlock>>>(score,full_size,half_size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sumScore<<<gridBlock,threadBlock>>>(score,full_size,half_size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sumScore<<<gridBlock,threadBlock>>>(score,full_size,half_size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bbc60c52b8d66896ff00877227fbc874f2b38fc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
  In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
using namespace std;
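// A minimal host-side sketch (not part of the original assignment code) of the four
// steps implemented on the GPU below, using the example from the header comment:
// input [2 4 3 3 1 7 4 5 7 0 9 4 3 2] with 3 bins gives histo [4 7 3]; the exclusive
// scan used in step 4 yields [0 4 11] (the header's [4 11 14] is the inclusive variant).
static void cdf_reference_sketch() {
  const float lum[] = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
  const int n = sizeof(lum) / sizeof(lum[0]);
  const int numBins = 3;
  // step 1: min / max
  float lumMin = lum[0], lumMax = lum[0];
  for (int i = 1; i < n; ++i) {
    if (lum[i] < lumMin) lumMin = lum[i];
    if (lum[i] > lumMax) lumMax = lum[i];
  }
  // step 2: range (9 for the example)
  const float lumRange = lumMax - lumMin;
  // step 3: histogram via bin = (lum[i] - lumMin) / lumRange * numBins
  unsigned int histo[numBins] = {0};
  for (int i = 0; i < n; ++i) {
    int bin = static_cast<int>((lum[i] - lumMin) / lumRange * numBins);
    if (bin > numBins - 1) bin = numBins - 1;  // clamp the maximum value into the last bin
    ++histo[bin];
  }
  // step 4: exclusive scan (prefix sum) of the histogram
  unsigned int cdf[numBins];
  cdf[0] = 0;
  for (int i = 1; i < numBins; ++i) cdf[i] = cdf[i - 1] + histo[i - 1];
  (void)cdf;  // illustrative only; the real pipeline runs on the device below
}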
// Atomic max for floats built on atomicCAS over the int bit pattern: keep CAS-ing
// until the stored value is no longer smaller than val. atomicMinFloat below is the
// mirror image.
__device__ float atomicMaxFloat(float* addr, float val) {
int *addrAsInt = (int *)addr;
int old = *addrAsInt;
while(val > __int_as_float(old)) {
old = atomicCAS(addrAsInt, old, __float_as_int(val));
}
return __int_as_float(old);
}
__device__ float atomicMinFloat(float* addr, float val) {
int *addrAsInt = (int *) addr;
int old = *addrAsInt;
while(val < __int_as_float(old)) {
old = atomicCAS(addrAsInt, old, __float_as_int(val));
}
return __int_as_float(old);
}
// Per-block tree reduction in shared memory, then atomicMaxFloat to combine the
// block-level maxima into a single global result (min_reduce below is analogous).
__global__ void max_reduce(const float* const d_lum, float* d_max) {
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
shared[tid] = d_lum[gid];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>=1) {
if (tid < s)
shared[tid] = max(shared[tid], shared[tid+s]);
__syncthreads();
}
if (tid == 0)
atomicMaxFloat(d_max, shared[tid]);
}
__global__ void min_reduce(const float* const d_lum, float* d_min) {
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
shared[tid] = d_lum[gid];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s)
shared[tid] = min(shared[tid], shared[tid+s]);
__syncthreads();
}
if (tid == 0)
atomicMinFloat(d_min, shared[tid]);
}
__global__ void calcHisto(unsigned int *d_histo, const float* const d_lum, float llmin, float llRange, size_t numBins) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int bin = min(static_cast<unsigned int>(numBins-1),
static_cast<unsigned int>((d_lum[gid]-llmin)/llRange * numBins));
atomicAdd(&d_histo[bin], 1);
}
// ATTENTION: The scan below is wrong: tmp[] is updated in place, so within one offset
// step a thread may read tmp[tid - offset] after its owner thread has already added to
// it (there is no barrier between the read and the write). scan_correct below removes
// the race with a pin/pout double buffer.
__global__ void scan_wrong(unsigned int *d_histo, unsigned int* const d_cdf, const size_t numBins) {
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
tmp[tid] = tid > 0 ? d_histo[tid-1] : 0;
__syncthreads();
for (int offset = 1; offset < numBins; offset <<= 1) {
if (tid >= offset) {
tmp[tid] += tmp[tid - offset];
}
__syncthreads();
}
d_cdf[tid] = tmp[tid];
}
// The scan below is correct --> it is an exclusive Hillis-Steele scan that runs in a
// single block, so numBins is limited to the maximum threads per block
// (512 or 1024 depending on the GPU); the pin/pout double buffer avoids the race in scan_wrong
__global__ void scan_correct(unsigned int *d_in, unsigned int *d_out, int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
int pout = 0;
int pin = 1;
tmp[pout*n + tid] = (tid > 0) ? d_in[tid-1] : 0;
for (int offset = 1; offset < n; offset <<= 1)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
tmp[pout*n+tid] = tmp[pin*n+tid];
if (tid >= offset)
tmp[pout*n+tid] += tmp[pin*n+tid - offset];
}
// __syncthreads();
d_out[tid] = tmp[pout*n+tid];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// std::cout << "numRows = " << numRows << ", numCols = " << numCols << std::endl;
// std::cout << "numBins = " << numBins << std::endl;
const dim3 blkDim(numCols, 1, 1);
const dim3 grdDim(numRows, 1, 1);
// step 1
float *d_max, *d_min;
size_t fsize = sizeof(float);
checkCudaErrors(hipMalloc(&d_max, fsize));
checkCudaErrors(hipMalloc(&d_min, fsize));
checkCudaErrors(hipMemcpy(d_max, d_logLuminance, fsize, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(d_min, d_logLuminance, fsize, hipMemcpyDeviceToDevice));
// int numEle = numRows * numCols;
// launch cuda kernel max_reduce and min_reduce
hipLaunchKernelGGL(( max_reduce), dim3(grdDim), dim3(blkDim), fsize*numCols, 0, d_logLuminance, d_max);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( min_reduce), dim3(grdDim), dim3(blkDim), fsize*numCols, 0, d_logLuminance, d_min);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
float h_max, h_min;
checkCudaErrors(hipMemcpy(&h_max, d_max, fsize, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&h_min, d_min, fsize, hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_max));
checkCudaErrors(hipFree(d_min));
min_logLum = h_min;
max_logLum = h_max;
// step 2
float llRange = h_max - h_min;
// step 3
size_t histo_size = sizeof(unsigned int) * numBins;
unsigned int *d_histo;
checkCudaErrors(hipMalloc(&d_histo, histo_size));
checkCudaErrors(hipMemset(d_histo, 0, histo_size));
// launch cuda kernel to calculate histogram
hipLaunchKernelGGL(( calcHisto), dim3(grdDim), dim3(blkDim), 0, 0, d_histo, d_logLuminance, h_min, llRange, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
  // step 4 Attention: numBins is 512 or 1024 at most
//scan_wrong<<<1, numBins, 2*numBins*sizeof(unsigned int)>>>(d_histo, d_cdf, numBins);
hipLaunchKernelGGL(( scan_correct), dim3(1), dim3(numBins), 2*numBins*sizeof(unsigned int), 0, d_histo, d_cdf, numBins);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_histo));
}
| bbc60c52b8d66896ff00877227fbc874f2b38fc3.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
  In the image for this assignment, the inside of a church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "utils.h"
using namespace std;
__device__ float atomicMaxFloat(float* addr, float val) {
int *addrAsInt = (int *)addr;
int old = *addrAsInt;
while(val > __int_as_float(old)) {
old = atomicCAS(addrAsInt, old, __float_as_int(val));
}
return __int_as_float(old);
}
__device__ float atomicMinFloat(float* addr, float val) {
int *addrAsInt = (int *) addr;
int old = *addrAsInt;
while(val < __int_as_float(old)) {
old = atomicCAS(addrAsInt, old, __float_as_int(val));
}
return __int_as_float(old);
}
__global__ void max_reduce(const float* const d_lum, float* d_max) {
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
shared[tid] = d_lum[gid];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>=1) {
if (tid < s)
shared[tid] = max(shared[tid], shared[tid+s]);
__syncthreads();
}
if (tid == 0)
atomicMaxFloat(d_max, shared[tid]);
}
__global__ void min_reduce(const float* const d_lum, float* d_min) {
extern __shared__ float shared[];
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
shared[tid] = d_lum[gid];
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s)
shared[tid] = min(shared[tid], shared[tid+s]);
__syncthreads();
}
if (tid == 0)
atomicMinFloat(d_min, shared[tid]);
}
__global__ void calcHisto(unsigned int *d_histo, const float* const d_lum, float llmin, float llRange, size_t numBins) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int bin = min(static_cast<unsigned int>(numBins-1),
static_cast<unsigned int>((d_lum[gid]-llmin)/llRange * numBins));
atomicAdd(&d_histo[bin], 1);
}
// ATTENTION: The scan below is wrong: tmp[] is updated in place, so within one offset
// step a thread may read tmp[tid - offset] after its owner thread has already added to
// it (there is no barrier between the read and the write). scan_correct below removes
// the race with a pin/pout double buffer.
__global__ void scan_wrong(unsigned int *d_histo, unsigned int* const d_cdf, const size_t numBins) {
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
tmp[tid] = tid > 0 ? d_histo[tid-1] : 0;
__syncthreads();
for (int offset = 1; offset < numBins; offset <<= 1) {
if (tid >= offset) {
tmp[tid] += tmp[tid - offset];
}
__syncthreads();
}
d_cdf[tid] = tmp[tid];
}
// The scan below is correct --> it is an exclusive Hillis-Steele scan that runs in a
// single block, so numBins is limited to the maximum threads per block
// (512 or 1024 depending on the GPU); the pin/pout double buffer avoids the race in scan_wrong
__global__ void scan_correct(unsigned int *d_in, unsigned int *d_out, int n)
{
extern __shared__ unsigned int tmp[];
int tid = threadIdx.x;
int pout = 0;
int pin = 1;
tmp[pout*n + tid] = (tid > 0) ? d_in[tid-1] : 0;
for (int offset = 1; offset < n; offset <<= 1)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
tmp[pout*n+tid] = tmp[pin*n+tid];
if (tid >= offset)
tmp[pout*n+tid] += tmp[pin*n+tid - offset];
}
// __syncthreads();
d_out[tid] = tmp[pout*n+tid];
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
// std::cout << "numRows = " << numRows << ", numCols = " << numCols << std::endl;
// std::cout << "numBins = " << numBins << std::endl;
const dim3 blkDim(numCols, 1, 1);
const dim3 grdDim(numRows, 1, 1);
// step 1
float *d_max, *d_min;
size_t fsize = sizeof(float);
checkCudaErrors(cudaMalloc(&d_max, fsize));
checkCudaErrors(cudaMalloc(&d_min, fsize));
checkCudaErrors(cudaMemcpy(d_max, d_logLuminance, fsize, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(d_min, d_logLuminance, fsize, cudaMemcpyDeviceToDevice));
// int numEle = numRows * numCols;
// launch cuda kernel max_reduce and min_reduce
max_reduce<<<grdDim, blkDim, fsize*numCols>>>(d_logLuminance, d_max);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
min_reduce<<<grdDim, blkDim, fsize*numCols>>>(d_logLuminance, d_min);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
float h_max, h_min;
checkCudaErrors(cudaMemcpy(&h_max, d_max, fsize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&h_min, d_min, fsize, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_max));
checkCudaErrors(cudaFree(d_min));
min_logLum = h_min;
max_logLum = h_max;
// step 2
float llRange = h_max - h_min;
// step 3
size_t histo_size = sizeof(unsigned int) * numBins;
unsigned int *d_histo;
checkCudaErrors(cudaMalloc(&d_histo, histo_size));
checkCudaErrors(cudaMemset(d_histo, 0, histo_size));
// launch cuda kernel to calculate histogram
calcHisto<<<grdDim, blkDim>>>(d_histo, d_logLuminance, h_min, llRange, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
  // step 4 Attention: numBins is 512 or 1024 at most
//scan_wrong<<<1, numBins, 2*numBins*sizeof(unsigned int)>>>(d_histo, d_cdf, numBins);
scan_correct<<<1, numBins, 2*numBins*sizeof(unsigned int)>>>(d_histo, d_cdf, numBins);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_histo));
}
|
e7f14d718179746ab6c86276db8708983925f6dd.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/sddmm.cu
* \brief SDDMM C APIs and definitions.
*/
#include <dgl/array.h>
#include "./sddmm.cuh"
#include "functor.cuh"
namespace dgl {
namespace aten {
#define SWITCH_OP(op, Op, ...) \
do { \
if ((op) == "add") { \
typedef cuda::binary::Add<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "sub") { \
typedef cuda::binary::Sub<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "mul") { \
typedef cuda::binary::Mul<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "div") { \
typedef cuda::binary::Div<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_lhs") { \
typedef cuda::binary::CopyLhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_rhs") { \
typedef cuda::binary::CopyRhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "dot") { \
typedef cuda::binary::Dot<DType> Op; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "Unsupported SpMM/SDDMM binary operator: " << op; \
} \
} while (0)
#define SWITCH_RHS(rhs_target, RhsTarget, ...) \
do { \
if ((rhs_target) == 0) { \
constexpr int RhsTarget = 0; \
{ __VA_ARGS__ } \
} else if ((rhs_target) == 1) { \
constexpr int RhsTarget = 1; \
{ __VA_ARGS__ } \
} else if ((rhs_target) == 2) { \
constexpr int RhsTarget = 2; \
{ __VA_ARGS__ } \
} else { \
LOG(INFO) << "Invalid rhs target: " << (rhs_target); \
} \
} while (0)
#define SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, ...)\
do { \
if ((lhs_target) == 0) { \
constexpr int LhsTarget = 0; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else if ((lhs_target) == 1) { \
constexpr int LhsTarget = 1; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else if ((lhs_target) == 2) { \
constexpr int LhsTarget = 2; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else { \
LOG(INFO) << "Invalid lhs target: " << (lhs_target); \
} \
} while (0)
/*!
* \brief CUDA implementation of g-SDDMM on Csr format.
*/
template <int XPU, typename IdType, int bits>
void SDDMMCsr(const std::string& op,
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray lhs,
NDArray rhs,
NDArray out,
int lhs_target,
int rhs_target) {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, {
cuda::SDDMMCsr<IdType, DType, Op, LhsTarget, RhsTarget>(bcast, csr, lhs, rhs, out);
});
});
});
}
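// For illustration (not part of the DGL source): with op == "dot", bits == 32,
// IdType == int32_t, lhs_target == 0 and rhs_target == 2, the SWITCH_* macros above
// reduce the body of SDDMMCsr to a single fully specialized call, roughly
//
//   cuda::SDDMMCsr<int32_t, float, cuda::binary::Dot<float>, 0, 2>(
//       bcast, csr, lhs, rhs, out);
//
// i.e. the runtime op string and target ints become template parameters, so the
// underlying kernel is specialized at compile time.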
/*!
* \brief CUDA implementation of g-SDDMM on Coo format.
*/
template <int XPU, typename IdType, int bits>
void SDDMMCoo(const std::string& op,
const BcastOff& bcast,
const COOMatrix& coo,
NDArray lhs,
NDArray rhs,
NDArray out,
int lhs_target,
int rhs_target) {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, {
cuda::SDDMMCoo<IdType, DType, Op, LhsTarget, RhsTarget>(bcast, coo, lhs, rhs, out);
});
});
});
}
template void SDDMMCsr<kDLGPU, int32_t, 16>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 16>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int32_t, 32>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 32>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int32_t, 64>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 64>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 16>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 16>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 32>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 32>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 64>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 64>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
} // namespace aten
} // namespace dgl
| e7f14d718179746ab6c86276db8708983925f6dd.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/sddmm.cu
* \brief SDDMM C APIs and definitions.
*/
#include <dgl/array.h>
#include "./sddmm.cuh"
#include "./functor.cuh"
namespace dgl {
namespace aten {
#define SWITCH_OP(op, Op, ...) \
do { \
if ((op) == "add") { \
typedef cuda::binary::Add<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "sub") { \
typedef cuda::binary::Sub<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "mul") { \
typedef cuda::binary::Mul<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "div") { \
typedef cuda::binary::Div<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_lhs") { \
typedef cuda::binary::CopyLhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "copy_rhs") { \
typedef cuda::binary::CopyRhs<DType> Op; \
{ __VA_ARGS__ } \
} else if ((op) == "dot") { \
typedef cuda::binary::Dot<DType> Op; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "Unsupported SpMM/SDDMM binary operator: " << op; \
} \
} while (0)
#define SWITCH_RHS(rhs_target, RhsTarget, ...) \
do { \
if ((rhs_target) == 0) { \
constexpr int RhsTarget = 0; \
{ __VA_ARGS__ } \
} else if ((rhs_target) == 1) { \
constexpr int RhsTarget = 1; \
{ __VA_ARGS__ } \
} else if ((rhs_target) == 2) { \
constexpr int RhsTarget = 2; \
{ __VA_ARGS__ } \
} else { \
LOG(INFO) << "Invalid rhs target: " << (rhs_target); \
} \
} while (0)
#define SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, ...)\
do { \
if ((lhs_target) == 0) { \
constexpr int LhsTarget = 0; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else if ((lhs_target) == 1) { \
constexpr int LhsTarget = 1; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else if ((lhs_target) == 2) { \
constexpr int LhsTarget = 2; \
SWITCH_RHS(rhs_target, RhsTarget, __VA_ARGS__); \
} else { \
LOG(INFO) << "Invalid lhs target: " << (lhs_target); \
} \
} while (0)
/*!
* \brief CUDA implementation of g-SDDMM on Csr format.
*/
template <int XPU, typename IdType, int bits>
void SDDMMCsr(const std::string& op,
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray lhs,
NDArray rhs,
NDArray out,
int lhs_target,
int rhs_target) {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, {
cuda::SDDMMCsr<IdType, DType, Op, LhsTarget, RhsTarget>(bcast, csr, lhs, rhs, out);
});
});
});
}
/*!
* \brief CUDA implementation of g-SDDMM on Coo format.
*/
template <int XPU, typename IdType, int bits>
void SDDMMCoo(const std::string& op,
const BcastOff& bcast,
const COOMatrix& coo,
NDArray lhs,
NDArray rhs,
NDArray out,
int lhs_target,
int rhs_target) {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
SWITCH_TARGET(lhs_target, rhs_target, LhsTarget, RhsTarget, {
cuda::SDDMMCoo<IdType, DType, Op, LhsTarget, RhsTarget>(bcast, coo, lhs, rhs, out);
});
});
});
}
template void SDDMMCsr<kDLGPU, int32_t, 16>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 16>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int32_t, 32>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 32>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int32_t, 64>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCsr<kDLGPU, int64_t, 64>(
const std::string& op, const BcastOff& bcast, const CSRMatrix& csr,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 16>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 16>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 32>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 32>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int32_t, 64>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
template void SDDMMCoo<kDLGPU, int64_t, 64>(
const std::string& op, const BcastOff& bcast, const COOMatrix& coo,
NDArray lhs, NDArray rhs, NDArray out,
int lhs_target, int rhs_target);
} // namespace aten
} // namespace dgl
|
36f4ec1cbf0f7125351a20754de2cdfee0ead62c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
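// init_memory builds a per-warp pointer-chasing ring in global memory: each warp owns a
// contiguous chunk of GLOBAL_MEM_ELEMENTS / num_warps entries, and entry i of a chunk
// points to entry (i + 16) % chunk_size, so later dereferences form a dependent load
// chain. A single thread (tid == 0) performs the whole setup.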
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
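// shared_latency: each active lane starts at its own slot of the per-warp ring and
// repeatedly follows the pointer chain built by init_memory, interleaving the dependent
// loads with floating-point adds; only lanes with warp_thread_id < divergence take part,
// which controls how many threads per warp are active. The final store into duration[]
// keeps the chain and the adds from being optimized away.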
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
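// Fold the final pointer value and the accumulated f1 into the per-thread result so the dependent-load chain above cannot be optimized away.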
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
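// Illustrative invocation (blocks, threads per block, iterations, active threads per warp, stride), e.g.: ./binary 1 64 1000 30 1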
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
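// Build a pointer-chasing chain on the host: each element holds the address of the element (stride + 1) slots ahead (mod N); the device-side init_memory kernel later overwrites this layout with its own warp-local chain.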
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
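// Start the external power monitor before timing; note that the GPUJOULE_DIR macro is not expanded inside this string literal, so the path below is a literal placeholder.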
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_30_70_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 36f4ec1cbf0f7125351a20754de2cdfee0ead62c.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#include <string>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[index1]));
tmp_ptr = (void **)(&(array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r14;\n\t"
"mov.f32 %r14, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
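// Fold the final pointer value and the accumulated f1 into the per-thread result so the dependent-load chain above cannot be optimized away.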
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
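// Illustrative invocation (blocks, threads per block, iterations, active threads per warp, stride), e.g.: ./binary 1 64 1000 30 1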
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
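// Build a pointer-chasing chain on the host: each element holds the address of the element (stride + 1) slots ahead (mod N); the device-side init_memory kernel later overwrites this layout with its own warp-local chain.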
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
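// Start the external power monitor before timing; note that the GPUJOULE_DIR macro is not expanded inside this string literal, so the path below is a literal placeholder.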
std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/l1_cache/fadd_l1d_30_70_64p_asm_power.txt &";
std::system(cmd.c_str());
std::system("sleep 5");
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::system("killall power_monitor");
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
7981e47fb5316775807778b28f16f19b471fe131.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "loops/scalar.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <op_boilerplate.h>
#include <helpers/TAD.h>
#include <types/types.h>
namespace functions {
namespace scalar {
}
} | 7981e47fb5316775807778b28f16f19b471fe131.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include "loops/scalar.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <op_boilerplate.h>
#include <helpers/TAD.h>
#include <types/types.h>
namespace functions {
namespace scalar {
}
} |
8eb67909bd63411ba1cfa526d5765de689bcd927.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <accelerate_cuda.h>
extern "C" __global__ void map(const Int64* __restrict__ arrIn0_0, Int64* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 x0 = arrIn0_0[ix];
arrOut_0[ix] = (Int64) 1 + x0;
}
}
| 8eb67909bd63411ba1cfa526d5765de689bcd927.cu | #include <accelerate_cuda.h>
extern "C" __global__ void map(const Int64* __restrict__ arrIn0_0, Int64* __restrict__ arrOut_0)
{
const int shapeSize = 1;
const int gridSize = blockDim.x * gridDim.x;
int ix;
for (ix = blockDim.x * blockIdx.x + threadIdx.x; ix < shapeSize; ix += gridSize) {
const Int64 x0 = arrIn0_0[ix];
arrOut_0[ix] = (Int64) 1 + x0;
}
}
|
18679f51f5d2bbed425a21e00b9aa43337b9c96b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include "reference.h"
template <typename scalar_t, typename accscalar_t,
typename index_t, int NLL_LOSS_THREADS>
__global__
void nll_loss_forward_reduce2d_kernel(
scalar_t* __restrict__ output,
scalar_t* __restrict__ total_weight,
const scalar_t* __restrict__ input,
const index_t* __restrict__ target,
const scalar_t* __restrict__ weights,
bool size_average,
int64_t nframe,
int64_t kdim,
int64_t ignore_index)
{
__shared__ accscalar_t sm_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
int tid = threadIdx.x;
sm_inputs[tid] = static_cast<accscalar_t>(0);
acc_weight[tid] = static_cast<accscalar_t>(0);
for (int i = tid; i < nframe; i += NLL_LOSS_THREADS) {
index_t t = target[i];
if (t != ignore_index) {
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sm_inputs[tid] -= input[i * kdim + t] * cur_weight;
acc_weight[tid] += cur_weight;
}
}
__syncthreads();
if (tid == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sm_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
template <typename scalar_t, typename index_t, int GPU_THREADS>
void eval(const int64_t nframe,
const int64_t kdim,
const int64_t n_classes,
const bool size_average,
const int64_t ignore_index,
const scalar_t r_output,
const scalar_t r_total_weight,
scalar_t *h_input,
scalar_t *h_weights,
index_t *h_target,
const int repeat)
{
int64_t input_size = nframe * kdim * n_classes;
int64_t input_size_bytes = input_size * sizeof(scalar_t);
int64_t weights_size = nframe;
int64_t weights_size_bytes = weights_size * sizeof(scalar_t);
int64_t target_size = nframe;
int64_t target_size_bytes = target_size * sizeof(index_t);
int output_size_bytes = sizeof(scalar_t);
scalar_t h_output;
scalar_t h_total_weight;
scalar_t *d_output;
scalar_t *d_total_weight;
scalar_t *d_input;
index_t *d_target;
scalar_t *d_weights;
hipMalloc((void**)&d_input, input_size_bytes);
hipMemcpy(d_input, h_input, input_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_weights, weights_size_bytes);
hipMemcpy(d_weights, h_weights, weights_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_target, target_size_bytes);
hipMemcpy(d_target, h_target, target_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_total_weight, output_size_bytes);
hipMalloc((void**)&d_output, output_size_bytes);
dim3 grid (1);
dim3 block (GPU_THREADS);
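// A single block is launched; GPU_THREADS matches the kernel's NLL_LOSS_THREADS template argument, so each thread owns one slot of the shared-memory partial sums.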
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( nll_loss_forward_reduce2d_kernel
<scalar_t, scalar_t, index_t, GPU_THREADS>)
, dim3(grid), dim3(block), 0, 0, d_output,
d_total_weight,
d_input,
d_target,
d_weights,
size_average,
nframe,
kdim,
ignore_index);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("\nThread block size: %d\n", GPU_THREADS);
printf("Average execution time of nll loss forward reduce 2D kernel: %f (us)\n",
(time * 1e-3f) / repeat);
hipMemcpy(&h_output, d_output, output_size_bytes, hipMemcpyDeviceToHost);
hipMemcpy(&h_total_weight, d_total_weight, output_size_bytes, hipMemcpyDeviceToHost);
bool ok = true;
if (fabs(h_output - r_output) > 1e-1 ||
fabs(h_total_weight - r_total_weight) > 1e-1) {
printf("%f %f %f %f\n", (float)h_output, (float)r_output,
(float)h_total_weight, (float)r_total_weight);
ok = false;
}
printf("%s\n", ok ? "PASS" : "FAIL");
hipFree(d_output);
hipFree(d_total_weight);
hipFree(d_input);
hipFree(d_target);
hipFree(d_weights);
}
template <typename scalar_t, typename index_t>
void driver(char** argv) {
const int64_t nframe = atol(argv[1]);
const int64_t kdim = atol(argv[2]);
const int64_t n_classes = atol(argv[3]);
const int repeat = atoi(argv[4]);
const int64_t input_size = nframe * kdim * n_classes;
const int64_t input_size_bytes = input_size * sizeof(scalar_t);
const int64_t weights_size = nframe;
const int64_t weights_size_bytes = weights_size * sizeof(scalar_t);
const int64_t target_size = nframe;
const int64_t target_size_bytes = target_size * sizeof(index_t);
scalar_t *h_input = (scalar_t*) malloc (input_size_bytes);
scalar_t *h_weights = (scalar_t*) malloc (weights_size_bytes);
index_t *h_target = (index_t*) malloc (target_size_bytes);
std::default_random_engine g (123);
std::uniform_real_distribution<scalar_t> d1 (-1.f, 1.f);
std::uniform_int_distribution<index_t> d2 (0, n_classes-1);
printf("Initialization of input data may take a while..\n");
for (int64_t i = 0; i < input_size; i++)
h_input[i] = d1(g);
for (int64_t i = 0; i < weights_size; i++)
h_weights[i] = d1(g);
for (int64_t i = 0; i < target_size; i++)
h_target[i] = d2(g);
const bool size_average = true;
// the index may not necessarily be in the class range
const int64_t ignore_index = n_classes / 2;
// verify the loss function
scalar_t r_output;
scalar_t r_total_weight;
reference<scalar_t, scalar_t, index_t>(
&r_output, &r_total_weight,
h_input, h_target, h_weights,
size_average, nframe, kdim, ignore_index);
#define EVAL(nThreads) \
eval<scalar_t, index_t, nThreads>(nframe, kdim, n_classes, \
size_average, ignore_index, \
r_output, r_total_weight, \
h_input, h_weights, h_target, repeat)
EVAL(64);
EVAL(128);
EVAL(256);
EVAL(512);
EVAL(1024);
free(h_input);
free(h_target);
free(h_weights);
}
int main(int argc, char* argv[])
{
if (argc != 5) {
printf("Usage: %s <minibatch> <kdim> <classes> <repeat>\n", argv[0]);
return 1;
}
printf("=========== Data type is FP32 ==========\n");
driver<float, int>(argv);
return 0;
}
| 18679f51f5d2bbed425a21e00b9aa43337b9c96b.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include "reference.h"
template <typename scalar_t, typename accscalar_t,
typename index_t, int NLL_LOSS_THREADS>
__global__
void nll_loss_forward_reduce2d_kernel(
scalar_t* __restrict__ output,
scalar_t* __restrict__ total_weight,
const scalar_t* __restrict__ input,
const index_t* __restrict__ target,
const scalar_t* __restrict__ weights,
bool size_average,
int64_t nframe,
int64_t kdim,
int64_t ignore_index)
{
__shared__ accscalar_t sm_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
int tid = threadIdx.x;
sm_inputs[tid] = static_cast<accscalar_t>(0);
acc_weight[tid] = static_cast<accscalar_t>(0);
for (int i = tid; i < nframe; i += NLL_LOSS_THREADS) {
index_t t = target[i];
if (t != ignore_index) {
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sm_inputs[tid] -= input[i * kdim + t] * cur_weight;
acc_weight[tid] += cur_weight;
}
}
__syncthreads();
if (tid == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sm_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
template <typename scalar_t, typename index_t, int GPU_THREADS>
void eval(const int64_t nframe,
const int64_t kdim,
const int64_t n_classes,
const bool size_average,
const int64_t ignore_index,
const scalar_t r_output,
const scalar_t r_total_weight,
scalar_t *h_input,
scalar_t *h_weights,
index_t *h_target,
const int repeat)
{
int64_t input_size = nframe * kdim * n_classes;
int64_t input_size_bytes = input_size * sizeof(scalar_t);
int64_t weights_size = nframe;
int64_t weights_size_bytes = weights_size * sizeof(scalar_t);
int64_t target_size = nframe;
int64_t target_size_bytes = target_size * sizeof(index_t);
int output_size_bytes = sizeof(scalar_t);
scalar_t h_output;
scalar_t h_total_weight;
scalar_t *d_output;
scalar_t *d_total_weight;
scalar_t *d_input;
index_t *d_target;
scalar_t *d_weights;
hipMalloc((void**)&d_input, input_size_bytes);
hipMemcpy(d_input, h_input, input_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_weights, weights_size_bytes);
hipMemcpy(d_weights, h_weights, weights_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_target, target_size_bytes);
hipMemcpy(d_target, h_target, target_size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_total_weight, output_size_bytes);
hipMalloc((void**)&d_output, output_size_bytes);
dim3 grid (1);
dim3 block (GPU_THREADS);
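// A single block is launched; GPU_THREADS matches the kernel's NLL_LOSS_THREADS template argument, so each thread owns one slot of the shared-memory partial sums.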
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
nll_loss_forward_reduce2d_kernel
<scalar_t, scalar_t, index_t, GPU_THREADS>
<<<grid, block>>>(d_output,
d_total_weight,
d_input,
d_target,
d_weights,
size_average,
nframe,
kdim,
ignore_index);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("\nThread block size: %d\n", GPU_THREADS);
printf("Average execution time of nll loss forward reduce 2D kernel: %f (us)\n",
(time * 1e-3f) / repeat);
hipMemcpy(&h_output, d_output, output_size_bytes, hipMemcpyDeviceToHost);
hipMemcpy(&h_total_weight, d_total_weight, output_size_bytes, hipMemcpyDeviceToHost);
bool ok = true;
if (fabs(h_output - r_output) > 1e-1 ||
fabs(h_total_weight - r_total_weight) > 1e-1) {
printf("%f %f %f %f\n", (float)h_output, (float)r_output,
(float)h_total_weight, (float)r_total_weight);
ok = false;
}
printf("%s\n", ok ? "PASS" : "FAIL");
hipFree(d_output);
hipFree(d_total_weight);
hipFree(d_input);
hipFree(d_target);
hipFree(d_weights);
}
template <typename scalar_t, typename index_t>
void driver(char** argv) {
const int64_t nframe = atol(argv[1]);
const int64_t kdim = atol(argv[2]);
const int64_t n_classes = atol(argv[3]);
const int repeat = atoi(argv[4]);
const int64_t input_size = nframe * kdim * n_classes;
const int64_t input_size_bytes = input_size * sizeof(scalar_t);
const int64_t weights_size = nframe;
const int64_t weights_size_bytes = weights_size * sizeof(scalar_t);
const int64_t target_size = nframe;
const int64_t target_size_bytes = target_size * sizeof(index_t);
scalar_t *h_input = (scalar_t*) malloc (input_size_bytes);
scalar_t *h_weights = (scalar_t*) malloc (weights_size_bytes);
index_t *h_target = (index_t*) malloc (target_size_bytes);
std::default_random_engine g (123);
std::uniform_real_distribution<scalar_t> d1 (-1.f, 1.f);
std::uniform_int_distribution<index_t> d2 (0, n_classes-1);
printf("Initialization of input data may take a while..\n");
for (int64_t i = 0; i < input_size; i++)
h_input[i] = d1(g);
for (int64_t i = 0; i < weights_size; i++)
h_weights[i] = d1(g);
for (int64_t i = 0; i < target_size; i++)
h_target[i] = d2(g);
const bool size_average = true;
// the index may not necessarily be in the class range
const int64_t ignore_index = n_classes / 2;
// verify the loss function
scalar_t r_output;
scalar_t r_total_weight;
reference<scalar_t, scalar_t, index_t>(
&r_output, &r_total_weight,
h_input, h_target, h_weights,
size_average, nframe, kdim, ignore_index);
#define EVAL(nThreads) \
eval<scalar_t, index_t, nThreads>(nframe, kdim, n_classes, \
size_average, ignore_index, \
r_output, r_total_weight, \
h_input, h_weights, h_target, repeat)
EVAL(64);
EVAL(128);
EVAL(256);
EVAL(512);
EVAL(1024);
free(h_input);
free(h_target);
free(h_weights);
}
int main(int argc, char* argv[])
{
if (argc != 5) {
printf("Usage: %s <minibatch> <kdim> <classes> <repeat>\n", argv[0]);
return 1;
}
printf("=========== Data type is FP32 ==========\n");
driver<float, int>(argv);
return 0;
}
|
295f5dad87dcafd0b82676e55490ac7e62fe8806.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/selu_op.h"
namespace caffe2 {
namespace {
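// Elementwise SELU: y = lambda * (x > 0 ? x : alpha * (exp(x) - 1)); the gradient kernel below reuses y to avoid recomputing exp(x).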
template <typename T>
__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * __expf(X[i]) - alpha_);
}
}
template <typename T>
__global__ void SeluGradientKernel(
const int N,
const T* Y,
const T* dY,
T* dX,
T alpha_,
T lambda_) {
const T c = lambda_ * alpha_;
CUDA_1D_KERNEL_LOOP(i, N) {
// Reuse Y[i] to avoid computing exp(X[i])
dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);
}
}
} // namespace
template <>
bool SeluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
hipLaunchKernelGGL(( SeluKernel<float>)
, dim3(CAFFE_GET_BLOCKS(X.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.size(),
X.data<float>(),
Y->template mutable_data<float>(),
alpha_,
lambda_);
return true;
}
template <>
bool SeluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
hipLaunchKernelGGL(( SeluGradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(Y.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
alpha_,
lambda_);
return true;
}
REGISTER_CUDA_OPERATOR(Selu, SeluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SeluGradient, SeluGradientOp<float, CUDAContext>);
} // namespace caffe2
| 295f5dad87dcafd0b82676e55490ac7e62fe8806.cu | #include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/selu_op.h"
namespace caffe2 {
namespace {
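// Elementwise SELU: y = lambda * (x > 0 ? x : alpha * (exp(x) - 1)); the gradient kernel below reuses y to avoid recomputing exp(x).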
template <typename T>
__global__ void SeluKernel(const int N, const T* X, T* Y, T alpha_, T lambda_) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = lambda_ * (X[i] > 0 ? X[i] : alpha_ * __expf(X[i]) - alpha_);
}
}
template <typename T>
__global__ void SeluGradientKernel(
const int N,
const T* Y,
const T* dY,
T* dX,
T alpha_,
T lambda_) {
const T c = lambda_ * alpha_;
CUDA_1D_KERNEL_LOOP(i, N) {
// Reuse Y[i] to avoid computing exp(X[i])
dX[i] = Y[i] > 0 ? lambda_ * dY[i] : dY[i] * (Y[i] + c);
}
}
} // namespace
template <>
bool SeluOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* Y = Output(0);
CAFFE_ENFORCE_GT(X.size(), 0);
Y->ResizeLike(X);
SeluKernel<float>
<<<CAFFE_GET_BLOCKS(X.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.size(),
X.data<float>(),
Y->template mutable_data<float>(),
alpha_,
lambda_);
return true;
}
template <>
bool SeluGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
CAFFE_ENFORCE_GT(Y.size(), 0);
CAFFE_ENFORCE_EQ(dY.size(), Y.size());
dX->ResizeLike(Y);
SeluGradientKernel<float>
<<<CAFFE_GET_BLOCKS(Y.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
Y.size(),
Y.data<float>(),
dY.data<float>(),
dX->template mutable_data<float>(),
alpha_,
lambda_);
return true;
}
REGISTER_CUDA_OPERATOR(Selu, SeluOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SeluGradient, SeluGradientOp<float, CUDAContext>);
} // namespace caffe2
|
aefc47e93fdec3322fe3b296278b43899377f7a7.hip | // !!! This is a file automatically generated by hipify!!!
// Sequence Alignment -CUDA
// Alex Ringeri
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
const int numThreads = 256;
void CalculateCost(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2, int comparison, int entries);
void CopyToMatrix(int *dst, int *src, int cols, int rows);
void PrintMatrix(int *arr, char *s1, char *s2, int xDim, int yDim);
//Kernel initializes all elements of matrix to 'value'
__global__ void init_matrix(int *matrix, int value, int maxElements) {
if (blockDim.x * blockIdx.x + threadIdx.x < maxElements)
matrix[blockDim.x * blockIdx.x + threadIdx.x] = value;
}
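// ComputeDiagonal scores one anti-diagonal of the local-alignment matrix: match +2, mismatch -1, gap (up or left) -1, clamped at 0, and records the traceback direction (0 = diagonal, 1 = up, -1 = left).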
__global__ void ComputeDiagonal(int i, int prevI, int lastI, int space, int *arr, int *trace, char *s1, char *s2, int s1off, int s2off) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < space) {
int left = arr[prevI + id];
int up = arr[prevI + id + 1];
int upLeft = arr[lastI + id];
if (s1[s1off + id] == s2[s2off - id])
upLeft += 2;
else
upLeft -= 1;
int cost, dir;
if (up > left) {
cost = up - 1;
dir = 1;
} else {
cost = left - 1;
dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
dir = 0;
}
arr[i + id] = max(cost, 0);
trace[i + id] = dir;
}
}
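// CalculateTiledMatrix processes one block-diagonal of tiles: shared memory holds a (blockDim.x+1) x (blockDim.x+1) tile including its top and left borders plus blockDim.x characters of each string; the tile is swept in two passes of internal anti-diagonals and then written back to global memory.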
__global__ void CalculateTiledMatrix(int *d_m, int *d_t, char *d_s1, char *d_s2, int cols, int i, int entries, int blockNum){
extern __shared__ int tile[];
int *sh_m = &tile[0]; //pointer to matrix in shared memory
char *sh_s1 = (char*)&tile[(blockDim.x + 1 )*(blockDim.x + 1)]; //pointer to 1st string in shared memory
char *sh_s2 = (char*)&tile[(blockDim.x + 1 )*(blockDim.x + 1) + blockDim.x]; //pointer to 2nd string in shared memory
//Copy boundary data into shared memory
int id = blockIdx.x*blockDim.x + threadIdx.x;
int index = i - (blockIdx.x*blockDim.x)*cols + id;
// TODO: re-check this guard for rectangular matrices, where the block count can exceed the number of remaining rows/columns while copying border data into shared memory
if (/*i%cols + id - 1 < cols &&*/ i-(blockIdx.x)*(blockDim.x)*cols -1 + threadIdx.x*cols < entries){/*index + threadIdx.x*cols -1*/
if (threadIdx.x == 0){
sh_m[0] = d_m[index - cols -1];
}
//sh_m[threadIdx.x + 1] = d_m[index - cols];
sh_s2[threadIdx.x] = d_s2[i/cols-(blockIdx.x)*(blockDim.x) -1 + threadIdx.x];
/*if (blockIdx.x == 1)
printf("index: %d\ts2: %d\t\n",index,i/cols-(blockIdx.x)*(blockDim.x) -1 + threadIdx.x);*/
}
if (i%cols + id < cols /*&& i%cols + id - 1 < entries*/ ){
sh_s1[threadIdx.x] = d_s1[i%cols + id - 1];
sh_m[threadIdx.x + 1] = d_m[index - cols];
/*if (blockIdx.x == 1)
printf("index: %d\tS1 %d\n",index, i%cols + id -1);*/
}
if (i - blockIdx.x*blockDim.x*cols + blockIdx.x*blockDim.x + cols*threadIdx.x - 1 < entries){
sh_m[(blockDim.x + 1)*(threadIdx.x + 1)] = d_m[i - blockIdx.x*blockDim.x*cols + blockIdx.x*blockDim.x + cols*threadIdx.x - 1 ];
//printf("s1[%d]\n", index%cols - 1);
}
__syncthreads();
//Calculate cost for each diagonal
for (int b = 0; b < 2; b++){
for(int d = 1; d < blockDim.x + 1; d++){
if (threadIdx.x < blockDim.x*b - b*d + d*(1-b) ){
int upLeft = sh_m[(d - d*b + blockDim.x*b - threadIdx.x - 1)*(blockDim.x+1) + d*b + threadIdx.x];//[(d - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x]
int up = sh_m[(d - d*b + blockDim.x*b - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x + d*b + 1];//[(d - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x + 1]
int left = sh_m[(d - d*b + blockDim.x*b - threadIdx.x)*(blockDim.x+1) + d*b + threadIdx.x];//[(d - threadIdx.x)*(blockDim.x+1) + threadIdx.x]
if (sh_s1[threadIdx.x + d*b] == sh_s2[d - d*b + blockDim.x*b - 1-threadIdx.x])
upLeft += 2;
else
upLeft -= 1;
int cost; //int dir;
if (up > left) {
cost = up - 1;
//dir = 1;
} else {
cost = left - 1;
//dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
//dir = 0;
}
sh_m[(d -d*b + blockDim.x*b - threadIdx.x)*(blockDim.x+1) + d*b + threadIdx.x + 1] = max(0, cost);
/*if (threadIdx.x == 31 && index > 79){
printf("ID: %d\tS1 %c\tS2 %c\tUpleft %d\tLeft %d\tUp %d\tCost: %d\ts1[%d]\ts2[%d]\n", index, sh_s1[threadIdx.x+d*b], sh_s2[d - d*b + blockDim.x*b - 1-threadIdx.x], upLeft, left, up, max(0,cost),threadIdx.x +d*b,d - d*b + blockDim.x*b - 1-threadIdx.x );
//printf("index: %d\ts2: %d\n",index,index/cols+threadIdx.x -1);
}*/
}
__syncthreads();
}
}
//Copy results from shared memory into global memory
for (int j = 0; j < blockDim.x; j++){
if (i%cols + id < cols && index + j*cols < entries){
d_m[index + j*(cols)] = sh_m[(j+1)*(blockDim.x+1) + threadIdx.x + 1];
}
}
}
__global__ void CalculateTiledDiagonal(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int diag, int rows, int cols) {
int dSize = 1;
for(int d = diag; d < 2*blockDim.x-1+diag; d++){
int z1 = 0;
int z2 = 0;
int index = 1;
int size = 0;
if (d > rows){
z1 = d - rows;
index+= blockDim.x - 1;
}
if (d > cols){
z2 = d - cols;
size++;
index--;
}
//int z2 = (diag - cols < 0)? 0 : diag - rows;
index += (d*(d+1) + z1*(z1+1) + z2*(z2+1))/2;
//int size += diag - z1 - z2 + 1;
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (d > blockDim.x -1 + diag)
index += d - diag - blockDim.x +1;
if (threadIdx.x < dSize){
d_matrix[index + pos] = index + pos;
//printf("Block ID : %d Thread ID: %d, pos : %d index : %d d: %d\n",blockIdx.x, threadIdx.x, pos, index, d);
}
dSize++;
}
//loop through diagonals and copy into shared memory
//copy strings into shared memory
//loop through diagonals in shmem and calculate costs
//write result from shmem to global memory
}
__global__ void CalculateCostOneKernel(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2) {
int i = 3;
int prev = 1;
int last = 0;
for (int slice = 2; slice < s2 + s1 - 1; slice++) {
int z1 = 0;
int z2 = 0;
int numElements = 0;
int off = 1;
if (slice > s1 - 1) {
z1 = slice - s1 + 1;
numElements++;
}
if (slice > s2 - 1) {
z2 = slice - s2 + 1;
numElements++;
off = 0;
}
int size = slice - z1 - z2 + 1;
numElements += size - 2;
if (z2 > 1)
last++;
for (int s = 0; s < (numElements + blockDim.x - 1) / blockDim.x; s++) {
int id = blockDim.x * s + threadIdx.x;
if (id < numElements) {
int upLeft = d_matrix[last + id];
int left = d_matrix[prev + id];
int up = d_matrix[prev + id + 1];
if (d_s1[max(z2 - 1, 0) + id] == d_s2[min(slice - 2, s2 - 2) - id])
upLeft += 2;
else
upLeft -= 1;
int cost, dir;
if (up > left) {
cost = up - 1;
dir = 1;
} else {
cost = left - 1;
dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
dir = 0;
}
d_matrix[i + off + id] = max(cost, 0);
d_trace[i + off + id] = dir;
}
}
last = prev;
prev = i;
i += size;
__syncthreads();
}
}
// Main routine:
int main(int argc, char *argv[]) {
hipSetDevice(0);
char AGCT[] = "AGCT";
int lenS1, lenS2;
int numComparisons = 0;
int approach = 0;
if (argc > 4) {
int args[] = { atoi(argv[1]), atoi(argv[2]) };
if (args[0] > args[1]) {
lenS1 = args[0];
lenS2 = args[1];
} else {
lenS1 = args[1];
lenS2 = args[0];
}
numComparisons = atoi(argv[3]);
approach = atoi(argv[4]);
if (approach < 1 || approach > 3) {
printf("Invalid Approach Argument --- Exiting Program\n");
exit(0);
}
}
else {
printf("Invalid Command Line Arguments --- Exiting Program\n");
exit(0);
}
printf("Calculating Cost Matrix: %d elements (%d x %d)\n", (lenS1 + 1) * (lenS2 + 1), lenS1 + 1, lenS2 + 1);
//Allocate strings on host
char * string1 = (char*) malloc(sizeof(char) * lenS1);
char * s2Arr = (char*) malloc(sizeof(char) * numComparisons * lenS2);
//Initialize strings with random numbers
srand(time(NULL));
for (int i = 0; i < lenS1; i++) {
char r = AGCT[rand() % 4];
string1[i] = r;
}
for (int i = 0; i < numComparisons; i++) {
for (int j = 0; j < lenS2; j++) {
char r = AGCT[rand() % 4];
s2Arr[i * lenS2 + j] = r;
}
}
//Allocate strings on device
hipError_t error = hipSuccess;
char *d_string1, *d_s2Arr;
int *d_matrixArr;
unsigned int entries = (lenS1 + 1) * (lenS2 + 1);
error = hipMalloc((void**) &d_string1, sizeof(char) * lenS1);
if (error != hipSuccess) {
printf("Error allocating s1 on device\n");
hipDeviceReset(); exit(0);
}
error = hipMalloc((void**) &d_s2Arr, sizeof(char) * numComparisons * lenS2);
if (error != hipSuccess) {
printf("Error allocating s2array on device\n");
hipDeviceReset(); exit(0);
}
//Initialize sequence strings on device
error = hipMemcpy(d_string1, string1, sizeof(char) * lenS1, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Error copying k1s1 to device\n");
hipDeviceReset(); exit(0);
}
for (int i = 0; i < numComparisons; i++) {
error = hipMemcpy(d_s2Arr, s2Arr, sizeof(char) * numComparisons * lenS2, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("Error copying a s2Arr to s_S2arr");
hipDeviceReset(); exit(0);
}
}
/**** Allocate cost matrix ****/
error = hipMalloc((void**) &d_matrixArr, sizeof(int) * numComparisons * entries);
if (error != hipSuccess) {
printf("Error allocating d_matrixArr on device\n");
hipDeviceReset(); exit(0);
}
//Allocate trace table on Device
int *d_trace;
error = hipMalloc((void**) &d_trace, sizeof(int) * entries);
if (error != hipSuccess) {
printf("Error allocating k1 d_trace on device:\n%s", hipGetErrorString(error));
hipDeviceReset(); exit(0);
}
//Initialize trace and score tables
int threadsPerBlock = 1024;
int blocksPerGrid = (entries + threadsPerBlock - 1) / threadsPerBlock;
for(int i=0; i < numComparisons; i++)
hipLaunchKernelGGL(( init_matrix), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, &d_matrixArr[i*entries], 0, entries);
hipLaunchKernelGGL(( init_matrix), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_trace, -2, entries);
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess) {
printf( "Error initializing arrays on device(Kernel Launch: init_matrix)\n%s\n", hipGetErrorString(error));
exit(0);
}
/* Do calculation on device: */
if (approach == 1) {
//Calculate separate problems concurrently over many streams (one kernel per problem)
hipStream_t s[numComparisons];
for (int i = 0; i < numComparisons; i++) {
hipStreamCreate(&s[i]);
hipLaunchKernelGGL(( CalculateCostOneKernel), dim3(1), dim3(256), 0, s[i], &d_matrixArr[i * entries], d_trace, d_string1, &d_s2Arr[i*lenS2], lenS1 + 1, lenS2 + 1);
}
for (int i = 0; i < numComparisons; i++)
hipStreamDestroy(s[i]);
}
else if (approach == 2) {
//Calculate each problem with many kernel launches (one ComputeDiagonal launch per anti-diagonal), issued across per-problem streams
CalculateCost(d_matrixArr, d_trace, d_string1, d_s2Arr, lenS1 + 1,lenS2 + 1, numComparisons, entries);
}
else {
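//Approach 3: compute a reference result with the per-diagonal kernels, reset the matrix, recompute with the tiled kernel, and verify the two results match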
CalculateCost(d_matrixArr, d_trace, d_string1, d_s2Arr, lenS1+1, lenS2+1, numComparisons, entries);
hipDeviceSynchronize();
int *a = (int*)malloc(sizeof(int)*entries);
hipMemcpy(a, d_matrixArr, sizeof(int)*entries, hipMemcpyDeviceToHost);
threadsPerBlock = 1024;
blocksPerGrid = (entries + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( init_matrix), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_matrixArr, 0, entries);
hipDeviceSynchronize();
int *m2d = (int*) malloc(sizeof(int) * entries);
CopyToMatrix(m2d, a, lenS1+1, lenS2+1);
//PrintMatrix(m2d, string1, s2Arr, lenS1+1, lenS2+1);
threadsPerBlock = 32;
blocksPerGrid = 0;
int blocksInRow = (lenS1 + threadsPerBlock -1)/threadsPerBlock;
int blocksInCol = (lenS2 + threadsPerBlock -1)/threadsPerBlock;
int index = lenS1 + 2;
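//Launch one kernel per anti-diagonal of thread-block tiles: 'index' is the first interior cell of the starting tile on the current block diagonal (stepping down a block of rows while the diagonal grows, then right by a block of columns), and blocksPerGrid widens and later narrows with the block diagonal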
for(int i =0; i < blocksInRow + blocksInCol - 1; i++){
int z1 = 0; int z2 = 0;
if (i >= blocksInRow)
z1 = i - blocksInRow +1;
if (i >= blocksInCol)
z2 = i - blocksInCol +1;
if (i == 0){
blocksPerGrid++;
}
else if (z1 == 0 && z2 == 0){
index += threadsPerBlock*(lenS1+1);
blocksPerGrid++;
}
else if (z1 > 0 && z2 > 0){
index += threadsPerBlock;
blocksPerGrid--;
}
else{
index += threadsPerBlock;
}
hipLaunchKernelGGL(( CalculateTiledMatrix), dim3(blocksPerGrid), dim3(threadsPerBlock), sizeof(int)*(threadsPerBlock+1)*(threadsPerBlock+1) + sizeof(char)*2*threadsPerBlock , 0, d_matrixArr, d_trace, d_string1, d_s2Arr, (lenS1+1), index, entries, blocksPerGrid);
}
hipDeviceSynchronize();
int *r = (int*)malloc(sizeof(int)*entries);
hipMemcpy(r, d_matrixArr, sizeof(int)*entries, hipMemcpyDeviceToHost);
//PrintMatrix(r, string1, s2Arr, lenS1+1, lenS2+1);
/*for (int j =28; j < lenS1; j++)
printf(" |\t%c", s2Arr[j]);
printf("\n\t");
for(int i = 0; i < lenS2 +1; i++){
for( int j =29; j <lenS1 +1; j++)
printf("%d\t", r[i*(lenS1+1) + j]);
printf("\n%c | \t", s2Arr[i]);
}
printf("\n\n");
for (int j =28; j < lenS1; j++)
printf(" |\t%c", s2Arr[j]);
printf("\n\t");
for(int i = 0; i < lenS2 +1 ; i++){
for( int j =29; j <lenS1 +1; j++)
printf("%d\t", m2d[i*(lenS1+1) + j]);
printf("\n%c | \t", s2Arr[i]);
}*/
for(int n = 0; n < entries; n++){
if (m2d[n] != r[n]){
printf("Tiled result different from Diagonal Kernel result: index %d\n", n);
free(m2d); free(r); free(a);
hipDeviceReset();
exit(0);
}
}
printf("Tiled result same as Diagonal\n");
free(r); free(a); free(m2d);
/*//Calculate cost using tiled method
threadsPerBlock = 256;
hipStream_t s[numComparisons];
for (int i = 0; i < numComparisons; i++)
hipStreamCreate(&s[i]);
for (int diag = 2; diag < (lenS1 + lenS2 + 1); diag += threadsPerBlock) {
for (int c = 0; c < numComparisons; c++) {
//Launch Tiled Kernel here
threadsPerBlock = 5;
blocksPerGrid = c+1;
printf("Kernel c=%d, d=%d\n", c,diag);
hipLaunchKernelGGL(( CalculateTiledDiagonal), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, s[c], &d_matrixArr[c*entries], d_trace, d_string1, &d_s2Arr[c*lenS2], diag, lenS1+1, lenS2+1);
//printf("after\n");
}
}
for (int i = 0; i < numComparisons; i++)
hipStreamDestroy(s[i]);*/
}
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess) {
printf("Error with kernel launches:(calculating costs) %s\n", hipGetErrorString(error));
hipDeviceReset(); exit(0);
}
//Allocate and copy score table to host
int *k1Result = (int*) malloc(sizeof(int) * entries);
//Allocate final matrix: Used for output (easier printing)
int *matrix2d = (int*) malloc(sizeof(int) * entries);
for (int a = 0; a < numComparisons; a++) {
hipMemcpy(k1Result, &d_matrixArr[a * entries], sizeof(int) * entries, hipMemcpyDeviceToHost);
CopyToMatrix(matrix2d, k1Result, lenS1 + 1, lenS2 + 1);
if (argc > 5 && !strcmp("-v", argv[5])) {
printf("Kernel %d:\n", a);
PrintMatrix(matrix2d, string1, &s2Arr[a * lenS2], lenS1 + 1,lenS2 + 1);
}
}
//Allocate and copy trace table to host
//CopyToMatrix(matrix2d, trace, lenS1+1, lenS2+1);
hipDeviceSynchronize();
//Free device memory
hipFree(d_string1);
hipFree(d_trace);
hipFree(d_s2Arr);
hipFree(d_matrixArr);
//Free host memory
free(string1);
free(matrix2d);
free(k1Result);
free(s2Arr);
hipDeviceReset();
printf("Calculation complete\n");
}
/**
* Method launches one kernel per diagonal to calculate matrix
*/
void CalculateCost(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2, int comparisons, int entries) {
int i = 3;
int prev = 1;
int last = 0;
hipStream_t stream[comparisons];
for (int a = 0; a < comparisons; a++)
hipStreamCreate(&stream[a]);
for (int slice = 2; slice < s2 + s1 - 1; slice++) {
int z1 = slice < s1 ? 0 : slice - s1 + 1;
int z2 = slice < s2 ? 0 : slice - s2 + 1;
int size = slice - z1 - z2 + 1;
int numElements = size - 2;
if (z2 > 1)
last++;
if (z1 > 0)
numElements++;
int off = 1;
if (z2 > 0) {
numElements++;
off = 0;
};
int blocksPerGrid = (numElements + numThreads - 1) / numThreads;
for (int a = 0; a < comparisons; a++)
hipLaunchKernelGGL(( ComputeDiagonal), dim3(blocksPerGrid), dim3(numThreads), 0, stream[a], i + off, prev, last, numElements, &d_matrix[a * entries], d_trace, d_s1, &d_s2[a*(s2-1)], max(z2 - 1, 0), min(slice - 2, s2 - 2));
last = prev;
prev = i;
i += size;
}
for (int a = 0; a < comparisons; a++)
hipStreamDestroy(stream[a]);
}
void PrintMatrix(int *arr, char *s1, char *s2, int xDim, int yDim) {
printf("\t");
for (int i = 0; i < xDim - 1; i++) {
printf("\t%c", s1[i]);
}
printf("\n------------------------------------------------------------------------------------------------------\n\t|");
for (int i = 0; i < yDim; i++) {
for (int j = 0; j < xDim; j++)
printf("%d\t", arr[i * xDim + j]);
(i == yDim - 1) ? printf("\n") : printf("\n%c\t|", s2[i]);
}
printf("\n");
}
void CopyToMatrix(int *dst, int *src, int cols, int rows) {
/**Credit Mark Byers at Stack overflow: http://stackoverflow.com/a/2112951 */
int i = 0;
for (int slice = 0; slice < cols + rows - 1; ++slice) {
int z1 = slice < cols ? 0 : slice - cols + 1;
int z2 = slice < rows ? 0 : slice - rows + 1;
for (int j = slice - z2; j >= z1; --j) {
dst[cols * j + slice - j] = src[i++];
}
}
}
| aefc47e93fdec3322fe3b296278b43899377f7a7.cu | // Sequence Alignment -CUDA
// Alex Ringeri
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
const int numThreads = 256;
void CalculateCost(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2, int comparison, int entries);
void CopyToMatrix(int *dst, int *src, int cols, int rows);
void PrintMatrix(int *arr, char *s1, char *s2, int xDim, int yDim);
//Kernel initializes all elements of matrix to 'value'
__global__ void init_matrix(int *matrix, int value, int maxElements) {
if (blockDim.x * blockIdx.x + threadIdx.x < maxElements)
matrix[blockDim.x * blockIdx.x + threadIdx.x] = value;
}
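// ComputeDiagonal scores one anti-diagonal of the local-alignment matrix: match +2, mismatch -1, gap (up or left) -1, clamped at 0, and records the traceback direction (0 = diagonal, 1 = up, -1 = left).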
__global__ void ComputeDiagonal(int i, int prevI, int lastI, int space, int *arr, int *trace, char *s1, char *s2, int s1off, int s2off) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < space) {
int left = arr[prevI + id];
int up = arr[prevI + id + 1];
int upLeft = arr[lastI + id];
if (s1[s1off + id] == s2[s2off - id])
upLeft += 2;
else
upLeft -= 1;
int cost, dir;
if (up > left) {
cost = up - 1;
dir = 1;
} else {
cost = left - 1;
dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
dir = 0;
}
arr[i + id] = max(cost, 0);
trace[i + id] = dir;
}
}
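// CalculateTiledMatrix processes one block-diagonal of tiles: shared memory holds a (blockDim.x+1) x (blockDim.x+1) tile including its top and left borders plus blockDim.x characters of each string; the tile is swept in two passes of internal anti-diagonals and then written back to global memory.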
__global__ void CalculateTiledMatrix(int *d_m, int *d_t, char *d_s1, char *d_s2, int cols, int i, int entries, int blockNum){
extern __shared__ int tile[];
int *sh_m = &tile[0]; //pointer to matrix in shared memory
char *sh_s1 = (char*)&tile[(blockDim.x + 1 )*(blockDim.x + 1)]; //pointer to 1st string in shared memory
char *sh_s2 = (char*)&tile[(blockDim.x + 1 )*(blockDim.x + 1) + blockDim.x]; //pointer to 2nd string in shared memory
//Copy boundary data into shared memory
int id = blockIdx.x*blockDim.x + threadIdx.x;
int index = i - (blockIdx.x*blockDim.x)*cols + id;
// Todo: Check condition, blocks larger thread numbers when copying border data to shared memory (rectangular matrices)
if (/*i%cols + id - 1 < cols &&*/ i-(blockIdx.x)*(blockDim.x)*cols -1 + threadIdx.x*cols < entries){/*index + threadIdx.x*cols -1*/
if (threadIdx.x == 0){
sh_m[0] = d_m[index - cols -1];
}
//sh_m[threadIdx.x + 1] = d_m[index - cols];
sh_s2[threadIdx.x] = d_s2[i/cols-(blockIdx.x)*(blockDim.x) -1 + threadIdx.x];
/*if (blockIdx.x == 1)
printf("index: %d\ts2: %d\t\n",index,i/cols-(blockIdx.x)*(blockDim.x) -1 + threadIdx.x);*/
}
if (i%cols + id < cols /*&& i%cols + id - 1 < entries*/ ){
sh_s1[threadIdx.x] = d_s1[i%cols + id - 1];
sh_m[threadIdx.x + 1] = d_m[index - cols];
/*if (blockIdx.x == 1)
printf("index: %d\tS1 %d\n",index, i%cols + id -1);*/
}
if (i - blockIdx.x*blockDim.x*cols + blockIdx.x*blockDim.x + cols*threadIdx.x - 1 < entries){
sh_m[(blockDim.x + 1)*(threadIdx.x + 1)] = d_m[i - blockIdx.x*blockDim.x*cols + blockIdx.x*blockDim.x + cols*threadIdx.x - 1 ];
//printf("s1[%d]\n", index%cols - 1);
}
			__syncthreads();
//Calculate cost for each diagonal
for (int b = 0; b < 2; b++){
for(int d = 1; d < blockDim.x + 1; d++){
if (threadIdx.x < blockDim.x*b - b*d + d*(1-b) ){
int upLeft = sh_m[(d - d*b + blockDim.x*b - threadIdx.x - 1)*(blockDim.x+1) + d*b + threadIdx.x];//[(d - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x]
int up = sh_m[(d - d*b + blockDim.x*b - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x + d*b + 1];//[(d - threadIdx.x - 1)*(blockDim.x+1) + threadIdx.x + 1]
int left = sh_m[(d - d*b + blockDim.x*b - threadIdx.x)*(blockDim.x+1) + d*b + threadIdx.x];//[(d - threadIdx.x)*(blockDim.x+1) + threadIdx.x]
if (sh_s1[threadIdx.x + d*b] == sh_s2[d - d*b + blockDim.x*b - 1-threadIdx.x])
upLeft += 2;
else
upLeft -= 1;
int cost; //int dir;
if (up > left) {
cost = up - 1;
//dir = 1;
} else {
cost = left - 1;
//dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
//dir = 0;
}
sh_m[(d -d*b + blockDim.x*b - threadIdx.x)*(blockDim.x+1) + d*b + threadIdx.x + 1] = max(0, cost);
/*if (threadIdx.x == 31 && index > 79){
printf("ID: %d\tS1 %c\tS2 %c\tUpleft %d\tLeft %d\tUp %d\tCost: %d\ts1[%d]\ts2[%d]\n", index, sh_s1[threadIdx.x+d*b], sh_s2[d - d*b + blockDim.x*b - 1-threadIdx.x], upLeft, left, up, max(0,cost),threadIdx.x +d*b,d - d*b + blockDim.x*b - 1-threadIdx.x );
//printf("index: %d\ts2: %d\n",index,index/cols+threadIdx.x -1);
}*/
}
			__syncthreads();
}
}
//Copy results from shared memory into global memory
for (int j = 0; j < blockDim.x; j++){
if (i%cols + id < cols && index + j*cols < entries){
d_m[index + j*(cols)] = sh_m[(j+1)*(blockDim.x+1) + threadIdx.x + 1];
}
}
}
__global__ void CalculateTiledDiagonal(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int diag, int rows, int cols) {
int dSize = 1;
for(int d = diag; d < 2*blockDim.x-1+diag; d++){
int z1 = 0;
int z2 = 0;
int index = 1;
int size = 0;
if (d > rows){
z1 = d - rows;
index+= blockDim.x - 1;
}
if (d > cols){
z2 = d - cols;
size++;
index--;
}
//int z2 = (diag - cols < 0)? 0 : diag - rows;
index += (d*(d+1) + z1*(z1+1) + z2*(z2+1))/2;
//int size += diag - z1 - z2 + 1;
int pos = blockIdx.x * blockDim.x + threadIdx.x;
if (d > blockDim.x -1 + diag)
index += d - diag - blockDim.x +1;
if (threadIdx.x < dSize){
d_matrix[index + pos] = index + pos;
//printf("Block ID : %d Thread ID: %d, pos : %d index : %d d: %d\n",blockIdx.x, threadIdx.x, pos, index, d);
}
dSize++;
}
//loop through diagonals and copy into shared memory
//copy strings into shared memory
//loop through diagonals in shmem and calculate costs
//write result from shmem to global memory
}
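//Single-block variant: one thread block sweeps every anti-diagonal of the matrix in
//sequence, synchronising between diagonals, so a single kernel launch scores an
//entire comparison.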
__global__ void CalculateCostOneKernel(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2) {
int i = 3;
int prev = 1;
int last = 0;
for (int slice = 2; slice < s2 + s1 - 1; slice++) {
int z1 = 0;
int z2 = 0;
int numElements = 0;
int off = 1;
if (slice > s1 - 1) {
z1 = slice - s1 + 1;
numElements++;
}
if (slice > s2 - 1) {
z2 = slice - s2 + 1;
numElements++;
off = 0;
}
int size = slice - z1 - z2 + 1;
numElements += size - 2;
if (z2 > 1)
last++;
for (int s = 0; s < (numElements + blockDim.x - 1) / blockDim.x; s++) {
int id = blockDim.x * s + threadIdx.x;
if (id < numElements) {
int upLeft = d_matrix[last + id];
int left = d_matrix[prev + id];
int up = d_matrix[prev + id + 1];
if (d_s1[max(z2 - 1, 0) + id] == d_s2[min(slice - 2, s2 - 2) - id])
upLeft += 2;
else
upLeft -= 1;
int cost, dir;
if (up > left) {
cost = up - 1;
dir = 1;
} else {
cost = left - 1;
dir = -1;
}
if (upLeft > cost) {
cost = upLeft;
dir = 0;
}
d_matrix[i + off + id] = max(cost, 0);
d_trace[i + off + id] = dir;
}
}
last = prev;
prev = i;
i += size;
		__syncthreads();
}
}
// Main routine:
int main(int argc, char *argv[]) {
cudaSetDevice(0);
char AGCT[] = "AGCT";
int lenS1, lenS2;
int numComparisons = 0;
int approach = 0;
if (argc > 4) {
int args[] = { atoi(argv[1]), atoi(argv[2]) };
if (args[0] > args[1]) {
lenS1 = args[0];
lenS2 = args[1];
} else {
lenS1 = args[1];
lenS2 = args[0];
}
numComparisons = atoi(argv[3]);
approach = atoi(argv[4]);
if (approach < 1 || approach > 3) {
printf("Invalid Approach Argument --- Exiting Program\n");
exit(0);
}
}
else {
printf("Invalid Command Line Arguments --- Exiting Program\n");
exit(0);
}
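	//approach 1: one CalculateCostOneKernel launch per comparison, each on its own stream
	//approach 2: CalculateCost - one kernel launch per anti-diagonal, shared across comparisons
	//approach 3: run approach 2, then recompute with the shared-memory tiled kernel and compare the two results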
printf("Calculating Cost Matrix: %d elements (%d x %d)\n", (lenS1 + 1) * (lenS2 + 1), lenS1 + 1, lenS2 + 1);
//Allocate strings on host
char * string1 = (char*) malloc(sizeof(char) * lenS1);
char * s2Arr = (char*) malloc(sizeof(char) * numComparisons * lenS2);
//Initialize strings with random numbers
srand(time(NULL));
for (int i = 0; i < lenS1; i++) {
char r = AGCT[rand() % 4];
string1[i] = r;
}
for (int i = 0; i < numComparisons; i++) {
for (int j = 0; j < lenS2; j++) {
char r = AGCT[rand() % 4];
s2Arr[i * lenS2 + j] = r;
}
}
//Allocate strings on device
cudaError_t error = cudaSuccess;
char *d_string1, *d_s2Arr;
int *d_matrixArr;
unsigned int entries = (lenS1 + 1) * (lenS2 + 1);
error = cudaMalloc((void**) &d_string1, sizeof(char) * lenS1);
if (error != cudaSuccess) {
printf("Error allocating s1 on device\n");
cudaDeviceReset(); exit(0);
}
error = cudaMalloc((void**) &d_s2Arr, sizeof(char) * numComparisons * lenS2);
if (error != cudaSuccess) {
printf("Error allocating s2array on device\n");
cudaDeviceReset(); exit(0);
}
//Initialize sequence strings on device
error = cudaMemcpy(d_string1, string1, sizeof(char) * lenS1, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Error copying k1s1 to device\n");
cudaDeviceReset(); exit(0);
}
for (int i = 0; i < numComparisons; i++) {
error = cudaMemcpy(d_s2Arr, s2Arr, sizeof(char) * numComparisons * lenS2, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("Error copying a s2Arr to s_S2arr");
cudaDeviceReset(); exit(0);
}
}
/**** Allocate cost matrix ****/
error = cudaMalloc((void**) &d_matrixArr, sizeof(int) * numComparisons * entries);
if (error != cudaSuccess) {
printf("Error allocating d_matrixArr on device\n");
cudaDeviceReset(); exit(0);
}
//Allocate trace table on Device
int *d_trace;
error = cudaMalloc((void**) &d_trace, sizeof(int) * entries);
if (error != cudaSuccess) {
printf("Error allocating k1 d_trace on device:\n%s", cudaGetErrorString(error));
cudaDeviceReset(); exit(0);
}
//Initialize trace and score tables
int threadsPerBlock = 1024;
int blocksPerGrid = (entries + threadsPerBlock - 1) / threadsPerBlock;
for(int i=0; i < numComparisons; i++)
init_matrix<<<blocksPerGrid, threadsPerBlock>>>(&d_matrixArr[i*entries], 0, entries);
init_matrix<<<blocksPerGrid, threadsPerBlock>>>(d_trace, -2, entries);
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) {
printf( "Error initializing arrays on device(Kernel Launch: init_matrix)\n%s\n", cudaGetErrorString(error));
exit(0);
}
/* Do calculation on device: */
if (approach == 1) {
//Calculate seperate problems concurrently over many streams (one kernel per problem)
cudaStream_t s[numComparisons];
for (int i = 0; i < numComparisons; i++) {
cudaStreamCreate(&s[i]);
CalculateCostOneKernel<<<1, 256, 0, s[i]>>>(&d_matrixArr[i * entries], d_trace, d_string1, &d_s2Arr[i*lenS2], lenS1 + 1, lenS2 + 1);
}
for (int i = 0; i < numComparisons; i++)
cudaStreamDestroy(s[i]);
}
else if (approach == 2) {
//Calculate one problem with many kernels
CalculateCost(d_matrixArr, d_trace, d_string1, d_s2Arr, lenS1 + 1,lenS2 + 1, numComparisons, entries);
}
else {
CalculateCost(d_matrixArr, d_trace, d_string1, d_s2Arr, lenS1+1, lenS2+1, numComparisons, entries);
cudaDeviceSynchronize();
int *a = (int*)malloc(sizeof(int)*entries);
cudaMemcpy(a, d_matrixArr, sizeof(int)*entries, cudaMemcpyDeviceToHost);
threadsPerBlock = 1024;
blocksPerGrid = (entries + threadsPerBlock - 1) / threadsPerBlock;
init_matrix<<< blocksPerGrid, threadsPerBlock >>>(d_matrixArr, 0, entries);
cudaDeviceSynchronize();
int *m2d = (int*) malloc(sizeof(int) * entries);
CopyToMatrix(m2d, a, lenS1+1, lenS2+1);
//PrintMatrix(m2d, string1, s2Arr, lenS1+1, lenS2+1);
threadsPerBlock = 32;
blocksPerGrid = 0;
int blocksInRow = (lenS1 + threadsPerBlock -1)/threadsPerBlock;
int blocksInCol = (lenS2 + threadsPerBlock -1)/threadsPerBlock;
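		//Sweep the matrix one anti-diagonal of 32x32 tiles at a time: 'index' tracks the
		//row-major offset handed to the tiled kernel for the current diagonal of blocks,
		//and blocksPerGrid grows while the diagonal is still reaching the matrix edge and
		//shrinks once it passes the opposite corner.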
int index = lenS1 + 2;
for(int i =0; i < blocksInRow + blocksInCol - 1; i++){
int z1 = 0; int z2 = 0;
if (i >= blocksInRow)
z1 = i - blocksInRow +1;
if (i >= blocksInCol)
z2 = i - blocksInCol +1;
if (i == 0){
blocksPerGrid++;
}
else if (z1 == 0 && z2 == 0){
index += threadsPerBlock*(lenS1+1);
blocksPerGrid++;
}
else if (z1 > 0 && z2 > 0){
index += threadsPerBlock;
blocksPerGrid--;
}
else{
index += threadsPerBlock;
}
CalculateTiledMatrix<<< blocksPerGrid, threadsPerBlock, sizeof(int)*(threadsPerBlock+1)*(threadsPerBlock+1) + sizeof(char)*2*threadsPerBlock >>>(d_matrixArr, d_trace, d_string1, d_s2Arr, (lenS1+1), index, entries, blocksPerGrid);
}
cudaDeviceSynchronize();
int *r = (int*)malloc(sizeof(int)*entries);
cudaMemcpy(r, d_matrixArr, sizeof(int)*entries, cudaMemcpyDeviceToHost);
//PrintMatrix(r, string1, s2Arr, lenS1+1, lenS2+1);
/*for (int j =28; j < lenS1; j++)
printf(" |\t%c", s2Arr[j]);
printf("\n\t");
for(int i = 0; i < lenS2 +1; i++){
for( int j =29; j <lenS1 +1; j++)
printf("%d\t", r[i*(lenS1+1) + j]);
printf("\n%c | \t", s2Arr[i]);
}
printf("\n\n");
for (int j =28; j < lenS1; j++)
printf(" |\t%c", s2Arr[j]);
printf("\n\t");
for(int i = 0; i < lenS2 +1 ; i++){
for( int j =29; j <lenS1 +1; j++)
printf("%d\t", m2d[i*(lenS1+1) + j]);
printf("\n%c | \t", s2Arr[i]);
}*/
for(int n = 0; n < entries; n++){
if (m2d[n] != r[n]){
printf("Tiled result different from Diagonal Kernel result: index %d\n", n);
free(m2d); free(r); free(a);
cudaDeviceReset();
exit(0);
}
}
printf("Tiled result same as Diagonal\n");
free(r); free(a); free(m2d);
/*//Calculate cost using tiled method
threadsPerBlock = 256;
cudaStream_t s[numComparisons];
for (int i = 0; i < numComparisons; i++)
cudaStreamCreate(&s[i]);
for (int diag = 2; diag < (lenS1 + lenS2 + 1); diag += threadsPerBlock) {
for (int c = 0; c < numComparisons; c++) {
//Launch Tiled Kernel here
threadsPerBlock = 5;
blocksPerGrid = c+1;
printf("Kernel c=%d, d=%d\n", c,diag);
CalculateTiledDiagonal<<< blocksPerGrid, threadsPerBlock, 0, s[c]>>>(&d_matrixArr[c*entries], d_trace, d_string1, &d_s2Arr[c*lenS2], diag, lenS1+1, lenS2+1);
//printf("after\n");
}
}
for (int i = 0; i < numComparisons; i++)
cudaStreamDestroy(s[i]);*/
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Error with kernel launches:(calculating costs) %s\n", cudaGetErrorString(error));
cudaDeviceReset(); exit(0);
}
//Allocate and copy score table to host
int *k1Result = (int*) malloc(sizeof(int) * entries);
//Allocate final matrix: Used for output (easier printing)
int *matrix2d = (int*) malloc(sizeof(int) * entries);
for (int a = 0; a < numComparisons; a++) {
cudaMemcpy(k1Result, &d_matrixArr[a * entries], sizeof(int) * entries, cudaMemcpyDeviceToHost);
CopyToMatrix(matrix2d, k1Result, lenS1 + 1, lenS2 + 1);
if (argc > 5 && !strcmp("-v", argv[5])) {
printf("Kernel %d:\n", a);
PrintMatrix(matrix2d, string1, &s2Arr[a * lenS2], lenS1 + 1,lenS2 + 1);
}
}
//Allocate and copy trace table to host
//CopyToMatrix(matrix2d, trace, lenS1+1, lenS2+1);
cudaDeviceSynchronize();
//Free device memory
cudaFree(d_string1);
cudaFree(d_trace);
cudaFree(d_s2Arr);
cudaFree(d_matrixArr);
//Free host memory
free(string1);
free(matrix2d);
free(k1Result);
free(s2Arr);
cudaDeviceReset();
printf("Calculation complete\n");
}
/**
 * Launches one kernel per anti-diagonal of the score matrix, once per comparison, with each comparison on its own stream
*/
void CalculateCost(int *d_matrix, int *d_trace, char *d_s1, char *d_s2, int s1, int s2, int comparisons, int entries) {
int i = 3;
int prev = 1;
int last = 0;
cudaStream_t stream[comparisons];
for (int a = 0; a < comparisons; a++)
cudaStreamCreate(&stream[a]);
for (int slice = 2; slice < s2 + s1 - 1; slice++) {
int z1 = slice < s1 ? 0 : slice - s1 + 1;
int z2 = slice < s2 ? 0 : slice - s2 + 1;
int size = slice - z1 - z2 + 1;
int numElements = size - 2;
if (z2 > 1)
last++;
if (z1 > 0)
numElements++;
int off = 1;
if (z2 > 0) {
numElements++;
off = 0;
};
int blocksPerGrid = (numElements + numThreads - 1) / numThreads;
for (int a = 0; a < comparisons; a++)
ComputeDiagonal<<<blocksPerGrid, numThreads, 0, stream[a]>>>(i + off, prev, last, numElements, &d_matrix[a * entries], d_trace, d_s1, &d_s2[a*(s2-1)], max(z2 - 1, 0), min(slice - 2, s2 - 2));
last = prev;
prev = i;
i += size;
}
for (int a = 0; a < comparisons; a++)
cudaStreamDestroy(stream[a]);
}
void PrintMatrix(int *arr, char *s1, char *s2, int xDim, int yDim) {
printf("\t");
for (int i = 0; i < xDim - 1; i++) {
printf("\t%c", s1[i]);
}
printf("\n------------------------------------------------------------------------------------------------------\n\t|");
for (int i = 0; i < yDim; i++) {
for (int j = 0; j < xDim; j++)
printf("%d\t", arr[i * xDim + j]);
(i == yDim - 1) ? printf("\n") : printf("\n%c\t|", s2[i]);
}
printf("\n");
}
void CopyToMatrix(int *dst, int *src, int cols, int rows) {
/**Credit Mark Byers at Stack overflow: http://stackoverflow.com/a/2112951 */
int i = 0;
for (int slice = 0; slice < cols + rows - 1; ++slice) {
int z1 = slice < cols ? 0 : slice - cols + 1;
int z2 = slice < rows ? 0 : slice - rows + 1;
for (int j = slice - z2; j >= z1; --j) {
dst[cols * j + slice - j] = src[i++];
}
}
}
|
a353485b7b7d6cbeb68f2b206fa8dfb783d5bc53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//For more comments go to bfs.h
#include <cstdio>
#include <iostream>
#include <vector>
#include <algorithm>
#include <limits>
#include "bfs.h"
using namespace std;
int *d_adj, *d_edge, *d_edgeSize, *d_distance;
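/* readGraph builds a CSR-style adjacency structure: edge[v] is the offset of vertex
 * v's first neighbour in adj, edgeSize[v] is its out-degree, and adj is every
 * neighbour list concatenated. For example, edges 0->1, 0->2, 1->2 on three vertices
 * give edge = {0, 2, 3}, edgeSize = {2, 1, 0}, adj = {1, 2, 2}. */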
void readGraph(Graph &g) {
int n, m, u, v;
vector<int> vetrices;
cout << "Give number of vertices:" << endl;;
cin >> n;
cout << "Give number of edges:" << endl;
cin >> m;
vector<vector<int> > adj(n);
cout << "Give paths between vetrices:" << endl; //From A to B to C, etc.
for (int i = 0; i < m; i++) {
cout << "Path " << i << endl; //Path indexing to make my life easier
cin >> u >> v;
adj[u].push_back(v);
vetrices.push_back(u);
vetrices.push_back(v);
}
for (int i = 0; i < n; i++) { //Calculate offset
g.edge.push_back(g.adj.size());
g.edgeSize.push_back(adj[i].size());
for (auto e: adj[i]) {
g.adj.push_back(e);
}
}
g.numVertices = n;
g.numEdges = g.adj.size();
sort(vetrices.begin(), vetrices.end());
vetrices.erase(unique(vetrices.begin(), vetrices.end()), vetrices.end());
cout << "List of vertices: ";
for (auto v : vetrices)
cout << v << " ";
cout << endl;
}
// //
//////////// CUDA KERNEL //////////
// //
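// Level-synchronous BFS: on iteration 'level' every thread that owns a frontier
// vertex (distance == level) relaxes its outgoing edges; *explored (pinned host
// memory visible to the device) records whether any distance changed, and the host
// keeps relaunching the kernel until a level produces no updates.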
__global__ void bfs(int N, int level, int *d_adj, int *d_edge, int *d_edgeSize, int *d_distance, bool *explored) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N && d_distance[idx] == level) {
for (int i = d_edge[idx]; i < d_edge[idx] + d_edgeSize[idx]; i++) {
int v = d_adj[i];
if (level + 1 < d_distance[v]) {
d_distance[v] = level + 1;
*explored = true;
}
}
}
}
void runbfs(int start, Graph &g, vector<int> &distance) {
bool *explored;
size_t Vsize = g.numVertices * sizeof(int);
size_t Esize = g.numEdges * sizeof(int);
distance[start] = 0;
hipHostMalloc((void **) &explored, sizeof(int));
hipMalloc(&d_adj, Esize);
hipMalloc(&d_edge, Vsize);
hipMalloc(&d_edgeSize, Vsize);
hipMalloc(&d_distance, Vsize);
hipMemcpy(d_adj, g.adj.data(), Esize, hipMemcpyHostToDevice);
hipMemcpy(d_edge, g.edge.data(), Vsize, hipMemcpyHostToDevice);
hipMemcpy(d_edgeSize, g.edgeSize.data(), Vsize, hipMemcpyHostToDevice);
hipMemcpy(d_distance, distance.data(), Vsize, hipMemcpyHostToDevice);
*explored = true;
int level = 0;
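    // Note: the launch below uses a fixed 64x64 configuration, so only the first
    // 64*64 = 4096 vertices are ever inspected; larger graphs would need the grid
    // sized from g.numVertices.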
while (*explored) {
*explored = false;
hipLaunchKernelGGL(( bfs) , dim3(64), dim3(64), Vsize , 0, g.numVertices, level, d_adj, d_edge, d_edgeSize, d_distance, explored);
hipDeviceSynchronize();
level++;
}
hipMemcpy(distance.data(), d_distance, Vsize, hipMemcpyDeviceToHost);
}
void cleanup() {
hipFree(d_adj);
hipFree(d_edge);
hipFree(d_edgeSize);
hipFree(d_distance);
}
// //
////////// MAIN //////////
// //
int main() {
Graph g;
readGraph(g);
vector<int> distance(g.numVertices, numeric_limits<int>::max());
int start;
cout << "Give starting vertex: " << endl;
cin >> start;
runbfs(start, g, distance);
    for (size_t v = 0; v < distance.size(); v++)
        cout << "Distance from " << start << " to " << v << " is " << distance[v] << endl;
cleanup();
return 0;
}
| a353485b7b7d6cbeb68f2b206fa8dfb783d5bc53.cu | //For more comments go to bfs.h
#include <cstdio>
#include <iostream>
#include <vector>
#include <algorithm>
#include <limits>
#include "bfs.h"
using namespace std;
int *d_adj, *d_edge, *d_edgeSize, *d_distance;
void readGraph(Graph &g) {
int n, m, u, v;
vector<int> vetrices;
cout << "Give number of vertices:" << endl;;
cin >> n;
cout << "Give number of edges:" << endl;
cin >> m;
vector<vector<int> > adj(n);
cout << "Give paths between vetrices:" << endl; //From A to B to C, etc.
for (int i = 0; i < m; i++) {
cout << "Path " << i << endl; //Path indexing to make my life easier
cin >> u >> v;
adj[u].push_back(v);
vetrices.push_back(u);
vetrices.push_back(v);
}
for (int i = 0; i < n; i++) { //Calculate offset
g.edge.push_back(g.adj.size());
g.edgeSize.push_back(adj[i].size());
for (auto e: adj[i]) {
g.adj.push_back(e);
}
}
g.numVertices = n;
g.numEdges = g.adj.size();
sort(vetrices.begin(), vetrices.end());
vetrices.erase(unique(vetrices.begin(), vetrices.end()), vetrices.end());
cout << "List of vertices: ";
for (auto v : vetrices)
cout << v << " ";
cout << endl;
}
// //
//////////// CUDA KERNEL //////////
// //
__global__ void bfs(int N, int level, int *d_adj, int *d_edge, int *d_edgeSize, int *d_distance, bool *explored) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N && d_distance[idx] == level) {
for (int i = d_edge[idx]; i < d_edge[idx] + d_edgeSize[idx]; i++) {
int v = d_adj[i];
if (level + 1 < d_distance[v]) {
d_distance[v] = level + 1;
*explored = true;
}
}
}
}
void runbfs(int start, Graph &g, vector<int> &distance) {
bool *explored;
size_t Vsize = g.numVertices * sizeof(int);
size_t Esize = g.numEdges * sizeof(int);
distance[start] = 0;
cudaMallocHost((void **) &explored, sizeof(int));
cudaMalloc(&d_adj, Esize);
cudaMalloc(&d_edge, Vsize);
cudaMalloc(&d_edgeSize, Vsize);
cudaMalloc(&d_distance, Vsize);
cudaMemcpy(d_adj, g.adj.data(), Esize, cudaMemcpyHostToDevice);
cudaMemcpy(d_edge, g.edge.data(), Vsize, cudaMemcpyHostToDevice);
cudaMemcpy(d_edgeSize, g.edgeSize.data(), Vsize, cudaMemcpyHostToDevice);
cudaMemcpy(d_distance, distance.data(), Vsize, cudaMemcpyHostToDevice);
*explored = true;
int level = 0;
while (*explored) {
*explored = false;
bfs <<< 64, 64, Vsize >>>(g.numVertices, level, d_adj, d_edge, d_edgeSize, d_distance, explored);
cudaDeviceSynchronize();
level++;
}
cudaMemcpy(distance.data(), d_distance, Vsize, cudaMemcpyDeviceToHost);
}
void cleanup() {
cudaFree(d_adj);
cudaFree(d_edge);
cudaFree(d_edgeSize);
cudaFree(d_distance);
}
// //
////////// MAIN //////////
// //
int main() {
Graph g;
readGraph(g);
vector<int> distance(g.numVertices, numeric_limits<int>::max());
int start;
cout << "Give starting vertex: " << endl;
cin >> start;
runbfs(start, g, distance);
    for (size_t v = 0; v < distance.size(); v++)
        cout << "Distance from " << start << " to " << v << " is " << distance[v] << endl;
cleanup();
return 0;
}
|
178daec6ef558cfa6cb45e9b11c029e9acc38cca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
#define N 512
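// Work-efficient (Blelloch) exclusive scan for a single block of N elements, done
// entirely in shared memory: the up-sweep builds a binary tree of partial sums in
// place, the root is then replaced with the identity (0), and the down-sweep walks
// back down so that each slot ends up holding the sum of all elements strictly
// before it.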
__global__ void exclusive_scan(int *d_in)
{
__shared__ int temp_in[N];
int tid = threadIdx.x;
temp_in[tid] = d_in[tid];
__syncthreads();
//Phase 1 (Uptree)
int s = 1;
for(; s<=N-1; s<<=1)
{
int i = 2*s*(threadIdx.x+1)-1;
if((i >= s) && (i<N)) {
//printf("s = %d, i= %d \n", s, i);
int a = temp_in[i];
int b = temp_in[i-s];
__syncthreads();
temp_in[i] = a+b;
}
__syncthreads();
}
//Phase 2 (Downtree)
if(threadIdx.x == 0)
temp_in[N-1] = 0;
for(s = s/2; s >= 1; s>>=1)
{
int i = 2*s*(threadIdx.x+1)-1;
if((i >= s) && (i<N)) {
//printf("s = %d, i= %d \n", s, i);
int r = temp_in[i];
int l = temp_in[i-s];
__syncthreads();
temp_in[i] = l+r;
temp_in[i-s] = r;
}
__syncthreads();
}
d_in[tid] = temp_in[tid];
}
int main()
{
int h_in[N];
int h_out[N];
for(int i=0; i < N; i++)
h_in[i] = 1;
// h_in[0] = 3;
// h_in[1] = 1;
// h_in[2] = 7;
// h_in[3] = 0;
// h_in[4] = 4;
// h_in[5] = 1;
// h_in[6] = 6;
// h_in[7] = 3;
int *d_in;
//int *d_out;
hipMalloc((void**) &d_in, N*sizeof(int));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(d_in, &h_in, N*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start);
//Implementing kernel call
hipLaunchKernelGGL(( exclusive_scan), dim3(1), dim3(N), 0, 0, d_in);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(&h_out, d_in, N*sizeof(int), hipMemcpyDeviceToHost);
for(int i=0; i<N; i++)
printf("out[%d] = %d\n", i, h_out[i]);
hipFree(d_in);
printf("Time used: %f milliseconds\n", milliseconds);
	return 0;
}
| 178daec6ef558cfa6cb45e9b11c029e9acc38cca.cu | #include<stdio.h>
#include<math.h>
#define N 512
__global__ void exclusive_scan(int *d_in)
{
__shared__ int temp_in[N];
int tid = threadIdx.x;
temp_in[tid] = d_in[tid];
__syncthreads();
//Phase 1 (Uptree)
int s = 1;
for(; s<=N-1; s<<=1)
{
int i = 2*s*(threadIdx.x+1)-1;
if((i >= s) && (i<N)) {
//printf("s = %d, i= %d \n", s, i);
int a = temp_in[i];
int b = temp_in[i-s];
__syncthreads();
temp_in[i] = a+b;
}
__syncthreads();
}
//Phase 2 (Downtree)
if(threadIdx.x == 0)
temp_in[N-1] = 0;
for(s = s/2; s >= 1; s>>=1)
{
int i = 2*s*(threadIdx.x+1)-1;
if((i >= s) && (i<N)) {
//printf("s = %d, i= %d \n", s, i);
int r = temp_in[i];
int l = temp_in[i-s];
__syncthreads();
temp_in[i] = l+r;
temp_in[i-s] = r;
}
__syncthreads();
}
d_in[tid] = temp_in[tid];
}
int main()
{
int h_in[N];
int h_out[N];
for(int i=0; i < N; i++)
h_in[i] = 1;
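	// With an all-ones input the exclusive scan should print h_out[i] == i, which
	// makes the result easy to verify by eye.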
// h_in[0] = 3;
// h_in[1] = 1;
// h_in[2] = 7;
// h_in[3] = 0;
// h_in[4] = 4;
// h_in[5] = 1;
// h_in[6] = 6;
// h_in[7] = 3;
int *d_in;
//int *d_out;
cudaMalloc((void**) &d_in, N*sizeof(int));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_in, &h_in, N*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
//Implementing kernel call
exclusive_scan<<<1, N>>>(d_in);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(&h_out, d_in, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
printf("out[%d] = %d\n", i, h_out[i]);
cudaFree(d_in);
printf("Time used: %f milliseconds\n", milliseconds);
	return 0;
}
|
95cd7a18003e1e9fc0f7cc86054d2de81ce7f8cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error: Program initialisation failed.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //Windows handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the windows
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
400,
300,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(400, 300)),
&pRT);
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
pRT->BeginDraw();
//
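	// Illustrative sketch only (not part of the original program): a typical frame
	// would clear the target and draw with a solid-colour brush, e.g.
	//   pRT->Clear(D2D1::ColorF(D2D1::ColorF::White));
	//   ID2D1SolidColorBrush* brush = NULL;
	//   pRT->CreateSolidColorBrush(D2D1::ColorF(D2D1::ColorF::Black), &brush);
	//   if (brush) { pRT->FillRectangle(D2D1::RectF(20.f, 20.f, 120.f, 80.f), brush); brush->Release(); }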
pRT->EndDraw();
}
| 95cd7a18003e1e9fc0f7cc86054d2de81ce7f8cf.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error: Program initialisation failed.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //Windows handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the windows
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
400,
300,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(400, 300)),
&pRT);
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
pRT->BeginDraw();
//
pRT->EndDraw();
}
|
9882a301fd0b219a92a8672901c4d73cde7c60f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/deform_correlation_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
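// DeformCorrelationForward: for every output location (y, x) of bottom[1] and every
// displacement (dy, dx) in a window_size x window_size neighbourhood, the kernel
// samples bottom[0] at the displaced position shifted by a learned, per-location
// offset (bilinear interpolation), multiplies it with bottom[1] at (y, x), and
// averages over channels. One block handles one output element, with one thread per
// channel and a shared-memory tree reduction.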
template <typename Dtype>
__global__ void DeformCorrelationForward(const int nthreads,
const Dtype* b0_data, const Dtype* b1_data, const Dtype* offset_data, const int num, const int channels,
const int b0_height, const int b0_width, const int b1_height, const int b1_width,
const int displacement, const int dilation, const int step_h, const int step_w,
const int window_size, const int b0_dim, const int b0_spatial, const int b1_dim, const int b1_spatial,
const int offset_dim,const int top_dim, const int top_spatial, const int top_width, Dtype* top_data) {
extern __shared__ unsigned char kcache[];
Dtype* cache = (Dtype*)kcache;
const int index = blockIdx.x;
const int c = threadIdx.x;
const int n = index/top_dim;
const int out_c = (index%top_dim)/top_spatial;
const int y = (index%top_spatial)/top_width;
const int x = index%top_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
const int dx = (out_c%window_size) - displacement;
const int dy = out_c/window_size - displacement;
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype temp = 0.0;
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
Dtype v1 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high];
Dtype v2 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high];
Dtype v3 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low];
Dtype v4 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low];
temp = hh*hw*v1 + lh*hw*v2 + hh*lw*v3 + lw*lh*v4;
}
temp *= b1_data[n*b1_dim+c*b1_spatial+y*b1_width+x];
cache[c] = temp/(channels);
__syncthreads();
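  // Tree reduction over channels; note this halving scheme assumes blockDim.x (the
  // channel count) is a power of two, otherwise some partial sums are skipped.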
int half = blockDim.x/2;
while(half!=0)
{
if(threadIdx.x < half)
cache[threadIdx.x] += cache[threadIdx.x+half];
__syncthreads();
half /= 2;
}
if(threadIdx.x==0)
top_data[index] = cache[threadIdx.x];
}
template <typename Dtype>
void DeformCorrelationLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(!self_)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const int b1_height = bottom[1]->height();
const int b1_width = bottom[1]->width(); // b1 height width is equal to offset height width
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* b1_data = bottom[1]->gpu_data();
const Dtype* offset_data = bottom[2]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int b0_dim = bottom[0]->count(1);
const int b1_dim = bottom[1]->count(1);
const int offset_dim = bottom[2]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int b1_spatial = bottom[1]->count(2);
const int top_width = b1_width;
const int top_spatial = b1_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
const int count = top[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((DeformCorrelationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(channels), channels*sizeof(Dtype), 0,
count, b0_data, b1_data, offset_data, num, channels,
b0_height, b0_width, b1_height, b1_width, displacement_, dilation_, step_h_, step_w_, window_size_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, top_data);
}
else
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* offset_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int b0_dim = bottom[0]->count(1);
const int offset_dim = bottom[1]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int top_width = b0_width;
const int top_spatial = b0_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
const int count = top[0]->count();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((DeformCorrelationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(channels), channels*sizeof(Dtype), 0,
count, b0_data, b0_data, offset_data, num, channels,
b0_height, b0_width, b0_height, b0_width, displacement_, dilation_, step_h_, step_w_, window_size_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, top_data);
}
}
template <typename Dtype>
__global__ void DeformCorrelationBackwardOffset(const int nthreads,
const Dtype* top_diff, const Dtype* b0_data, const Dtype* b1_data, const Dtype* offset_data,
const int num, const int channels, const int b0_height, const int b0_width,
const int b1_height, const int b1_width, const int window_size,
const int displacement, const int dilation, const int step_h, const int step_w,
const int b0_dim, const int b0_spatial, const int b1_dim , const int b1_spatial,
const int offset_dim, const int top_dim, const int top_spatial, const int top_width, Dtype* offset_diff) {
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / top_dim;
const int out_c = (index % top_dim) / top_spatial;
const int y = (index % top_spatial) / top_width;
const int x = index % top_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
const int dx = (out_c%window_size) - displacement;
const int dy = out_c/window_size - displacement;
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype temp_x = 0.0;
Dtype temp_y = 0.0;
if(h_low>=0 && h_high<b0_height && w_low>=0 && w_high < b0_width)
{
      for(int c=0; c<channels; c++)
{
Dtype data = b1_data[n*b1_dim+c*b1_spatial+y*b1_width+x];
temp_x -= lh*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low]*data;
temp_y -= lw*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low]*data;
temp_x -= hh*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low]*data;
temp_y += lw*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low]*data;
temp_x += lh*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high]*data;
temp_y -= hw*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high]*data;
temp_x += hh*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high]*data;
temp_y += hw*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high]*data;
}
}
offset_diff[n*offset_dim+2*out_c*b1_spatial+y*b1_width+x] = temp_x/channels;
offset_diff[n*offset_dim+(2*out_c+1)*b1_spatial+y*b1_width+x] = temp_y/channels;
}
}
template <typename Dtype>
__global__ void DeformCorrelationBackward1(const int nthreads,
const Dtype* top_diff, const Dtype* b0_data, const Dtype* offset_data, const int num,
const int channels, const int b0_height, const int b0_width,
const int b1_height,const int b1_width, const int window_size,
const int displacement, const int dilation,
const int step_h, const int step_w, const int b0_dim, const int b0_spatial,
const int b1_dim, const int b1_spatial, const int offset_dim, const int top_dim, const int top_spatial,
const int top_width, Dtype* b1_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / b1_dim;
const int c = (index % b1_dim) / b1_spatial;
const int y = (index % b1_spatial) / b1_width;
const int x = index % b1_width;
Dtype temp = 0.0;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
for(int dx=-1*displacement; dx<=displacement; dx++)
for(int dy=-1*displacement; dy<=displacement; dy++)
{
const int out_c = (dy + displacement)*window_size+(dx + displacement);
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype data = 0.0;
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
Dtype v1 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high];
Dtype v2 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high];
Dtype v3 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low];
Dtype v4 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low];
data = hh*hw*v1 + lh*hw*v2 + hh*lw*v3 + lw*lh*v4;
}
temp += data*top_diff[n*top_dim+out_c*top_spatial+y*top_width+x];
}
b1_diff[index] = temp/channels;
}
}
template <typename Dtype>
__global__ void ComputeGradientToBlob0(const int nthreads,const Dtype* top_diff, const Dtype* b1_data,
const Dtype* offset_data, const int channels, const int b0_height, const int b0_width,
const int b1_height, const int b1_width, const int window_size, const int displacement, const int dilation,
const int step_h, const int step_w, const int b0_dim, const int b0_spatial, const int b1_dim, const int b1_spatial,
const int top_spatial, const int top_width, Dtype* container_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
const int y = index/b1_width;
const int x = index%b1_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
for(int dx=-1*displacement; dx<=displacement; dx++)
for(int dy=-1*displacement; dy<=displacement; dy++)
{
const int out_c = (dy + displacement)*window_size+(dx + displacement);
const Dtype offset_x = offset_data[out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype diff = top_diff[out_c*top_spatial+y*top_width+x];
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
int locate = h_low*b0_width*b1_height*b1_width+w_low*b1_height*b1_width+y*b1_width+x;
container_data[locate] = lw*lh*diff/channels;
locate = h_high*b0_width*b1_height*b1_width+w_low*b1_height*b1_width+y*b1_width+x;
container_data[locate] = lw*hh*diff/channels;
locate = h_low*b0_width*b1_height*b1_width+w_high*b1_height*b1_width+y*b1_width+x;
container_data[locate] = hw*lh*diff/channels;
locate = h_high*b0_width*b1_height*b1_width+w_high*b1_height*b1_width+y*b1_width+x;
container_data[locate] = hw*hh*diff/channels;
}
}
}
}
template <typename Dtype>
void DeformCorrelationLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if(!self_)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const int b1_height = bottom[1]->height();
const int b1_width = bottom[1]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* b1_data = bottom[1]->gpu_data();
const Dtype* offset_data = bottom[2]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* b0_diff = bottom[0]->mutable_gpu_diff();
Dtype* b1_diff = bottom[1]->mutable_gpu_diff();
Dtype* offset_diff = bottom[2]->mutable_gpu_diff();
const int b0_dim = bottom[0]->count(1);
const int b1_dim = bottom[1]->count(1);
const int offset_dim = bottom[2]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int b1_spatial = bottom[1]->count(2);
const int top_width = b1_width;
const int top_spatial = b1_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
for(int i=0; i<=2; i++)
caffe_gpu_set(bottom[i]->count(), Dtype(0) , bottom[i]->mutable_gpu_diff());
if (propagate_down[1]) {
const int b1_count = bottom[1]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL((DeformCorrelationBackward1<Dtype>), dim3(CAFFE_GET_BLOCKS(b1_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
b1_count, top_diff, b0_data, offset_data , num, channels,
b0_height, b0_width, b1_height, b1_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, b1_diff);
}
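    // Gradient w.r.t. bottom[0]: for each image, ComputeGradientToBlob0 scatters the
    // bilinear weights times the top gradient into a (b0_spatial x b1_spatial)
    // matrix, and the GEMM below then forms b0_diff[c] = b1_data[c] * container^T
    // for every channel at once.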
if (propagate_down[0]) {
const int b0_count = bottom[0]->count();
const int b1_count = bottom[1]->count();
Dtype* container_data = gradient_container_.mutable_gpu_data();
for(int n=0; n<num; n++)
{
caffe_gpu_set(gradient_container_.count(),Dtype(0.0),gradient_container_.mutable_gpu_data());
hipLaunchKernelGGL(( ComputeGradientToBlob0<Dtype>), dim3(CAFFE_GET_BLOCKS(b1_spatial)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
b1_spatial, top_diff + n*top_dim, b1_data + n*b1_dim, offset_data + n*offset_dim, channels,
b0_height, b0_width, b1_height, b1_width, window_size_, displacement_,
dilation_, step_h_, step_w_, b0_dim, b0_spatial, b1_dim, b1_spatial, top_spatial, top_width, container_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels, b0_spatial, b1_spatial,
1.0, b1_data + n*b1_dim, container_data, 0.0, b0_diff + n*b0_dim);
}
}
if (propagate_down[2]) {
const int top_count = top[0]->count();
hipLaunchKernelGGL(( DeformCorrelationBackwardOffset<Dtype>), dim3(CAFFE_GET_BLOCKS(top_count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top_count, top_diff, b0_data, b1_data, offset_data, num, channels,
b0_height, b0_width, b1_height, b1_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, offset_diff);
}
}
else
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* offset_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* b0_diff = bottom[0]->mutable_gpu_diff();
Dtype* offset_diff = bottom[1]->mutable_gpu_diff();
const int b0_dim = bottom[0]->count(1);
const int offset_dim = bottom[1]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int top_width = b0_width;
const int top_spatial = b0_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
for(int i=0; i<=1; i++)
caffe_gpu_set(bottom[i]->count(), Dtype(0) , bottom[i]->mutable_gpu_diff());
if (propagate_down[0]) {
const int b0_count = bottom[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL((DeformCorrelationBackward1<Dtype>), dim3(CAFFE_GET_BLOCKS(b0_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
b0_count, top_diff, b0_data, offset_data , num, channels,
b0_height, b0_width, b0_height, b0_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, b0_diff);
Dtype* container_data = gradient_container_.mutable_gpu_data();
for(int n=0; n<num; n++)
{
caffe_gpu_set(gradient_container_.count(),Dtype(0.0),gradient_container_.mutable_gpu_data());
hipLaunchKernelGGL(( ComputeGradientToBlob0<Dtype>), dim3(CAFFE_GET_BLOCKS(b0_spatial)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
b0_spatial, top_diff + n*top_dim, b0_data + n*b0_dim, offset_data + n*offset_dim, channels,
b0_height, b0_width, b0_height, b0_width, window_size_, displacement_,
dilation_, step_h_, step_w_, b0_dim, b0_spatial, b0_dim, b0_spatial, top_spatial, top_width, container_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels, b0_spatial, b0_spatial,
1.0, b0_data + n*b0_dim, container_data, 1.0, b0_diff + n*b0_dim);
}
}
if (propagate_down[1]) {
const int top_count = top[0]->count();
hipLaunchKernelGGL(( DeformCorrelationBackwardOffset<Dtype>), dim3(CAFFE_GET_BLOCKS(top_count)),dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
top_count, top_diff, b0_data, b0_data, offset_data, num, channels,
b0_height, b0_width, b0_height, b0_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, offset_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DeformCorrelationLayer);
} // namespace caffe | 9882a301fd0b219a92a8672901c4d73cde7c60f3.cu | #include <vector>
#include "caffe/layers/deform_correlation_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
namespace caffe {
template <typename Dtype>
__global__ void DeformCorrelationForward(const int nthreads,
const Dtype* b0_data, const Dtype* b1_data, const Dtype* offset_data, const int num, const int channels,
const int b0_height, const int b0_width, const int b1_height, const int b1_width,
const int displacement, const int dilation, const int step_h, const int step_w,
const int window_size, const int b0_dim, const int b0_spatial, const int b1_dim, const int b1_spatial,
const int offset_dim,const int top_dim, const int top_spatial, const int top_width, Dtype* top_data) {
extern __shared__ unsigned char kcache[];
Dtype* cache = (Dtype*)kcache;
const int index = blockIdx.x;
const int c = threadIdx.x;
const int n = index/top_dim;
const int out_c = (index%top_dim)/top_spatial;
const int y = (index%top_spatial)/top_width;
const int x = index%top_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
const int dx = (out_c%window_size) - displacement;
const int dy = out_c/window_size - displacement;
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype temp = 0.0;
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
Dtype v1 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high];
Dtype v2 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high];
Dtype v3 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low];
Dtype v4 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low];
temp = hh*hw*v1 + lh*hw*v2 + hh*lw*v3 + lw*lh*v4;
}
temp *= b1_data[n*b1_dim+c*b1_spatial+y*b1_width+x];
cache[c] = temp/(channels);
__syncthreads();
int half = blockDim.x/2;
while(half!=0)
{
if(threadIdx.x < half)
cache[threadIdx.x] += cache[threadIdx.x+half];
__syncthreads();
half /= 2;
}
if(threadIdx.x==0)
top_data[index] = cache[threadIdx.x];
}
template <typename Dtype>
void DeformCorrelationLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if(!self_)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const int b1_height = bottom[1]->height();
const int b1_width = bottom[1]->width(); // b1 height width is equal to offset height width
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* b1_data = bottom[1]->gpu_data();
const Dtype* offset_data = bottom[2]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int b0_dim = bottom[0]->count(1);
const int b1_dim = bottom[1]->count(1);
const int offset_dim = bottom[2]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int b1_spatial = bottom[1]->count(2);
const int top_width = b1_width;
const int top_spatial = b1_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
const int count = top[0]->count();
DeformCorrelationForward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), channels, channels*sizeof(Dtype)>>>(
count, b0_data, b1_data, offset_data, num, channels,
b0_height, b0_width, b1_height, b1_width, displacement_, dilation_, step_h_, step_w_, window_size_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, top_data);
}
else
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* offset_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int b0_dim = bottom[0]->count(1);
const int offset_dim = bottom[1]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int top_width = b0_width;
const int top_spatial = b0_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
const int count = top[0]->count();
DeformCorrelationForward<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), channels, channels*sizeof(Dtype)>>>(
count, b0_data, b0_data, offset_data, num, channels,
b0_height, b0_width, b0_height, b0_width, displacement_, dilation_, step_h_, step_w_, window_size_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, top_data);
}
}
template <typename Dtype>
__global__ void DeformCorrelationBackwardOffset(const int nthreads,
const Dtype* top_diff, const Dtype* b0_data, const Dtype* b1_data, const Dtype* offset_data,
const int num, const int channels, const int b0_height, const int b0_width,
const int b1_height, const int b1_width, const int window_size,
const int displacement, const int dilation, const int step_h, const int step_w,
const int b0_dim, const int b0_spatial, const int b1_dim , const int b1_spatial,
const int offset_dim, const int top_dim, const int top_spatial, const int top_width, Dtype* offset_diff) {
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / top_dim;
const int out_c = (index % top_dim) / top_spatial;
const int y = (index % top_spatial) / top_width;
const int x = index % top_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
const int dx = (out_c%window_size) - displacement;
const int dy = out_c/window_size - displacement;
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype temp_x = 0.0;
Dtype temp_y = 0.0;
if(h_low>=0 && h_high<b0_height && w_low>=0 && w_high < b0_width)
{
      for(int c=0; c<channels; c++)
{
Dtype data = b1_data[n*b1_dim+c*b1_spatial+y*b1_width+x];
temp_x -= lh*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low]*data;
temp_y -= lw*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low]*data;
temp_x -= hh*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low]*data;
temp_y += lw*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low]*data;
temp_x += lh*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high]*data;
temp_y -= hw*b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high]*data;
temp_x += hh*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high]*data;
temp_y += hw*b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high]*data;
}
}
offset_diff[n*offset_dim+2*out_c*b1_spatial+y*b1_width+x] = temp_x/channels;
offset_diff[n*offset_dim+(2*out_c+1)*b1_spatial+y*b1_width+x] = temp_y/channels;
}
}
template <typename Dtype>
__global__ void DeformCorrelationBackward1(const int nthreads,
const Dtype* top_diff, const Dtype* b0_data, const Dtype* offset_data, const int num,
const int channels, const int b0_height, const int b0_width,
const int b1_height,const int b1_width, const int window_size,
const int displacement, const int dilation,
const int step_h, const int step_w, const int b0_dim, const int b0_spatial,
const int b1_dim, const int b1_spatial, const int offset_dim, const int top_dim, const int top_spatial,
const int top_width, Dtype* b1_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / b1_dim;
const int c = (index % b1_dim) / b1_spatial;
const int y = (index % b1_spatial) / b1_width;
const int x = index % b1_width;
Dtype temp = 0.0;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
for(int dx=-1*displacement; dx<=displacement; dx++)
for(int dy=-1*displacement; dy<=displacement; dy++)
{
const int out_c = (dy + displacement)*window_size+(dx + displacement);
const Dtype offset_x = offset_data[n*offset_dim+out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[n*offset_dim+(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype data = 0.0;
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
Dtype v1 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_high];
Dtype v2 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_high];
Dtype v3 = b0_data[n*b0_dim+c*b0_spatial+h_high*b0_width+w_low];
Dtype v4 = b0_data[n*b0_dim+c*b0_spatial+h_low*b0_width+w_low];
data = hh*hw*v1 + lh*hw*v2 + hh*lw*v3 + lw*lh*v4;
}
temp += data*top_diff[n*top_dim+out_c*top_spatial+y*top_width+x];
}
b1_diff[index] = temp/channels;
}
}
template <typename Dtype>
__global__ void ComputeGradientToBlob0(const int nthreads,const Dtype* top_diff, const Dtype* b1_data,
const Dtype* offset_data, const int channels, const int b0_height, const int b0_width,
const int b1_height, const int b1_width, const int window_size, const int displacement, const int dilation,
const int step_h, const int step_w, const int b0_dim, const int b0_spatial, const int b1_dim, const int b1_spatial,
const int top_spatial, const int top_width, Dtype* container_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
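    // One thread per spatial position of blob1: scatter the bilinear weights times
    // top_diff into container_data, indexed by (blob0 position, blob1 position).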
const int y = index/b1_width;
const int x = index%b1_width;
const int b0_x = x*step_w;
const int b0_y = y*step_h;
for(int dx=-1*displacement; dx<=displacement; dx++)
for(int dy=-1*displacement; dy<=displacement; dy++)
{
const int out_c = (dy + displacement)*window_size+(dx + displacement);
const Dtype offset_x = offset_data[out_c*2*b1_spatial+y*b1_width+x];
const Dtype offset_y = offset_data[(out_c*2+1)*b1_spatial+y*b1_width+x];
const Dtype h = b0_y + dy*dilation + offset_y;
const Dtype w = b0_x + dx*dilation + offset_x;
//bilinear interpolation
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
Dtype hh = h - h_low;
Dtype lh = 1 - hh;
Dtype hw = w - w_low;
Dtype lw = 1 - hw;
Dtype diff = top_diff[out_c*top_spatial+y*top_width+x];
if(h_low >=0 && h_high < b0_height && w_low >= 0 && w_high < b0_width)
{
int locate = h_low*b0_width*b1_height*b1_width+w_low*b1_height*b1_width+y*b1_width+x;
container_data[locate] = lw*lh*diff/channels;
locate = h_high*b0_width*b1_height*b1_width+w_low*b1_height*b1_width+y*b1_width+x;
container_data[locate] = lw*hh*diff/channels;
locate = h_low*b0_width*b1_height*b1_width+w_high*b1_height*b1_width+y*b1_width+x;
container_data[locate] = hw*lh*diff/channels;
locate = h_high*b0_width*b1_height*b1_width+w_high*b1_height*b1_width+y*b1_width+x;
container_data[locate] = hw*hh*diff/channels;
}
}
}
}
template <typename Dtype>
void DeformCorrelationLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
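  // Two modes: cross-correlation of bottom[0] with bottom[1] (offsets in bottom[2]),
  // or, when self_ is set, correlation of bottom[0] with itself (offsets in bottom[1]).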
if(!self_)
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const int b1_height = bottom[1]->height();
const int b1_width = bottom[1]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* b1_data = bottom[1]->gpu_data();
const Dtype* offset_data = bottom[2]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* b0_diff = bottom[0]->mutable_gpu_diff();
Dtype* b1_diff = bottom[1]->mutable_gpu_diff();
Dtype* offset_diff = bottom[2]->mutable_gpu_diff();
const int b0_dim = bottom[0]->count(1);
const int b1_dim = bottom[1]->count(1);
const int offset_dim = bottom[2]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int b1_spatial = bottom[1]->count(2);
const int top_width = b1_width;
const int top_spatial = b1_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
for(int i=0; i<=2; i++)
caffe_gpu_set(bottom[i]->count(), Dtype(0) , bottom[i]->mutable_gpu_diff());
if (propagate_down[1]) {
const int b1_count = bottom[1]->count();
DeformCorrelationBackward1<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(b1_count), CAFFE_CUDA_NUM_THREADS>>>(
b1_count, top_diff, b0_data, offset_data , num, channels,
b0_height, b0_width, b1_height, b1_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, b1_diff);
}
if (propagate_down[0]) {
const int b0_count = bottom[0]->count();
const int b1_count = bottom[1]->count();
Dtype* container_data = gradient_container_.mutable_gpu_data();
for(int n=0; n<num; n++)
{
caffe_gpu_set(gradient_container_.count(),Dtype(0.0),gradient_container_.mutable_gpu_data());
ComputeGradientToBlob0<Dtype><<<CAFFE_GET_BLOCKS(b1_spatial),CAFFE_CUDA_NUM_THREADS>>>(
b1_spatial, top_diff + n*top_dim, b1_data + n*b1_dim, offset_data + n*offset_dim, channels,
b0_height, b0_width, b1_height, b1_width, window_size_, displacement_,
dilation_, step_h_, step_w_, b0_dim, b0_spatial, b1_dim, b1_spatial, top_spatial, top_width, container_data);
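      // b0_diff[n] = b1_data[n] * container_data^T: the GEMM sums the bilinear-weighted
      // top_diff over all blob1 positions to form the gradient w.r.t. blob0 features.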
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels, b0_spatial, b1_spatial,
1.0, b1_data + n*b1_dim, container_data, 0.0, b0_diff + n*b0_dim);
}
}
if (propagate_down[2]) {
const int top_count = top[0]->count();
DeformCorrelationBackwardOffset<Dtype><<<CAFFE_GET_BLOCKS(top_count),CAFFE_CUDA_NUM_THREADS>>>(
top_count, top_diff, b0_data, b1_data, offset_data, num, channels,
b0_height, b0_width, b1_height, b1_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b1_dim, b1_spatial, offset_dim , top_dim, top_spatial, top_width, offset_diff);
}
}
else
{
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int b0_height = bottom[0]->height();
const int b0_width = bottom[0]->width();
const Dtype* b0_data = bottom[0]->gpu_data();
const Dtype* offset_data = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* b0_diff = bottom[0]->mutable_gpu_diff();
Dtype* offset_diff = bottom[1]->mutable_gpu_diff();
const int b0_dim = bottom[0]->count(1);
const int offset_dim = bottom[1]->count(1);
const int b0_spatial = bottom[0]->count(2);
const int top_width = b0_width;
const int top_spatial = b0_spatial;
const int top_dim = top_spatial*window_size_*window_size_;
for(int i=0; i<=1; i++)
caffe_gpu_set(bottom[i]->count(), Dtype(0) , bottom[i]->mutable_gpu_diff());
if (propagate_down[0]) {
const int b0_count = bottom[0]->count();
DeformCorrelationBackward1<Dtype>
// NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(b0_count), CAFFE_CUDA_NUM_THREADS>>>(
b0_count, top_diff, b0_data, offset_data , num, channels,
b0_height, b0_width, b0_height, b0_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, b0_diff);
Dtype* container_data = gradient_container_.mutable_gpu_data();
for(int n=0; n<num; n++)
{
caffe_gpu_set(gradient_container_.count(),Dtype(0.0),gradient_container_.mutable_gpu_data());
ComputeGradientToBlob0<Dtype><<<CAFFE_GET_BLOCKS(b0_spatial),CAFFE_CUDA_NUM_THREADS>>>(
b0_spatial, top_diff + n*top_dim, b0_data + n*b0_dim, offset_data + n*offset_dim, channels,
b0_height, b0_width, b0_height, b0_width, window_size_, displacement_,
dilation_, step_h_, step_w_, b0_dim, b0_spatial, b0_dim, b0_spatial, top_spatial, top_width, container_data);
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, channels, b0_spatial, b0_spatial,
1.0, b0_data + n*b0_dim, container_data, 1.0, b0_diff + n*b0_dim);
}
}
if (propagate_down[1]) {
const int top_count = top[0]->count();
DeformCorrelationBackwardOffset<Dtype><<<CAFFE_GET_BLOCKS(top_count),CAFFE_CUDA_NUM_THREADS>>>(
top_count, top_diff, b0_data, b0_data, offset_data, num, channels,
b0_height, b0_width, b0_height, b0_width,
window_size_, displacement_, dilation_, step_h_, step_w_,
b0_dim, b0_spatial, b0_dim, b0_spatial, offset_dim , top_dim, top_spatial, top_width, offset_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DeformCorrelationLayer);
} // namespace caffe |
0904b63902288425911eeb177a03ca5a287f7a4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
/**
* the non-square CUDA transpose kernel is
 * written by Amir Hossein Bakhtiary; use as you wish. Shouldn't have any copyright problems.
*/
// http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html
__global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols)
{
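  // The TILE_DIM+1 padding keeps column-wise reads of the tile free of shared-memory bank conflicts.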
__shared__ double tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
// if (x >= cols||y >= rows){
// return;
// }
int maxJ = TILE_DIM;
int maxJ2 = TILE_DIM;
int otherMaxJ = rows - y;
if (maxJ > otherMaxJ)
maxJ = otherMaxJ;
if ( x < cols ){
for (int j = 0; j < maxJ; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
int otherMaxJ2 = cols - y;
if (maxJ2 > otherMaxJ2){
maxJ2 = otherMaxJ2;
}
if ( x < rows){
for (int j = 0; j < maxJ2; j += BLOCK_ROWS)
odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx<columns)
{
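    /* Each thread filters one image column top-to-bottom with the 3-tap recursive
       (Young-style) Gaussian approximation. */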
/* Compute first 3 output elements */
out[idx] = B*in[idx];
out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx];
out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]);
/* Recursive computation of output in forward direction using filter parameters bf and B */
for(int i=3; i<rows; i++)
{
out[idx+i*columns] = B*in[idx+i*columns];
for(int j=0; j<3; j++)
{
out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns];
}
}
}
}
__global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
int total = columns*(rows-1);
if(idx<columns)
{
/* Compute last 3 output elements */
out[total + idx] = B*in[total + idx];
out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx];
out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]);
/* Recursive computation of output in backward direction using filter parameters bb and B */
for (int i=3; i<rows-1; i++)
{
out[total + idx - i*columns] = B*in[total + idx - i*columns];
for (int j=0; j<3; j++)
{
out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns];
}
}
}
}
extern "C"
void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
  /** \brief Device buffers for the input/output image and the causal (bf) / anticausal (bb) filter coefficients */
double *d_input, *d_output, *d_bf, *d_bb;
hipMalloc((void**) &d_input, rows*columns*sizeof(double));
hipMalloc((void**) &d_output, rows*columns*sizeof(double));
hipMalloc((void**) &d_bf, rows*columns*sizeof(double));
hipMalloc((void**) &d_bb, rows*columns*sizeof(double));
hipMemcpy(d_input, in, rows*columns*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_bf, bf, 3*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_bb, bb, 3*sizeof(double), hipMemcpyHostToDevice);
dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1);
dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
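  /* The Gaussian is separable: transpose so rows become columns, run the causal and
     anticausal passes, transpose back, then run both passes along the original columns. */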
// -------- Convolve Rows----------
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid1), dim3(dimBlock), 0, 0, d_output, d_input, rows, columns);
hipLaunchKernelGGL(( cuconvolve_youngCausal), dim3(rows/256 + 1) , dim3(256), 0, 0, d_output, d_input, columns, rows, B, d_bf);
hipLaunchKernelGGL(( cuconvolve_youngAnticausal), dim3(rows/256 + 1), dim3(256), 0, 0, d_input, d_output, columns, rows, B, d_bb);
// -------- Convolve Columns ----------
hipLaunchKernelGGL(( transposeCoalesced), dim3(dimGrid2), dim3(dimBlock), 0, 0, d_input, d_output, columns, rows);
hipLaunchKernelGGL(( cuconvolve_youngCausal), dim3(columns/256 + 1), dim3(256), 0, 0, d_input, d_output, rows, columns, B, d_bf);
hipLaunchKernelGGL(( cuconvolve_youngAnticausal), dim3(columns/256 + 1), dim3(256), 0, 0, d_output, d_input, rows, columns, B, d_bb);
hipMemcpy(in, d_input, rows*columns*sizeof(double), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Execution time elapsed: %f ms\n", milliseconds);
hipFree(d_input);
hipFree(d_output);
hipFree(d_bf);
hipFree(d_bb);
}
| 0904b63902288425911eeb177a03ca5a287f7a4e.cu |
#include <stdio.h>
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
/**
* the non-square CUDA transpose kernel is
 * written by Amir Hossein Bakhtiary; use as you wish. Shouldn't have any copyright problems.
*/
// http://amirsworklog.blogspot.gr/2015/01/cuda-matrix-transpose-code.html
__global__ void transposeCoalesced(double *odata, const double *idata, int rows,int cols)
{
__shared__ double tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
// if (x >= cols||y >= rows){
// return;
// }
int maxJ = TILE_DIM;
int maxJ2 = TILE_DIM;
int otherMaxJ = rows - y;
if (maxJ > otherMaxJ)
maxJ = otherMaxJ;
if ( x < cols ){
for (int j = 0; j < maxJ; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*cols + x];
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
int otherMaxJ2 = cols - y;
if (maxJ2 > otherMaxJ2){
maxJ2 = otherMaxJ2;
}
if ( x < rows){
for (int j = 0; j < maxJ2; j += BLOCK_ROWS)
odata[(y+j)*rows + x] = tile[threadIdx.x][threadIdx.y + j];
}
}
__global__ void cuconvolve_youngCausal(double * in, double * out, int rows, int columns, double B, double *bf)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx<columns)
{
/* Compute first 3 output elements */
out[idx] = B*in[idx];
out[idx+columns] = B*in[idx+columns] + bf[2]*out[idx];
out[idx+2*columns] = B*in[idx+2*columns] + (bf[1]*out[idx]+bf[2]*out[idx+columns]);
/* Recursive computation of output in forward direction using filter parameters bf and B */
for(int i=3; i<rows; i++)
{
out[idx+i*columns] = B*in[idx+i*columns];
for(int j=0; j<3; j++)
{
out[idx+i*columns] += bf[j]*out[idx + (i-(3-j))*columns];
}
}
}
}
__global__ void cuconvolve_youngAnticausal(double * in, double * out, int rows, int columns, double B, double *bb)
{
unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
int total = columns*(rows-1);
if(idx<columns)
{
/* Compute last 3 output elements */
out[total + idx] = B*in[total + idx];
out[total + idx - columns] = B*in[total + idx - columns] + bb[0]*out[total + idx];
out[total + idx - 2*columns] = B*in[total + idx - 2*columns] + (bb[0]*out[total + idx - columns]+bb[1]*out[total + idx]);
/* Recursive computation of output in backward direction using filter parameters bb and B */
for (int i=3; i<rows-1; i++)
{
out[total + idx - i*columns] = B*in[total + idx - i*columns];
for (int j=0; j<3; j++)
{
out[total + idx - i*columns] += bb[j]*out[total + idx - (i-(j+1))*columns];
}
}
}
}
extern "C"
void cudaYoung(double * in, double * out, int rows, int columns, double *bf, double *bb, double B)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
  /** \brief Device buffers for the input/output image and the causal (bf) / anticausal (bb) filter coefficients */
double *d_input, *d_output, *d_bf, *d_bb;
cudaMalloc((void**) &d_input, rows*columns*sizeof(double));
cudaMalloc((void**) &d_output, rows*columns*sizeof(double));
cudaMalloc((void**) &d_bf, rows*columns*sizeof(double));
cudaMalloc((void**) &d_bb, rows*columns*sizeof(double));
cudaMemcpy(d_input, in, rows*columns*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_bf, bf, 3*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_bb, bb, 3*sizeof(double), cudaMemcpyHostToDevice);
dim3 dimGrid1((columns+TILE_DIM-1)/TILE_DIM,(rows+TILE_DIM-1)/TILE_DIM, 1);
dim3 dimGrid2((rows+TILE_DIM-1)/TILE_DIM,(columns+TILE_DIM-1)/TILE_DIM, 1);
dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
// -------- Convolve Rows----------
transposeCoalesced<<< dimGrid1, dimBlock>>>(d_output, d_input, rows, columns);
cuconvolve_youngCausal<<<rows/256 + 1 , 256>>>(d_output, d_input, columns, rows, B, d_bf);
cuconvolve_youngAnticausal<<<rows/256 + 1, 256>>>(d_input, d_output, columns, rows, B, d_bb);
// -------- Convolve Columns ----------
transposeCoalesced<<< dimGrid2, dimBlock>>>(d_input, d_output, columns, rows);
cuconvolve_youngCausal<<<columns/256 + 1, 256>>>(d_input, d_output, rows, columns, B, d_bf);
cuconvolve_youngAnticausal<<<columns/256 + 1, 256>>>(d_output, d_input, rows, columns, B, d_bb);
cudaMemcpy(in, d_input, rows*columns*sizeof(double), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Execution time elapsed: %f ms\n", milliseconds);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_bf);
cudaFree(d_bb);
}
|
5b9f12555672dcc32c9b630d01db4f8f6a829dcd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sliceIntArray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const int indx = 1;
const int *ss = NULL;
hipMalloc(&ss, XSIZE*YSIZE);
int *zz = NULL;
hipMalloc(&zz, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
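  // Warm up with one launch plus 10 untimed iterations, then time 1000 launches.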
  hipFree(0);
  hipLaunchKernelGGL((sliceIntArray), dim3(gridBlock), dim3(threadBlock), 0, 0, n, indx, ss, zz);
  hipDeviceSynchronize();
  for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((sliceIntArray), dim3(gridBlock), dim3(threadBlock), 0, 0, n, indx, ss, zz);
  }
  auto start = steady_clock::now();
  for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((sliceIntArray), dim3(gridBlock), dim3(threadBlock), 0, 0, n, indx, ss, zz);
  }
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 5b9f12555672dcc32c9b630d01db4f8f6a829dcd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sliceIntArray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
const int indx = 1;
const int *ss = NULL;
cudaMalloc(&ss, XSIZE*YSIZE);
int *zz = NULL;
cudaMalloc(&zz, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sliceIntArray<<<gridBlock,threadBlock>>>(n,indx,ss,zz);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sliceIntArray<<<gridBlock,threadBlock>>>(n,indx,ss,zz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sliceIntArray<<<gridBlock,threadBlock>>>(n,indx,ss,zz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
bfbb522704e1e332d3d8df732756f337ed3db9a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "cutil.h"
#include "HOGEngine.h"
#include "HOGUtils.h"
#include "HOGHistogram.h"
__device__ __constant__ float cenBound[3], halfBin[3], bandWidth[3],
oneHalf = 0.5f;
__device__ __constant__ int tvbin[3];
texture<float, 1, hipReadModeElementType> texGauss;
hipArray* gaussArray;
hipChannelFormatDesc channelDescGauss;
float *hostWeights;
extern __shared__ float allShared[];
extern int rNoHistogramBins, rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX,
rNoOfBlocksY, rNumberOfWindowsX, rNumberOfWindowsY;
// Stuff set during the InitHistograms function, but needed during allocation
static struct {
int cellSizeX, cellSizeY, blockSizeX, blockSizeY, noHistogramBins;
float wtscale;
float var2x, var2y;
float centerX, centerY;
int h_tvbin[3];
float h_cenBound[3], h_halfBin[3], h_bandWidth[3];
} initVars;
void HostAllocHOGHistogramMemory(void) {
int i, j;
float tx, ty;
int cellSizeX = initVars.cellSizeX;
int cellSizeY = initVars.cellSizeY;
int blockSizeX = initVars.blockSizeX;
int blockSizeY = initVars.blockSizeY;
cutilSafeCall(hipHostMalloc(&hostWeights, cellSizeX * blockSizeX *
cellSizeY * blockSizeY * sizeof(float)));
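  // Fill hostWeights with the 2-D Gaussian window for one block; it is later uploaded
  // to gaussArray and sampled through texGauss by the histogram kernel.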
for (i = 0; i < cellSizeX * blockSizeX; i++) {
for (j = 0; j < cellSizeY * blockSizeY; j++) {
tx = i - initVars.centerX;
ty = j - initVars.centerY;
tx *= tx / initVars.var2x;
ty *= ty / initVars.var2y;
hostWeights[i + j * cellSizeX * blockSizeX] = exp(-(tx + ty));
}
}
}
void DeviceAllocHOGHistogramMemory(void) {
cutilSafeCall(hipMallocArray(&gaussArray, &channelDescGauss,
initVars.cellSizeX * initVars.blockSizeX * initVars.cellSizeY *
initVars.blockSizeY, 1));
}
void CopyInHOGHistogram(void) {
cutilSafeCall(cudaMemcpyToArrayAsync(gaussArray, 0, 0, hostWeights,
sizeof(float) * initVars.cellSizeX * initVars.blockSizeX *
initVars.cellSizeY * initVars.blockSizeY, hipMemcpyHostToDevice, stream));
cutilSafeCall(hipMemcpyToSymbolAsync(cenBound, initVars.h_cenBound, 3 *
sizeof(float), 0, hipMemcpyHostToDevice, stream));
cutilSafeCall(hipMemcpyToSymbolAsync(halfBin, initVars.h_halfBin, 3 *
sizeof(float), 0, hipMemcpyHostToDevice, stream));
cutilSafeCall(hipMemcpyToSymbolAsync(bandWidth, initVars.h_bandWidth, 3 *
sizeof(float), 0, hipMemcpyHostToDevice, stream));
cutilSafeCall(hipMemcpyToSymbolAsync(tvbin, initVars.h_tvbin, 3 *
sizeof(int), 0, hipMemcpyHostToDevice, stream));
cutilSafeCall(hipStreamSynchronize(stream));
}
void HostFreeHOGHistogramMemory(void) {
cutilSafeCall(hipHostFree(hostWeights));
hostWeights = NULL;
}
void DeviceFreeHOGHistogramMemory(void) {
cutilSafeCall(hipFreeArray(gaussArray));
gaussArray = NULL;
}
// wt scale == scale for weighting function span
void InitHistograms(int cellSizeX, int cellSizeY, int blockSizeX,
int blockSizeY, int noHistogramBins, float wtscale) {
initVars.cellSizeX = cellSizeX;
initVars.cellSizeY = cellSizeY;
initVars.blockSizeX = blockSizeX;
initVars.blockSizeY = blockSizeY;
initVars.var2x = cellSizeX * blockSizeX / (2 * wtscale);
initVars.var2y = cellSizeY * blockSizeY / (2 * wtscale);
initVars.var2x *= initVars.var2x * 2;
initVars.var2y *= initVars.var2y * 2;
initVars.centerX = cellSizeX * blockSizeX / 2.0f;
initVars.centerY = cellSizeY * blockSizeY / 2.0f;
channelDescGauss = hipCreateChannelDesc<float>();
initVars.h_cenBound[0] = cellSizeX * blockSizeX / 2.0f;
initVars.h_cenBound[1] = cellSizeY * blockSizeY / 2.0f;
// TODO: Can be 360
initVars.h_cenBound[2] = 180 / 2.0f;
initVars.h_halfBin[0] = blockSizeX / 2.0f;
initVars.h_halfBin[1] = blockSizeY / 2.0f;
initVars.h_halfBin[2] = noHistogramBins / 2.0f;
initVars.h_bandWidth[0] = (float) cellSizeX;
initVars.h_bandWidth[0] = 1.0f / initVars.h_bandWidth[0];
initVars.h_bandWidth[1] = (float) cellSizeY;
initVars.h_bandWidth[1] = 1.0f / initVars.h_bandWidth[1];
// TODO: Can be 360
initVars.h_bandWidth[2] = 180.0f / (float) noHistogramBins;
initVars.h_bandWidth[2] = 1.0f / initVars.h_bandWidth[2];
initVars.h_tvbin[0] = blockSizeX;
initVars.h_tvbin[1] = blockSizeY;
initVars.h_tvbin[2] = noHistogramBins;
}
void CloseHistogram() {}
__global__ void computeBlockHistogramsWithGauss(float2* inputImage,
float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, int leftoverX, int leftoverY, int width,
int height) {
int i;
float2 localValue;
float* shLocalHistograms = (float*) allShared;
int cellIdx = threadIdx.y;
int cellIdy = threadIdx.z;
int columnId = threadIdx.x;
int smemReadPos = __mul24(cellIdx, noHistogramBins) + __mul24(cellIdy,
blockSizeX) * noHistogramBins;
int gmemWritePos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, noHistogramBins) +
__mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y,
gridDim.x) * __mul24(blockDim.y, noHistogramBins) * blockDim.z;
int gmemReadStride = width;
int gmemReadPos = leftoverX + __mul24(leftoverY, gmemReadStride) +
(__mul24(blockIdx.x, cellSizeX) + __mul24(blockIdx.y, cellSizeY) *
gmemReadStride) + (columnId + __mul24(cellIdx, cellSizeX) +
__mul24(cellIdy, cellSizeY) * gmemReadStride);
int histogramSize = __mul24(noHistogramBins, blockSizeX) * blockSizeY;
int smemLocalHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
int cmemReadPos = columnId + __mul24(cellIdx, cellSizeX) + __mul24(cellIdy,
cellSizeY) * __mul24(cellSizeX, blockSizeX);
float atx, aty;
float pIx, pIy, pIz;
int fIx, fIy, fIz;
int cIx, cIy, cIz;
float dx, dy, dz;
float cx, cy, cz;
bool lowervalidx, lowervalidy;
bool uppervalidx, uppervalidy;
bool canWrite;
int offset;
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] = 0;
}
#ifdef UNROLL_LOOPS
int halfSizeYm1 = cellSizeY / 2 - 1;
#endif
//if (blockIdx.x == 5 && blockIdx.y == 4)
//{
// int asasa;
// asasa = 0;
// asasa++;
//}
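  // For each pixel of the cell, distribute its Gauss-weighted gradient magnitude into the
  // eight surrounding (cell x, cell y, orientation) bins by trilinear interpolation.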
for (i = 0; i < cellSizeY; i++) {
localValue = inputImage[gmemReadPos + i * gmemReadStride];
localValue.x *= tex1D(texGauss, cmemReadPos + i * cellSizeX * blockSizeX);
atx = cellIdx * cellSizeX + columnId + 0.5;
aty = cellIdy * cellSizeY + i + 0.5;
pIx = halfBin[0] - oneHalf + (atx - cenBound[0]) * bandWidth[0];
pIy = halfBin[1] - oneHalf + (aty - cenBound[1]) * bandWidth[1];
pIz = halfBin[2] - oneHalf + (localValue.y - cenBound[2]) * bandWidth[2];
fIx = floorf(pIx);
fIy = floorf(pIy);
fIz = floorf(pIz);
cIx = fIx + 1;
cIy = fIy + 1;
cIz = fIz + 1; //eq ceilf(pI.)
dx = pIx - fIx;
dy = pIy - fIy;
dz = pIz - fIz;
cx = 1 - dx;
cy = 1 - dy;
cz = 1 - dz;
cIz %= tvbin[2];
fIz %= tvbin[2];
if (fIz < 0) fIz += tvbin[2];
if (cIz < 0) cIz += tvbin[2];
#ifdef UNROLL_LOOPS
if ((i & halfSizeYm1) == 0)
#endif
{
uppervalidx = !(cIx >= tvbin[0] - oneHalf || cIx < -oneHalf);
uppervalidy = !(cIy >= tvbin[1] - oneHalf || cIy < -oneHalf);
lowervalidx = !(fIx < -oneHalf || fIx >= tvbin[0] - oneHalf);
lowervalidy = !(fIy < -oneHalf || fIy >= tvbin[1] - oneHalf);
}
canWrite = lowervalidx && lowervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (fIx + fIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * cx * cy * cz;
shLocalHistograms[offset + cIz] += localValue.x * cx * cy * dz;
}
canWrite = lowervalidx && uppervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (fIx + cIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * cx * dy * cz;
shLocalHistograms[offset + cIz] += localValue.x * cx * dy * dz;
}
canWrite = uppervalidx && lowervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (cIx + fIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * dx * cy * cz;
shLocalHistograms[offset + cIz] += localValue.x * dx * cy * dz;
}
canWrite = (uppervalidx) && (uppervalidy);
if (canWrite) {
offset = smemLocalHistogramPos + (cIx + cIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * dx * dy * cz;
shLocalHistograms[offset + cIz] += localValue.x * dx * dy * dz;
}
}
__syncthreads();
//TODO -> aligned block size * cell size
int smemTargetHistogramPos;
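  // Tree reductions over cellIdy, cellIdx and columnId accumulate the full block
  // histogram at the start of shared memory before it is written to global memory.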
for (unsigned int s = blockSizeY >> 1; s > 0; s >>= 1) {
if (cellIdy < s && (cellIdy + s) < blockSizeY) {
smemTargetHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24((cellIdy + s), histogramSize) *
__mul24(blockSizeX, cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
for (unsigned int s = blockSizeX >> 1; s > 0; s >>= 1) {
if (cellIdx < s && (cellIdx + s) < blockSizeX) {
smemTargetHistogramPos = (columnId + __mul24((cellIdx + s), cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
for (unsigned int s = cellSizeX >> 1; s > 0; s >>= 1) {
if (columnId < s && (columnId + s) < cellSizeX) {
smemTargetHistogramPos = (columnId + s + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
if (columnId == 0) {
//write result to gmem
#ifdef UNROLL_LOOPS
blockHistograms[gmemWritePos + 0].x = shLocalHistograms[smemReadPos + 0];
blockHistograms[gmemWritePos + 1].x = shLocalHistograms[smemReadPos + 1];
blockHistograms[gmemWritePos + 2].x = shLocalHistograms[smemReadPos + 2];
blockHistograms[gmemWritePos + 3].x = shLocalHistograms[smemReadPos + 3];
blockHistograms[gmemWritePos + 4].x = shLocalHistograms[smemReadPos + 4];
blockHistograms[gmemWritePos + 5].x = shLocalHistograms[smemReadPos + 5];
blockHistograms[gmemWritePos + 6].x = shLocalHistograms[smemReadPos + 6];
blockHistograms[gmemWritePos + 7].x = shLocalHistograms[smemReadPos + 7];
blockHistograms[gmemWritePos + 8].x = shLocalHistograms[smemReadPos + 8];
#else
for (i=0; i<noHistogramBins; i++) {
blockHistograms[gmemWritePos + i].x = shLocalHistograms[smemReadPos + i];
}
#endif
}
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
}
void ComputeBlockHistogramsWithGauss(float2* inputImage,
float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, int windowSizeX, int windowSizeY,
int width, int height) {
int leftoverX;
int leftoverY;
dim3 hThreadSize, hBlockSize;
rNoOfCellsX = width / cellSizeX;
rNoOfCellsY = height / cellSizeY;
rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1;
rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1;
rNumberOfWindowsX = (width-windowSizeX)/cellSizeX + 1;
rNumberOfWindowsY = (height-windowSizeY)/cellSizeY + 1;
leftoverX = (width - windowSizeX - cellSizeX * (rNumberOfWindowsX - 1)) / 2;
leftoverY = (height - windowSizeY - cellSizeY * (rNumberOfWindowsY - 1)) / 2;
hThreadSize = dim3(cellSizeX, blockSizeX, blockSizeY);
hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY);
cutilSafeCall(hipBindTextureToArray(texGauss, gaussArray, channelDescGauss));
hipLaunchKernelGGL(( computeBlockHistogramsWithGauss), dim3(hBlockSize), dim3(hThreadSize), noHistogramBins *
blockSizeX * blockSizeY * cellSizeX * blockSizeY * blockSizeX *
sizeof(float), stream, inputImage, blockHistograms, noHistogramBins,
cellSizeX, cellSizeY, blockSizeX, blockSizeY, leftoverX, leftoverY, width,
height);
cutilSafeCall(hipStreamSynchronize(stream));
cutilSafeCall(hipUnbindTexture(texGauss));
}
void NormalizeBlockHistograms(float1* blockHistograms, int noHistogramBins,
int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int width,
int height) {
dim3 hThreadSize, hBlockSize;
rNoOfCellsX = width / cellSizeX;
rNoOfCellsY = height / cellSizeY;
rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1;
rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1;
hThreadSize = dim3(noHistogramBins, blockSizeX, blockSizeY);
hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY);
int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins);
int alignedBlockDimY = iClosestPowerOfTwo(blockSizeX);
int alignedBlockDimZ = iClosestPowerOfTwo(blockSizeY);
hipLaunchKernelGGL(( normalizeBlockHistograms), dim3(hBlockSize), dim3(hThreadSize), noHistogramBins *
blockSizeX * blockSizeY * sizeof(float), stream, blockHistograms,
noHistogramBins, rNoOfBlocksX, rNoOfBlocksY, blockSizeX, blockSizeY,
alignedBlockDimX, alignedBlockDimY, alignedBlockDimZ, noHistogramBins *
rNoOfCellsX, rNoOfCellsY);
cutilSafeCall(hipStreamSynchronize(stream));
}
__global__ void normalizeBlockHistograms(float1 *blockHistograms,
int noHistogramBins, int rNoOfHOGBlocksX, int rNoOfHOGBlocksY,
int blockSizeX, int blockSizeY, int alignedBlockDimX, int alignedBlockDimY,
int alignedBlockDimZ, int width, int height) {
int smemLocalHistogramPos, smemTargetHistogramPos, gmemPosBlock,
gmemWritePosBlock;
float* shLocalHistogram = (float*) allShared;
float localValue, norm1, norm2;
float eps2 = 0.01f;
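  // L2-Hys block normalization: L2-normalize, clip each value at 0.2, then renormalize.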
smemLocalHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
gmemPosBlock = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z,
gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x +
__mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y,
gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z;
gmemWritePosBlock = __mul24(threadIdx.z, noHistogramBins) +
__mul24(threadIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) +
threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y +
__mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) *
blockDim.z;
localValue = blockHistograms[gmemPosBlock].x;
shLocalHistogram[smemLocalHistogramPos] = localValue * localValue;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
__syncthreads();
for(unsigned int s = alignedBlockDimZ >> 1; s > 0; s >>= 1) {
if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for (unsigned int s = alignedBlockDimY >> 1; s > 0; s >>= 1) {
if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) {
smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s);
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
norm1 = sqrtf(shLocalHistogram[0]) + __mul24(noHistogramBins, blockSizeX) *
blockSizeY;
localValue /= norm1;
  localValue = fminf(0.2f, localValue); // L2-Hys clipping threshold of 0.2, as in Dalal & Triggs
__syncthreads();
shLocalHistogram[smemLocalHistogramPos] = localValue * localValue;
__syncthreads();
for(unsigned int s = alignedBlockDimZ >> 1; s > 0; s >>= 1) {
if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for (unsigned int s = alignedBlockDimY >> 1; s > 0; s >>= 1) {
if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) {
smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s);
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
norm2 = sqrtf(shLocalHistogram[0]) + eps2;
localValue /= norm2;
blockHistograms[gmemWritePosBlock].x = localValue;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
}
| bfbb522704e1e332d3d8df732756f337ed3db9a0.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cutil.h"
#include "HOGEngine.h"
#include "HOGUtils.h"
#include "HOGHistogram.h"
__device__ __constant__ float cenBound[3], halfBin[3], bandWidth[3],
oneHalf = 0.5f;
__device__ __constant__ int tvbin[3];
texture<float, 1, cudaReadModeElementType> texGauss;
cudaArray* gaussArray;
cudaChannelFormatDesc channelDescGauss;
float *hostWeights;
extern __shared__ float allShared[];
extern int rNoHistogramBins, rNoOfCellsX, rNoOfCellsY, rNoOfBlocksX,
rNoOfBlocksY, rNumberOfWindowsX, rNumberOfWindowsY;
// Stuff set during the InitHistograms function, but needed during allocation
static struct {
int cellSizeX, cellSizeY, blockSizeX, blockSizeY, noHistogramBins;
float wtscale;
float var2x, var2y;
float centerX, centerY;
int h_tvbin[3];
float h_cenBound[3], h_halfBin[3], h_bandWidth[3];
} initVars;
void HostAllocHOGHistogramMemory(void) {
int i, j;
float tx, ty;
int cellSizeX = initVars.cellSizeX;
int cellSizeY = initVars.cellSizeY;
int blockSizeX = initVars.blockSizeX;
int blockSizeY = initVars.blockSizeY;
cutilSafeCall(cudaMallocHost(&hostWeights, cellSizeX * blockSizeX *
cellSizeY * blockSizeY * sizeof(float)));
for (i = 0; i < cellSizeX * blockSizeX; i++) {
for (j = 0; j < cellSizeY * blockSizeY; j++) {
tx = i - initVars.centerX;
ty = j - initVars.centerY;
tx *= tx / initVars.var2x;
ty *= ty / initVars.var2y;
hostWeights[i + j * cellSizeX * blockSizeX] = exp(-(tx + ty));
}
}
}
void DeviceAllocHOGHistogramMemory(void) {
cutilSafeCall(cudaMallocArray(&gaussArray, &channelDescGauss,
initVars.cellSizeX * initVars.blockSizeX * initVars.cellSizeY *
initVars.blockSizeY, 1));
}
void CopyInHOGHistogram(void) {
cutilSafeCall(cudaMemcpyToArrayAsync(gaussArray, 0, 0, hostWeights,
sizeof(float) * initVars.cellSizeX * initVars.blockSizeX *
initVars.cellSizeY * initVars.blockSizeY, cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaMemcpyToSymbolAsync(cenBound, initVars.h_cenBound, 3 *
sizeof(float), 0, cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaMemcpyToSymbolAsync(halfBin, initVars.h_halfBin, 3 *
sizeof(float), 0, cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaMemcpyToSymbolAsync(bandWidth, initVars.h_bandWidth, 3 *
sizeof(float), 0, cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaMemcpyToSymbolAsync(tvbin, initVars.h_tvbin, 3 *
sizeof(int), 0, cudaMemcpyHostToDevice, stream));
cutilSafeCall(cudaStreamSynchronize(stream));
}
void HostFreeHOGHistogramMemory(void) {
cutilSafeCall(cudaFreeHost(hostWeights));
hostWeights = NULL;
}
void DeviceFreeHOGHistogramMemory(void) {
cutilSafeCall(cudaFreeArray(gaussArray));
gaussArray = NULL;
}
// wt scale == scale for weighting function span
void InitHistograms(int cellSizeX, int cellSizeY, int blockSizeX,
int blockSizeY, int noHistogramBins, float wtscale) {
initVars.cellSizeX = cellSizeX;
initVars.cellSizeY = cellSizeY;
initVars.blockSizeX = blockSizeX;
initVars.blockSizeY = blockSizeY;
initVars.var2x = cellSizeX * blockSizeX / (2 * wtscale);
initVars.var2y = cellSizeY * blockSizeY / (2 * wtscale);
initVars.var2x *= initVars.var2x * 2;
initVars.var2y *= initVars.var2y * 2;
initVars.centerX = cellSizeX * blockSizeX / 2.0f;
initVars.centerY = cellSizeY * blockSizeY / 2.0f;
channelDescGauss = cudaCreateChannelDesc<float>();
initVars.h_cenBound[0] = cellSizeX * blockSizeX / 2.0f;
initVars.h_cenBound[1] = cellSizeY * blockSizeY / 2.0f;
// TODO: Can be 360
initVars.h_cenBound[2] = 180 / 2.0f;
initVars.h_halfBin[0] = blockSizeX / 2.0f;
initVars.h_halfBin[1] = blockSizeY / 2.0f;
initVars.h_halfBin[2] = noHistogramBins / 2.0f;
initVars.h_bandWidth[0] = (float) cellSizeX;
initVars.h_bandWidth[0] = 1.0f / initVars.h_bandWidth[0];
initVars.h_bandWidth[1] = (float) cellSizeY;
initVars.h_bandWidth[1] = 1.0f / initVars.h_bandWidth[1];
// TODO: Can be 360
initVars.h_bandWidth[2] = 180.0f / (float) noHistogramBins;
initVars.h_bandWidth[2] = 1.0f / initVars.h_bandWidth[2];
initVars.h_tvbin[0] = blockSizeX;
initVars.h_tvbin[1] = blockSizeY;
initVars.h_tvbin[2] = noHistogramBins;
}
void CloseHistogram() {}
__global__ void computeBlockHistogramsWithGauss(float2* inputImage,
float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, int leftoverX, int leftoverY, int width,
int height) {
int i;
float2 localValue;
float* shLocalHistograms = (float*) allShared;
int cellIdx = threadIdx.y;
int cellIdy = threadIdx.z;
int columnId = threadIdx.x;
int smemReadPos = __mul24(cellIdx, noHistogramBins) + __mul24(cellIdy,
blockSizeX) * noHistogramBins;
int gmemWritePos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, gridDim.x) * __mul24(blockDim.y, noHistogramBins) +
__mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y,
gridDim.x) * __mul24(blockDim.y, noHistogramBins) * blockDim.z;
int gmemReadStride = width;
int gmemReadPos = leftoverX + __mul24(leftoverY, gmemReadStride) +
(__mul24(blockIdx.x, cellSizeX) + __mul24(blockIdx.y, cellSizeY) *
gmemReadStride) + (columnId + __mul24(cellIdx, cellSizeX) +
__mul24(cellIdy, cellSizeY) * gmemReadStride);
int histogramSize = __mul24(noHistogramBins, blockSizeX) * blockSizeY;
int smemLocalHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
int cmemReadPos = columnId + __mul24(cellIdx, cellSizeX) + __mul24(cellIdy,
cellSizeY) * __mul24(cellSizeX, blockSizeX);
float atx, aty;
float pIx, pIy, pIz;
int fIx, fIy, fIz;
int cIx, cIy, cIz;
float dx, dy, dz;
float cx, cy, cz;
bool lowervalidx, lowervalidy;
bool uppervalidx, uppervalidy;
bool canWrite;
int offset;
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] = 0;
}
#ifdef UNROLL_LOOPS
int halfSizeYm1 = cellSizeY / 2 - 1;
#endif
//if (blockIdx.x == 5 && blockIdx.y == 4)
//{
// int asasa;
// asasa = 0;
// asasa++;
//}
for (i = 0; i < cellSizeY; i++) {
localValue = inputImage[gmemReadPos + i * gmemReadStride];
localValue.x *= tex1D(texGauss, cmemReadPos + i * cellSizeX * blockSizeX);
atx = cellIdx * cellSizeX + columnId + 0.5;
aty = cellIdy * cellSizeY + i + 0.5;
pIx = halfBin[0] - oneHalf + (atx - cenBound[0]) * bandWidth[0];
pIy = halfBin[1] - oneHalf + (aty - cenBound[1]) * bandWidth[1];
pIz = halfBin[2] - oneHalf + (localValue.y - cenBound[2]) * bandWidth[2];
fIx = floorf(pIx);
fIy = floorf(pIy);
fIz = floorf(pIz);
cIx = fIx + 1;
cIy = fIy + 1;
cIz = fIz + 1; //eq ceilf(pI.)
dx = pIx - fIx;
dy = pIy - fIy;
dz = pIz - fIz;
cx = 1 - dx;
cy = 1 - dy;
cz = 1 - dz;
cIz %= tvbin[2];
fIz %= tvbin[2];
if (fIz < 0) fIz += tvbin[2];
if (cIz < 0) cIz += tvbin[2];
#ifdef UNROLL_LOOPS
if ((i & halfSizeYm1) == 0)
#endif
{
uppervalidx = !(cIx >= tvbin[0] - oneHalf || cIx < -oneHalf);
uppervalidy = !(cIy >= tvbin[1] - oneHalf || cIy < -oneHalf);
lowervalidx = !(fIx < -oneHalf || fIx >= tvbin[0] - oneHalf);
lowervalidy = !(fIy < -oneHalf || fIy >= tvbin[1] - oneHalf);
}
canWrite = lowervalidx && lowervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (fIx + fIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * cx * cy * cz;
shLocalHistograms[offset + cIz] += localValue.x * cx * cy * dz;
}
canWrite = lowervalidx && uppervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (fIx + cIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * cx * dy * cz;
shLocalHistograms[offset + cIz] += localValue.x * cx * dy * dz;
}
canWrite = uppervalidx && lowervalidy;
if (canWrite) {
offset = smemLocalHistogramPos + (cIx + fIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * dx * cy * cz;
shLocalHistograms[offset + cIz] += localValue.x * dx * cy * dz;
}
canWrite = (uppervalidx) && (uppervalidy);
if (canWrite) {
offset = smemLocalHistogramPos + (cIx + cIy * blockSizeY) *
noHistogramBins;
shLocalHistograms[offset + fIz] += localValue.x * dx * dy * cz;
shLocalHistograms[offset + cIz] += localValue.x * dx * dy * dz;
}
}
__syncthreads();
//TODO -> aligned block size * cell size
int smemTargetHistogramPos;
for (unsigned int s = blockSizeY >> 1; s > 0; s >>= 1) {
if (cellIdy < s && (cellIdy + s) < blockSizeY) {
smemTargetHistogramPos = (columnId + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24((cellIdy + s), histogramSize) *
__mul24(blockSizeX, cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
for (unsigned int s = blockSizeX >> 1; s > 0; s >>= 1) {
if (cellIdx < s && (cellIdx + s) < blockSizeX) {
smemTargetHistogramPos = (columnId + __mul24((cellIdx + s), cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
for (unsigned int s = cellSizeX >> 1; s > 0; s >>= 1) {
if (columnId < s && (columnId + s) < cellSizeX) {
smemTargetHistogramPos = (columnId + s + __mul24(cellIdx, cellSizeX)) *
histogramSize + __mul24(cellIdy, histogramSize) * __mul24(blockSizeX,
cellSizeX);
#ifdef UNROLL_LOOPS
shLocalHistograms[smemLocalHistogramPos + 0] += shLocalHistograms[smemTargetHistogramPos + 0];
shLocalHistograms[smemLocalHistogramPos + 1] += shLocalHistograms[smemTargetHistogramPos + 1];
shLocalHistograms[smemLocalHistogramPos + 2] += shLocalHistograms[smemTargetHistogramPos + 2];
shLocalHistograms[smemLocalHistogramPos + 3] += shLocalHistograms[smemTargetHistogramPos + 3];
shLocalHistograms[smemLocalHistogramPos + 4] += shLocalHistograms[smemTargetHistogramPos + 4];
shLocalHistograms[smemLocalHistogramPos + 5] += shLocalHistograms[smemTargetHistogramPos + 5];
shLocalHistograms[smemLocalHistogramPos + 6] += shLocalHistograms[smemTargetHistogramPos + 6];
shLocalHistograms[smemLocalHistogramPos + 7] += shLocalHistograms[smemTargetHistogramPos + 7];
shLocalHistograms[smemLocalHistogramPos + 8] += shLocalHistograms[smemTargetHistogramPos + 8];
shLocalHistograms[smemLocalHistogramPos + 9] += shLocalHistograms[smemTargetHistogramPos + 9];
shLocalHistograms[smemLocalHistogramPos + 10] += shLocalHistograms[smemTargetHistogramPos + 10];
shLocalHistograms[smemLocalHistogramPos + 11] += shLocalHistograms[smemTargetHistogramPos + 11];
shLocalHistograms[smemLocalHistogramPos + 12] += shLocalHistograms[smemTargetHistogramPos + 12];
shLocalHistograms[smemLocalHistogramPos + 13] += shLocalHistograms[smemTargetHistogramPos + 13];
shLocalHistograms[smemLocalHistogramPos + 14] += shLocalHistograms[smemTargetHistogramPos + 14];
shLocalHistograms[smemLocalHistogramPos + 15] += shLocalHistograms[smemTargetHistogramPos + 15];
shLocalHistograms[smemLocalHistogramPos + 16] += shLocalHistograms[smemTargetHistogramPos + 16];
shLocalHistograms[smemLocalHistogramPos + 17] += shLocalHistograms[smemTargetHistogramPos + 17];
shLocalHistograms[smemLocalHistogramPos + 18] += shLocalHistograms[smemTargetHistogramPos + 18];
shLocalHistograms[smemLocalHistogramPos + 19] += shLocalHistograms[smemTargetHistogramPos + 19];
shLocalHistograms[smemLocalHistogramPos + 20] += shLocalHistograms[smemTargetHistogramPos + 20];
shLocalHistograms[smemLocalHistogramPos + 21] += shLocalHistograms[smemTargetHistogramPos + 21];
shLocalHistograms[smemLocalHistogramPos + 22] += shLocalHistograms[smemTargetHistogramPos + 22];
shLocalHistograms[smemLocalHistogramPos + 23] += shLocalHistograms[smemTargetHistogramPos + 23];
shLocalHistograms[smemLocalHistogramPos + 24] += shLocalHistograms[smemTargetHistogramPos + 24];
shLocalHistograms[smemLocalHistogramPos + 25] += shLocalHistograms[smemTargetHistogramPos + 25];
shLocalHistograms[smemLocalHistogramPos + 26] += shLocalHistograms[smemTargetHistogramPos + 26];
shLocalHistograms[smemLocalHistogramPos + 27] += shLocalHistograms[smemTargetHistogramPos + 27];
shLocalHistograms[smemLocalHistogramPos + 28] += shLocalHistograms[smemTargetHistogramPos + 28];
shLocalHistograms[smemLocalHistogramPos + 29] += shLocalHistograms[smemTargetHistogramPos + 29];
shLocalHistograms[smemLocalHistogramPos + 30] += shLocalHistograms[smemTargetHistogramPos + 30];
shLocalHistograms[smemLocalHistogramPos + 31] += shLocalHistograms[smemTargetHistogramPos + 31];
shLocalHistograms[smemLocalHistogramPos + 32] += shLocalHistograms[smemTargetHistogramPos + 32];
shLocalHistograms[smemLocalHistogramPos + 33] += shLocalHistograms[smemTargetHistogramPos + 33];
shLocalHistograms[smemLocalHistogramPos + 34] += shLocalHistograms[smemTargetHistogramPos + 34];
shLocalHistograms[smemLocalHistogramPos + 35] += shLocalHistograms[smemTargetHistogramPos + 35];
#else
for (i = 0; i < histogramSize; i++) {
shLocalHistograms[smemLocalHistogramPos + i] +=
shLocalHistograms[smemTargetHistogramPos + i];
}
#endif
}
__syncthreads();
}
if (columnId == 0) {
//write result to gmem
#ifdef UNROLL_LOOPS
blockHistograms[gmemWritePos + 0].x = shLocalHistograms[smemReadPos + 0];
blockHistograms[gmemWritePos + 1].x = shLocalHistograms[smemReadPos + 1];
blockHistograms[gmemWritePos + 2].x = shLocalHistograms[smemReadPos + 2];
blockHistograms[gmemWritePos + 3].x = shLocalHistograms[smemReadPos + 3];
blockHistograms[gmemWritePos + 4].x = shLocalHistograms[smemReadPos + 4];
blockHistograms[gmemWritePos + 5].x = shLocalHistograms[smemReadPos + 5];
blockHistograms[gmemWritePos + 6].x = shLocalHistograms[smemReadPos + 6];
blockHistograms[gmemWritePos + 7].x = shLocalHistograms[smemReadPos + 7];
blockHistograms[gmemWritePos + 8].x = shLocalHistograms[smemReadPos + 8];
#else
for (i=0; i<noHistogramBins; i++) {
blockHistograms[gmemWritePos + i].x = shLocalHistograms[smemReadPos + i];
}
#endif
}
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
}
void ComputeBlockHistogramsWithGauss(float2* inputImage,
float1* blockHistograms, int noHistogramBins, int cellSizeX, int cellSizeY,
int blockSizeX, int blockSizeY, int windowSizeX, int windowSizeY,
int width, int height) {
int leftoverX;
int leftoverY;
dim3 hThreadSize, hBlockSize;
rNoOfCellsX = width / cellSizeX;
rNoOfCellsY = height / cellSizeY;
rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1;
rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1;
rNumberOfWindowsX = (width-windowSizeX)/cellSizeX + 1;
rNumberOfWindowsY = (height-windowSizeY)/cellSizeY + 1;
leftoverX = (width - windowSizeX - cellSizeX * (rNumberOfWindowsX - 1)) / 2;
leftoverY = (height - windowSizeY - cellSizeY * (rNumberOfWindowsY - 1)) / 2;
hThreadSize = dim3(cellSizeX, blockSizeX, blockSizeY);
hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY);
cutilSafeCall(cudaBindTextureToArray(texGauss, gaussArray, channelDescGauss));
computeBlockHistogramsWithGauss<<<hBlockSize, hThreadSize, noHistogramBins *
blockSizeX * blockSizeY * cellSizeX * blockSizeY * blockSizeX *
sizeof(float), stream>>>(inputImage, blockHistograms, noHistogramBins,
cellSizeX, cellSizeY, blockSizeX, blockSizeY, leftoverX, leftoverY, width,
height);
cutilSafeCall(cudaStreamSynchronize(stream));
cutilSafeCall(cudaUnbindTexture(texGauss));
}
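// Illustrative call (hypothetical buffer names; parameter values follow the usual
// Dalal-Triggs setup of 9 orientation bins, 8x8-pixel cells, 2x2-cell blocks and a
// 64x128 detection window):
//   ComputeBlockHistogramsWithGauss(dImage, dBlockHistograms, 9, 8, 8, 2, 2, 64, 128, width, height);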
void NormalizeBlockHistograms(float1* blockHistograms, int noHistogramBins,
int cellSizeX, int cellSizeY, int blockSizeX, int blockSizeY, int width,
int height) {
dim3 hThreadSize, hBlockSize;
rNoOfCellsX = width / cellSizeX;
rNoOfCellsY = height / cellSizeY;
rNoOfBlocksX = rNoOfCellsX - blockSizeX + 1;
rNoOfBlocksY = rNoOfCellsY - blockSizeY + 1;
hThreadSize = dim3(noHistogramBins, blockSizeX, blockSizeY);
hBlockSize = dim3(rNoOfBlocksX, rNoOfBlocksY);
int alignedBlockDimX = iClosestPowerOfTwo(noHistogramBins);
int alignedBlockDimY = iClosestPowerOfTwo(blockSizeX);
int alignedBlockDimZ = iClosestPowerOfTwo(blockSizeY);
normalizeBlockHistograms<<<hBlockSize, hThreadSize, noHistogramBins *
blockSizeX * blockSizeY * sizeof(float), stream>>>(blockHistograms,
noHistogramBins, rNoOfBlocksX, rNoOfBlocksY, blockSizeX, blockSizeY,
alignedBlockDimX, alignedBlockDimY, alignedBlockDimZ, noHistogramBins *
rNoOfCellsX, rNoOfCellsY);
cutilSafeCall(cudaStreamSynchronize(stream));
}
__global__ void normalizeBlockHistograms(float1 *blockHistograms,
int noHistogramBins, int rNoOfHOGBlocksX, int rNoOfHOGBlocksY,
int blockSizeX, int blockSizeY, int alignedBlockDimX, int alignedBlockDimY,
int alignedBlockDimZ, int width, int height) {
int smemLocalHistogramPos, smemTargetHistogramPos, gmemPosBlock,
gmemWritePosBlock;
float* shLocalHistogram = (float*) allShared;
float localValue, norm1, norm2;
float eps2 = 0.01f;
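// Two-pass block normalization (L2-Hys): reduce the squared bin values to obtain the block's
// L2 norm, divide each bin by it, clip the result at 0.2, then renormalize after a second
// squared-sum reduction.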
smemLocalHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
gmemPosBlock = __mul24(threadIdx.y, noHistogramBins) + __mul24(threadIdx.z,
gridDim.x) * __mul24(blockDim.y, blockDim.x) + threadIdx.x +
__mul24(blockIdx.x, noHistogramBins) * blockDim.y + __mul24(blockIdx.y,
gridDim.x) * __mul24(blockDim.y, blockDim.x) * blockDim.z;
gmemWritePosBlock = __mul24(threadIdx.z, noHistogramBins) +
__mul24(threadIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) +
threadIdx.x + __mul24(blockIdx.x, noHistogramBins) * blockDim.y +
__mul24(blockIdx.y, gridDim.x) * __mul24(blockDim.y, blockDim.x) *
blockDim.z;
localValue = blockHistograms[gmemPosBlock].x;
shLocalHistogram[smemLocalHistogramPos] = localValue * localValue;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
__syncthreads();
for(unsigned int s = alignedBlockDimZ >> 1; s > 0; s >>= 1) {
if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for (unsigned int s = alignedBlockDimY >> 1; s > 0; s >>= 1) {
if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) {
smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s);
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
norm1 = sqrtf(shLocalHistogram[0]) + __mul24(noHistogramBins, blockSizeX) *
blockSizeY;
localValue /= norm1;
localValue = fminf(0.2f, localValue); // clip at 0.2 (the standard L2-Hys clipping threshold)
__syncthreads();
shLocalHistogram[smemLocalHistogramPos] = localValue * localValue;
__syncthreads();
for(unsigned int s = alignedBlockDimZ >> 1; s > 0; s >>= 1) {
if (threadIdx.z < s && (threadIdx.z + s) < blockDim.z) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24((threadIdx.z + s), blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for (unsigned int s = alignedBlockDimY >> 1; s > 0; s >>= 1) {
if (threadIdx.y < s && (threadIdx.y + s) < blockDim.y) {
smemTargetHistogramPos = __mul24((threadIdx.y + s), noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + threadIdx.x;
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
for(unsigned int s = alignedBlockDimX >> 1; s > 0; s >>= 1) {
if (threadIdx.x < s && (threadIdx.x + s) < blockDim.x) {
smemTargetHistogramPos = __mul24(threadIdx.y, noHistogramBins) +
__mul24(threadIdx.z, blockDim.x) * blockDim.y + (threadIdx.x + s);
shLocalHistogram[smemLocalHistogramPos] +=
shLocalHistogram[smemTargetHistogramPos];
}
__syncthreads();
}
norm2 = sqrtf(shLocalHistogram[0]) + eps2;
localValue /= norm2;
blockHistograms[gmemWritePosBlock].x = localValue;
if (blockIdx.x == 10 && blockIdx.y == 8) {
int asasa;
asasa = 0;
asasa++;
}
}
|
36437e319db97b82cef775e7610f20f3b9c48ebd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2020-2021 by XGBoost Contributors
*/
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <algorithm>
#include <ctgmath>
#include <limits>
#include "../../common/device_helpers.cuh"
#include "../../data/ellpack_page.cuh"
#include "histogram.cuh"
#include "row_partitioner_hip.cuh"
#include "xgboost/base.h"
namespace xgboost {
namespace tree {
// The following two functions are slightly modified versions of fbcuda.
/* \brief Constructs a rounding factor used to truncate elements in a sum such that the
sum of the truncated elements is the same no matter what the order of the sum is.
* Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point
* Summation' by Demmel and Nguyen
* In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound
*
* \begin{equation}
* max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) )
* \end{equation}
*
* to avoid outliers, as the full reduction is reproducible on GPU with reduction tree.
*/
template <typename T>
T CreateRoundingFactor(T max_abs, int n) {
T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon());
// Calculate ceil(log_2(delta)).
// frexpf() calculates exp and returns `x` such that
// delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1).
// Because |x| < 1, exp is exactly ceil(log_2(delta)).
int exp;
::frexp(delta, &exp);
// return M = 2 ^ ceil(log_2(delta))
return std::ldexp(static_cast<T>(1.0), exp);
}
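// A minimal sketch (an assumption, not part of the original file) of how such a factor is
// typically applied: each addend is truncated onto the fixed grid defined by the factor, so
// the floating-point sum no longer depends on the order of the (parallel) reduction.
template <typename T>
T TruncateWithRoundingFactorSketch(T const rounding_factor, T const x) {
  // Adding and then subtracting the power-of-two factor drops the low-order bits of x.
  return (rounding_factor + x) - rounding_factor;
}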
namespace {
struct Pair {
GradientPair first;
GradientPair second;
};
__host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) {
return {lhs.first + rhs.first, lhs.second + rhs.second};
}
} // anonymous namespace
struct Clip : public thrust::unary_function<GradientPair, Pair> {
static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? v : 0; }
static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? abs(v) : 0; }
XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const {
auto pg = Pclip(x.GetGrad());
auto ph = Pclip(x.GetHess());
auto ng = Nclip(x.GetGrad());
auto nh = Nclip(x.GetHess());
return {GradientPair{pg, ph}, GradientPair{ng, nh}};
}
};
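// For example, Clip()(GradientPair{-0.5f, 2.0f}) yields {GradientPair{0, 2}, GradientPair{0.5, 0}}:
// positive and negative parts are accumulated separately so CreateRoundingFactor can be fed the
// larger of the two partial sums.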
GradientQuantizer::GradientQuantizer(common::Span<GradientPair const> gpair) {
using GradientSumT = GradientPairPrecise;
using T = typename GradientSumT::ValueT;
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::device_ptr<GradientPair const> gpair_beg{gpair.data()};
auto beg = thrust::make_transform_iterator(gpair_beg, Clip());
Pair p =
dh::Reduce(thrust::hip::par(alloc), beg, beg + gpair.size(), Pair{}, thrust::plus<Pair>{});
// Treat pair as array of 4 primitive types to allreduce
using ReduceT = typename decltype(p.first)::ValueT;
static_assert(sizeof(Pair) == sizeof(ReduceT) * 4, "Expected to reduce four elements.");
collective::Allreduce<collective::Operation::kSum>(reinterpret_cast<ReduceT*>(&p), 4);
GradientPair positive_sum{p.first}, negative_sum{p.second};
std::size_t total_rows = gpair.size();
collective::Allreduce<collective::Operation::kSum>(&total_rows, 1);
auto histogram_rounding = GradientSumT{
CreateRoundingFactor<T>(::max(positive_sum.GetGrad(), negative_sum.GetGrad()), total_rows),
CreateRoundingFactor<T>(::max(positive_sum.GetHess(), negative_sum.GetHess()),
total_rows)};
using IntT = typename GradientPairInt64::ValueT;
/**
* Factor for converting gradients from fixed-point to floating-point.
*/
to_floating_point_ =
histogram_rounding /
T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit
/**
* Factor for converting gradients from floating-point to fixed-point. For
* f64:
*
* Precision = 64 - 1 - log2(rounding)
*
   * rounding is calculated as exp(m); see the rounding factor calculation for
* details.
*/
to_fixed_point_ =
GradientSumT(T(1) / to_floating_point_.GetGrad(), T(1) / to_floating_point_.GetHess());
}
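// For illustration (an assumption about ToFixedPoint, which is defined elsewhere): a gradient g
// is presumably stored as the 64-bit integer round(g * to_fixed_point_.GetGrad()) and recovered
// by multiplying that integer with to_floating_point_.GetGrad(); hessians use the Hess factors.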
XGBOOST_DEV_INLINE void
AtomicAddGpairShared(xgboost::GradientPairInt64 *dest,
xgboost::GradientPairInt64 const &gpair) {
auto dst_ptr = reinterpret_cast<int64_t *>(dest);
auto g = gpair.GetQuantisedGrad();
auto h = gpair.GetQuantisedHess();
AtomicAdd64As32(dst_ptr, g);
AtomicAdd64As32(dst_ptr + 1, h);
}
// Global 64 bit integer atomics at the time of writing do not benefit from being separated into two
// 32 bit atomics
XGBOOST_DEV_INLINE void AtomicAddGpairGlobal(xgboost::GradientPairInt64* dest,
xgboost::GradientPairInt64 const& gpair) {
auto dst_ptr = reinterpret_cast<uint64_t*>(dest);
auto g = gpair.GetQuantisedGrad();
auto h = gpair.GetQuantisedHess();
atomicAdd(dst_ptr,
*reinterpret_cast<uint64_t*>(&g));
atomicAdd(dst_ptr + 1,
*reinterpret_cast<uint64_t*>(&h));
}
template <int kBlockThreads, int kItemsPerThread,
int kItemsPerTile = kBlockThreads* kItemsPerThread>
class HistogramAgent {
GradientPairInt64* smem_arr_;
GradientPairInt64* d_node_hist_;
dh::LDGIterator<const RowPartitioner::RowIndexT> d_ridx_;
const GradientPair* d_gpair_;
const FeatureGroup group_;
const EllpackDeviceAccessor& matrix_;
const int feature_stride_;
const std::size_t n_elements_;
const GradientQuantizer& rounding_;
public:
__device__ HistogramAgent(GradientPairInt64* smem_arr,
GradientPairInt64* __restrict__ d_node_hist, const FeatureGroup& group,
const EllpackDeviceAccessor& matrix,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
const GradientQuantizer& rounding, const GradientPair* d_gpair)
: smem_arr_(smem_arr),
d_node_hist_(d_node_hist),
d_ridx_(d_ridx.data()),
group_(group),
matrix_(matrix),
feature_stride_(matrix.is_dense ? group.num_features : matrix.row_stride),
n_elements_(feature_stride_ * d_ridx.size()),
rounding_(rounding),
d_gpair_(d_gpair) {}
__device__ void ProcessPartialTileShared(std::size_t offset) {
for (std::size_t idx = offset + threadIdx.x;
idx < min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) {
int ridx = d_ridx_[idx / feature_stride_];
int gidx =
matrix_
.gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_] -
group_.start_bin;
if (matrix_.is_dense || gidx != matrix_.NumBins()) {
auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]);
AtomicAddGpairShared(smem_arr_ + gidx, adjusted);
}
}
}
// Instruction level parallelism by loop unrolling
// Allows the kernel to pipeline many operations while waiting for global memory
// Increases the throughput of this kernel significantly
__device__ void ProcessFullTileShared(std::size_t offset) {
std::size_t idx[kItemsPerThread];
int ridx[kItemsPerThread];
int gidx[kItemsPerThread];
GradientPair gpair[kItemsPerThread];
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
idx[i] = offset + i * kBlockThreads + threadIdx.x;
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
ridx[i] = d_ridx_[idx[i] / feature_stride_];
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
gpair[i] = d_gpair_[ridx[i]];
gidx[i] = matrix_.gidx_iter[ridx[i] * matrix_.row_stride + group_.start_feature +
idx[i] % feature_stride_];
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
if ((matrix_.is_dense || gidx[i] != matrix_.NumBins())) {
auto adjusted = rounding_.ToFixedPoint(gpair[i]);
AtomicAddGpairShared(smem_arr_ + gidx[i] - group_.start_bin, adjusted);
}
}
}
__device__ void BuildHistogramWithShared() {
dh::BlockFill(smem_arr_, group_.num_bins, GradientPairInt64());
__syncthreads();
std::size_t offset = blockIdx.x * kItemsPerTile;
while (offset + kItemsPerTile <= n_elements_) {
ProcessFullTileShared(offset);
offset += kItemsPerTile * gridDim.x;
}
ProcessPartialTileShared(offset);
// Write shared memory back to global memory
__syncthreads();
for (auto i : dh::BlockStrideRange(0, group_.num_bins)) {
AtomicAddGpairGlobal(d_node_hist_ + group_.start_bin + i, smem_arr_[i]);
}
}
__device__ void BuildHistogramWithGlobal() {
for (auto idx : dh::GridStrideRange(static_cast<std::size_t>(0), n_elements_)) {
int ridx = d_ridx_[idx / feature_stride_];
int gidx =
matrix_
.gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_];
if (matrix_.is_dense || gidx != matrix_.NumBins()) {
auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]);
AtomicAddGpairGlobal(d_node_hist_ + gidx, adjusted);
}
}
}
};
template <bool use_shared_memory_histograms, int kBlockThreads,
int kItemsPerThread>
__global__ void __launch_bounds__(kBlockThreads)
SharedMemHistKernel(const EllpackDeviceAccessor matrix,
const FeatureGroupsAccessor feature_groups,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientPairInt64* __restrict__ d_node_hist,
const GradientPair* __restrict__ d_gpair,
GradientQuantizer const rounding) {
extern __shared__ char smem[];
const FeatureGroup group = feature_groups[blockIdx.y];
auto smem_arr = reinterpret_cast<GradientPairInt64*>(smem);
auto agent = HistogramAgent<kBlockThreads, kItemsPerThread>(
smem_arr, d_node_hist, group, matrix, d_ridx, rounding, d_gpair);
if (use_shared_memory_histograms) {
agent.BuildHistogramWithShared();
} else {
agent.BuildHistogramWithGlobal();
}
}
void BuildGradientHistogram(EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> d_ridx,
common::Span<GradientPairInt64> histogram,
GradientQuantizer rounding, bool force_global_memory) {
// decide whether to use shared memory
int device = 0;
dh::safe_cuda(hipGetDevice(&device));
// opt into maximum shared memory for the kernel if necessary
size_t max_shared_memory = dh::MaxSharedMemoryOptin(device);
size_t smem_size =
sizeof(GradientPairInt64) * feature_groups.max_group_bins;
bool shared = !force_global_memory && smem_size <= max_shared_memory;
smem_size = shared ? smem_size : 0;
constexpr int kBlockThreads = 1024;
constexpr int kItemsPerThread = 8;
constexpr int kItemsPerTile = kBlockThreads * kItemsPerThread;
auto runit = [&, kMinItemsPerBlock = kItemsPerTile](auto kernel) {
if (shared) {
dh::safe_cuda(hipFuncSetAttribute(kernel, hipFuncAttributeMaxDynamicSharedMemorySize,
max_shared_memory));
}
// determine the launch configuration
int num_groups = feature_groups.NumGroups();
int n_mps = 0;
dh::safe_cuda(hipDeviceGetAttribute(&n_mps, hipDeviceAttributeMultiprocessorCount, device));
int n_blocks_per_mp = 0;
dh::safe_cuda(hipOccupancyMaxActiveBlocksPerMultiprocessor(&n_blocks_per_mp, kernel,
kBlockThreads, smem_size));
// This gives the number of blocks to keep the device occupied
// Use this as the maximum number of blocks
unsigned grid_size = n_blocks_per_mp * n_mps;
// Otherwise launch blocks such that each block has a minimum amount of work to do
// There are fixed costs to launching each block, e.g. zeroing shared memory
// The below amount of minimum work was found by experimentation
int columns_per_group = common::DivRoundUp(matrix.row_stride, feature_groups.NumGroups());
// Average number of matrix elements processed by each group
std::size_t items_per_group = d_ridx.size() * columns_per_group;
// Allocate number of blocks such that each block has about kMinItemsPerBlock work
// Up to a maximum where the device is saturated
grid_size =
min(grid_size,
unsigned(common::DivRoundUp(items_per_group, kMinItemsPerBlock)));
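    // Illustrative numbers (assumptions, not measurements): with 108 SMs, one resident block
    // per SM and kMinItemsPerBlock = 8192, a node with 1M rows, row_stride 64 and 4 feature
    // groups gives items_per_group = 16M and DivRoundUp(16M, 8192) = 1954, so the launch keeps
    // grid_size = 108 and each block loops over several tiles.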
dh::LaunchKernel {dim3(grid_size, num_groups),
static_cast<uint32_t>(kBlockThreads), smem_size}(
kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding);
};
if (shared) {
runit(SharedMemHistKernel<true, kBlockThreads, kItemsPerThread>);
} else {
runit(SharedMemHistKernel<false, kBlockThreads, kItemsPerThread>);
}
dh::safe_cuda(hipGetLastError());
}
} // namespace tree
} // namespace xgboost
| 36437e319db97b82cef775e7610f20f3b9c48ebd.cu | /*!
* Copyright 2020-2021 by XGBoost Contributors
*/
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>
#include <algorithm>
#include <ctgmath>
#include <limits>
#include "../../common/device_helpers.cuh"
#include "../../data/ellpack_page.cuh"
#include "histogram.cuh"
#include "row_partitioner.cuh"
#include "xgboost/base.h"
namespace xgboost {
namespace tree {
// The following two functions are slightly modified versions of fbcuda.
/* \brief Constructs a rounding factor used to truncate elements in a sum such that the
sum of the truncated elements is the same no matter what the order of the sum is.
* Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point
* Summation' by Demmel and Nguyen
* In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound
*
* \begin{equation}
* max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) )
* \end{equation}
*
* to avoid outliers, as the full reduction is reproducible on GPU with reduction tree.
*/
template <typename T>
T CreateRoundingFactor(T max_abs, int n) {
T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon());
// Calculate ceil(log_2(delta)).
// frexpf() calculates exp and returns `x` such that
// delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1).
// Because |x| < 1, exp is exactly ceil(log_2(delta)).
int exp;
std::frexp(delta, &exp);
// return M = 2 ^ ceil(log_2(delta))
return std::ldexp(static_cast<T>(1.0), exp);
}
namespace {
struct Pair {
GradientPair first;
GradientPair second;
};
__host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) {
return {lhs.first + rhs.first, lhs.second + rhs.second};
}
} // anonymous namespace
struct Clip : public thrust::unary_function<GradientPair, Pair> {
static XGBOOST_DEV_INLINE float Pclip(float v) { return v > 0 ? v : 0; }
static XGBOOST_DEV_INLINE float Nclip(float v) { return v < 0 ? abs(v) : 0; }
XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const {
auto pg = Pclip(x.GetGrad());
auto ph = Pclip(x.GetHess());
auto ng = Nclip(x.GetGrad());
auto nh = Nclip(x.GetHess());
return {GradientPair{pg, ph}, GradientPair{ng, nh}};
}
};
GradientQuantizer::GradientQuantizer(common::Span<GradientPair const> gpair) {
using GradientSumT = GradientPairPrecise;
using T = typename GradientSumT::ValueT;
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::device_ptr<GradientPair const> gpair_beg{gpair.data()};
auto beg = thrust::make_transform_iterator(gpair_beg, Clip());
Pair p =
dh::Reduce(thrust::cuda::par(alloc), beg, beg + gpair.size(), Pair{}, thrust::plus<Pair>{});
// Treat pair as array of 4 primitive types to allreduce
using ReduceT = typename decltype(p.first)::ValueT;
static_assert(sizeof(Pair) == sizeof(ReduceT) * 4, "Expected to reduce four elements.");
collective::Allreduce<collective::Operation::kSum>(reinterpret_cast<ReduceT*>(&p), 4);
GradientPair positive_sum{p.first}, negative_sum{p.second};
std::size_t total_rows = gpair.size();
collective::Allreduce<collective::Operation::kSum>(&total_rows, 1);
auto histogram_rounding = GradientSumT{
CreateRoundingFactor<T>(std::max(positive_sum.GetGrad(), negative_sum.GetGrad()), total_rows),
CreateRoundingFactor<T>(std::max(positive_sum.GetHess(), negative_sum.GetHess()),
total_rows)};
using IntT = typename GradientPairInt64::ValueT;
/**
* Factor for converting gradients from fixed-point to floating-point.
*/
to_floating_point_ =
histogram_rounding /
T(IntT(1) << (sizeof(typename GradientSumT::ValueT) * 8 - 2)); // keep 1 for sign bit
/**
* Factor for converting gradients from floating-point to fixed-point. For
* f64:
*
* Precision = 64 - 1 - log2(rounding)
*
* rounding is calcuated as exp(m), see the rounding factor calcuation for
* details.
*/
to_fixed_point_ =
GradientSumT(T(1) / to_floating_point_.GetGrad(), T(1) / to_floating_point_.GetHess());
}
XGBOOST_DEV_INLINE void
AtomicAddGpairShared(xgboost::GradientPairInt64 *dest,
xgboost::GradientPairInt64 const &gpair) {
auto dst_ptr = reinterpret_cast<int64_t *>(dest);
auto g = gpair.GetQuantisedGrad();
auto h = gpair.GetQuantisedHess();
AtomicAdd64As32(dst_ptr, g);
AtomicAdd64As32(dst_ptr + 1, h);
}
// Global 64 bit integer atomics at the time of writing do not benefit from being separated into two
// 32 bit atomics
XGBOOST_DEV_INLINE void AtomicAddGpairGlobal(xgboost::GradientPairInt64* dest,
xgboost::GradientPairInt64 const& gpair) {
auto dst_ptr = reinterpret_cast<uint64_t*>(dest);
auto g = gpair.GetQuantisedGrad();
auto h = gpair.GetQuantisedHess();
atomicAdd(dst_ptr,
*reinterpret_cast<uint64_t*>(&g));
atomicAdd(dst_ptr + 1,
*reinterpret_cast<uint64_t*>(&h));
}
template <int kBlockThreads, int kItemsPerThread,
int kItemsPerTile = kBlockThreads* kItemsPerThread>
class HistogramAgent {
GradientPairInt64* smem_arr_;
GradientPairInt64* d_node_hist_;
dh::LDGIterator<const RowPartitioner::RowIndexT> d_ridx_;
const GradientPair* d_gpair_;
const FeatureGroup group_;
const EllpackDeviceAccessor& matrix_;
const int feature_stride_;
const std::size_t n_elements_;
const GradientQuantizer& rounding_;
public:
__device__ HistogramAgent(GradientPairInt64* smem_arr,
GradientPairInt64* __restrict__ d_node_hist, const FeatureGroup& group,
const EllpackDeviceAccessor& matrix,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
const GradientQuantizer& rounding, const GradientPair* d_gpair)
: smem_arr_(smem_arr),
d_node_hist_(d_node_hist),
d_ridx_(d_ridx.data()),
group_(group),
matrix_(matrix),
feature_stride_(matrix.is_dense ? group.num_features : matrix.row_stride),
n_elements_(feature_stride_ * d_ridx.size()),
rounding_(rounding),
d_gpair_(d_gpair) {}
__device__ void ProcessPartialTileShared(std::size_t offset) {
for (std::size_t idx = offset + threadIdx.x;
idx < min(offset + kBlockThreads * kItemsPerTile, n_elements_); idx += kBlockThreads) {
int ridx = d_ridx_[idx / feature_stride_];
int gidx =
matrix_
.gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_] -
group_.start_bin;
if (matrix_.is_dense || gidx != matrix_.NumBins()) {
auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]);
AtomicAddGpairShared(smem_arr_ + gidx, adjusted);
}
}
}
// Instruction level parallelism by loop unrolling
// Allows the kernel to pipeline many operations while waiting for global memory
// Increases the throughput of this kernel significantly
__device__ void ProcessFullTileShared(std::size_t offset) {
std::size_t idx[kItemsPerThread];
int ridx[kItemsPerThread];
int gidx[kItemsPerThread];
GradientPair gpair[kItemsPerThread];
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
idx[i] = offset + i * kBlockThreads + threadIdx.x;
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
ridx[i] = d_ridx_[idx[i] / feature_stride_];
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
gpair[i] = d_gpair_[ridx[i]];
gidx[i] = matrix_.gidx_iter[ridx[i] * matrix_.row_stride + group_.start_feature +
idx[i] % feature_stride_];
}
#pragma unroll
for (int i = 0; i < kItemsPerThread; i++) {
if ((matrix_.is_dense || gidx[i] != matrix_.NumBins())) {
auto adjusted = rounding_.ToFixedPoint(gpair[i]);
AtomicAddGpairShared(smem_arr_ + gidx[i] - group_.start_bin, adjusted);
}
}
}
__device__ void BuildHistogramWithShared() {
dh::BlockFill(smem_arr_, group_.num_bins, GradientPairInt64());
__syncthreads();
std::size_t offset = blockIdx.x * kItemsPerTile;
while (offset + kItemsPerTile <= n_elements_) {
ProcessFullTileShared(offset);
offset += kItemsPerTile * gridDim.x;
}
ProcessPartialTileShared(offset);
// Write shared memory back to global memory
__syncthreads();
for (auto i : dh::BlockStrideRange(0, group_.num_bins)) {
AtomicAddGpairGlobal(d_node_hist_ + group_.start_bin + i, smem_arr_[i]);
}
}
__device__ void BuildHistogramWithGlobal() {
for (auto idx : dh::GridStrideRange(static_cast<std::size_t>(0), n_elements_)) {
int ridx = d_ridx_[idx / feature_stride_];
int gidx =
matrix_
.gidx_iter[ridx * matrix_.row_stride + group_.start_feature + idx % feature_stride_];
if (matrix_.is_dense || gidx != matrix_.NumBins()) {
auto adjusted = rounding_.ToFixedPoint(d_gpair_[ridx]);
AtomicAddGpairGlobal(d_node_hist_ + gidx, adjusted);
}
}
}
};
template <bool use_shared_memory_histograms, int kBlockThreads,
int kItemsPerThread>
__global__ void __launch_bounds__(kBlockThreads)
SharedMemHistKernel(const EllpackDeviceAccessor matrix,
const FeatureGroupsAccessor feature_groups,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientPairInt64* __restrict__ d_node_hist,
const GradientPair* __restrict__ d_gpair,
GradientQuantizer const rounding) {
extern __shared__ char smem[];
const FeatureGroup group = feature_groups[blockIdx.y];
auto smem_arr = reinterpret_cast<GradientPairInt64*>(smem);
auto agent = HistogramAgent<kBlockThreads, kItemsPerThread>(
smem_arr, d_node_hist, group, matrix, d_ridx, rounding, d_gpair);
if (use_shared_memory_histograms) {
agent.BuildHistogramWithShared();
} else {
agent.BuildHistogramWithGlobal();
}
}
void BuildGradientHistogram(EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> d_ridx,
common::Span<GradientPairInt64> histogram,
GradientQuantizer rounding, bool force_global_memory) {
// decide whether to use shared memory
int device = 0;
dh::safe_cuda(cudaGetDevice(&device));
// opt into maximum shared memory for the kernel if necessary
size_t max_shared_memory = dh::MaxSharedMemoryOptin(device);
size_t smem_size =
sizeof(GradientPairInt64) * feature_groups.max_group_bins;
bool shared = !force_global_memory && smem_size <= max_shared_memory;
smem_size = shared ? smem_size : 0;
constexpr int kBlockThreads = 1024;
constexpr int kItemsPerThread = 8;
constexpr int kItemsPerTile = kBlockThreads * kItemsPerThread;
auto runit = [&, kMinItemsPerBlock = kItemsPerTile](auto kernel) {
if (shared) {
dh::safe_cuda(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize,
max_shared_memory));
}
// determine the launch configuration
int num_groups = feature_groups.NumGroups();
int n_mps = 0;
dh::safe_cuda(cudaDeviceGetAttribute(&n_mps, cudaDevAttrMultiProcessorCount, device));
int n_blocks_per_mp = 0;
dh::safe_cuda(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&n_blocks_per_mp, kernel,
kBlockThreads, smem_size));
// This gives the number of blocks to keep the device occupied
// Use this as the maximum number of blocks
unsigned grid_size = n_blocks_per_mp * n_mps;
// Otherwise launch blocks such that each block has a minimum amount of work to do
// There are fixed costs to launching each block, e.g. zeroing shared memory
// The below amount of minimum work was found by experimentation
int columns_per_group = common::DivRoundUp(matrix.row_stride, feature_groups.NumGroups());
// Average number of matrix elements processed by each group
std::size_t items_per_group = d_ridx.size() * columns_per_group;
// Allocate number of blocks such that each block has about kMinItemsPerBlock work
// Up to a maximum where the device is saturated
grid_size =
min(grid_size,
unsigned(common::DivRoundUp(items_per_group, kMinItemsPerBlock)));
dh::LaunchKernel {dim3(grid_size, num_groups),
static_cast<uint32_t>(kBlockThreads), smem_size}(
kernel, matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding);
};
if (shared) {
runit(SharedMemHistKernel<true, kBlockThreads, kItemsPerThread>);
} else {
runit(SharedMemHistKernel<false, kBlockThreads, kItemsPerThread>);
}
dh::safe_cuda(cudaGetLastError());
}
} // namespace tree
} // namespace xgboost
|
63ab9814e483121b343a31ccc96857eec8874ffe.hip | // !!! This is a file automatically generated by hipify!!!
// from https://yuki67.github.io/post/cuda_time/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "cudaTimer.h"
#include <vector>
#include <iostream>
#include <fstream>
#include <numeric>
#include <algorithm>
#include <random>
#include <assert.h>
#define all(a) (a).begin(),(a).end()
// global variables
// variables used for the copies
float *a_h, *b_h; // host data
float *a_d, *b_d; // device data
long long n, nBytes; // n: number of elements / nBytes: total data size (bytes)
// vector<long long> n_list = {160LL, 1024, 3000, 160LL*180*360, 1024LL*1024*1200, /*3000LL*3500*360*/};
// vector<int> n_list = {1024 / sizeof(float), 512 * 1024 / sizeof(float), 1024 * 1024 / sizeof(float), 128 * 1024 * 1024 / sizeof(float)};
// result storage
vector<float> h2d, d2d, d2h;
// other control variables
bool flg = false; // toggles printf output
int itr_times = 1000; // number of repetitions per experimental condition
ofstream ofs;
random_device seed_gen;
uniform_real_distribution<float> dist(-1.0,1.0);
void setup(){
mt19937 engine(seed_gen());
a_h = (float *)malloc(nBytes); // this allocates the host array
b_h = (float *)malloc(nBytes);
hipMalloc((void **) &a_d, nBytes);
hipMalloc((void **) &b_d, nBytes);
fill(a_h, a_h + n, 1.3);
/*
for(long long i = 0; i < n; i++){
a_h[i] = dist(engine);
}
*/
return;
}
void measure(){
// measurement
CudaTimer timer;
// 1. H2D
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(a_d, a_h, nBytes, hipMemcpyHostToDevice);
h2d.emplace_back( timer.stop_and_report("H2D",flg) );
// 2. D2D
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(b_d, a_d, nBytes, hipMemcpyDeviceToDevice);
d2d.emplace_back( timer.stop_and_report("D2D",flg) );
// 3.D2H
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(b_h, b_d, nBytes, hipMemcpyDeviceToHost);
d2h.emplace_back( timer.stop_and_report("D2H",flg) );
// correctness check
for(long long i = 0; i < n; i++){
assert(a_h[i] == b_h[i]);
}
return;
}
void memFree(){
// free the buffers
free(a_h);
free(b_h);
hipFree(a_d);
hipFree(b_d);
return;
}
void csv_out() {ofs << endl;}
template <typename Head, typename... Tail>
void csv_out(Head H, Tail... T){
ofs << H;
csv_out(T...);
}
#define csvo(...) csv_out(__VA_ARGS__)
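// For example, csvo(1024, ",,", 0.5, ",", 0.6) writes "1024,,0.5,0.6" followed by endl to ofs.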
void put_csv(long long Bytes){
// sanity check
assert(h2d.size() == d2d.size() && d2d.size() == d2h.size());
/*
// write the individual samples
for(long long ind = 0; ind < h2d.size(); ++ind){
ofs << h2d.at(ind) << "," << "," << d2d.at(ind) << "," << "," << d2h.at(ind) << "," << endl;
}
*/
// write the averages
float h2d_ave = accumulate(h2d.begin(),h2d.end(),0.0f) / h2d.size();
float d2d_ave = accumulate(d2d.begin(),d2d.end(),0.0f) / d2d.size();
float d2h_ave = accumulate(d2h.begin(),d2h.end(),0.0f) / d2h.size();
//ofs << "," << "Ave." <<endl;
//ofs << h2d_ave << "," << "," << d2d_ave << "," << "," << d2h_ave << "," << endl;
// write the medians
sort(all(h2d)); sort(all(d2d)); sort(all(d2h));
size_t med_ind = h2d.size() / 2;
float h2d_med = (h2d.size() % 2 == 0
? static_cast<float>(h2d[med_ind] + h2d[med_ind - 1]) / 2
: h2d[med_ind]);
float d2d_med = (d2d.size() % 2 == 0
? static_cast<float>(d2d[med_ind] + d2d[med_ind - 1]) / 2
: d2d[med_ind]);
float d2h_med = (d2h.size() % 2 == 0
? static_cast<float>(d2h[med_ind] + d2h[med_ind - 1]) / 2
: d2h[med_ind]);
//ofs << "," << "Med." <<endl;
//ofs << h2d_med << "," << "," << d2d_med << "," << "," << d2h_med << "," << endl;
// write the data row
csvo(Bytes/1024,",,",h2d_ave,",",d2d_ave,",",d2h_ave,",,",h2d_med,",",d2d_med,",",d2h_med);
}
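// Note: the CSV pairs a size in KB with times in milliseconds, so an effective bandwidth in
// GB/s can be derived offline as (KB / 2^20) / (ms / 1000); the program itself does not compute it.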
int main() {
string data_name = "time_plot_data.csv";
ofs.open(data_name);
// write the header row
csvo("(KBytes)\\(msec.),","<Ave.>,","H2D,","D2D,","D2H,","<Med.>,","H2D,","D2D,","D2H");
//ofs << "H2D" << "," << "," << "D2D" << "," << "," << "D2H" << "," << endl;
// for(long long ele : n_list){
long long ele = 256LL;
long long add = 256LL;
long long base = 10LL;
long long div = 1024LL;
bool flg = false;
while(ele < 1024LL*1024*512){ // up to 1GB?
n = ele;
nBytes = n * sizeof(float);
if(nBytes / (1024 * 1024) > 0){
cout << "transport data size : " << nBytes / (1024 * 1024) << "[M Bytes]" << endl;
// data_name = to_string(nBytes / (1024 * 1024)) + "M_Bytes_measure.csv";
}
else if(nBytes / 1024 > 0){
cout << "transport data size : " << nBytes / 1024 << "[K Bytes]" << endl;
// data_name = to_string(nBytes / 1024) + "K_Bytes_measure.csv";
}
else{
cout << "transport data size : " << nBytes << "[Bytes]" << endl;
// data_name = to_string(nBytes) + "Bytes_measure.csv";
}
h2d.clear();
d2d.clear();
d2h.clear();
for(int i = 0; i < itr_times; ++i){
setup();
measure();
memFree();
}
cout << "finished" << "\n";
put_csv(nBytes);
if(nBytes / div >= base){
base *= 10;
add *= 10;
}
ele += add;
if(!flg && ele * sizeof(float) / 1024 > 1000){
flg = true;
div *= 1024;
base = 10;
add = 256LL * 1024;
ele = 256LL * 1024;
}
}
ofs.close();
return 0;
/*
nBytes = n * sizeof(float);
a_h = (float *)malloc(nBytes); // this allocates the host array
b_h = (float *)malloc(nBytes);
hipMalloc((void **) &a_d, nBytes);
hipMalloc((void **) &b_d, nBytes);
for(int i = 0; i < n; i++){
a_h[i] = 100.0f + i;
}
*/
/*
// measurement
CudaTimer timer;
// 1. H2D
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(a_d, a_h, nBytes, hipMemcpyHostToDevice);
timer.stop_and_report("H2D");
// 2. D2D
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(b_d, a_d, nBytes, hipMemcpyDeviceToDevice);
timer.stop_and_report("D2D");
// 3.D2H
timer.begin();
// ~~~ operation under measurement ~~~
hipMemcpy(b_h, b_d, nBytes, hipMemcpyDeviceToHost);
timer.stop_and_report("D2H");
// correctness check
for(int i = 0; i < n; i++){
assert(a_h[i] == b_h[i]);
}
*/
/*
// free the buffers
free(a_h);
free(b_h);
hipFree(a_d);
hipFree(b_d);
*/
}
| 63ab9814e483121b343a31ccc96857eec8874ffe.cu | // from https://yuki67.github.io/post/cuda_time/
#include <cuda_runtime.h>
#include <stdio.h>
#include "cudaTimer.h"
#include <vector>
#include <iostream>
#include <fstream>
#include <numeric>
#include <algorithm>
#include <random>
#include <assert.h>
#define all(a) (a).begin(),(a).end()
// global variables
// variables used for the copies
float *a_h, *b_h; // host data
float *a_d, *b_d; // device data
long long n, nBytes; // n: number of elements / nBytes: total data size (bytes)
// vector<long long> n_list = {160LL, 1024, 3000, 160LL*180*360, 1024LL*1024*1200, /*3000LL*3500*360*/};
// vector<int> n_list = {1024 / sizeof(float), 512 * 1024 / sizeof(float), 1024 * 1024 / sizeof(float), 128 * 1024 * 1024 / sizeof(float)};
// result storage
vector<float> h2d, d2d, d2h;
// other control variables
bool flg = false; // toggles printf output
int itr_times = 1000; // number of repetitions per experimental condition
ofstream ofs;
random_device seed_gen;
uniform_real_distribution<float> dist(-1.0,1.0);
void setup(){
mt19937 engine(seed_gen());
a_h = (float *)malloc(nBytes); // this allocates the host array
b_h = (float *)malloc(nBytes);
cudaMalloc((void **) &a_d, nBytes);
cudaMalloc((void **) &b_d, nBytes);
fill(a_h, a_h + n, 1.3);
/*
for(long long i = 0; i < n; i++){
a_h[i] = dist(engine);
}
*/
return;
}
void measure(){
// measurement
CudaTimer timer;
// 1. H2D
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice);
h2d.emplace_back( timer.stop_and_report("H2D",flg) );
// 2. D2D
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice);
d2d.emplace_back( timer.stop_and_report("D2D",flg) );
// 3.D2H
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(b_h, b_d, nBytes, cudaMemcpyDeviceToHost);
d2h.emplace_back( timer.stop_and_report("D2H",flg) );
// correctness check
for(long long i = 0; i < n; i++){
assert(a_h[i] == b_h[i]);
}
return;
}
void memFree(){
// free the buffers
free(a_h);
free(b_h);
cudaFree(a_d);
cudaFree(b_d);
return;
}
void csv_out() {ofs << endl;}
template <typename Head, typename... Tail>
void csv_out(Head H, Tail... T){
ofs << H;
csv_out(T...);
}
#define csvo(...) csv_out(__VA_ARGS__)
void put_csv(long long Bytes){
// sanity check
assert(h2d.size() == d2d.size() && d2d.size() == d2h.size());
/*
// write the individual samples
for(long long ind = 0; ind < h2d.size(); ++ind){
ofs << h2d.at(ind) << "," << "," << d2d.at(ind) << "," << "," << d2h.at(ind) << "," << endl;
}
*/
// write the averages
float h2d_ave = accumulate(h2d.begin(),h2d.end(),0.0f) / h2d.size();
float d2d_ave = accumulate(d2d.begin(),d2d.end(),0.0f) / d2d.size();
float d2h_ave = accumulate(d2h.begin(),d2h.end(),0.0f) / d2h.size();
//ofs << "," << "Ave." <<endl;
//ofs << h2d_ave << "," << "," << d2d_ave << "," << "," << d2h_ave << "," << endl;
// write the medians
sort(all(h2d)); sort(all(d2d)); sort(all(d2h));
size_t med_ind = h2d.size() / 2;
float h2d_med = (h2d.size() % 2 == 0
? static_cast<float>(h2d[med_ind] + h2d[med_ind - 1]) / 2
: h2d[med_ind]);
float d2d_med = (d2d.size() % 2 == 0
? static_cast<float>(d2d[med_ind] + d2d[med_ind - 1]) / 2
: d2d[med_ind]);
float d2h_med = (d2h.size() % 2 == 0
? static_cast<float>(d2h[med_ind] + d2h[med_ind - 1]) / 2
: d2h[med_ind]);
//ofs << "," << "Med." <<endl;
//ofs << h2d_med << "," << "," << d2d_med << "," << "," << d2h_med << "," << endl;
// write the data row
csvo(Bytes/1024,",,",h2d_ave,",",d2d_ave,",",d2h_ave,",,",h2d_med,",",d2d_med,",",d2h_med);
}
int main() {
string data_name = "time_plot_data.csv";
ofs.open(data_name);
// write the header row
csvo("(KBytes)\\(msec.),","<Ave.>,","H2D,","D2D,","D2H,","<Med.>,","H2D,","D2D,","D2H");
//ofs << "H2D" << "," << "," << "D2D" << "," << "," << "D2H" << "," << endl;
// for(long long ele : n_list){
long long ele = 256LL;
long long add = 256LL;
long long base = 10LL;
long long div = 1024LL;
bool flg = false;
while(ele < 1024LL*1024*512){ // up to 1GB?
n = ele;
nBytes = n * sizeof(float);
if(nBytes / (1024 * 1024) > 0){
cout << "transport data size : " << nBytes / (1024 * 1024) << "[M Bytes]" << endl;
// data_name = to_string(nBytes / (1024 * 1024)) + "M_Bytes_measure.csv";
}
else if(nBytes / 1024 > 0){
cout << "transport data size : " << nBytes / 1024 << "[K Bytes]" << endl;
// data_name = to_string(nBytes / 1024) + "K_Bytes_measure.csv";
}
else{
cout << "transport data size : " << nBytes << "[Bytes]" << endl;
// data_name = to_string(nBytes) + "Bytes_measure.csv";
}
h2d.clear();
d2d.clear();
d2h.clear();
for(int i = 0; i < itr_times; ++i){
setup();
measure();
memFree();
}
cout << "finished" << "\n";
put_csv(nBytes);
if(nBytes / div >= base){
base *= 10;
add *= 10;
}
ele += add;
if(!flg && ele * sizeof(float) / 1024 > 1000){
flg = true;
div *= 1024;
base = 10;
add = 256LL * 1024;
ele = 256LL * 1024;
}
}
ofs.close();
return 0;
/*
nBytes = n * sizeof(float);
a_h = (float *)malloc(nBytes); // this allocates the host array
b_h = (float *)malloc(nBytes);
cudaMalloc((void **) &a_d, nBytes);
cudaMalloc((void **) &b_d, nBytes);
for(int i = 0; i < n; i++){
a_h[i] = 100.0f + i;
}
*/
/*
// measurement
CudaTimer timer;
// 1. H2D
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(a_d, a_h, nBytes, cudaMemcpyHostToDevice);
timer.stop_and_report("H2D");
// 2. D2D
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(b_d, a_d, nBytes, cudaMemcpyDeviceToDevice);
timer.stop_and_report("D2D");
// 3.D2H
timer.begin();
// ~~~ operation under measurement ~~~
cudaMemcpy(b_h, b_d, nBytes, cudaMemcpyDeviceToHost);
timer.stop_and_report("D2H");
// correctness check
for(int i = 0; i < n; i++){
assert(a_h[i] == b_h[i]);
}
*/
/*
// free the buffers
free(a_h);
free(b_h);
cudaFree(a_d);
cudaFree(b_d);
*/
}
|
fbc70b5a13b5ba3c79fa955b3d36ba35c14cdc89.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_normcdff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
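// Round the launch extents up to the next multiple of the block dimensions so the grid covers the whole matrix.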
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((vec_normcdff), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, y);
hipDeviceSynchronize();
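// Warm-up launches (excluded from the timed region below).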
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((vec_normcdff), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((vec_normcdff), dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fbc70b5a13b5ba3c79fa955b3d36ba35c14cdc89.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_normcdff.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_normcdff<<<gridBlock,threadBlock>>>(n,result,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_normcdff<<<gridBlock,threadBlock>>>(n,result,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_normcdff<<<gridBlock,threadBlock>>>(n,result,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
444fe6b9006145d4a50f9d1c0afc22708a31db0b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <functions/logisticReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LogRegLossTest : public ::testing::TestWithParam<LogRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {0.38752545};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
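// Regularization strength (alpha) and L1/L2 mixing ratio (l1_ratio) shared by the penalized loss and gradient calls below.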
T alpha = 0.6;
T l1_ratio = 0.5;
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge, penalty::L2, alpha, l1_ratio, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LogRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LogRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| 444fe6b9006145d4a50f9d1c0afc22708a31db0b.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <functions/logisticReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LogRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LogRegLossTest : public ::testing::TestWithParam<LogRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LogRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {0.38752545};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {0.74152};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {0.4955854};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {0.618555};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.58284, 0.207666};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.0171, -0.39233};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.16284, -0.080333};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.07284, -0.23633};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
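// Regularization strength (alpha) and L1/L2 mixing ratio (l1_ratio) shared by the penalized loss and gradient calls below.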
T alpha = 0.6;
T l1_ratio = 0.5;
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge, penalty::L2, alpha, l1_ratio, stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
logisticRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio,
stream);
logisticRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LogRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LogRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LogRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LogRegLossTest<float> LogRegLossTestF;
TEST_P(LogRegLossTestF, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LogRegLossTest<double> LogRegLossTestD;
TEST_P(LogRegLossTestD, Result) {
ASSERT_TRUE(raft::devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_lasso_grad_ref, out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_ridge_grad_ref, out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(raft::devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LogRegLossTests, LogRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
1907886bf304d12c529b2f882b665dd846d3c26a.hip | // !!! This is a file automatically generated by hipify!!!
// Vector addition: C = 1/A + 1/B
// using multiple GPUs with OpenMP
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> // header for OpenMP
#include <hip/hip_runtime.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
// Functions
void RandomInit(float*, int);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = 1.0/A[i] + 1.0/B[i];
__syncthreads();
}
// Host code
int main(void)
{
printf("\n");
printf("Vector Addition with multiple GPUs \n");
int N, NGPU, cpu_thread_id=0;
int *Dev;
long mem = 1024*1024*1024; // 4 Giga for float data type.
printf("Enter the number of GPUs: ");
scanf("%d", &NGPU);
printf("%d\n", NGPU);
Dev = (int *)malloc(sizeof(int)*NGPU);
int numDev = 0;
printf("GPU device number: ");
for(int i = 0; i < NGPU; i++) {
scanf("%d", &Dev[i]);
printf("%d ",Dev[i]);
numDev++;
if(getchar() == '\n') break;
}
printf("\n");
if(numDev != NGPU) {
fprintf(stderr,"Should input %d GPU device numbers\n", NGPU);
exit(1);
}
printf("Enter the size of the vectors: ");
scanf("%d", &N);
printf("%d\n", N);
if (3*N > mem) {
printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
exit(1);
}
long size = N*sizeof(float);
// Set the sizes of threads and blocks
int threadsPerBlock;
printf("Enter the number of threads per block: ");
scanf("%d", &threadsPerBlock);
printf("%d\n", threadsPerBlock);
if(threadsPerBlock > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(1);
}
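// Each GPU processes N/NGPU elements; blocksPerGrid is rounded up so every element is covered.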
int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU);
printf("The number of blocks is %d\n", blocksPerGrid);
if(blocksPerGrid > 2147483647) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(1);
}
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
if (! h_A || ! h_B || ! h_C) {
printf("!!! Not enough memory.\n");
exit(1);
}
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// declare cuda event for timer
hipEvent_t start, stop;
// hipEventCreate(&start); // events must be created after devices are set
// hipEventCreate(&stop);
float Intime,gputime,Outime;
// Set numbers of threads = numbers of GPU
omp_set_num_threads(NGPU);
// Declare "cpu_thread_id" privately in each thread so the threads stay independent.
#pragma omp parallel private(cpu_thread_id)
{
float *d_A, *d_B, *d_C;
cpu_thread_id = omp_get_thread_num();
hipSetDevice(Dev[cpu_thread_id]);
// start the timer
// Use OpenMP thread 0 to create the timing events and track the clock.
// Start the clock here to measure how long the host-to-device input transfer takes.
if(cpu_thread_id == 0) {
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
}
// Allocate vectors in device memory
// size should be divisible by the number of GPUs
hipMalloc((void**)&d_A, size/NGPU);
hipMalloc((void**)&d_B, size/NGPU);
hipMalloc((void**)&d_C, size/NGPU);
// Copy vectors from host memory to device memory
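// Each OpenMP thread copies its own contiguous N/NGPU chunk of the host vectors.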
hipMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice);
// Wait until all threads come to this step, synchronizes all threads on OpenMP
#pragma omp barrier
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &Intime, start, stop);
printf("Data input time for GPU: %f (ms) \n",Intime);
}
// start the timer
if(cpu_thread_id == 0) hipEventRecord(start,0);
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N/NGPU);
// Blocks until the device has completed all preceding requested tasks.
hipDeviceSynchronize();
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime));
}
// Copy result from device memory to host memory
// h_C contains the result in host memory
// start the timer
if(cpu_thread_id == 0) hipEventRecord(start,0);
hipMemcpy(h_C+N/NGPU*cpu_thread_id, d_C, size/NGPU, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// stop the timer
if(cpu_thread_id == 0) {
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime( &Outime, start, stop);
printf("Data output time for GPU: %f (ms) \n",Outime);
}
}
float gputime_tot;
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
// start the timer
hipEventRecord(start,0);
h_D = (float*)malloc(size); // compute the reference solution
for (int i = 0; i < N; ++i) {
h_D[i] = 1.0/h_A[i] + 1.0/h_B[i];
}
// stop the timer
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float cputime;
hipEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/gputime_tot);
// Destroy timer
hipEventDestroy(start);
hipEventDestroy(stop);
// check result
printf("Check result:\n");
double sum=0;
double diff;
for (int i = 0; i < N; ++i) {
diff = abs(h_D[i] - h_C[i]);
sum += diff*diff;
}
sum = sqrt(sum);
printf("norm(h_C - h_D)=%20.15e\n",sum);
for (int i=0; i < NGPU; i++) {
hipSetDevice(i);
hipDeviceReset();
}
return 0;
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
| 1907886bf304d12c529b2f882b665dd846d3c26a.cu | // Vector addition: C = 1/A + 1/B
// using multiple GPUs with OpenMP
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> // header for OpenMP
#include <cuda_runtime.h>
// Variables
float* h_A; // host vectors
float* h_B;
float* h_C;
float* h_D;
// Functions
void RandomInit(float*, int);
// Device code
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = 1.0/A[i] + 1.0/B[i];
__syncthreads();
}
// Host code
int main(void)
{
printf("\n");
printf("Vector Addition with multiple GPUs \n");
int N, NGPU, cpu_thread_id=0;
int *Dev;
long mem = 1024*1024*1024; // 4 Giga for float data type.
printf("Enter the number of GPUs: ");
scanf("%d", &NGPU);
printf("%d\n", NGPU);
Dev = (int *)malloc(sizeof(int)*NGPU);
int numDev = 0;
printf("GPU device number: ");
for(int i = 0; i < NGPU; i++) {
scanf("%d", &Dev[i]);
printf("%d ",Dev[i]);
numDev++;
if(getchar() == '\n') break;
}
printf("\n");
if(numDev != NGPU) {
fprintf(stderr,"Should input %d GPU device numbers\n", NGPU);
exit(1);
}
printf("Enter the size of the vectors: ");
scanf("%d", &N);
printf("%d\n", N);
if (3*N > mem) {
printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n");
exit(1);
}
long size = N*sizeof(float);
// Set the sizes of threads and blocks
int threadsPerBlock;
printf("Enter the number of threads per block: ");
scanf("%d", &threadsPerBlock);
printf("%d\n", threadsPerBlock);
if(threadsPerBlock > 1024) {
printf("The number of threads per block must be less than 1024 ! \n");
exit(1);
}
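// Each GPU processes N/NGPU elements; blocksPerGrid is rounded up so every element is covered.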
int blocksPerGrid = (N + threadsPerBlock*NGPU - 1) / (threadsPerBlock*NGPU);
printf("The number of blocks is %d\n", blocksPerGrid);
if(blocksPerGrid > 2147483647) {
printf("The number of blocks must be less than 2147483647 ! \n");
exit(1);
}
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
if (! h_A || ! h_B || ! h_C) {
printf("!!! Not enough memory.\n");
exit(1);
}
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// declare cuda event for timer
cudaEvent_t start, stop;
// cudaEventCreate(&start); // events must be created after devices are set
// cudaEventCreate(&stop);
float Intime,gputime,Outime;
// Set numbers of threads = numbers of GPU
omp_set_num_threads(NGPU);
// Declare "cpu_thread_id" privately in each thread so the threads stay independent.
#pragma omp parallel private(cpu_thread_id)
{
float *d_A, *d_B, *d_C;
cpu_thread_id = omp_get_thread_num();
cudaSetDevice(Dev[cpu_thread_id]);
// start the timer
// Use OpenMP thread 0 to create the timing events and track the clock.
// Start the clock here to measure how long the host-to-device input transfer takes.
if(cpu_thread_id == 0) {
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
}
// Allocate vectors in device memory
// size should be divisible by the number of GPUs
cudaMalloc((void**)&d_A, size/NGPU);
cudaMalloc((void**)&d_B, size/NGPU);
cudaMalloc((void**)&d_C, size/NGPU);
// Copy vectors from host memory to device memory
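// Each OpenMP thread copies its own contiguous N/NGPU chunk of the host vectors.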
cudaMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice);
// Wait until all threads come to this step, synchronizes all threads on OpenMP
#pragma omp barrier
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &Intime, start, stop);
printf("Data input time for GPU: %f (ms) \n",Intime);
}
// start the timer
if(cpu_thread_id == 0) cudaEventRecord(start,0);
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N/NGPU);
// Blocks until the device has completed all preceding requested tasks.
cudaDeviceSynchronize();
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &gputime, start, stop);
printf("Processing time for GPU: %f (ms) \n",gputime);
printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime));
}
// Copy result from device memory to host memory
// h_C contains the result in host memory
// start the timer
if(cpu_thread_id == 0) cudaEventRecord(start,0);
cudaMemcpy(h_C+N/NGPU*cpu_thread_id, d_C, size/NGPU, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// stop the timer
if(cpu_thread_id == 0) {
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime( &Outime, start, stop);
printf("Data output time for GPU: %f (ms) \n",Outime);
}
}
float gputime_tot;
gputime_tot = Intime + gputime + Outime;
printf("Total time for GPU: %f (ms) \n",gputime_tot);
// start the timer
cudaEventRecord(start,0);
h_D = (float*)malloc(size); // compute the reference solution
for (int i = 0; i < N; ++i) {
h_D[i] = 1.0/h_A[i] + 1.0/h_B[i];
}
// stop the timer
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float cputime;
cudaEventElapsedTime( &cputime, start, stop);
printf("Processing time for CPU: %f (ms) \n",cputime);
printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime));
printf("Speed up of GPU = %f\n", cputime/gputime_tot);
// Destroy timer
cudaEventDestroy(start);
cudaEventDestroy(stop);
// check result
printf("Check result:\n");
double sum=0;
double diff;
for (int i = 0; i < N; ++i) {
diff = abs(h_D[i] - h_C[i]);
sum += diff*diff;
}
sum = sqrt(sum);
printf("norm(h_C - h_D)=%20.15e\n",sum);
for (int i=0; i < NGPU; i++) {
cudaSetDevice(i);
cudaDeviceReset();
}
return 0;
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i)
data[i] = rand() / (float)RAND_MAX;
}
|
6a58dffdd89d5046b7474e41f7a362d12fdb630b.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/util/detail/JitifyCache.h"
#include <cassert>
#include <regex>
#include <array>
#include "flamegpu/version.h"
#include "flamegpu/exception/FLAMEGPUException.h"
#include "flamegpu/util/detail/compute_capability.cuh"
#include "flamegpu/util/nvtx.h"
// If MSVC earlier than VS 2019
#if defined(_MSC_VER) && _MSC_VER < 1920
#include <filesystem>
using std::tr2::sys::temp_directory_path;
using std::tr2::sys::exists;
using std::tr2::sys::path;
using std::tr2::sys::directory_iterator;
#else
// VS2019 requires this macro, as building pre-C++17 can't use std::filesystem
#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
#include <experimental/filesystem>
using std::experimental::filesystem::v1::temp_directory_path;
using std::experimental::filesystem::v1::exists;
using std::experimental::filesystem::v1::path;
using std::experimental::filesystem::v1::directory_iterator;
#endif
using jitify::detail::hash_combine;
using jitify::detail::hash_larson64;
namespace flamegpu {
namespace util {
namespace detail {
namespace {
/**
* Returns the tmp dir for storing cache files
* Defined here to avoid filesystem includes being in header
*/
path getTMP() {
static path result;
if (result.empty()) {
path tmp = std::getenv("FLAMEGPU_TMP_DIR") ? std::getenv("FLAMEGPU_TMP_DIR") : temp_directory_path();
// Create the $tmp/flamegpu/jitifycache(/debug) folder hierarchy
if (!::exists(tmp) && !create_directory(tmp)) {
THROW exception::InvalidFilePath("Directory '%s' does not exist and cannot be created by JitifyCache.", tmp.generic_string().c_str());
}
if (!std::getenv("FLAMEGPU_TMP_DIR")) {
tmp /= "flamegpu";
if (!::exists(tmp)) {
create_directory(tmp);
}
}
tmp /= "jitifycache";
if (!::exists(tmp)) {
create_directory(tmp);
}
#ifdef _DEBUG
tmp /= "debug";
if (!::exists(tmp)) {
create_directory(tmp);
}
#endif
result = tmp;
}
return result;
}
std::string loadFile(const path &filepath) {
std::ifstream ifs;
ifs.open(filepath, std::ifstream::binary);
if (!ifs)
return "";
// get length of file
ifs.seekg(0, ifs.end);
const std::streamoff length = ifs.tellg();
ifs.seekg(0, ifs.beg);
std::string rtn;
rtn.resize(length);
char *buffer = &rtn[0];
ifs.read(buffer, length);
ifs.close();
return rtn;
}
/**
* Find the cuda include directory.
* Throws exceptions if it can not be found.
* @return the path to the CUDA include directory.
*/
std::string getCUDAIncludeDir() {
// Define an array of environment variables to check in order
std::array<const std::string, 2> ENV_VARS { "CUDA_PATH", "CUDA_HOME" };
std::string cuda_include_dir_str = "";
for (const auto& env_var : ENV_VARS) {
std::string env_value = std::getenv(env_var.c_str()) ? std::getenv(env_var.c_str()) : "";
if (!env_value.empty()) {
path check_path = path(env_value) / "include/";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_path)) {
cuda_include_dir_str = check_path.string();
break;
}
} catch (...) { }
// Throw if the value is not empty, but it does not exist. Outside the try catch explicitly.
THROW flamegpu::exception::InvalidFilePath("Error environment variable %s (%s) does not contain a valid CUDA include directory", env_var.c_str(), env_value.c_str());
}
}
// If none of the search environmental variables were useful, throw an exception.
if (cuda_include_dir_str.empty()) {
THROW exception::InvalidFilePath("Error could not find CUDA include directory. Please specify using the CUDA_PATH environment variable");
}
return cuda_include_dir_str;
}
/**
* Get the FLAME GPU include directory via the environment variables.
* @param env_var_used modified to return the name of the environment variable which was used, if any.
* @return the FLAME GPU 2+ include directory.
*/
std::string getFLAMEGPUIncludeDir(std::string &env_var_used) {
// Define an array of environment variables to check
std::array<const std::string, 2> ENV_VARS { "FLAMEGPU_INC_DIR", "FLAMEGPU2_INC_DIR" };
std::string include_dir_str = "";
// Iterate the array of environment variables to check for the version header.
for (const auto& env_var : ENV_VARS) {
// If the environment variable exists
std::string env_value = std::getenv(env_var.c_str()) ? std::getenv(env_var.c_str()) : "";
// If it's a value, check if the path exists, and if any expected files are found.
if (!env_value.empty()) {
path check_file = path(env_value) / "flamegpu/flamegpu.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
include_dir_str = env_value;
env_var_used = env_var;
break;
}
} catch (...) { }
// Throw if the value is not empty, but it does not exist. Outside the try catch explicitly.
THROW flamegpu::exception::InvalidFilePath("Error environment variable %s (%s) does not contain flamegpu/flamegpu.h. Please correct this environment variable.", env_var.c_str(), env_value.c_str());
}
}
// If no appropriate environmental variables were found, check upwards for N levels (assuming the default filestructure is in use)
if (include_dir_str.empty()) {
// Start with the current working directory
path test_dir(".");
// Try multiple levels of directory, to see if we can find include/flamegpu/flamegpu.h
const unsigned int LEVELS = 5;
for (unsigned int level = 0; level < LEVELS; level++) {
// Break out of the loop if the test_dir directory does not exist.
if (!exists(test_dir)) {
break;
}
// Check file assuming flamegpu is the root cmake project
path check_file = test_dir;
check_file /= "include/flamegpu/flamegpu.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
test_dir /= "include";
include_dir_str = test_dir.string();
break;
}
} catch (...) { }
// Check file assuming a standalone example is the root cmake project
// We want to see if we can find the build directory
for (auto& p : directory_iterator(test_dir)) {
if (is_directory(p)) {
check_file = p.path();
check_file /= "_deps/flamegpu2-src/include/flamegpu/version.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
test_dir = p.path();
test_dir /= "_deps/flamegpu2-src/include";
include_dir_str = test_dir.string();
goto break_flamegpu_inc_dir_loop; // Break out of nested loop
}
} catch (...) { }
}
}
// Go up a level for next iteration
test_dir /= "..";
}
break_flamegpu_inc_dir_loop:
// If still not found, throw.
if (include_dir_str.empty()) {
// @todo - more appropriate exception?
THROW flamegpu::exception::InvalidAgentFunc("Error compiling runtime agent function: Unable to automatically determine include directory and FLAMEGPU_INC_DIR environment variable not set");
}
}
return include_dir_str;
}
/**
* Confirm that include directory version header matches the version of the static library.
* This only compares up to the pre-release version number. Build metadata is only used for the RTC cache.
* @param flamegpuIncludeDir path to the flamegpu include directory to check.
* @return boolean indicator of success.
*/
bool confirmFLAMEGPUHeaderVersion(const std::string flamegpuIncludeDir, const std::string envVariable) {
static bool header_version_confirmed = false;
if (!header_version_confirmed) {
std::string fileHash;
std::string fileVersionMacro;
std::string fileVersionPrerelease;
// Open version.h
path version_file = path(flamegpuIncludeDir) /= "flamegpu/version.h";
std::ifstream vFile(version_file);
if (vFile.is_open()) {
// Use a regular expression to match the FLAMEGPU_VERSION number macro against lines in the file.
std::regex macroPattern("^#define FLAMEGPU_VERSION ([0-9]+)$");
std::regex prereleasePattern("^static constexpr char VERSION_PRERELEASE\\[\\] = \"(.*)\";$");
std::smatch match;
std::string line;
bool extractedMacro = false;
bool extractedPrerelease = false;
while (std::getline(vFile, line)) {
if (std::regex_search(line, match, macroPattern)) {
fileVersionMacro = match[1];
extractedMacro = true;
} else if (std::regex_search(line, match, prereleasePattern)) {
fileVersionPrerelease = match[1];
extractedPrerelease = true;
}
if (extractedMacro && extractedPrerelease) {
break;
}
}
vFile.close();
if (!extractedMacro || !extractedPrerelease) {
THROW exception::VersionMismatch("Could not extract RTC header version information.\n");
}
}
// Confirm that the version matches, else throw an exception.
if (fileVersionMacro == std::to_string(flamegpu::VERSION) && fileVersionPrerelease == std::string(flamegpu::VERSION_PRERELEASE)) {
header_version_confirmed = true;
} else {
THROW exception::VersionMismatch("RTC header version (%s, %s) does not match version flamegpu library was built with (%s, %s). Set the environment variable %s to the correct include directory.\n",
fileVersionMacro.c_str(), fileVersionPrerelease.c_str(),
std::to_string(flamegpu::VERSION).c_str(), flamegpu::VERSION_PRERELEASE,
envVariable.c_str());
}
}
return header_version_confirmed;
}
} // namespace
std::mutex JitifyCache::instance_mutex;
std::unique_ptr<KernelInstantiation> JitifyCache::compileKernel(const std::string &func_name, const std::vector<std::string> &template_args, const std::string &kernel_src, const std::string &dynamic_header) {
NVTX_RANGE("JitifyCache::compileKernel");
// find and validate the cuda include directory via CUDA_PATH or CUDA_HOME.
static const std::string cuda_include_dir = getCUDAIncludeDir();
// find and validate the flamegpu include directory
static std::string flamegpu_include_dir_envvar;
static const std::string flamegpu_include_dir = getFLAMEGPUIncludeDir(flamegpu_include_dir_envvar);
// verify that the include directory contains the correct headers.
confirmFLAMEGPUHeaderVersion(flamegpu_include_dir, flamegpu_include_dir_envvar);
// vector of compiler options for jitify
std::vector<std::string> options;
std::vector<std::string> headers;
// flamegpu include directory
options.push_back(std::string("-I" + std::string(flamegpu_include_dir)));
// cuda include directory (via CUDA_PATH)
options.push_back(std::string("-I" + cuda_include_dir));
#ifdef USE_GLM
// GLM headers increase build time ~5x, so only enable glm if user is using it
if (kernel_src.find("glm") != std::string::npos) {
options.push_back(std::string("-I") + GLM_PATH);
options.push_back(std::string("-DUSE_GLM"));
}
#endif
// Set the compilation architecture target if it was successfully detected.
int currentDeviceIdx = 0;
hipError_t status = hipGetDevice(¤tDeviceIdx);
if (status == hipSuccess) {
int arch = compute_capability::getComputeCapability(currentDeviceIdx);
options.push_back(std::string("--gpu-architecture=compute_" + std::to_string(arch)));
}
// If CUDA is compiled with -G (--device-debug) forward it to the compiler, otherwise forward lineinfo for profiling.
#if defined(__CUDACC_DEBUG__)
options.push_back("--device-debug");
#else
options.push_back("--generate-line-info");
#endif
// If DEBUG is defined, forward it
#if defined(DEBUG)
options.push_back("-DDEBUG");
#endif
// If NDEBUG is defined, forward it, this should disable asserts in device code.
#if defined(NDEBUG)
options.push_back("-DNDEBUG");
#endif
// pass the c++ language dialect. It may be better to explicitly pass this from CMake.
#if defined(__cplusplus) && __cplusplus > 201700L && defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 11
options.push_back("--std=c++17");
#elif defined(__cplusplus) && __cplusplus > 201400L
options.push_back("--std=c++14");
#endif
// If SEATBELTS is defined and false, forward it as off, otherwise forward it as on.
#if !defined(SEATBELTS) || SEATBELTS
options.push_back("--define-macro=SEATBELTS=1");
#else
options.push_back("--define-macro=SEATBELTS=0");
#endif
// cuda.h
std::string include_cuda_h;
include_cuda_h = "--pre-include=" + cuda_include_dir + "/cuda.h";
options.push_back(include_cuda_h);
// get the dynamically generated header from curve rtc
headers.push_back(dynamic_header);
// cassert header (to remove remaining warnings) TODO: Ask Jitify to implement safe version of this
std::string cassert_h = "cassert\n";
headers.push_back(cassert_h);
// jitify to create program (with compilation settings)
try {
auto program = jitify::experimental::Program(kernel_src, headers, options);
assert(template_args.size() == 1 || template_args.size() == 3); // Add this assertion in case template args change
auto kernel = program.kernel(template_args.size() > 1 ? "flamegpu::agent_function_wrapper" : "flamegpu::agent_function_condition_wrapper");
return std::make_unique<KernelInstantiation>(kernel, template_args);
} catch (std::runtime_error const&) {
// jitify does not have a method for getting compile logs so rely on JITIFY_PRINT_LOG defined in cmake
THROW exception::InvalidAgentFunc("Error compiling runtime agent function (or function condition) ('%s'): function had compilation errors (see std::cout), "
"in JitifyCache::buildProgram().",
func_name.c_str());
}
}
std::unique_ptr<KernelInstantiation> JitifyCache::loadKernel(const std::string &func_name, const std::vector<std::string> &template_args, const std::string &kernel_src, const std::string &dynamic_header) {
NVTX_RANGE("JitifyCache::loadKernel");
std::lock_guard<std::mutex> lock(cache_mutex);
// Detect current compute capability
int currentDeviceIdx = 0;
hipError_t status = hipGetDevice(¤tDeviceIdx);
const std::string arch = std::to_string((status == hipSuccess) ? compute_capability::getComputeCapability(currentDeviceIdx) : 0);
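// currentDeviceIdx is reused below to receive the runtime version; the value only feeds the cache-key string.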
status = hipRuntimeGetVersion(¤tDeviceIdx);
const std::string cuda_version = std::to_string((status == hipSuccess) ? currentDeviceIdx : 0);
const std::string seatbelts = std::to_string(SEATBELTS);
// Cat kernel, dynamic header, header version
const std::string long_reference = kernel_src + dynamic_header; // Don't need to include rest, they are explicit in short reference/filename
// Generate short reference string
// Would prefer to use a proper hash, e.g. md5(reference_string), but that requires extra dependencies
const std::string short_reference =
cuda_version + "_" +
arch + "_" +
seatbelts + "_" +
std::string(flamegpu::VERSION_FULL) + "_" +
// Use jitify hash methods for consistent hashing between OSs
std::to_string(hash_combine(hash_larson64(kernel_src.c_str()), hash_larson64(dynamic_header.c_str())));
// Does a copy with the right reference exist in memory?
if (use_memory_cache) {
const auto it = cache.find(short_reference);
if (it != cache.end()) {
// Check long reference
if (it->second.long_reference == long_reference) {
return std::make_unique<KernelInstantiation>(KernelInstantiation::deserialize(it->second.serialised_kernelinst));
}
}
}
// Does a copy with the right reference exist on disk?
const path cache_file = getTMP() / short_reference;
const path reference_file = cache_file.parent_path() / path(cache_file.filename().string() + ".ref");
if (use_disk_cache && exists(cache_file)) {
// Load the long reference for the cache file
const std::string file_long_reference = loadFile(reference_file);
if (file_long_reference == long_reference) {
// Load the cache file
const std::string serialised_kernelinst = loadFile(cache_file);
if (!serialised_kernelinst.empty()) {
// Add it to cache for later loads
cache.emplace(short_reference, CachedProgram{long_reference, serialised_kernelinst});
// Deserialize and return program
return std::make_unique<KernelInstantiation>(KernelInstantiation::deserialize(serialised_kernelinst));
}
}
}
// Kernel has not yet been cached
{
// Build kernel
auto kernelinst = compileKernel(func_name, template_args, kernel_src, dynamic_header);
// Add it to cache for later loads
const std::string serialised_kernelinst = use_memory_cache || use_disk_cache ? kernelinst->serialize() : "";
if (use_memory_cache) {
cache.emplace(short_reference, CachedProgram{long_reference, serialised_kernelinst});
}
// Save it to disk
if (use_disk_cache) {
std::ofstream ofs(cache_file, std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (ofs) {
ofs << serialised_kernelinst;
ofs.close();
}
ofs = std::ofstream(reference_file, std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (ofs) {
ofs << long_reference;
ofs.close();
}
}
return kernelinst;
}
}
void JitifyCache::useMemoryCache(bool yesno) {
std::lock_guard<std::mutex> lock(cache_mutex);
use_memory_cache = yesno;
}
void JitifyCache::useDiskCache(bool yesno) {
std::lock_guard<std::mutex> lock(cache_mutex);
use_disk_cache = yesno;
}
bool JitifyCache::useMemoryCache() const {
std::lock_guard<std::mutex> lock(cache_mutex);
return use_memory_cache;
}
bool JitifyCache::useDiskCache() const {
std::lock_guard<std::mutex> lock(cache_mutex);
return use_disk_cache;
}
void JitifyCache::clearMemoryCache() {
std::lock_guard<std::mutex> lock(cache_mutex);
cache.clear();
}
void JitifyCache::clearDiskCache() {
std::lock_guard<std::mutex> lock(cache_mutex);
const path tmp_dir = getTMP();
for (const auto & entry : directory_iterator(tmp_dir)) {
if (is_regular_file(entry.path())) {
remove(entry.path());
}
}
}
JitifyCache::JitifyCache()
: use_memory_cache(true)
#ifndef DISABLE_RTC_DISK_CACHE
, use_disk_cache(true) { }
#else
, use_disk_cache(false) { }
#endif
JitifyCache& JitifyCache::getInstance() {
auto lock = std::unique_lock<std::mutex>(instance_mutex); // Mutex to protect from two threads triggering the static instantiation concurrently
static JitifyCache instance; // Instantiated on first use.
return instance;
}
} // namespace detail
} // namespace util
} // namespace flamegpu
| 6a58dffdd89d5046b7474e41f7a362d12fdb630b.cu | #include "flamegpu/util/detail/JitifyCache.h"
#include <cassert>
#include <regex>
#include <array>
#include "flamegpu/version.h"
#include "flamegpu/exception/FLAMEGPUException.h"
#include "flamegpu/util/detail/compute_capability.cuh"
#include "flamegpu/util/nvtx.h"
// If MSVC earlier than VS 2019
#if defined(_MSC_VER) && _MSC_VER < 1920
#include <filesystem>
using std::tr2::sys::temp_directory_path;
using std::tr2::sys::exists;
using std::tr2::sys::path;
using std::tr2::sys::directory_iterator;
#else
// VS2019 requires this macro, as building pre-C++17 can't use std::filesystem
#define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING
#include <experimental/filesystem>
using std::experimental::filesystem::v1::temp_directory_path;
using std::experimental::filesystem::v1::exists;
using std::experimental::filesystem::v1::path;
using std::experimental::filesystem::v1::directory_iterator;
#endif
using jitify::detail::hash_combine;
using jitify::detail::hash_larson64;
namespace flamegpu {
namespace util {
namespace detail {
namespace {
/**
* Returns the tmp dir for storing cache files
* Defined here to avoid filesystem includes being in header
*/
path getTMP() {
static path result;
if (result.empty()) {
path tmp = std::getenv("FLAMEGPU_TMP_DIR") ? std::getenv("FLAMEGPU_TMP_DIR") : temp_directory_path();
// Create the $tmp/flamegpu/jitifycache(/debug) folder hierarchy
if (!::exists(tmp) && !create_directory(tmp)) {
THROW exception::InvalidFilePath("Directory '%s' does not exist and cannot be created by JitifyCache.", tmp.generic_string().c_str());
}
if (!std::getenv("FLAMEGPU_TMP_DIR")) {
tmp /= "flamegpu";
if (!::exists(tmp)) {
create_directory(tmp);
}
}
tmp /= "jitifycache";
if (!::exists(tmp)) {
create_directory(tmp);
}
#ifdef _DEBUG
tmp /= "debug";
if (!::exists(tmp)) {
create_directory(tmp);
}
#endif
result = tmp;
}
return result;
}
std::string loadFile(const path &filepath) {
std::ifstream ifs;
ifs.open(filepath, std::ifstream::binary);
if (!ifs)
return "";
// get length of file
ifs.seekg(0, ifs.end);
const std::streamoff length = ifs.tellg();
ifs.seekg(0, ifs.beg);
std::string rtn;
rtn.resize(length);
char *buffer = &rtn[0];
ifs.read(buffer, length);
ifs.close();
return rtn;
}
/**
* Find the cuda include directory.
* Throws exceptions if it can not be found.
* @return the path to the CUDA include directory.
*/
std::string getCUDAIncludeDir() {
// Define an array of environment variables to check in order
std::array<const std::string, 2> ENV_VARS { "CUDA_PATH", "CUDA_HOME" };
std::string cuda_include_dir_str = "";
for (const auto& env_var : ENV_VARS) {
std::string env_value = std::getenv(env_var.c_str()) ? std::getenv(env_var.c_str()) : "";
if (!env_value.empty()) {
path check_path = path(env_value) / "include/";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_path)) {
cuda_include_dir_str = check_path.string();
break;
}
} catch (...) { }
// Throw if the value is not empty, but it does not exist. Outside the try catch explicitly.
THROW flamegpu::exception::InvalidFilePath("Error environment variable %s (%s) does not contain a valid CUDA include directory", env_var.c_str(), env_value.c_str());
}
}
// If none of the search environmental variables were useful, throw an exception.
if (cuda_include_dir_str.empty()) {
THROW exception::InvalidFilePath("Error could not find CUDA include directory. Please specify using the CUDA_PATH environment variable");
}
return cuda_include_dir_str;
}
/**
* Get the FLAME GPU include directory via the environment variables.
* @param env_var_used modified to return the name of the environment variable which was used, if any.
* @return the FLAME GPU 2+ include directory.
*/
std::string getFLAMEGPUIncludeDir(std::string &env_var_used) {
// Define an array of environment variables to check
std::array<const std::string, 2> ENV_VARS { "FLAMEGPU_INC_DIR", "FLAMEGPU2_INC_DIR" };
std::string include_dir_str = "";
// Iterate the array of environment variables to check for the version header.
for (const auto& env_var : ENV_VARS) {
// If the environment variable exists
std::string env_value = std::getenv(env_var.c_str()) ? std::getenv(env_var.c_str()) : "";
// If it's a value, check if the path exists, and if any expected files are found.
if (!env_value.empty()) {
path check_file = path(env_value) / "flamegpu/flamegpu.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
include_dir_str = env_value;
env_var_used = env_var;
break;
}
} catch (...) { }
// Throw if the value is not empty, but it does not exist. Outside the try catch explicitly.
THROW flamegpu::exception::InvalidFilePath("Error environment variable %s (%s) does not contain flamegpu/flamegpu.h. Please correct this environment variable.", env_var.c_str(), env_value.c_str());
}
}
// If no appropriate environmental variables were found, check upwards for N levels (assuming the default filestructure is in use)
if (include_dir_str.empty()) {
// Start with the current working directory
path test_dir(".");
// Try multiple levels of directory, to see if we can find include/flamegpu/flamegpu.h
const unsigned int LEVELS = 5;
for (unsigned int level = 0; level < LEVELS; level++) {
// Break out of the loop if the test_dir directory does not exist.
if (!exists(test_dir)) {
break;
}
// Check file assuming flamegpu is the root cmake project
path check_file = test_dir;
check_file /= "include/flamegpu/flamegpu.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
test_dir /= "include";
include_dir_str = test_dir.string();
break;
}
} catch (...) { }
// Check file assuming a standalone example is the root cmake project
// We want to see if we can find the build directory
for (auto& p : directory_iterator(test_dir)) {
if (is_directory(p)) {
check_file = p.path();
check_file /= "_deps/flamegpu2-src/include/flamegpu/version.h";
// Use try catch to suppress file permission exceptions etc
try {
if (exists(check_file)) {
test_dir = p.path();
test_dir /= "_deps/flamegpu2-src/include";
include_dir_str = test_dir.string();
goto break_flamegpu_inc_dir_loop; // Break out of nested loop
}
} catch (...) { }
}
}
// Go up a level for next iteration
test_dir /= "..";
}
break_flamegpu_inc_dir_loop:
// If still not found, throw.
if (include_dir_str.empty()) {
// @todo - more appropriate exception?
THROW flamegpu::exception::InvalidAgentFunc("Error compiling runtime agent function: Unable to automatically determine include directory and FLAMEGPU_INC_DIR environment variable not set");
}
}
return include_dir_str;
}
/**
* Confirm that include directory version header matches the version of the static library.
* This only compares up to the pre-release version number. Build metadata is only used for the RTC cache.
* @param flamegpuIncludeDir path to the flamegpu include directory to check.
* @return boolean indicator of success.
*/
bool confirmFLAMEGPUHeaderVersion(const std::string flamegpuIncludeDir, const std::string envVariable) {
static bool header_version_confirmed = false;
if (!header_version_confirmed) {
std::string fileHash;
std::string fileVersionMacro;
std::string fileVersionPrerelease;
// Open version.h
path version_file = path(flamegpuIncludeDir) /= "flamegpu/version.h";
std::ifstream vFile(version_file);
if (vFile.is_open()) {
// Use a regular expression to match the FLAMEGPU_VERSION number macro against lines in the file.
std::regex macroPattern("^#define FLAMEGPU_VERSION ([0-9]+)$");
std::regex prereleasePattern("^static constexpr char VERSION_PRERELEASE\\[\\] = \"(.*)\";$");
std::smatch match;
std::string line;
bool extractedMacro = false;
bool extractedPrerelease = false;
while (std::getline(vFile, line)) {
if (std::regex_search(line, match, macroPattern)) {
fileVersionMacro = match[1];
extractedMacro = true;
} else if (std::regex_search(line, match, prereleasePattern)) {
fileVersionPrerelease = match[1];
extractedPrerelease = true;
}
if (extractedMacro && extractedPrerelease) {
break;
}
}
vFile.close();
if (!extractedMacro || !extractedPrerelease) {
THROW exception::VersionMismatch("Could not extract RTC header version information.\n");
}
}
// Confirm that the version matches, else throw an exception.
if (fileVersionMacro == std::to_string(flamegpu::VERSION) && fileVersionPrerelease == std::string(flamegpu::VERSION_PRERELEASE)) {
header_version_confirmed = true;
} else {
THROW exception::VersionMismatch("RTC header version (%s, %s) does not match version flamegpu library was built with (%s, %s). Set the environment variable %s to the correct include directory.\n",
fileVersionMacro.c_str(), fileVersionPrerelease.c_str(),
std::to_string(flamegpu::VERSION).c_str(), flamegpu::VERSION_PRERELEASE,
envVariable.c_str());
}
}
return header_version_confirmed;
}
} // namespace
std::mutex JitifyCache::instance_mutex;
std::unique_ptr<KernelInstantiation> JitifyCache::compileKernel(const std::string &func_name, const std::vector<std::string> &template_args, const std::string &kernel_src, const std::string &dynamic_header) {
NVTX_RANGE("JitifyCache::compileKernel");
// find and validate the cuda include directory via CUDA_PATH or CUDA_HOME.
static const std::string cuda_include_dir = getCUDAIncludeDir();
// find and validate the flamegpu include directory
static std::string flamegpu_include_dir_envvar;
static const std::string flamegpu_include_dir = getFLAMEGPUIncludeDir(flamegpu_include_dir_envvar);
// verify that the include directory contains the correct headers.
confirmFLAMEGPUHeaderVersion(flamegpu_include_dir, flamegpu_include_dir_envvar);
// vector of compiler options for jitify
std::vector<std::string> options;
std::vector<std::string> headers;
// flamegpu include directory
options.push_back(std::string("-I" + std::string(flamegpu_include_dir)));
// cuda include directory (via CUDA_PATH)
options.push_back(std::string("-I" + cuda_include_dir));
#ifdef USE_GLM
// GLM headers increase build time ~5x, so only enable glm if user is using it
if (kernel_src.find("glm") != std::string::npos) {
options.push_back(std::string("-I") + GLM_PATH);
options.push_back(std::string("-DUSE_GLM"));
}
#endif
// Set the compilation architecture target if it was successfully detected.
int currentDeviceIdx = 0;
cudaError_t status = cudaGetDevice(¤tDeviceIdx);
if (status == cudaSuccess) {
int arch = compute_capability::getComputeCapability(currentDeviceIdx);
options.push_back(std::string("--gpu-architecture=compute_" + std::to_string(arch)));
}
// If CUDA is compiled with -G (--device-debug) forward it to the compiler, otherwise forward lineinfo for profiling.
#if defined(__CUDACC_DEBUG__)
options.push_back("--device-debug");
#else
options.push_back("--generate-line-info");
#endif
// If DEBUG is defined, forward it
#if defined(DEBUG)
options.push_back("-DDEBUG");
#endif
// If NDEBUG is defined, forward it, this should disable asserts in device code.
#if defined(NDEBUG)
options.push_back("-DNDEBUG");
#endif
// pass the c++ language dialect. It may be better to explicitly pass this from CMake.
#if defined(__cplusplus) && __cplusplus > 201700L && defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 11
options.push_back("--std=c++17");
#elif defined(__cplusplus) && __cplusplus > 201400L
options.push_back("--std=c++14");
#endif
// If SEATBELTS is defined and false, forward it as off, otherwise forward it as on.
#if !defined(SEATBELTS) || SEATBELTS
options.push_back("--define-macro=SEATBELTS=1");
#else
options.push_back("--define-macro=SEATBELTS=0");
#endif
// cuda.h
std::string include_cuda_h;
include_cuda_h = "--pre-include=" + cuda_include_dir + "/cuda.h";
options.push_back(include_cuda_h);
// get the dynamically generated header from curve rtc
headers.push_back(dynamic_header);
// cassert header (to remove remaining warnings) TODO: Ask Jitify to implement safe version of this
std::string cassert_h = "cassert\n";
headers.push_back(cassert_h);
// jitify to create program (with compilation settings)
try {
auto program = jitify::experimental::Program(kernel_src, headers, options);
assert(template_args.size() == 1 || template_args.size() == 3); // Add this assertion in case template args change
auto kernel = program.kernel(template_args.size() > 1 ? "flamegpu::agent_function_wrapper" : "flamegpu::agent_function_condition_wrapper");
return std::make_unique<KernelInstantiation>(kernel, template_args);
} catch (std::runtime_error const&) {
// jitify does not have a method for getting compile logs so rely on JITIFY_PRINT_LOG defined in cmake
THROW exception::InvalidAgentFunc("Error compiling runtime agent function (or function condition) ('%s'): function had compilation errors (see std::cout), "
"in JitifyCache::buildProgram().",
func_name.c_str());
}
}
std::unique_ptr<KernelInstantiation> JitifyCache::loadKernel(const std::string &func_name, const std::vector<std::string> &template_args, const std::string &kernel_src, const std::string &dynamic_header) {
NVTX_RANGE("JitifyCache::loadKernel");
std::lock_guard<std::mutex> lock(cache_mutex);
    // Detect current compute capability
int currentDeviceIdx = 0;
    cudaError_t status = cudaGetDevice(&currentDeviceIdx);
const std::string arch = std::to_string((status == cudaSuccess) ? compute_capability::getComputeCapability(currentDeviceIdx) : 0);
    status = cudaRuntimeGetVersion(&currentDeviceIdx);
const std::string cuda_version = std::to_string((status == cudaSuccess) ? currentDeviceIdx : 0);
const std::string seatbelts = std::to_string(SEATBELTS);
// Cat kernel, dynamic header, header version
const std::string long_reference = kernel_src + dynamic_header; // Don't need to include rest, they are explicit in short reference/filename
// Generate short reference string
// Would prefer to use a proper hash, e.g. md5(reference_string), but that requires extra dependencies
const std::string short_reference =
cuda_version + "_" +
arch + "_" +
seatbelts + "_" +
std::string(flamegpu::VERSION_FULL) + "_" +
// Use jitify hash methods for consistent hashing between OSs
std::to_string(hash_combine(hash_larson64(kernel_src.c_str()), hash_larson64(dynamic_header.c_str())));
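    // Illustrative key layout (made-up values): "11020_70_1_2.0.0_1234567890123"
    // i.e. <CUDA runtime version>_<arch>_<seatbelts>_<flamegpu version>_<kernel+header hash>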
// Does a copy with the right reference exist in memory?
if (use_memory_cache) {
const auto it = cache.find(short_reference);
if (it != cache.end()) {
// Check long reference
if (it->second.long_reference == long_reference) {
return std::make_unique<KernelInstantiation>(KernelInstantiation::deserialize(it->second.serialised_kernelinst));
}
}
}
// Does a copy with the right reference exist on disk?
const path cache_file = getTMP() / short_reference;
const path reference_file = cache_file.parent_path() / path(cache_file.filename().string() + ".ref");
if (use_disk_cache && exists(cache_file)) {
// Load the long reference for the cache file
const std::string file_long_reference = loadFile(reference_file);
if (file_long_reference == long_reference) {
// Load the cache file
const std::string serialised_kernelinst = loadFile(cache_file);
if (!serialised_kernelinst.empty()) {
// Add it to cache for later loads
cache.emplace(short_reference, CachedProgram{long_reference, serialised_kernelinst});
// Deserialize and return program
return std::make_unique<KernelInstantiation>(KernelInstantiation::deserialize(serialised_kernelinst));
}
}
}
// Kernel has not yet been cached
{
// Build kernel
auto kernelinst = compileKernel(func_name, template_args, kernel_src, dynamic_header);
// Add it to cache for later loads
const std::string serialised_kernelinst = use_memory_cache || use_disk_cache ? kernelinst->serialize() : "";
if (use_memory_cache) {
cache.emplace(short_reference, CachedProgram{long_reference, serialised_kernelinst});
}
// Save it to disk
if (use_disk_cache) {
std::ofstream ofs(cache_file, std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (ofs) {
ofs << serialised_kernelinst;
ofs.close();
}
ofs = std::ofstream(reference_file, std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (ofs) {
ofs << long_reference;
ofs.close();
}
}
return kernelinst;
}
}
void JitifyCache::useMemoryCache(bool yesno) {
std::lock_guard<std::mutex> lock(cache_mutex);
use_memory_cache = yesno;
}
void JitifyCache::useDiskCache(bool yesno) {
std::lock_guard<std::mutex> lock(cache_mutex);
use_disk_cache = yesno;
}
bool JitifyCache::useMemoryCache() const {
std::lock_guard<std::mutex> lock(cache_mutex);
return use_memory_cache;
}
bool JitifyCache::useDiskCache() const {
std::lock_guard<std::mutex> lock(cache_mutex);
return use_disk_cache;
}
void JitifyCache::clearMemoryCache() {
std::lock_guard<std::mutex> lock(cache_mutex);
cache.clear();
}
void JitifyCache::clearDiskCache() {
std::lock_guard<std::mutex> lock(cache_mutex);
const path tmp_dir = getTMP();
for (const auto & entry : directory_iterator(tmp_dir)) {
if (is_regular_file(entry.path())) {
remove(entry.path());
}
}
}
JitifyCache::JitifyCache()
: use_memory_cache(true)
#ifndef DISABLE_RTC_DISK_CACHE
, use_disk_cache(true) { }
#else
, use_disk_cache(false) { }
#endif
JitifyCache& JitifyCache::getInstance() {
auto lock = std::unique_lock<std::mutex>(instance_mutex); // Mutex to protect from two threads triggering the static instantiation concurrently
static JitifyCache instance; // Instantiated on first use.
return instance;
}
} // namespace detail
} // namespace util
} // namespace flamegpu
|
594886f874ed1ced5d084e263a2b94f3b53f2d2a.hip | // !!! This is a file automatically generated by hipify!!!
#include "EnemyUpdater.h"
#include <hip/hip_runtime.h>
#include "../Enemy.h"
#include "EnemyUpdaterKernel.cuh"
void EnemyUpdater::Run()
{
int length = EnemyPtr::length;
	// Transfer data from CPU to GPU
hipMemcpyAsync(
EnemyPtr::device,
EnemyPtr::host,
sizeof(Enemy) * length,
hipMemcpyHostToDevice,
EnemyPtr::stream);
dim3 block(256, 1, 1);
dim3 grid((length + 256 - 1) / 256, 1, 1);
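	// One thread per enemy; the grid size rounds up so a partial final block covers the
	// remainder (e.g. length = 1000 -> (1000 + 255) / 256 = 4 blocks of 256 threads)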
	// Run the enemy update kernel
EnemyUpdaterKernel::Process << <grid, block, 0, EnemyPtr::stream >> > (EnemyPtr::device, length);
	// Transfer data from GPU to CPU
hipMemcpyAsync(
EnemyPtr::host,
EnemyPtr::device,
sizeof(Enemy) * length,
hipMemcpyDeviceToHost,
EnemyPtr::stream);
} | 594886f874ed1ced5d084e263a2b94f3b53f2d2a.cu | #include "EnemyUpdater.h"
#include <cuda_runtime.h>
#include "../Enemy.h"
#include "EnemyUpdaterKernel.cuh"
void EnemyUpdater::Run()
{
int length = EnemyPtr::length;
	// Transfer data from CPU to GPU
cudaMemcpyAsync(
EnemyPtr::device,
EnemyPtr::host,
sizeof(Enemy) * length,
cudaMemcpyHostToDevice,
EnemyPtr::stream);
dim3 block(256, 1, 1);
dim3 grid((length + 256 - 1) / 256, 1, 1);
	// Run the enemy update kernel
EnemyUpdaterKernel::Process << <grid, block, 0, EnemyPtr::stream >> > (EnemyPtr::device, length);
	// Transfer data from GPU to CPU
cudaMemcpyAsync(
EnemyPtr::host,
EnemyPtr::device,
sizeof(Enemy) * length,
cudaMemcpyDeviceToHost,
EnemyPtr::stream);
} |
7dc6c24b5e5d776d4060e6cff64560d4a07efd05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void showCpy(float* d_a, int const nx, int const ny){
unsigned int i = threadIdx.x;
if(i<nx*ny)
printf("%d, %5.2f \n", i, d_a[i]);
}
int main(){
int const nx = 1<<4;
int const ny = 1<<4;
float h_a[nx][ny];
for(int i=0; i<nx; i++){
for(int j=0; j<ny; j++){
h_a[i][j] = (float)i + ((float)j)/100;
}
}
float* d_a;
size_t mSize = nx*ny*sizeof(float);
hipMalloc((void**)&d_a, mSize);
hipMemcpy(d_a, h_a, mSize, hipMemcpyHostToDevice);
dim3 grid(1, 1);
dim3 block(nx*ny, 1);
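    // Single block of nx*ny (= 256) threads; each thread prints one element of the
    // flattened row-major copy of h_a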
hipLaunchKernelGGL(( showCpy), dim3(grid), dim3(block), 0, 0, d_a, nx, ny);
/* for(int i=0; i<nx; i++){
for(int j=0; j<ny; j++){
printf("%5.2f ", h_a[i][j]);
}
printf("\n");
}
*/
hipDeviceSynchronize();
return 0;
}
| 7dc6c24b5e5d776d4060e6cff64560d4a07efd05.cu | #include<stdio.h>
__global__ void showCpy(float* d_a, int const nx, int const ny){
unsigned int i = threadIdx.x;
if(i<nx*ny)
printf("%d, %5.2f \n", i, d_a[i]);
}
int main(){
int const nx = 1<<4;
int const ny = 1<<4;
float h_a[nx][ny];
for(int i=0; i<nx; i++){
for(int j=0; j<ny; j++){
h_a[i][j] = (float)i + ((float)j)/100;
}
}
float* d_a;
size_t mSize = nx*ny*sizeof(float);
cudaMalloc((void**)&d_a, mSize);
cudaMemcpy(d_a, h_a, mSize, cudaMemcpyHostToDevice);
dim3 grid(1, 1);
dim3 block(nx*ny, 1);
showCpy<<<grid, block>>>(d_a, nx, ny);
/* for(int i=0; i<nx; i++){
for(int j=0; j<ny; j++){
printf("%5.2f ", h_a[i][j]);
}
printf("\n");
}
*/
cudaDeviceSynchronize();
return 0;
}
|
161e7e295f71ef887f88f8fbce601e649e4aaef4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
template <typename T, Int NTX, Int NTY>
__global__ void BatchwiseMultiplicativeDropout_fp(T *input_features,
T *output_features, T *noise,
Int nActive, Int nPlanes,
Int input_stride,
Int output_stride, T alpha) {
__shared__ T nz[NTX];
for (Int plane = threadIdx.x + blockIdx.x * NTX; plane < nPlanes;
plane += gridDim.x * NTX) {
if (threadIdx.y == 0)
nz[threadIdx.x] = noise[plane];
__syncthreads();
for (Int row = threadIdx.y + blockIdx.y * NTY; row < nActive;
row += gridDim.y * NTY) {
Int i = row * input_stride + plane;
Int o = row * output_stride + plane;
output_features[o] = input_features[i] * nz[threadIdx.x] *
((input_features[i] > 0) ? 1 : alpha);
}
__syncthreads();
}
}
template <typename T, Int NTX, Int NTY>
__global__ void BatchwiseMultiplicativeDropout_bp(
T *input_features, T *d_input_features, T *d_output_features, T *noise,
Int nActive, Int nPlanes, Int input_stride, Int output_stride, T alpha) {
__shared__ T nz[NTX];
for (Int plane = threadIdx.x + blockIdx.x * NTX; plane < nPlanes;
plane += gridDim.x * NTX) {
if (threadIdx.y == 0)
nz[threadIdx.x] = noise[plane];
__syncthreads();
for (Int row = threadIdx.y + blockIdx.y * NTY; row < nActive;
row += gridDim.y * NTY) {
Int i = row * input_stride + plane;
Int o = row * output_stride + plane;
d_input_features[i] = d_output_features[o] * nz[threadIdx.x] *
((input_features[i] > 0) ? 1 : alpha);
}
__syncthreads();
}
}
#define SPARSECONVNET_FOO(NTX, NTY) \
{ \
if (nPlanes % NTX == 0) { \
hipLaunchKernelGGL(( BatchwiseMultiplicativeDropout_fp<T, NTX, NTY>), \
dim3(::min((Int)16, nPlanes / NTX), 16), dim3(dim3(NTX, NTY)), 0, 0, \
input_features, output_features, noise, nActive, nPlanes, nPlanes, \
nPlanes, alpha); \
return; \
} \
}
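// bmd_f / bmd_b below expand this macro for tile shapes from (32,32) down to (1,64); the
// first NTX that evenly divides nPlanes is used (NTX = 1 always matches, so exactly one
// kernel configuration is launched per call).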
template <typename T>
void bmd_f(T *input_features, T *output_features, T *noise, Int nActive,
Int nPlanes, T alpha) {
SPARSECONVNET_FOO(32, 32)
SPARSECONVNET_FOO(24, 32)
SPARSECONVNET_FOO(16, 64)
SPARSECONVNET_FOO(12, 64)
SPARSECONVNET_FOO(8, 64)
SPARSECONVNET_FOO(4, 64)
SPARSECONVNET_FOO(1, 64)
}
#undef SPARSECONVNET_FOO
#define SPARSECONVNET_FOO(NTX, NTY) \
{ \
if (nPlanes % NTX == 0) { \
hipLaunchKernelGGL(( BatchwiseMultiplicativeDropout_bp<T, NTX, NTY>), \
dim3(::min((Int)16, nPlanes / NTX), 16), dim3(dim3(NTX, NTY)), 0, 0, \
input_features, d_input_features, d_output_features, noise, nActive, \
nPlanes, nPlanes, nPlanes, alpha); \
return; \
} \
}
template <typename T>
void bmd_b(T *input_features, T *d_input_features, T *d_output_features,
T *noise, Int nActive, Int nPlanes, T alpha) {
SPARSECONVNET_FOO(32, 32)
SPARSECONVNET_FOO(24, 32)
SPARSECONVNET_FOO(16, 64)
SPARSECONVNET_FOO(12, 64)
SPARSECONVNET_FOO(8, 64)
SPARSECONVNET_FOO(4, 64)
SPARSECONVNET_FOO(1, 64)
}
#undef SPARSECONVNET_FOO
| 161e7e295f71ef887f88f8fbce601e649e4aaef4.cu | // Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
template <typename T, Int NTX, Int NTY>
__global__ void BatchwiseMultiplicativeDropout_fp(T *input_features,
T *output_features, T *noise,
Int nActive, Int nPlanes,
Int input_stride,
Int output_stride, T alpha) {
__shared__ T nz[NTX];
for (Int plane = threadIdx.x + blockIdx.x * NTX; plane < nPlanes;
plane += gridDim.x * NTX) {
if (threadIdx.y == 0)
nz[threadIdx.x] = noise[plane];
__syncthreads();
for (Int row = threadIdx.y + blockIdx.y * NTY; row < nActive;
row += gridDim.y * NTY) {
Int i = row * input_stride + plane;
Int o = row * output_stride + plane;
output_features[o] = input_features[i] * nz[threadIdx.x] *
((input_features[i] > 0) ? 1 : alpha);
}
__syncthreads();
}
}
template <typename T, Int NTX, Int NTY>
__global__ void BatchwiseMultiplicativeDropout_bp(
T *input_features, T *d_input_features, T *d_output_features, T *noise,
Int nActive, Int nPlanes, Int input_stride, Int output_stride, T alpha) {
__shared__ T nz[NTX];
for (Int plane = threadIdx.x + blockIdx.x * NTX; plane < nPlanes;
plane += gridDim.x * NTX) {
if (threadIdx.y == 0)
nz[threadIdx.x] = noise[plane];
__syncthreads();
for (Int row = threadIdx.y + blockIdx.y * NTY; row < nActive;
row += gridDim.y * NTY) {
Int i = row * input_stride + plane;
Int o = row * output_stride + plane;
d_input_features[i] = d_output_features[o] * nz[threadIdx.x] *
((input_features[i] > 0) ? 1 : alpha);
}
__syncthreads();
}
}
#define SPARSECONVNET_FOO(NTX, NTY) \
{ \
if (nPlanes % NTX == 0) { \
BatchwiseMultiplicativeDropout_fp<T, NTX, NTY><<< \
dim3(std::min((Int)16, nPlanes / NTX), 16), dim3(NTX, NTY)>>>( \
input_features, output_features, noise, nActive, nPlanes, nPlanes, \
nPlanes, alpha); \
return; \
} \
}
template <typename T>
void bmd_f(T *input_features, T *output_features, T *noise, Int nActive,
Int nPlanes, T alpha) {
SPARSECONVNET_FOO(32, 32)
SPARSECONVNET_FOO(24, 32)
SPARSECONVNET_FOO(16, 64)
SPARSECONVNET_FOO(12, 64)
SPARSECONVNET_FOO(8, 64)
SPARSECONVNET_FOO(4, 64)
SPARSECONVNET_FOO(1, 64)
}
#undef SPARSECONVNET_FOO
#define SPARSECONVNET_FOO(NTX, NTY) \
{ \
if (nPlanes % NTX == 0) { \
BatchwiseMultiplicativeDropout_bp<T, NTX, NTY><<< \
dim3(std::min((Int)16, nPlanes / NTX), 16), dim3(NTX, NTY)>>>( \
input_features, d_input_features, d_output_features, noise, nActive, \
nPlanes, nPlanes, nPlanes, alpha); \
return; \
} \
}
template <typename T>
void bmd_b(T *input_features, T *d_input_features, T *d_output_features,
T *noise, Int nActive, Int nPlanes, T alpha) {
SPARSECONVNET_FOO(32, 32)
SPARSECONVNET_FOO(24, 32)
SPARSECONVNET_FOO(16, 64)
SPARSECONVNET_FOO(12, 64)
SPARSECONVNET_FOO(8, 64)
SPARSECONVNET_FOO(4, 64)
SPARSECONVNET_FOO(1, 64)
}
#undef SPARSECONVNET_FOO
|
a4bafdf63ec3d2cd9fb4572eece3935d5477e44d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void d_iterate_direction ( dim3 block, dim3 grid, const int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
__device__ int d_find_min_index ( const int *v, const int disp_range );
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range );
/* device functions and kernels */
__global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
int nx, int ny, int disp_range )
{
int row_size = ceil((float) nx / blockDim.x);
int x = ((blockIdx.x % row_size) * blockDim.x) + threadIdx.x;
int y = blockIdx.y;
int d = ((blockIdx.x / row_size) * blockDim.y) + threadIdx.y;
if ( (y < ny) && (d < disp_range) && (x < nx))
{
COSTS(x,y,d) = 255u;
if (x >= d)
COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
}
}
__global__ void d_iterate_direction_dirxpos ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = 0;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(0,y,d) += COSTS(0,y,d);
__syncthreads();
for (x = 1; x < nx; x++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirypos ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = 0;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,0,d) += COSTS(x,0,d);
__syncthreads();
for (y = 1; y < ny; y++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirxneg ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = nx-1;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(nx-1,y,d) += COSTS(nx-1,y,d);
__syncthreads();
for (x = nx-2; x >= 0; x--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_diryneg ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = ny-1;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,ny-1,d) += COSTS(x,ny-1,d);
__syncthreads();
for (y = ny-2; y >= 0; y--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range )
{
int d = threadIdx.z;
curr_cost[d] = local[threadIdx.z];
__syncthreads();
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ )
{
if ( d_p - d == 0 ) {
        // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
        // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
        // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
__syncthreads();
curr_cost[d] += e_smooth;
int min = INT_MAX;
for ( int d1 = 0; d1 < disp_range; d1++ ) {
if (prior[d1]<min) min=prior[d1];
}
__syncthreads();
curr_cost[d] -= min;
}
__global__ void d_inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny * disp_range;
if ( pos < size )
im1[pos] += im2[pos];
}
__global__ void d_create_disparity_view ( int *accumulated_costs , int * disp_image,
int nx, int ny, int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny ;
if ( pos < size )
disp_image[pos] = 4 * d_find_min_index(&accumulated_costs[pos * disp_range], disp_range);
}
__device__ int d_find_min_index ( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
/* functions code */
void determine_costs ( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range )
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
/*
 *d_iterate_direction: GPU counterpart of iterate_direction(); launches the per-direction kernels
*/
void d_iterate_direction ( int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, //const int *left_image,
//const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
dim3 block1d(1);
dim3 grid1d(1);
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
//iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
hipLaunchKernelGGL(( d_iterate_direction_dirxpos) , dim3(grid1d), dim3(block1d) , 0, 0, dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
// iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
hipLaunchKernelGGL(( d_iterate_direction_dirypos) , dim3(grid1d), dim3(block1d) , 0, 0, diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
//iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
hipLaunchKernelGGL(( d_iterate_direction_dirxneg) , dim3(grid1d), dim3(block1d) , 0, 0, dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
//iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
hipLaunchKernelGGL(( d_iterate_direction_diryneg) , dim3(grid1d), dim3(block1d) , 0, 0, diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
}
void iterate_direction ( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
int find_min_index ( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
void evaluate_path ( const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range )
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
        // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
        // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
        // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view ( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range )
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
int nx = w;
int ny = h;
int image_size = nx * ny * sizeof(int); // size in bytes
int costs_size = disp_range * image_size;
// int image_dim = nx * ny;
int costs_dim = disp_range * nx * ny;
/* launching the determine_costs() kernel */
int *d_left_image;
int *d_right_image;
int *d_costs;
//error = hipMalloc ((void **) &d_left_image, image_size)
hipMalloc((void **) &d_left_image, image_size);
hipMalloc((void **) &d_right_image, image_size);
hipMalloc((void **) &d_costs, costs_size);
hipMemset(d_costs, 0, costs_size);
hipMemcpy(d_left_image, h_leftIm, image_size, hipMemcpyHostToDevice);
hipMemcpy(d_right_image, h_rightIm, image_size, hipMemcpyHostToDevice);
int block_x = 32;
int block_y = (disp_range >= 16) ? 16 : disp_range; // 32 * 16 = 512
int z_blocks = (disp_range % block_y)
? ceil((float) disp_range / block_y) + 1
: ceil((float) disp_range / block_y);
int grid_x = ceil((float) nx / block_x) * z_blocks;
int grid_y = ny;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
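  // gridDim.x packs both pixel-column blocks and disparity blocks; d_determine_costs()
  // recovers them with row_size = ceil(nx / blockDim.x). Illustrative sizes (not from the
  // source): nx = 640, disp_range = 32, block = (32, 16) -> row_size = 20, z_blocks = 2,
  // grid = (40, ny)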
hipLaunchKernelGGL(( d_determine_costs) , dim3(grid), dim3(block) , 0, 0, d_left_image, d_right_image, d_costs,
nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
//int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
/* allocating space for dir_accumulated_costs and accumulated_costs on device */
int *d_accumulated_costs;
hipMalloc((void **) &d_accumulated_costs, costs_size);
hipMemset( d_accumulated_costs, 0, costs_size);
int *d_dir_accumulated_costs;
hipMalloc((void **) &d_dir_accumulated_costs, costs_size);
// geometry for d_inplace_sum_views
dim3 block1d(1);
dim3 grid1d(1);
if (costs_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) costs_dim/512);
}
else
{
block1d.x = costs_dim;
grid1d.x = 1;
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
hipMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry,
nx, ny, disp_range );
hipLaunchKernelGGL(( d_inplace_sum_views) , dim3(grid1d), dim3(block1d) , 0, 0, d_accumulated_costs, d_dir_accumulated_costs,
nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
hipMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry,
nx, ny, disp_range );
hipLaunchKernelGGL(( d_inplace_sum_views) , dim3(grid1d), dim3(block1d) , 0, 0, d_accumulated_costs, d_dir_accumulated_costs,
nx, ny, disp_range);
}
hipMemcpy(accumulated_costs, d_accumulated_costs, costs_size, hipMemcpyDeviceToHost);
//free(costs);
// device memory mgmt
hipFree(d_dir_accumulated_costs);
hipFree(d_left_image);
hipFree(d_right_image);
hipFree(d_costs);
hipFree(d_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// print command line format
void usage ( char *command )
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int
main ( int argc, char** argv )
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( hipSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
hipEvent_t startH, stopH, startD, stopD;
hipEventCreate(&startH);
hipEventCreate(&stopH);
hipEventCreate(&startD);
hipEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
hipEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
hipEventRecord( stopH, 0 );
hipEventSynchronize( stopH );
// sgm at GPU
hipEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
hipEventRecord( stopD, 0 );
hipEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
hipEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
hipEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
printf( "SpeedUp: %f (ms)\n", timeH /timeD);
// save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
| a4bafdf63ec3d2cd9fb4572eece3935d5477e44d.cu |
// Based on CUDA SDK template from NVIDIA
// sgm algorithm adapted from http://lunokhod.org/?p=1403
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <assert.h>
#include <float.h>
#include <stdlib.h>
#include <limits.h>
#include <limits>
#include <algorithm>
// includes, project
#include <cutil_inline.h>
#define MMAX_BRIGHTNESS 255
#define PENALTY1 15
#define PENALTY2 100
#define COSTS(i,j,d) costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define ACCUMULATED_COSTS(i,j,d) accumulated_costs[(i)*disp_range+(j)*nx*disp_range+(d)]
#define LEFT_IMAGE(i,j) left_image[(i)+(j)*nx]
#define RIGHT_IMAGE(i,j) right_image[(i)+(j)*nx]
#define DISP_IMAGE(i,j) disp_image[(i)+(j)*nx]
#define MMAX(a,b) (((a)>(b))?(a):(b))
#define MMIN(a,b) (((a)<(b))?(a):(b))
/* function headers */
void determine_costs(const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range);
void evaluate_path( const int *prior, const int* local,
int path_intensity_gradient, int *curr_cost,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxpos(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirypos(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_dirxneg(const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction_diryneg(const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void iterate_direction( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range ) ;
void d_iterate_direction ( dim3 block, dim3 grid, const int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range );
void inplace_sum_views( int * im1, const int * im2,
const int nx, const int ny, const int disp_range ) ;
int find_min_index( const int *v, const int dist_range ) ;
void create_disparity_view( const int *accumulated_costs , int * disp_image, int nx, int ny) ;
void sgmHost( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range );
void sgmDevice( const int *h_leftIm, const int *h_rightIm,
int *h_dispImD,
const int w, const int h, const int disp_range );
void usage(char *command);
__device__ int d_find_min_index ( const int *v, const int disp_range );
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range );
/* device functions and kernels */
__global__ void d_determine_costs ( int *left_image, int *right_image, int *costs,
int nx, int ny, int disp_range )
{
int row_size = ceil((float) nx / blockDim.x);
int x = ((blockIdx.x % row_size) * blockDim.x) + threadIdx.x;
int y = blockIdx.y;
int d = ((blockIdx.x / row_size) * blockDim.y) + threadIdx.y;
if ( (y < ny) && (d < disp_range) && (x < nx))
{
COSTS(x,y,d) = 255u;
if (x >= d)
COSTS(x,y,d) = abs( LEFT_IMAGE(x,y) - RIGHT_IMAGE(x-d,y) );
}
}
__global__ void d_iterate_direction_dirxpos ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = 0;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(0,y,d) += COSTS(0,y,d);
__syncthreads();
for (x = 1; x < nx; x++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirypos ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = 0;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,0,d) += COSTS(x,0,d);
__syncthreads();
for (y = 1; y < ny; y++)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_dirxneg ( const int dirx, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = nx-1;
int y = blockIdx.y;
int d = threadIdx.z;
if ( (y < ny) && (d < disp_range) )
{
ACCUMULATED_COSTS(nx-1,y,d) += COSTS(nx-1,y,d);
__syncthreads();
for (x = nx-2; x >= 0; x--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x-dirx,y,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x-dirx,y)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__global__ void d_iterate_direction_diryneg ( const int diry, const int *left_image,
int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
int x = blockIdx.y;
int y = ny-1;
int d = threadIdx.z;
if ( (x < nx) && (d < disp_range) )
{
      ACCUMULATED_COSTS(x,ny-1,d) += COSTS(x,ny-1,d);
__syncthreads();
for (y = ny-2; y >= 0; y--)
{
d_evaluate_path( &ACCUMULATED_COSTS(x,y-diry,0),
&COSTS(x,y,0),
abs(LEFT_IMAGE(x,y)-LEFT_IMAGE(x,y-diry)) ,
&ACCUMULATED_COSTS(x,y,0), nx, ny, disp_range);
}
}
}
__device__ void d_evaluate_path ( int *prior, int *local,
int path_intensity_gradient, int *curr_cost,
int nx, int ny, int disp_range )
{
int d = threadIdx.z;
curr_cost[d] = local[threadIdx.z];
__syncthreads();
int e_smooth = INT_MAX;
for ( int d_p = 0; d_p < disp_range; d_p++ )
{
if ( d_p - d == 0 ) {
        // No penalty
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
        // Small penalty
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
        // Large penalty
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
__syncthreads();
curr_cost[d] += e_smooth;
int min = INT_MAX;
for ( int d1 = 0; d1 < disp_range; d1++ ) {
if (prior[d1]<min) min=prior[d1];
}
__syncthreads();
curr_cost[d] -= min;
}
__global__ void d_inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny * disp_range;
if ( pos < size )
im1[pos] += im2[pos];
}
__global__ void d_create_disparity_view ( int *accumulated_costs , int * disp_image,
int nx, int ny, int disp_range )
{
int pos = (blockIdx.x * blockDim.x) + threadIdx.x;
int size = nx * ny ;
if ( pos < size )
disp_image[pos] = 4 * d_find_min_index(&accumulated_costs[pos * disp_range], disp_range);
}
__device__ int d_find_min_index ( const int *v, const int disp_range )
{
int min = INT_MAX;
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
/* functions code */
void determine_costs ( const int *left_image, const int *right_image, int *costs,
const int nx, const int ny, const int disp_range )
{
std::fill(costs, costs+nx*ny*disp_range, 255u);
for ( int j = 0; j < ny; j++ ) {
for ( int d = 0; d < disp_range; d++ ) {
for ( int i = d; i < nx; i++ ) {
COSTS(i,j,d) = abs( LEFT_IMAGE(i,j) - RIGHT_IMAGE(i-d,j) );
}
}
}
}
void iterate_direction_dirxpos ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = 0; i < WIDTH; i++ ) {
if(i==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(0,j,d) += COSTS(0,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)) ,
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range);
}
}
}
}
void iterate_direction_dirypos ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = 0; j < HEIGHT; j++ ) {
if(j==0) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,0,d) += COSTS(i,0,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_dirxneg ( const int dirx, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int j = 0; j < HEIGHT; j++ ) {
for ( int i = WIDTH-1; i >= 0; i-- ) {
if(i==WIDTH-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(WIDTH-1,j,d) += COSTS(WIDTH-1,j,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i-dirx,j,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i-dirx,j)),
&ACCUMULATED_COSTS(i,j,0), nx, ny, disp_range );
}
}
}
}
void iterate_direction_diryneg ( const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
const int WIDTH = nx;
const int HEIGHT = ny;
for ( int i = 0; i < WIDTH; i++ ) {
for ( int j = HEIGHT-1; j >= 0; j-- ) {
if(j==HEIGHT-1) {
for ( int d = 0; d < disp_range; d++ ) {
ACCUMULATED_COSTS(i,HEIGHT-1,d) += COSTS(i,HEIGHT-1,d);
}
}
else {
evaluate_path( &ACCUMULATED_COSTS(i,j-diry,0),
&COSTS(i,j,0),
abs(LEFT_IMAGE(i,j)-LEFT_IMAGE(i,j-diry)),
&ACCUMULATED_COSTS(i,j,0) , nx, ny, disp_range);
}
}
}
}
/*
 *d_iterate_direction: GPU counterpart of iterate_direction(); launches the per-direction kernels
*/
void d_iterate_direction ( int* d_costs,
const int *d_left_image, int *d_accumulated_costs,
const int dirx, const int diry, //const int *left_image,
//const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
dim3 block1d(1);
dim3 grid1d(1);
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
//iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
d_iterate_direction_dirxpos <<< grid1d, block1d >>> ( dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
// iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx,
// ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
d_iterate_direction_dirypos <<< grid1d, block1d >>> ( diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
//iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = ny;
d_iterate_direction_dirxneg <<< grid1d, block1d >>> ( dirx, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
//iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
block1d.z = disp_range;
grid1d.y = nx;
d_iterate_direction_diryneg <<< grid1d, block1d >>> ( diry, d_left_image,
d_costs, d_accumulated_costs,
nx, ny, disp_range );
}
}
void iterate_direction ( const int dirx, const int diry, const int *left_image,
const int* costs, int *accumulated_costs,
const int nx, const int ny, const int disp_range )
{
// Walk along the edges in a clockwise fashion
if ( dirx > 0 ) {
// LEFT MOST EDGE
// Process every pixel along this edge
iterate_direction_dirxpos(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry > 0 ) {
// TOP MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the top left most pixel
iterate_direction_dirypos(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( dirx < 0 ) {
// RIGHT MOST EDGE
// Process every pixel along this edge only if diry ==
// 0. Otherwise skip the top right most pixel
iterate_direction_dirxneg(dirx,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
else if ( diry < 0 ) {
// BOTTOM MOST EDGE
// Process every pixel along this edge only if dirx ==
// 0. Otherwise skip the bottom left and bottom right pixel
iterate_direction_diryneg(diry,left_image,costs,accumulated_costs, nx, ny, disp_range);
}
}
// ADD two cost images
void inplace_sum_views ( int * im1, const int * im2,
const int nx, const int ny, const int disp_range )
{
int *im1_init = im1;
while ( im1 != (im1_init + (nx*ny*disp_range)) ) {
*im1 += *im2;
im1++;
im2++;
}
}
int find_min_index ( const int *v, const int disp_range )
{
int min = std::numeric_limits<int>::max();
int minind = -1;
for (int d=0; d < disp_range; d++) {
if(v[d]<min) {
min = v[d];
minind = d;
}
}
return minind;
}
void evaluate_path ( const int *prior, const int *local,
int path_intensity_gradient, int *curr_cost ,
const int nx, const int ny, const int disp_range )
{
memcpy(curr_cost, local, sizeof(int)*disp_range);
for ( int d = 0; d < disp_range; d++ ) {
int e_smooth = std::numeric_limits<int>::max();
for ( int d_p = 0; d_p < disp_range; d_p++ ) {
if ( d_p - d == 0 ) {
// No penality
e_smooth = MMIN(e_smooth,prior[d_p]);
} else if ( abs(d_p - d) == 1 ) {
// Small penality
e_smooth = MMIN(e_smooth,prior[d_p]+PENALTY1);
} else {
// Large penality
e_smooth =
MMIN(e_smooth,prior[d_p] +
MMAX(PENALTY1,
path_intensity_gradient ? PENALTY2/path_intensity_gradient : PENALTY2));
}
}
curr_cost[d] += e_smooth;
}
int min = std::numeric_limits<int>::max();
for ( int d = 0; d < disp_range; d++ ) {
if (prior[d]<min) min=prior[d];
}
for ( int d = 0; d < disp_range; d++ ) {
curr_cost[d]-=min;
}
}
void create_disparity_view ( const int *accumulated_costs , int * disp_image,
const int nx, const int ny, const int disp_range )
{
for ( int j = 0; j < ny; j++ ) {
for ( int i = 0; i < nx; i++ ) {
DISP_IMAGE(i,j) =
4 * find_min_index( &ACCUMULATED_COSTS(i,j,0), disp_range );
}
}
}
/*
* Links:
* http://www.dlr.de/rmc/rm/en/desktopdefault.aspx/tabid-9389/16104_read-39811/
* http://lunokhod.org/?p=1356
*/
// sgm code to run on the host
void sgmHost ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
const int nx = w;
const int ny = h;
// Processing all costs. W*H*D. D= disp_range
int *costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
determine_costs(h_leftIm, h_rightIm, costs, nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL || dir_accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
std::fill(dir_accumulated_costs, dir_accumulated_costs+nx*ny*disp_range, 0);
iterate_direction( dirx,diry, h_leftIm, costs, dir_accumulated_costs, nx, ny, disp_range);
inplace_sum_views( accumulated_costs, dir_accumulated_costs, nx, ny, disp_range);
}
free(costs);
free(dir_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// sgm code to run on the GPU
void sgmDevice ( const int *h_leftIm, const int *h_rightIm,
int *h_dispIm,
const int w, const int h, const int disp_range )
{
int nx = w;
int ny = h;
int image_size = nx * ny * sizeof(int); // size in bytes
int costs_size = disp_range * image_size;
// int image_dim = nx * ny;
int costs_dim = disp_range * nx * ny;
/* launching the determine_costs() kernel */
int *d_left_image;
int *d_right_image;
int *d_costs;
//error = cudaMalloc ((void **) &d_left_image, image_size)
cudaMalloc((void **) &d_left_image, image_size);
cudaMalloc((void **) &d_right_image, image_size);
cudaMalloc((void **) &d_costs, costs_size);
cudaMemset(d_costs, 0, costs_size);
cudaMemcpy(d_left_image, h_leftIm, image_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_right_image, h_rightIm, image_size, cudaMemcpyHostToDevice);
int block_x = 32;
int block_y = (disp_range >= 16) ? 16 : disp_range; // 32 * 16 = 512
int z_blocks = (disp_range % block_y)
? ceil((float) disp_range / block_y) + 1
: ceil((float) disp_range / block_y);
int grid_x = ceil((float) nx / block_x) * z_blocks;
int grid_y = ny;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
d_determine_costs <<< grid, block >>> (d_left_image, d_right_image, d_costs,
nx, ny, disp_range);
int *accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
//int *dir_accumulated_costs = (int *) calloc(nx*ny*disp_range,sizeof(int));
if (accumulated_costs == NULL) {
fprintf(stderr, "sgm_cuda:"
" Failed memory allocation(s).\n");
exit(1);
}
/* allocating space for dir_accumulated_costs and accumulated_costs on device */
int *d_accumulated_costs;
cudaMalloc((void **) &d_accumulated_costs, costs_size);
cudaMemset( d_accumulated_costs, 0, costs_size);
int *d_dir_accumulated_costs;
cudaMalloc((void **) &d_dir_accumulated_costs, costs_size);
// geometry for d_inplace_sum_views
dim3 block1d(1);
dim3 grid1d(1);
if (costs_dim >= 512)
{
block1d.x = 512;
grid1d.x = ceil((float) costs_dim/512);
}
else
{
block1d.x = costs_dim;
grid1d.x = 1;
}
int dirx=0,diry=0;
for(dirx=-1; dirx<2; dirx++) {
if(dirx==0 && diry==0) continue;
cudaMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry,
nx, ny, disp_range );
d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
nx, ny, disp_range);
}
dirx=0;
for(diry=-1; diry<2; diry++) {
if(dirx==0 && diry==0) continue;
cudaMemset( d_dir_accumulated_costs, 0, costs_size);
d_iterate_direction ( d_costs,
d_left_image, d_dir_accumulated_costs,
dirx, diry,
nx, ny, disp_range );
d_inplace_sum_views <<< grid1d, block1d >>> ( d_accumulated_costs, d_dir_accumulated_costs,
nx, ny, disp_range);
}
cudaMemcpy(accumulated_costs, d_accumulated_costs, costs_size, cudaMemcpyDeviceToHost);
//free(costs);
// device memory mgmt
cudaFree(d_dir_accumulated_costs);
cudaFree(d_left_image);
cudaFree(d_right_image);
cudaFree(d_costs);
cudaFree(d_accumulated_costs);
create_disparity_view( accumulated_costs, h_dispIm, nx, ny, disp_range );
free(accumulated_costs);
}
// print command line format
void usage ( char *command )
{
printf("Usage: %s [-h] [-d device] [-l leftimage] [-r rightimage] [-o dev_dispimage] [-t host_dispimage] [-p disprange] \n",command);
}
// main
int
main ( int argc, char** argv )
{
// default command line options
int deviceId = 0;
int disp_range = 32;
char *leftIn =(char *)"lbull.pgm",
*rightIn =(char *)"rbull.pgm",
*fileOut =(char *)"d_dbull.pgm",
*referenceOut=(char *)"h_dbull.pgm";
// parse command line arguments
int opt;
while( (opt = getopt(argc,argv,"d:l:o:r:t:p:h")) !=-1)
{
switch(opt)
{
case 'd': // device
if(sscanf(optarg,"%d",&deviceId)!=1)
{
usage(argv[0]);
exit(1);
}
break;
case 'l': // left image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
leftIn = strdup(optarg);
break;
case 'r': // right image filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
rightIn = strdup(optarg);
break;
case 'o': // output image (from device) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
fileOut = strdup(optarg);
break;
case 't': // output image (from host) filename
if(strlen(optarg)==0)
{
usage(argv[0]);
exit(1);
}
referenceOut = strdup(optarg);
break;
case 'p': // disp_range
if(sscanf(optarg,"%d",&disp_range)==0)
{
usage(argv[0]);
exit(1);
}
break;
case 'h': // help
usage(argv[0]);
exit(0);
break;
}
}
if(optind < argc) {
fprintf(stderr,"Error in arguments\n");
usage(argv[0]);
exit(1);
}
// select cuda device
cutilSafeCall( cudaSetDevice( deviceId ) );
// create events to measure host sgm time and device sgm time
cudaEvent_t startH, stopH, startD, stopD;
cudaEventCreate(&startH);
cudaEventCreate(&stopH);
cudaEventCreate(&startD);
cudaEventCreate(&stopD);
// allocate host memory
int* h_ldata=NULL;
int* h_rdata=NULL;
unsigned int h,w;
//load left pgm
if (cutLoadPGMi(leftIn, (unsigned int **)&h_ldata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", leftIn);
exit(1);
}
//load right pgm
if (cutLoadPGMi(rightIn, (unsigned int **)&h_rdata, &w, &h) != CUTTrue) {
printf("Failed to load image file: %s\n", rightIn);
exit(1);
}
// allocate mem for the result on host side
int* h_odata = (int*) malloc( h*w*sizeof(int));
int* reference = (int*) malloc( h*w*sizeof(int));
// sgm at host
cudaEventRecord( startH, 0 );
sgmHost(h_ldata, h_rdata, reference, w, h, disp_range);
cudaEventRecord( stopH, 0 );
cudaEventSynchronize( stopH );
// sgm at GPU
cudaEventRecord( startD, 0 );
sgmDevice(h_ldata, h_rdata, h_odata, w, h, disp_range);
cudaEventRecord( stopD, 0 );
cudaEventSynchronize( stopD );
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
float timeH, timeD;
cudaEventElapsedTime( &timeH, startH, stopH );
printf( "Host processing time: %f (ms)\n", timeH);
cudaEventElapsedTime( &timeD, startD, stopD );
printf( "Device processing time: %f (ms)\n", timeD);
printf( "SpeedUp: %f (ms)\n", timeH /timeD);
  // save output images
if (cutSavePGMi(referenceOut, (unsigned int *)reference, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", referenceOut);
exit(1);
}
if (cutSavePGMi(fileOut,(unsigned int *) h_odata, w, h) != CUTTrue) {
printf("Failed to save image file: %s\n", fileOut);
exit(1);
}
// cleanup memory
cutFree( h_ldata);
cutFree( h_rdata);
free( h_odata);
free( reference);
cutilDeviceReset();
}
|
cutlassB_f16_aligned_k64_dropout.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
| cutlassB_f16_aligned_k64_dropout.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, cutlass::half_t, true, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
|
4a2dae36705c1030e9a91531f0e8359b36f6be4b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <unordered_map>
#include <vector>
#include <hip/hip_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != hipSuccess ) \
{ printf("Error: %s\n", hipGetErrorString(err)); return err; } \
};
/* This routine computes the tensor contraction \f[ D = alpha * A * B + beta * C \f] using the staged-API */
cutensorStatus_t cutensorContractionSimple(const cutensorHandle_t* handle,
const void* alpha, const void *A, const cutensorTensorDescriptor_t* descA, const int32_t modeA[],
const void *B, const cutensorTensorDescriptor_t* descB, const int32_t modeB[],
const void* beta, const void *C, const cutensorTensorDescriptor_t* descC, const int32_t modeC[],
void *D, const cutensorTensorDescriptor_t* descD, const int32_t modeD[],
cutensorComputeType_t typeCompute, cutensorAlgo_t algo, cutensorWorksizePreference_t workPref,
hipStream_t stream)
{
/**********************************************
* Retrieve the memory alignment for each tensor
**********************************************/
uint32_t alignmentRequirementA;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
A, descA, &alignmentRequirementA));
uint32_t alignmentRequirementB;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
B, descB, &alignmentRequirementB));
uint32_t alignmentRequirementC;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
C, descC, &alignmentRequirementC));
uint32_t alignmentRequirementD;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
D, descD, &alignmentRequirementD));
/*******************************
* Create Contraction Descriptor
*******************************/
cutensorContractionDescriptor_t desc;
HANDLE_ERROR(cutensorInitContractionDescriptor(handle,
&desc,
descA, modeA, alignmentRequirementA,
descB, modeB, alignmentRequirementB,
descC, modeC, alignmentRequirementC,
descD, modeD, alignmentRequirementD,
typeCompute));
/**************************
* Set the algorithm to use
***************************/
cutensorContractionFind_t find;
HANDLE_ERROR(cutensorInitContractionFind(
handle, &find,
algo));
/**********************
* Query workspace
**********************/
size_t worksize = 0;
HANDLE_ERROR(cutensorContractionGetWorkspaceSize(handle,
&desc,
&find,
workPref, &worksize));
void *work = nullptr;
if (worksize > 0)
{
if(hipSuccess != hipMalloc(&work, worksize))
{
work = nullptr;
worksize = 0;
}
}
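    // If allocating the workspace fails, fall back to worksize = 0 so the plan
    // below is created for, and executed with, no workspace at all.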
/**************************
* Create Contraction Plan
**************************/
cutensorContractionPlan_t plan;
HANDLE_ERROR(cutensorInitContractionPlan(handle,
&plan,
&desc,
&find,
worksize));
/**********************
* Run
**********************/
HANDLE_ERROR(cutensorContraction(handle,
&plan,
                                     // alpha and beta already point at the caller's host-side
                                     // scalars, so forward them directly (no extra address-of).
                                     alpha, A, B,
                                     beta, C, D,
work, worksize, stream));
return CUTENSOR_STATUS_SUCCESS;
}
int main()
{
typedef float floatTypeA;
typedef float floatTypeB;
typedef float floatTypeC;
typedef float floatTypeCompute;
hipDataType typeA = HIP_R_32F;
hipDataType typeB = HIP_R_32F;
hipDataType typeC = HIP_R_32F;
cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F;
floatTypeCompute alpha = (floatTypeCompute) 1.1f;
floatTypeCompute beta = (floatTypeCompute) 0.f;
/**********************
* Computing: C_{m,u,n,v} = alpha * A_{m,h,k,n} B_{u,k,v,h} + beta * C_{m,u,n,v}
**********************/
std::vector<int> modeC{'m','u','n','v'};
std::vector<int> modeA{'m','h','k','n'};
std::vector<int> modeB{'u','k','v','h'};
int nmodeA = modeA.size();
int nmodeB = modeB.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['m'] = 96;
extent['n'] = 96;
extent['u'] = 96;
extent['v'] = 64;
extent['h'] = 64;
extent['k'] = 64;
double gflops = (2.0 * extent['m'] * extent['n'] * extent['u'] * extent['v'] * extent['k'] * extent['h']) /1e9;
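    // Flop-count reasoning: every output element C_{m,u,n,v} accumulates one
    // multiply and one add per (k,h) pair, so the contraction performs
    // 2 * m*n*u*v * k*h floating-point operations (hence the factor 2 above).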
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentB;
for (auto mode : modeB)
extentB.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsB = 1;
for (auto mode : modeB)
elementsB *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeB = sizeof(floatTypeB) * elementsB;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n", (sizeA + sizeB + sizeC)/1024./1024./1024);
void *A_d, *B_d, *C_d;
HANDLE_CUDA_ERROR(hipMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(hipMalloc((void**) &B_d, sizeB));
HANDLE_CUDA_ERROR(hipMalloc((void**) &C_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeB *B = (floatTypeB*) malloc(sizeof(floatTypeB) * elementsB);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || B == NULL || C == NULL)
{
printf("Error: Host allocation of A, B, or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for (int64_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsB; i++)
B[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX - 0.5)*100;
HANDLE_CUDA_ERROR(hipMemcpy(A_d, A, sizeA, hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipMemcpy(B_d, B, sizeB, hipMemcpyHostToDevice));
HANDLE_CUDA_ERROR(hipMemcpy(C_d, C, sizeC, hipMemcpyHostToDevice));
/*************************
* cuTENSOR
*************************/
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descB;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descB,
nmodeB,
extentB.data(),
NULL /* stride */,
typeB, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
HANDLE_ERROR(cutensorContractionSimple(&handle,
(void*)&alpha, A_d, &descA, modeA.data(),
B_d, &descB, modeB.data(),
(void*)&beta, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
typeCompute, CUTENSOR_ALGO_DEFAULT,
CUTENSOR_WORKSPACE_RECOMMENDED, 0 /* stream */));
return 0;
}
| 4a2dae36705c1030e9a91531f0e8359b36f6be4b.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - Neither the name(s) of the copyright holder(s) nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <stdio.h>
#include <unordered_map>
#include <vector>
#include <cuda_runtime.h>
#include <cutensor.h>
#define HANDLE_ERROR(x) \
{ const auto err = x; \
if( err != CUTENSOR_STATUS_SUCCESS ) \
{ printf("Error: %s\n", cutensorGetErrorString(err)); return err; } \
};
#define HANDLE_CUDA_ERROR(x) \
{ const auto err = x; \
if( err != cudaSuccess ) \
{ printf("Error: %s\n", cudaGetErrorString(err)); return err; } \
};
/* This routine computes the tensor contraction \f[ D = alpha * A * B + beta * C \f] using the staged-API */
cutensorStatus_t cutensorContractionSimple(const cutensorHandle_t* handle,
const void* alpha, const void *A, const cutensorTensorDescriptor_t* descA, const int32_t modeA[],
const void *B, const cutensorTensorDescriptor_t* descB, const int32_t modeB[],
const void* beta, const void *C, const cutensorTensorDescriptor_t* descC, const int32_t modeC[],
void *D, const cutensorTensorDescriptor_t* descD, const int32_t modeD[],
cutensorComputeType_t typeCompute, cutensorAlgo_t algo, cutensorWorksizePreference_t workPref,
cudaStream_t stream)
{
/**********************************************
* Retrieve the memory alignment for each tensor
**********************************************/
uint32_t alignmentRequirementA;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
A, descA, &alignmentRequirementA));
uint32_t alignmentRequirementB;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
B, descB, &alignmentRequirementB));
uint32_t alignmentRequirementC;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
C, descC, &alignmentRequirementC));
uint32_t alignmentRequirementD;
HANDLE_ERROR(cutensorGetAlignmentRequirement(handle,
D, descD, &alignmentRequirementD));
/*******************************
* Create Contraction Descriptor
*******************************/
cutensorContractionDescriptor_t desc;
HANDLE_ERROR(cutensorInitContractionDescriptor(handle,
&desc,
descA, modeA, alignmentRequirementA,
descB, modeB, alignmentRequirementB,
descC, modeC, alignmentRequirementC,
descD, modeD, alignmentRequirementD,
typeCompute));
/**************************
* Set the algorithm to use
***************************/
cutensorContractionFind_t find;
HANDLE_ERROR(cutensorInitContractionFind(
handle, &find,
algo));
/**********************
* Query workspace
**********************/
size_t worksize = 0;
HANDLE_ERROR(cutensorContractionGetWorkspaceSize(handle,
&desc,
&find,
workPref, &worksize));
void *work = nullptr;
if (worksize > 0)
{
if(cudaSuccess != cudaMalloc(&work, worksize))
{
work = nullptr;
worksize = 0;
}
}
/**************************
* Create Contraction Plan
**************************/
cutensorContractionPlan_t plan;
HANDLE_ERROR(cutensorInitContractionPlan(handle,
&plan,
&desc,
&find,
worksize));
/**********************
* Run
**********************/
HANDLE_ERROR(cutensorContraction(handle,
&plan,
                                     // alpha and beta already point at the caller's host-side
                                     // scalars, so forward them directly (no extra address-of).
                                     alpha, A, B,
                                     beta, C, D,
work, worksize, stream));
return CUTENSOR_STATUS_SUCCESS;
}
int main()
{
typedef float floatTypeA;
typedef float floatTypeB;
typedef float floatTypeC;
typedef float floatTypeCompute;
cudaDataType_t typeA = CUDA_R_32F;
cudaDataType_t typeB = CUDA_R_32F;
cudaDataType_t typeC = CUDA_R_32F;
cutensorComputeType_t typeCompute = CUTENSOR_COMPUTE_32F;
floatTypeCompute alpha = (floatTypeCompute) 1.1f;
floatTypeCompute beta = (floatTypeCompute) 0.f;
/**********************
* Computing: C_{m,u,n,v} = alpha * A_{m,h,k,n} B_{u,k,v,h} + beta * C_{m,u,n,v}
**********************/
std::vector<int> modeC{'m','u','n','v'};
std::vector<int> modeA{'m','h','k','n'};
std::vector<int> modeB{'u','k','v','h'};
int nmodeA = modeA.size();
int nmodeB = modeB.size();
int nmodeC = modeC.size();
std::unordered_map<int, int64_t> extent;
extent['m'] = 96;
extent['n'] = 96;
extent['u'] = 96;
extent['v'] = 64;
extent['h'] = 64;
extent['k'] = 64;
double gflops = (2.0 * extent['m'] * extent['n'] * extent['u'] * extent['v'] * extent['k'] * extent['h']) /1e9;
std::vector<int64_t> extentC;
for (auto mode : modeC)
extentC.push_back(extent[mode]);
std::vector<int64_t> extentA;
for (auto mode : modeA)
extentA.push_back(extent[mode]);
std::vector<int64_t> extentB;
for (auto mode : modeB)
extentB.push_back(extent[mode]);
/**********************
* Allocating data
**********************/
size_t elementsA = 1;
for (auto mode : modeA)
elementsA *= extent[mode];
size_t elementsB = 1;
for (auto mode : modeB)
elementsB *= extent[mode];
size_t elementsC = 1;
for (auto mode : modeC)
elementsC *= extent[mode];
size_t sizeA = sizeof(floatTypeA) * elementsA;
size_t sizeB = sizeof(floatTypeB) * elementsB;
size_t sizeC = sizeof(floatTypeC) * elementsC;
printf("Total memory: %.2f GiB\n", (sizeA + sizeB + sizeC)/1024./1024./1024);
void *A_d, *B_d, *C_d;
HANDLE_CUDA_ERROR(cudaMalloc((void**) &A_d, sizeA));
HANDLE_CUDA_ERROR(cudaMalloc((void**) &B_d, sizeB));
HANDLE_CUDA_ERROR(cudaMalloc((void**) &C_d, sizeC));
floatTypeA *A = (floatTypeA*) malloc(sizeof(floatTypeA) * elementsA);
floatTypeB *B = (floatTypeB*) malloc(sizeof(floatTypeB) * elementsB);
floatTypeC *C = (floatTypeC*) malloc(sizeof(floatTypeC) * elementsC);
if (A == NULL || B == NULL || C == NULL)
{
printf("Error: Host allocation of A, B, or C.\n");
return -1;
}
/*******************
* Initialize data
*******************/
for (int64_t i = 0; i < elementsA; i++)
A[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsB; i++)
B[i] = (((float) rand())/RAND_MAX - 0.5)*100;
for (int64_t i = 0; i < elementsC; i++)
C[i] = (((float) rand())/RAND_MAX - 0.5)*100;
HANDLE_CUDA_ERROR(cudaMemcpy(A_d, A, sizeA, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaMemcpy(B_d, B, sizeB, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERROR(cudaMemcpy(C_d, C, sizeC, cudaMemcpyHostToDevice));
/*************************
* cuTENSOR
*************************/
cutensorHandle_t handle;
HANDLE_ERROR(cutensorInit(&handle));
/**********************
* Create Tensor Descriptors
**********************/
cutensorTensorDescriptor_t descA;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descA,
nmodeA,
extentA.data(),
NULL /* stride */,
typeA, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descB;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descB,
nmodeB,
extentB.data(),
NULL /* stride */,
typeB, CUTENSOR_OP_IDENTITY));
cutensorTensorDescriptor_t descC;
HANDLE_ERROR(cutensorInitTensorDescriptor(&handle,
&descC,
nmodeC,
extentC.data(),
NULL /* stride */,
typeC, CUTENSOR_OP_IDENTITY));
HANDLE_ERROR(cutensorContractionSimple(&handle,
(void*)&alpha, A_d, &descA, modeA.data(),
B_d, &descB, modeB.data(),
(void*)&beta, C_d, &descC, modeC.data(),
C_d, &descC, modeC.data(),
typeCompute, CUTENSOR_ALGO_DEFAULT,
CUTENSOR_WORKSPACE_RECOMMENDED, 0 /* stream */));
return 0;
}
|
63a5020dbd2be27ff427e2df1a42a9ee23cdf77f.hip | // !!! This is a file automatically generated by hipify!!!
/*
# compile
$ nvcc -o sigmoid sigmoid.cu
# numpy counterpart
import numpy as np
m = np.array(((0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)))
s = 1/(1+np.exp(-m))
sd = s*(1-s)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
// kernel of device sigmoid function
__global__
void kSigmoid(const int nThreads, float const *input, float *output){
/* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x).
Inputs:
input: array
output: array, the results of the computation are to be stored here
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = 1.0 / (1.0 + ::exp(-input[i]));
}
}
// cuda version (device-side) of sigmoid function
void dSigmoid(float const *input, float *output, const int height, const int width){
hipLaunchKernelGGL(( kSigmoid) , dim3(height), dim3(width) , 0, 0, height * width, input, output);
hipDeviceSynchronize();
}
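// Note: dSigmoid launches `height` blocks of `width` threads, i.e. one thread per
// matrix element; the grid-stride loop in kSigmoid keeps the result correct even
// if fewer threads than elements were launched.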
// kernel of derivative of sigmoid function
__global__
void kSigmoid_d(const int nThreads, float const *input, float *output) {
/* Computes the value of the sigmoid function derivative f'(x) = f(x)(1 - f(x)),
where f(x) is sigmoid function.
Inputs:
input: array
output: array, the results of the computation are to be stored here:
x(1 - x) for every element of the input matrix m1.
*/
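    // Derivation: for f(x) = 1/(1 + e^-x), f'(x) = e^-x / (1 + e^-x)^2
    // = f(x) * (1 - f(x)), so this kernel only needs the already-computed
    // sigmoid values as its input.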
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = input[i] * (1 - input[i]);
}
}
// derivative of sigmoid function (d: device, d: derivative)
float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){
hipLaunchKernelGGL(( kSigmoid_d) , dim3(rows), dim3(columns) , 0, 0, rows*columns, input, output);
hipDeviceSynchronize();
return output;
}
int main(void)
{
// host initialization
const int M1_SIZE = 12; // 4x3 matrix
const int M1_BYTES = M1_SIZE * sizeof(float);
float h_m1[M1_SIZE];
for (int i = 0; i < M1_SIZE; i++)
{
h_m1[i] = float(i); // 0, 1, .. 11
}
float h_out[M1_SIZE]; // sigmoid
// GPU
float *d_m1;
float *d_out;
hipMalloc((void**) &d_m1, M1_BYTES);
hipMalloc((void**) &d_out, M1_BYTES);
// sigmoid
hipMemcpy(d_m1, h_m1, M1_BYTES, hipMemcpyHostToDevice);
dSigmoid(d_m1, d_out, 4, 3);
hipMemcpy(h_out, d_out, M1_BYTES, hipMemcpyDeviceToHost);
// print result
printf("sigmoid\n");
for (int i = 0; i < M1_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// sigmoid derivative
hipMemcpy(d_m1, h_out, M1_BYTES, hipMemcpyHostToDevice);
dSigmoid_d(d_m1, d_out, 4, 3);
hipMemcpy(h_out, d_out, M1_BYTES, hipMemcpyDeviceToHost);
// print result
printf("sigmoid derivative\n");
for (int i = 0; i < M1_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// free memory
hipFree(d_m1);
hipFree(d_out);
// free(h_m1);
// free(h_m2);
// free(h_out);
} | 63a5020dbd2be27ff427e2df1a42a9ee23cdf77f.cu | /*
# compile
$ nvcc -o sigmoid sigmoid.cu
# numpy counterpart
import numpy as np
m = np.array(((0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)))
s = 1/(1+np.exp(-m))
sd = s*(1-s)
*/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
// kernel of device sigmoid function
__global__
void kSigmoid(const int nThreads, float const *input, float *output){
/* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x).
Inputs:
input: array
output: array, the results of the computation are to be stored here
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = 1.0 / (1.0 + std::exp(-input[i]));
}
}
// cuda version (device-side) of sigmoid function
void dSigmoid(float const *input, float *output, const int height, const int width){
kSigmoid <<< height, width >>> (height * width, input, output);
cudaDeviceSynchronize();
}
// kernel of derivative of sigmoid function
__global__
void kSigmoid_d(const int nThreads, float const *input, float *output) {
/* Computes the value of the sigmoid function derivative f'(x) = f(x)(1 - f(x)),
where f(x) is sigmoid function.
Inputs:
input: array
output: array, the results of the computation are to be stored here:
x(1 - x) for every element of the input matrix m1.
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = input[i] * (1 - input[i]);
}
}
// derivative of sigmoid function (d: device, d: derivative)
float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){
kSigmoid_d <<< rows, columns >>> (rows*columns, input, output);
cudaDeviceSynchronize();
return output;
}
int main(void)
{
// host initialization
const int M1_SIZE = 12; // 4x3 matrix
const int M1_BYTES = M1_SIZE * sizeof(float);
float h_m1[M1_SIZE];
for (int i = 0; i < M1_SIZE; i++)
{
h_m1[i] = float(i); // 0, 1, .. 11
}
float h_out[M1_SIZE]; // sigmoid
// GPU
float *d_m1;
float *d_out;
cudaMalloc((void**) &d_m1, M1_BYTES);
cudaMalloc((void**) &d_out, M1_BYTES);
// sigmoid
cudaMemcpy(d_m1, h_m1, M1_BYTES, cudaMemcpyHostToDevice);
dSigmoid(d_m1, d_out, 4, 3);
cudaMemcpy(h_out, d_out, M1_BYTES, cudaMemcpyDeviceToHost);
// print result
printf("sigmoid\n");
for (int i = 0; i < M1_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// sigmoid derivative
cudaMemcpy(d_m1, h_out, M1_BYTES, cudaMemcpyHostToDevice);
dSigmoid_d(d_m1, d_out, 4, 3);
cudaMemcpy(h_out, d_out, M1_BYTES, cudaMemcpyDeviceToHost);
// print result
printf("sigmoid derivative\n");
for (int i = 0; i < M1_SIZE; i++)
{
printf("h_out[%d] = %f\n", i, h_out[i]);
}
// free memory
cudaFree(d_m1);
cudaFree(d_out);
// free(h_m1);
// free(h_m2);
// free(h_out);
} |
c07515b930a8893a6047ed7b42218acce424e9a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
using at::PackedTensorAccessor64;
using at::RestrictPtrTraits;
// A chunk of work is blocksize-many points.
// There are N clouds in the batch, and P points in each cloud.
// The number of potential chunks to do per cloud is (1+(P-1)/blocksize),
// which we call chunks_per_cloud.
// These (N*chunks_per_cloud) chunks are divided among the gridSize-many blocks.
// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc .
// In chunk i, we work on cloud (i/chunks_per_cloud) on points starting from
// blocksize*(i%chunks_per_cloud).
// Explanation of the calculation is in the cpp file.
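// Worked example (hypothetical sizes): with blockDim.x = 32 threads, P = 1000
// points and N = 4 clouds, chunks_per_cloud = 1 + (1000 - 1) / 32 = 32 (integer
// division), so chunks_to_do = 4 * 32 = 128 chunks are shared among the
// gridDim.x blocks.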
// EightDirections(t) runs t(a,b,c) for every combination of boolean a, b, c.
template <class T>
static __device__ void EightDirections(T&& t) {
t(false, false, false);
t(false, false, true);
t(false, true, false);
t(false, true, true);
t(true, false, false);
t(true, false, true);
t(true, true, false);
t(true, true, true);
}
__global__ void PointsToVolumesForwardKernel(
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_3d,
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_features,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> volume_densities,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> volume_features,
PackedTensorAccessor64<int64_t, 2, RestrictPtrTraits> grid_sizes,
PackedTensorAccessor64<float, 2, RestrictPtrTraits> mask,
const float point_weight,
const bool align_corners,
const bool splat,
const int64_t batch_size,
const int64_t P,
const int64_t n_features) {
const int64_t chunks_per_cloud = (1 + (P - 1) / blockDim.x);
const int64_t chunks_to_do = batch_size * chunks_per_cloud;
const int scale_offset = align_corners ? 1 : 0;
const float offset = align_corners ? 0 : 0.5;
for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
const int64_t batch_index = chunk / chunks_per_cloud;
const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
int64_t point_idx = start_point + threadIdx.x;
if (point_idx >= P) {
continue;
}
if (mask[batch_index][point_idx] == 0) {
continue;
}
auto volume_densities_aa = volume_densities[batch_index][0];
auto volume_features_aa = volume_features[batch_index];
auto point = points_3d[batch_index][point_idx];
auto point_features = points_features[batch_index][point_idx];
const int64_t grid_size_x = grid_sizes[batch_index][2];
const int64_t grid_size_y = grid_sizes[batch_index][1];
const int64_t grid_size_z = grid_sizes[batch_index][0];
auto increment_location =
[&](int64_t x, int64_t y, int64_t z, float weight) {
if (x >= grid_size_x || y >= grid_size_y || z >= grid_size_z) {
return;
}
if (x < 0 || y < 0 || z < 0) {
return;
}
atomicAdd(&volume_densities_aa[z][y][x], weight * point_weight);
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
atomicAdd(
&volume_features_aa[feature_idx][z][y][x],
point_features[feature_idx] * weight * point_weight);
}
};
if (!splat) {
long x = std::lround(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset);
long y = std::lround(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset);
long z = std::lround(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset);
increment_location(x, y, z, 1);
} else {
float x = 0, y = 0, z = 0;
float rx = std::modf(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset, &x);
float ry = std::modf(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset, &y);
float rz = std::modf(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset, &z);
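      // rx, ry, rz are the fractional offsets of the point within its voxel
      // cell; the eight neighbouring voxels receive trilinear weights of the
      // form (up ? r : 1 - r) per axis, which sum to 1 over the 8 corners.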
auto handle_point = [&](bool up_x, bool up_y, bool up_z) {
float weight =
(up_x ? rx : 1 - rx) * (up_y ? ry : 1 - ry) * (up_z ? rz : 1 - rz);
increment_location(x + up_x, y + up_y, z + up_z, weight);
};
EightDirections(handle_point);
}
}
}
void PointsToVolumesForwardCuda(
const at::Tensor& points_3d,
const at::Tensor& points_features,
const at::Tensor& volume_densities,
const at::Tensor& volume_features,
const at::Tensor& grid_sizes,
const at::Tensor& mask,
const float point_weight,
const bool align_corners,
const bool splat) {
// Check inputs are on the same device
at::TensorArg points_3d_t{points_3d, "points_3d", 1},
points_features_t{points_features, "points_features", 2},
volume_densities_t{volume_densities, "volume_densities", 3},
volume_features_t{volume_features, "volume_features", 4},
grid_sizes_t{grid_sizes, "grid_sizes", 5}, mask_t{mask, "mask", 6};
at::CheckedFrom c = "PointsToVolumesForwardCuda";
at::checkAllSameGPU(
c,
{points_3d_t,
points_features_t,
volume_densities_t,
volume_features_t,
grid_sizes_t,
mask_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points_3d.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int blocks = 1024;
const int threads = 32;
const int64_t batch_size = points_3d.size(0);
const int64_t P = points_3d.size(1);
const int64_t n_features = points_features.size(2);
hipLaunchKernelGGL(( PointsToVolumesForwardKernel), dim3(blocks), dim3(threads), 0, stream,
points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
volume_densities.packed_accessor64<float, 5, RestrictPtrTraits>(),
volume_features.packed_accessor64<float, 5, RestrictPtrTraits>(),
grid_sizes.packed_accessor64<int64_t, 2, RestrictPtrTraits>(),
mask.packed_accessor64<float, 2, RestrictPtrTraits>(),
point_weight,
align_corners,
splat,
batch_size,
P,
n_features);
}
__global__ void PointsToVolumesBackwardKernel(
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_3d,
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_features,
const PackedTensorAccessor64<int64_t, 2, RestrictPtrTraits> grid_sizes,
const PackedTensorAccessor64<float, 2, RestrictPtrTraits> mask,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> grad_volume_densities,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> grad_volume_features,
PackedTensorAccessor64<float, 3, RestrictPtrTraits> grad_points_3d,
PackedTensorAccessor64<float, 3, RestrictPtrTraits> grad_points_features,
const float point_weight,
const bool align_corners,
const bool splat,
const int64_t batch_size,
const int64_t P,
const int64_t n_features) {
const int64_t chunks_per_cloud = (1 + (P - 1) / blockDim.x);
const int64_t chunks_to_do = batch_size * chunks_per_cloud;
const int scale_offset = align_corners ? 1 : 0;
const float offset = align_corners ? 0 : 0.5;
// Note that the gradients belonging to each point are only touched by
// a single thread in one of our "chunks", which is in a single block.
// So unlike in the forward pass, there's no need for atomics here.
for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
const int64_t batch_index = chunk / chunks_per_cloud;
const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
int64_t point_idx = start_point + threadIdx.x;
if (point_idx >= P) {
continue;
}
if (mask[batch_index][point_idx] == 0) {
continue;
}
auto point = points_3d[batch_index][point_idx];
auto point_features = points_features[batch_index][point_idx];
auto grad_point = grad_points_3d[batch_index][point_idx];
auto grad_point_features = grad_points_features[batch_index][point_idx];
auto grad_volume_densities_a = grad_volume_densities[batch_index][0];
auto grad_volume_features_a = grad_volume_features[batch_index];
const int64_t grid_size_x = grid_sizes[batch_index][2];
const int64_t grid_size_y = grid_sizes[batch_index][1];
const int64_t grid_size_z = grid_sizes[batch_index][0];
auto increment_location =
[&](int64_t x, int64_t y, int64_t z, float weight) {
if (x >= grid_size_x || y >= grid_size_y || z >= grid_size_z) {
return false;
}
if (x < 0 || y < 0 || z < 0) {
return false;
}
// This is a forward line, for comparison
// volume_densities_aa[z][y][x] += weight * point_weight;
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
// This is a forward line, for comparison
// volume_features_aa[feature_idx][z][y][x] +=
// point_features[feature_idx] * weight * point_weight;
grad_point_features[feature_idx] +=
grad_volume_features_a[feature_idx][z][y][x] * weight *
point_weight;
}
return true;
};
if (!splat) {
long x = std::lround(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset);
long y = std::lround(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset);
long z = std::lround(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset);
increment_location(x, y, z, 1);
} else {
float x = 0, y = 0, z = 0;
float rx = std::modf(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset, &x);
float ry = std::modf(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset, &y);
float rz = std::modf(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset, &z);
auto handle_point = [&](bool up_x, bool up_y, bool up_z) {
float weight_x = (up_x ? rx : 1 - rx);
float weight_y = (up_y ? ry : 1 - ry);
float weight_z = (up_z ? rz : 1 - rz);
float weight = weight_x * weight_y * weight_z;
if (increment_location(x + up_x, y + up_y, z + up_z, weight)) {
// weight * point_weight has been added to
// volume_densities_aa[z+up_z][y+up_y][x+up_x]
// Also for each feature_idx,
// point_features[feature_idx] * weight * point_weight
// has been added to
// volume_features_aa[feature_idx][z+up_z][y+up_y][x+up_x]
double source_gradient =
grad_volume_densities_a[z + up_z][y + up_y][x + up_x];
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
source_gradient += point_features[feature_idx] *
grad_volume_features_a[feature_idx][z + up_z][y + up_y]
[x + up_x];
}
grad_point[0] += source_gradient * (up_x ? 1 : -1) * weight_y *
weight_z * 0.5 * (grid_size_x - scale_offset) * point_weight;
grad_point[1] += source_gradient * (up_y ? 1 : -1) * weight_x *
weight_z * 0.5 * (grid_size_y - scale_offset) * point_weight;
grad_point[2] += source_gradient * (up_z ? 1 : -1) * weight_x *
weight_y * 0.5 * (grid_size_z - scale_offset) * point_weight;
}
};
EightDirections(handle_point);
}
}
}
void PointsToVolumesBackwardCuda(
const at::Tensor& points_3d,
const at::Tensor& points_features,
const at::Tensor& grid_sizes,
const at::Tensor& mask,
const float point_weight,
const bool align_corners,
const bool splat,
const at::Tensor& grad_volume_densities,
const at::Tensor& grad_volume_features,
const at::Tensor& grad_points_3d,
const at::Tensor& grad_points_features) {
// Check inputs are on the same device
at::TensorArg points_3d_t{points_3d, "points_3d", 1},
points_features_t{points_features, "points_features", 2},
grid_sizes_t{grid_sizes, "grid_sizes", 3}, mask_t{mask, "mask", 4},
grad_volume_densities_t{
grad_volume_densities, "grad_volume_densities", 8},
grad_volume_features_t{grad_volume_features, "grad_volume_features", 9},
grad_points_3d_t{grad_points_3d, "grad_points_3d", 10},
grad_points_features_t{grad_points_features, "grad_points_features", 11};
at::CheckedFrom c = "PointsToVolumesBackwardCuda";
at::checkAllSameGPU(
c,
{points_3d_t,
points_features_t,
grid_sizes_t,
mask_t,
grad_volume_densities_t,
grad_volume_features_t,
grad_points_3d_t,
grad_points_features_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points_3d.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int blocks = 1024;
const int threads = 32;
const int64_t batch_size = points_3d.size(0);
const int64_t P = points_3d.size(1);
const int64_t n_features = points_features.size(2);
hipLaunchKernelGGL(( PointsToVolumesBackwardKernel), dim3(blocks), dim3(threads), 0, stream,
points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
grid_sizes.packed_accessor64<int64_t, 2, RestrictPtrTraits>(),
mask.packed_accessor64<float, 2, RestrictPtrTraits>(),
grad_volume_densities.packed_accessor64<float, 5, RestrictPtrTraits>(),
grad_volume_features.packed_accessor64<float, 5, RestrictPtrTraits>(),
grad_points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
grad_points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
point_weight,
align_corners,
splat,
batch_size,
P,
n_features);
}
| c07515b930a8893a6047ed7b42218acce424e9a7.cu | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
using at::PackedTensorAccessor64;
using at::RestrictPtrTraits;
// A chunk of work is blocksize-many points.
// There are N clouds in the batch, and P points in each cloud.
// The number of potential chunks to do per cloud is (1+(P-1)/blocksize),
// which we call chunks_per_cloud.
// These (N*chunks_per_cloud) chunks are divided among the gridSize-many blocks.
// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc .
// In chunk i, we work on cloud (i/chunks_per_cloud) on points starting from
// blocksize*(i%chunks_per_cloud).
// Explanation of the calculation is in the cpp file.
// EightDirections(t) runs t(a,b,c) for every combination of boolean a, b, c.
template <class T>
static __device__ void EightDirections(T&& t) {
t(false, false, false);
t(false, false, true);
t(false, true, false);
t(false, true, true);
t(true, false, false);
t(true, false, true);
t(true, true, false);
t(true, true, true);
}
__global__ void PointsToVolumesForwardKernel(
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_3d,
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_features,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> volume_densities,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> volume_features,
PackedTensorAccessor64<int64_t, 2, RestrictPtrTraits> grid_sizes,
PackedTensorAccessor64<float, 2, RestrictPtrTraits> mask,
const float point_weight,
const bool align_corners,
const bool splat,
const int64_t batch_size,
const int64_t P,
const int64_t n_features) {
const int64_t chunks_per_cloud = (1 + (P - 1) / blockDim.x);
const int64_t chunks_to_do = batch_size * chunks_per_cloud;
const int scale_offset = align_corners ? 1 : 0;
const float offset = align_corners ? 0 : 0.5;
for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
const int64_t batch_index = chunk / chunks_per_cloud;
const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
int64_t point_idx = start_point + threadIdx.x;
if (point_idx >= P) {
continue;
}
if (mask[batch_index][point_idx] == 0) {
continue;
}
auto volume_densities_aa = volume_densities[batch_index][0];
auto volume_features_aa = volume_features[batch_index];
auto point = points_3d[batch_index][point_idx];
auto point_features = points_features[batch_index][point_idx];
const int64_t grid_size_x = grid_sizes[batch_index][2];
const int64_t grid_size_y = grid_sizes[batch_index][1];
const int64_t grid_size_z = grid_sizes[batch_index][0];
auto increment_location =
[&](int64_t x, int64_t y, int64_t z, float weight) {
if (x >= grid_size_x || y >= grid_size_y || z >= grid_size_z) {
return;
}
if (x < 0 || y < 0 || z < 0) {
return;
}
atomicAdd(&volume_densities_aa[z][y][x], weight * point_weight);
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
atomicAdd(
&volume_features_aa[feature_idx][z][y][x],
point_features[feature_idx] * weight * point_weight);
}
};
if (!splat) {
long x = std::lround(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset);
long y = std::lround(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset);
long z = std::lround(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset);
increment_location(x, y, z, 1);
} else {
float x = 0, y = 0, z = 0;
float rx = std::modf(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset, &x);
float ry = std::modf(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset, &y);
float rz = std::modf(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset, &z);
auto handle_point = [&](bool up_x, bool up_y, bool up_z) {
float weight =
(up_x ? rx : 1 - rx) * (up_y ? ry : 1 - ry) * (up_z ? rz : 1 - rz);
increment_location(x + up_x, y + up_y, z + up_z, weight);
};
EightDirections(handle_point);
}
}
}
void PointsToVolumesForwardCuda(
const at::Tensor& points_3d,
const at::Tensor& points_features,
const at::Tensor& volume_densities,
const at::Tensor& volume_features,
const at::Tensor& grid_sizes,
const at::Tensor& mask,
const float point_weight,
const bool align_corners,
const bool splat) {
// Check inputs are on the same device
at::TensorArg points_3d_t{points_3d, "points_3d", 1},
points_features_t{points_features, "points_features", 2},
volume_densities_t{volume_densities, "volume_densities", 3},
volume_features_t{volume_features, "volume_features", 4},
grid_sizes_t{grid_sizes, "grid_sizes", 5}, mask_t{mask, "mask", 6};
at::CheckedFrom c = "PointsToVolumesForwardCuda";
at::checkAllSameGPU(
c,
{points_3d_t,
points_features_t,
volume_densities_t,
volume_features_t,
grid_sizes_t,
mask_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points_3d.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int blocks = 1024;
const int threads = 32;
const int64_t batch_size = points_3d.size(0);
const int64_t P = points_3d.size(1);
const int64_t n_features = points_features.size(2);
PointsToVolumesForwardKernel<<<blocks, threads, 0, stream>>>(
points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
volume_densities.packed_accessor64<float, 5, RestrictPtrTraits>(),
volume_features.packed_accessor64<float, 5, RestrictPtrTraits>(),
grid_sizes.packed_accessor64<int64_t, 2, RestrictPtrTraits>(),
mask.packed_accessor64<float, 2, RestrictPtrTraits>(),
point_weight,
align_corners,
splat,
batch_size,
P,
n_features);
}
__global__ void PointsToVolumesBackwardKernel(
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_3d,
const PackedTensorAccessor64<float, 3, RestrictPtrTraits> points_features,
const PackedTensorAccessor64<int64_t, 2, RestrictPtrTraits> grid_sizes,
const PackedTensorAccessor64<float, 2, RestrictPtrTraits> mask,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> grad_volume_densities,
PackedTensorAccessor64<float, 5, RestrictPtrTraits> grad_volume_features,
PackedTensorAccessor64<float, 3, RestrictPtrTraits> grad_points_3d,
PackedTensorAccessor64<float, 3, RestrictPtrTraits> grad_points_features,
const float point_weight,
const bool align_corners,
const bool splat,
const int64_t batch_size,
const int64_t P,
const int64_t n_features) {
const int64_t chunks_per_cloud = (1 + (P - 1) / blockDim.x);
const int64_t chunks_to_do = batch_size * chunks_per_cloud;
const int scale_offset = align_corners ? 1 : 0;
const float offset = align_corners ? 0 : 0.5;
// Note that the gradients belonging to each point are only touched by
// a single thread in one of our "chunks", which is in a single block.
// So unlike in the forward pass, there's no need for atomics here.
for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
const int64_t batch_index = chunk / chunks_per_cloud;
const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
int64_t point_idx = start_point + threadIdx.x;
if (point_idx >= P) {
continue;
}
if (mask[batch_index][point_idx] == 0) {
continue;
}
auto point = points_3d[batch_index][point_idx];
auto point_features = points_features[batch_index][point_idx];
auto grad_point = grad_points_3d[batch_index][point_idx];
auto grad_point_features = grad_points_features[batch_index][point_idx];
auto grad_volume_densities_a = grad_volume_densities[batch_index][0];
auto grad_volume_features_a = grad_volume_features[batch_index];
const int64_t grid_size_x = grid_sizes[batch_index][2];
const int64_t grid_size_y = grid_sizes[batch_index][1];
const int64_t grid_size_z = grid_sizes[batch_index][0];
auto increment_location =
[&](int64_t x, int64_t y, int64_t z, float weight) {
if (x >= grid_size_x || y >= grid_size_y || z >= grid_size_z) {
return false;
}
if (x < 0 || y < 0 || z < 0) {
return false;
}
// This is a forward line, for comparison
// volume_densities_aa[z][y][x] += weight * point_weight;
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
// This is a forward line, for comparison
// volume_features_aa[feature_idx][z][y][x] +=
// point_features[feature_idx] * weight * point_weight;
grad_point_features[feature_idx] +=
grad_volume_features_a[feature_idx][z][y][x] * weight *
point_weight;
}
return true;
};
if (!splat) {
long x = std::lround(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset);
long y = std::lround(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset);
long z = std::lround(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset);
increment_location(x, y, z, 1);
} else {
float x = 0, y = 0, z = 0;
float rx = std::modf(
(point[0] + 1) * 0.5 * (grid_size_x - scale_offset) - offset, &x);
float ry = std::modf(
(point[1] + 1) * 0.5 * (grid_size_y - scale_offset) - offset, &y);
float rz = std::modf(
(point[2] + 1) * 0.5 * (grid_size_z - scale_offset) - offset, &z);
auto handle_point = [&](bool up_x, bool up_y, bool up_z) {
float weight_x = (up_x ? rx : 1 - rx);
float weight_y = (up_y ? ry : 1 - ry);
float weight_z = (up_z ? rz : 1 - rz);
float weight = weight_x * weight_y * weight_z;
if (increment_location(x + up_x, y + up_y, z + up_z, weight)) {
// weight * point_weight has been added to
// volume_densities_aa[z+up_z][y+up_y][x+up_x]
// Also for each feature_idx,
// point_features[feature_idx] * weight * point_weight
// has been added to
// volume_features_aa[feature_idx][z+up_z][y+up_y][x+up_x]
double source_gradient =
grad_volume_densities_a[z + up_z][y + up_y][x + up_x];
for (int64_t feature_idx = 0; feature_idx < n_features;
++feature_idx) {
source_gradient += point_features[feature_idx] *
grad_volume_features_a[feature_idx][z + up_z][y + up_y]
[x + up_x];
}
grad_point[0] += source_gradient * (up_x ? 1 : -1) * weight_y *
weight_z * 0.5 * (grid_size_x - scale_offset) * point_weight;
grad_point[1] += source_gradient * (up_y ? 1 : -1) * weight_x *
weight_z * 0.5 * (grid_size_y - scale_offset) * point_weight;
grad_point[2] += source_gradient * (up_z ? 1 : -1) * weight_x *
weight_y * 0.5 * (grid_size_z - scale_offset) * point_weight;
}
};
EightDirections(handle_point);
}
}
}
void PointsToVolumesBackwardCuda(
const at::Tensor& points_3d,
const at::Tensor& points_features,
const at::Tensor& grid_sizes,
const at::Tensor& mask,
const float point_weight,
const bool align_corners,
const bool splat,
const at::Tensor& grad_volume_densities,
const at::Tensor& grad_volume_features,
const at::Tensor& grad_points_3d,
const at::Tensor& grad_points_features) {
// Check inputs are on the same device
at::TensorArg points_3d_t{points_3d, "points_3d", 1},
points_features_t{points_features, "points_features", 2},
grid_sizes_t{grid_sizes, "grid_sizes", 3}, mask_t{mask, "mask", 4},
grad_volume_densities_t{
grad_volume_densities, "grad_volume_densities", 8},
grad_volume_features_t{grad_volume_features, "grad_volume_features", 9},
grad_points_3d_t{grad_points_3d, "grad_points_3d", 10},
grad_points_features_t{grad_points_features, "grad_points_features", 11};
at::CheckedFrom c = "PointsToVolumesBackwardCuda";
at::checkAllSameGPU(
c,
{points_3d_t,
points_features_t,
grid_sizes_t,
mask_t,
grad_volume_densities_t,
grad_volume_features_t,
grad_points_3d_t,
grad_points_features_t});
// Set the device for the kernel launch based on the device of the input
at::cuda::CUDAGuard device_guard(points_3d.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
const int blocks = 1024;
const int threads = 32;
const int64_t batch_size = points_3d.size(0);
const int64_t P = points_3d.size(1);
const int64_t n_features = points_features.size(2);
PointsToVolumesBackwardKernel<<<blocks, threads, 0, stream>>>(
points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
grid_sizes.packed_accessor64<int64_t, 2, RestrictPtrTraits>(),
mask.packed_accessor64<float, 2, RestrictPtrTraits>(),
grad_volume_densities.packed_accessor64<float, 5, RestrictPtrTraits>(),
grad_volume_features.packed_accessor64<float, 5, RestrictPtrTraits>(),
grad_points_3d.packed_accessor64<float, 3, RestrictPtrTraits>(),
grad_points_features.packed_accessor64<float, 3, RestrictPtrTraits>(),
point_weight,
align_corners,
splat,
batch_size,
P,
n_features);
}
|
3d5e101c0bf989b8acd830bf893492cbe6004e6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
#include <random>
#include <hiprand/hiprand.h>
#include <math.h>
#include <hiprand/hiprand_kernel.h>
__global__ void setup_kernel(hiprandState_t* state);
__global__ void monte_carlo_pi_kernel(hiprandState_t* state, int* count, int m);
__global__ void setup_kernel(hiprandState_t* state)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
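	// seed 123 for every thread, one independent subsequence per thread index, zero offset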
hiprand_init(123, index, 0, &state[index]);
}
__global__ void monte_carlo_pi_kernel(hiprandState_t* state, int* count, int m)
{
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ int memory[256];
memory[threadIdx.x] = 0;
__syncthreads();
unsigned int temp = 0;
while (temp < m) {
float x = hiprand_uniform(&state[index]);
float y = hiprand_uniform(&state[index]);
float r = x * x + y * y;
if (r <= 1) {
memory[threadIdx.x]++;
}
temp++;
}
__syncthreads();
// reduction
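	// tree reduction in shared memory: each step halves the active threads,
	// summing pairs until memory[0] holds this block's total hit count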
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
memory[threadIdx.x] += memory[threadIdx.x + i];
}
i /= 2;
__syncthreads();
}
// update to our global variable count
if (threadIdx.x == 0) {
atomicAdd(count, memory[0]);
}
}
int main()
{
unsigned int n = 256* 256;
unsigned int m = 1000;
int* h_count;
int* d_count;
hiprandState_t* d_state;
float pi;
// allocate memory
h_count = (int*)malloc(n * sizeof(int));
hipMalloc((void**)&d_count, n * sizeof(int));
hipMalloc((void**)&d_state, n * sizeof(hiprandState_t));
hipMemset(d_count, 0, sizeof(int));
// set up timing stuff
float gpu_elapsed_time;
hipEvent_t gpu_start, gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start, 0);
// set kernel
dim3 gridSize = 256;
dim3 blockSize = 256;
setup_kernel << < gridSize, blockSize >> > (d_state);
	// monte carlo kernel
monte_carlo_pi_kernel << <gridSize, blockSize >> > (d_state, d_count, m);
// copy results back to the host
hipMemcpy(h_count, d_count, sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
// display results and timings for gpu
pi = *h_count * 4.0 / (n * m);
	std::cout << "Approximate pi calculated on GPU is: " << pi << " and calculation took " << gpu_elapsed_time << " ms" << std::endl;
// delete memory
free(h_count);
hipFree(d_count);
hipFree(d_state);
}
| 3d5e101c0bf989b8acd830bf893492cbe6004e6b.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
#include <random>
#include <curand.h>
#include <math.h>
#include <curand_kernel.h>
__global__ void setup_kernel(curandState* state);
__global__ void monte_carlo_pi_kernel(curandState* state, int* count, int m);
__global__ void setup_kernel(curandState* state)
{
int index = threadIdx.x + blockDim.x * blockIdx.x;
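	// seed 123 for every thread, one independent subsequence per thread index, zero offset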
curand_init(123, index, 0, &state[index]);
}
__global__ void monte_carlo_pi_kernel(curandState* state, int* count, int m)
{
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
__shared__ int memory[256];
memory[threadIdx.x] = 0;
__syncthreads();
unsigned int temp = 0;
while (temp < m) {
float x = curand_uniform(&state[index]);
float y = curand_uniform(&state[index]);
float r = x * x + y * y;
if (r <= 1) {
memory[threadIdx.x]++;
}
temp++;
}
__syncthreads();
// reduction
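	// tree reduction in shared memory: each step halves the active threads,
	// summing pairs until memory[0] holds this block's total hit count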
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
memory[threadIdx.x] += memory[threadIdx.x + i];
}
i /= 2;
__syncthreads();
}
// update to our global variable count
if (threadIdx.x == 0) {
atomicAdd(count, memory[0]);
}
}
int main()
{
unsigned int n = 256* 256;
unsigned int m = 1000;
int* h_count;
int* d_count;
curandState* d_state;
float pi;
// allocate memory
h_count = (int*)malloc(n * sizeof(int));
cudaMalloc((void**)&d_count, n * sizeof(int));
cudaMalloc((void**)&d_state, n * sizeof(curandState));
cudaMemset(d_count, 0, sizeof(int));
// set up timing stuff
float gpu_elapsed_time;
cudaEvent_t gpu_start, gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start, 0);
// set kernel
dim3 gridSize = 256;
dim3 blockSize = 256;
setup_kernel << < gridSize, blockSize >> > (d_state);
	// monte carlo kernel
monte_carlo_pi_kernel << <gridSize, blockSize >> > (d_state, d_count, m);
// copy results back to the host
cudaMemcpy(h_count, d_count, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
// display results and timings for gpu
pi = *h_count * 4.0 / (n * m);
	std::cout << "Approximate pi calculated on GPU is: " << pi << " and calculation took " << gpu_elapsed_time << " ms" << std::endl;
// delete memory
free(h_count);
cudaFree(d_count);
cudaFree(d_state);
}
|
7caa1599e4d9e17d5eff6a057f42f63695401b6e.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
inline void checkCudaErrors(hipError_t err) //cuda error handle function
{
if (hipSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", hipGetErrorString(err));
return;
}
}
__device__ int wb_checkColorSpace(double x) {
if (x > 255)
return 255;
if (x < 0)
return 0;
return x;
}
__global__ void white_balance(int *In, int *Out, int Width, int Height, double color_sum, double RGB_sum)
{
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
	// gain for the RGB component that needs to be adjusted
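	// gray-world gain: assuming color_sum is this channel's pixel sum and
	// RGB_sum the sum over all three channels, K rescales the channel mean
	// toward the global gray mean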
double K = (RGB_sum) / (3 * color_sum);
	if (x < Width && x >= 0 && y < Height && y >= 0)
{
Out[y* Width + x] = wb_checkColorSpace((*(In + Width * y + x))*K);
}
}
extern "C" void white_balance_host(int *pixel, int Width, int Height, double color_sum, double RGB_sum)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(hipMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(hipMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(hipMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, hipMemcpyHostToDevice));
white_balance << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width , Height, color_sum, RGB_sum);
checkCudaErrors(hipMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, hipMemcpyDeviceToHost));
hipFree(pixelIn);
hipFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_ | 7caa1599e4d9e17d5eff6a057f42f63695401b6e.cu | #ifndef __MEDIANFILTER_CU_
#define __MEDIANFILTER_CU_
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <time.h>
#include <iostream>
#define datasize 100
inline void checkCudaErrors(cudaError err) //cuda error handle function
{
if (cudaSuccess != err)
{
fprintf(stderr, "CUDA Runtime API error:%s.\n", cudaGetErrorString(err));
return;
}
}
__device__ int wb_checkColorSpace(double x) {
if (x > 255)
return 255;
if (x < 0)
return 0;
return x;
}
__global__ void white_balance(int *In, int *Out, int Width, int Height, double color_sum, double RGB_sum)
{
int y = blockDim.y * blockIdx.y + threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
	// gain for the RGB component that needs to be adjusted
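	// gray-world gain: assuming color_sum is this channel's pixel sum and
	// RGB_sum the sum over all three channels, K rescales the channel mean
	// toward the global gray mean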
double K = (RGB_sum) / (3 * color_sum);
	if (x < Width && x >= 0 && y < Height && y >= 0)
{
Out[y* Width + x] = wb_checkColorSpace((*(In + Width * y + x))*K);
}
}
extern "C" void white_balance_host(int *pixel, int Width, int Height, double color_sum, double RGB_sum)
{
int *pixelIn, *pixelOut;
dim3 dimBlock(32, 32);
dim3 dimGrid((Width + dimBlock.x - 1) / dimBlock.x, (Height + dimBlock.y -
1) / dimBlock.y);
checkCudaErrors(cudaMalloc((void**)&pixelIn, sizeof(int) * Width * Height));
checkCudaErrors(cudaMalloc((void**)&pixelOut, sizeof(int) * Width * Height));
checkCudaErrors(cudaMemcpy(pixelIn, pixel, sizeof(int) * Width * Height, cudaMemcpyHostToDevice));
white_balance << <dimGrid, dimBlock >> > (pixelIn, pixelOut, Width , Height, color_sum, RGB_sum);
checkCudaErrors(cudaMemcpy(pixel, pixelOut, sizeof(int) * Width * Height, cudaMemcpyDeviceToHost));
cudaFree(pixelIn);
cudaFree(pixelOut);
}
#endif // ! __MEDIANFILTER_KERNEL_CU_ |
15dafb806f2798fbba8539ee01b3506869548349.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/topk.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/common/types.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <hip/hip_fp16.h>
#include <float.h>
#include <memory>
#define MAX_DIM 8 //tensor_shape.kMaxNumDimensions
#define killWARDependency 1
#define RADIX_SIZE 16
#define RADIX_BITS 4
const int RADIX_MASK = RADIX_SIZE - 1;
#define SORT_RADIX_SIZE 4
#define SORT_RADIX_BITS 2
struct TensorInfo {
int shape[MAX_DIM];
int strides[MAX_DIM];
const void *data;
int dims;
TensorInfo(ppl::nn::TensorShape* tensor_shape, const void *data_ptr)
{
for (unsigned int i = 0; i < tensor_shape->GetDimCount() && i < MAX_DIM; i++) {
shape[i] = tensor_shape->GetDim(i);
}
for (unsigned int i = tensor_shape->GetDimCount(); i < MAX_DIM; i++) {
shape[i] = 1;
}
dims = tensor_shape->GetDimCount();
data = data_ptr;
}
};
__device__ __inline__ int convert_f2u(float v)
{
unsigned int x = __float_as_uint(v);
unsigned int mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
unsigned int res = x ^ mask;
return res;
}
__device__ __inline__ float convert_u2f(int v)
{
unsigned int mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff;
unsigned int x = v ^ mask;
return __uint_as_float(x);
}
template <typename T>
__device__ unsigned int convert2u(T value);
template <>
__device__ unsigned int convert2u(__half value)
{
// must use short, for reverse convert
unsigned short int x = __half_as_ushort(value);
unsigned short int mask = (x & 0x8000) ? 0xffff : 0x8000;
unsigned int res = x ^ mask;
return res;
}
template <>
__device__ unsigned int convert2u(float value)
{
unsigned int x = __float_as_uint(value);
unsigned int mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
unsigned int res = x ^ mask;
return res;
}
template <typename T>
__device__ T convertu2(unsigned int value);
template <>
__device__ __half convertu2(unsigned int value)
{
unsigned short int sht = (unsigned short int)value;
unsigned short int mask = (sht & 0x8000) ? 0x8000 : 0xffff;
unsigned short int x = sht ^ mask;
return __ushort_as_half(x);
}
template <>
__device__ float convertu2(unsigned int value)
{
unsigned int mask = (value & 0x80000000) ? 0x80000000 : 0xffffffff;
unsigned int x = value ^ mask;
return __uint_as_float(x);
}
// shape:[n,c,h,w]
__device__ int get_offset(int linearIdx, int Dims, TensorInfo info)
{
int offset = 0;
for (int i = Dims - 1; i > 0; --i) {
int curDimIdx = linearIdx % info.shape[i];
int curOffset = curDimIdx * info.strides[i];
linearIdx /= info.shape[i];
offset += curOffset;
}
return offset + linearIdx * info.strides[0];
}
template <typename T>
__device__ unsigned int find_desired(
unsigned int *smem,
int lane,
const unsigned int mask,
const unsigned int desired,
const int inputSliceStride,
const T *inputSlice,
const int sliceSize)
{
if (threadIdx.x == 0) {
smem[0] = 0;
}
__syncthreads();
for (int off = threadIdx.x; off - lane < sliceSize; off += blockDim.x) {
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int intValue = convert2u<T>(value);
bool flag = inRange && ((intValue & mask) == desired);
if (flag) {
smem[0] = 1;
smem[1] = intValue;
}
__syncthreads();
unsigned int isFound = smem[0];
intValue = smem[1];
if (isFound) {
return intValue;
}
}
return 0;
}
template <typename T, bool dir>
__device__ T find_kth_value(
int *smem,
int K,
const int sliceSize,
const T *inputSlice,
const int inputSliceStride)
{
int count[RADIX_SIZE];
// use fixed higher bits to filter data
unsigned int mask = 0; //fixed high bit
unsigned int desired = 0; // current radix bits to fix
int *radix_hist = smem;
unsigned int kthValue;
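    // radix select: scan the keys 4 bits at a time from the MSB; each pass
    // histograms the current digit among values whose already-fixed high bits
    // match (mask/desired), then walks the histogram to locate the bucket
    // holding the K-th element and fixes those bits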
for (int pos = 8 * sizeof(int) - RADIX_BITS; pos >= 0; pos -= RADIX_BITS)
{
//reinit radix_hist to 0 every loop
for (int i = 0; i < RADIX_SIZE; i++) {
count[i] = 0;
}
if (threadIdx.x < RADIX_SIZE) {
radix_hist[threadIdx.x] = 0;
}
__syncthreads();
const int lane = threadIdx.x & 31;
for (int off = threadIdx.x; off - lane < sliceSize; off += blockDim.x) {
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
unsigned int intValue = convert2u<T>(value);
// filter with desired
bool inRadix = inRange && ((intValue & mask) == desired);
int valueRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(valueRadix)
: "r"(intValue), "r"(pos), "r"(RADIX_BITS));
#pragma unroll
for (int i = 0; i < RADIX_SIZE; i++) {
bool flag = inRadix && (valueRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
count[i] += __popc(ballot);
}
}
if ((threadIdx.x & 31) == 0) {
for (int i = 0; i < RADIX_SIZE; i++) {
atomicAdd(radix_hist + i, count[i]);
}
}
__syncthreads();
        // after the barrier every thread in the block sees the same histogram counts
for (int i = 0; i < RADIX_SIZE; i++) {
count[i] = radix_hist[i];
}
if (killWARDependency) {
__syncthreads();
}
// search K count
if (dir == 1) { // topK largest
for (int i = RADIX_SIZE - 1; i >= 0; --i) {
if (K == count[i] && K == 1) {
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
kthValue = find_desired<T>((unsigned int *)smem, threadIdx.x,
mask, desired,
inputSliceStride, inputSlice, sliceSize);
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
                } else if (K <= count[i]) { // narrow radix until K == count[i] == 1
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "=r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "=r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
break;
}
K -= count[i];
}
}
else {
for (int i = 0; i < RADIX_SIZE; ++i) {
if (K == count[i] && K == 1) {
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
kthValue = find_desired<T>((unsigned int *)smem, threadIdx.x,
mask, desired,
inputSliceStride, inputSlice, sliceSize);
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
                } else if (K <= count[i]) { // narrow radix until K == count[i] == 1
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
break;
}
K -= count[i];
}
}
}
kthValue = desired;
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
}
template <typename T>
__device__ T scanInWarp(T value, int lane);
__device__ void prefix_scan(
int *smem,
const unsigned int active,
const int activeWarps,
const bool flag,
int &index,
int &blkTotal)
{
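    // block-wide ranking of threads whose flag is set: ballot within each
    // warp, scan the per-warp totals in warp 0, then index = this thread's
    // exclusive rank among flagged threads and blkTotal = the flagged count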
if (threadIdx.x < blockDim.x / 32) {
smem[threadIdx.x] = 0;
}
__syncthreads();
unsigned int ballot = __ballot_sync(active, flag);
int lane = threadIdx.x & 31;
unsigned int laneMask = ~(0xffffffff << lane);
laneMask = active & laneMask;
int warpId = threadIdx.x >> 5;
unsigned int leader = __ffs(active) - 1;
int total = __popc(ballot);
int prefix = __popc(laneMask & ballot);
if (lane == leader) {
smem[warpId] = total;
}
__syncthreads();
int warpOff = 0;
if (threadIdx.x < 32) {
int value = smem[threadIdx.x];
int warpPrefix = scanInWarp<int>(value, lane);
smem[threadIdx.x] = warpPrefix;
}
__syncthreads();
if (warpId >= 1)
warpOff = smem[warpId - 1];
blkTotal = smem[activeWarps - 1];
if (flag) {
index = warpOff + prefix;
}
// write-after-read dependency
if (killWARDependency) {
__syncthreads();
}
}
// dir = 1: decrease order; 0: increase order
template <typename T, bool dir, int blockSize, bool sorted>
__global__ void selectTopK(
TensorInfo input,
TensorInfo topK,
TensorInfo indices,
const int K,
const int collapsedDims,
const int sliceSize,
const int inputSliceStride,
const int topKSliceStride,
const int indicesSliceStride)
{
int inputSliceStart = get_offset(blockIdx.x, collapsedDims, input);
// if sorted, transform the output to coalesced slices
int topKSliceStart = sorted ? blockIdx.x * K :
get_offset(blockIdx.x, collapsedDims, topK);
int indicesSliceStart = sorted ? blockIdx.x * K :
get_offset(blockIdx.x, collapsedDims, indices);
    // walk the histogram bins (top-down for largest, bottom-up for smallest) until K is reached
__shared__ int radix_hist[2 + blockSize / 32];
int *smem = radix_hist;
T *inputSlice = (T *)input.data + inputSliceStart;
T *topKSlice = (T *)topK.data + topKSliceStart;
int *indicesSlice = (int *)indices.data + indicesSliceStart;
T fp_kthValue = find_kth_value<T, dir>(smem, K, sliceSize, inputSlice, inputSliceStride);
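    // two passes over the slice: first write every element strictly better
    // than the k-th value, then fill the remaining slots with elements equal
    // to it (ties), stopping once K outputs have been written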
int writeStart = 0;
int activeWarps = 0;
int tmpSize = sliceSize;
for (int off = threadIdx.x; off < Align(sliceSize, blockSize); off += blockSize) {
int curSize = tmpSize >= blockSize ? blockSize : tmpSize;
activeWarps = (curSize + 31) >> 5;
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
bool flag;
if (dir) {
flag = inRange && Math<T, T, T>::gt(value, fp_kthValue);
} else {
flag = inRange && Math<T, T, T>::lt(value, fp_kthValue);
}
int index, blkTotal;
prefix_scan(smem, active, activeWarps, flag, index, blkTotal);
if (flag) {
int topKOffset = sorted ? (writeStart + index) :
(writeStart + index) * topKSliceStride;
int indexOffset = sorted ? (writeStart + index) :
(writeStart + index) * indicesSliceStride;
topKSlice[topKOffset] = value;
indicesSlice[indexOffset] = off;
}
writeStart += blkTotal;
//if tmpSize < 0, the loop breaks
tmpSize -= blockSize;
}
int topKRemaining = K - writeStart;
tmpSize = sliceSize;
for (int off = threadIdx.x; off < Align(sliceSize, blockSize); off += blockSize) {
int curSize = tmpSize >= blockSize ? blockSize : tmpSize;
activeWarps = (curSize + 31) >> 5;
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
bool flag;
flag = inRange && Math<T, T, T>::eq(value, fp_kthValue);
int index, blkTotal;
prefix_scan(smem, active, activeWarps, flag, index, blkTotal);
if (flag) {
int outputIndex = writeStart + index;
if (outputIndex < K) {
int topKOffset = sorted ? outputIndex :
outputIndex * topKSliceStride;
int indexOffset = sorted ? outputIndex :
outputIndex * indicesSliceStride;
topKSlice[topKOffset] = value;
indicesSlice[indexOffset] = off;
}
}
if (topKRemaining < blkTotal) {
break;
}
topKRemaining -= blkTotal;
writeStart += blkTotal;
tmpSize -= blockSize;
}
}
template <typename KEY, typename VALUE, bool largest>
__device__ inline void swap(
const bool isOdd,
bool &valid1,
KEY &value1,
VALUE &index1,
bool &valid2,
KEY &value2,
VALUE &index2)
{
bool isLarge = (largest ^ Math<KEY, KEY, KEY>::lt(value1, value2) && valid1) ||
!valid2;
if (isLarge == isOdd) {
KEY tmpValue = value1;
VALUE tmpIndex = index1;
bool tmpValid = valid1;
value1 = value2;
index1 = index2;
valid1 = valid2;
value2 = tmpValue;
index2 = tmpIndex;
valid2 = tmpValid;
}
}
template <typename KEY, typename VALUE, bool dir, int power2SortSize>
__global__ void bitonicSort(
KEY *Key,
VALUE *Value,
const int sliceSize)
{
__shared__ KEY smemTopk[power2SortSize];
__shared__ VALUE smemIndices[power2SortSize];
__shared__ bool smemValid[power2SortSize];
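    // bitonic sort of up to power2SortSize elements held in shared memory;
    // each thread owns two slots, and out-of-range slots are marked invalid
    // so they always order after valid elements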
KEY *topKSlice = Key + blockIdx.x * sliceSize;
VALUE *indicesSlice = Value + blockIdx.x * sliceSize;
int tid = threadIdx.x;
int off1 = threadIdx.x;
int off2 = threadIdx.x + power2SortSize / 2;
bool inRange1 = off1 < sliceSize;
bool inRange2 = off2 < sliceSize;
KEY value1 = inRange1 ? topKSlice[off1] : (KEY)0;
VALUE index1 = inRange1 ? indicesSlice[off1] : (VALUE)0;
KEY value2 = inRange2 ? topKSlice[off2] : (KEY)0;
VALUE index2 = inRange2 ? indicesSlice[off2] : (VALUE)0;
smemTopk[off1] = value1;
smemIndices[off1] = index1;
smemValid[off1] = inRange1;
smemTopk[off2] = value2;
smemIndices[off2] = index2;
smemValid[off2] = inRange2;
__syncthreads();
#pragma unroll
for (int size = 2; size < power2SortSize; size *= 2) {
int oddSeg = (tid & (size / 2)) != 0;
#pragma unroll
// sort each size
for (int sub_size = size; sub_size > 1; sub_size /= 2) {
int stride = sub_size / 2;
int off = (tid / stride) * sub_size + (tid & (stride - 1));
bool inRange1 = smemValid[off];
KEY value1 = smemTopk[off];
VALUE index1 = smemIndices[off];
bool inRange2 = smemValid[off + stride];
KEY value2 = smemTopk[off + stride];
VALUE index2 = smemIndices[off + stride];
swap<KEY, VALUE, dir>(oddSeg,
inRange1, value1, index1,
inRange2, value2, index2);
smemTopk[off] = value1;
smemIndices[off] = index1;
smemValid[off] = inRange1;
smemTopk[off + stride] = value2;
smemIndices[off + stride] = index2;
smemValid[off + stride] = inRange2;
__syncthreads();
}
}
// sort the whole power2SortSize
for (int sub_size = power2SortSize; sub_size > 1; sub_size /= 2) {
int stride = sub_size / 2;
int off = (tid / stride) * sub_size + (tid & (stride - 1));
bool inRange1 = smemValid[off];
KEY value1 = smemTopk[off];
VALUE index1 = smemIndices[off];
bool inRange2 = smemValid[off + stride];
KEY value2 = smemTopk[off + stride];
VALUE index2 = smemIndices[off + stride];
swap<KEY, VALUE, dir>(false,
inRange1, value1, index1,
inRange2, value2, index2);
smemTopk[off] = value1;
smemIndices[off] = index1;
smemValid[off] = inRange1;
smemTopk[off + stride] = value2;
smemIndices[off + stride] = index2;
smemValid[off + stride] = inRange2;
__syncthreads();
}
inRange1 = smemValid[off1];
value1 = smemTopk[off1];
index1 = smemIndices[off1];
inRange2 = smemValid[off2];
value2 = smemTopk[off2];
index2 = smemIndices[off2];
if (inRange1) {
topKSlice[off1] = value1;
indicesSlice[off1] = index1;
}
if (inRange2) {
topKSlice[off2] = value2;
indicesSlice[off2] = index2;
}
}
#define BLK_SORT_SIZE 1024
#define SIZE_PER_SCAN 1024
template <typename KEY, typename VALUE, bool dir>
void radix_sort(
hipStream_t stream,
KEY *key,
VALUE *value,
int size,
int sliceNum,
unsigned int *convertKey,
unsigned int *prefixData,
unsigned int *tmpPrefix,
unsigned int *keyBuf,
VALUE *valueBuf);
//tempBuf:
//*dims
//convertKey: size*sizeof(unsigned int). zero if inplace
//keyBuf: convertKey size
//valueBuf: value size
//prefixData: SORT_RADIX_SIZE * blocks * sizeof(int)
//tmpPrefix: SORT_RADIX_SIZE * block_x* sizeof(uint)
template <typename KEY, typename VALUE, bool dir>
void sortInplace(
hipStream_t stream,
KEY *Key,
VALUE *Value,
const int size,
const int slices_num,
void *temp_buffer,
int64_t temp_buffer_bytes)
{
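    // K <= 512: a single bitonic-sort block per slice, entirely in shared
    // memory; larger K: multi-block radix sort using the caller's temp buffer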
const int blocks = slices_num;
if (size == 1) {
} else if (size <= 64) {
hipLaunchKernelGGL(( bitonicSort<KEY, VALUE, dir, 64>), dim3(blocks), dim3(32), 0, stream, Key, Value, size);
} else if (size <= 128) {
hipLaunchKernelGGL(( bitonicSort<KEY, VALUE, dir, 128>), dim3(blocks), dim3(64), 0, stream, Key, Value, size);
} else if (size <= 512) {
hipLaunchKernelGGL(( bitonicSort<KEY, VALUE, dir, 512>), dim3(blocks), dim3(256), 0, stream, Key, Value, size);
}
else {
int new_blocks = (size + BLK_SORT_SIZE - 1) / BLK_SORT_SIZE;
unsigned int *convert_key = (unsigned int *)temp_buffer;
unsigned int *topk_buf = (unsigned int *)(convert_key + slices_num * size);
VALUE *indices_buf = (VALUE *)(topk_buf + slices_num * size);
unsigned int *prefix_data = (unsigned int *)(indices_buf + slices_num * size);
unsigned int *tmp_prefix = (unsigned int *)(prefix_data + slices_num * SORT_RADIX_SIZE * new_blocks);
radix_sort<KEY, VALUE, dir>(stream, Key, Value, size, slices_num,
convert_key, prefix_data, tmp_prefix,
topk_buf, indices_buf);
}
}
int collapse_dim(TensorInfo *param, int dim)
{
int dimSize = param->shape[dim];
param->shape[dim] = 1;
int cur = -1;
int p = 0;
for (; p < dim; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
p++;
break;
}
for (; p < dim; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
}
// after dim
int markCur = cur;
for (; p < param->dims; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
}
param->strides[cur] = 1;
for (int i = cur - 1; i > markCur; --i) {
param->strides[i] = param->shape[i + 1] * param->strides[i + 1];
}
int sliceStride = (dim == -1 || dim == param->dims - 1) ?
1 :
param->shape[markCur + 1] * param->strides[markCur + 1];
param->strides[markCur] = dimSize * sliceStride;
for (int i = markCur - 1; i >= 0; --i) {
param->strides[i] = param->shape[i + 1] * param->strides[i + 1];
}
param->dims = cur + 1;
return sliceStride;
}
// bitWidth: 2 or 4 SORT_RADIX_SIZE
// bitPos: 0-30
template <bool dir, typename VALUE>
__global__ void radixSort(
unsigned int *Key,
VALUE *Value,
int size,
int bitPos,
unsigned int *prefixData)
{
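    // one counting-sort pass on the SORT_RADIX_BITS-wide digit at bitPos:
    // stable in-block reorder through shared memory, plus per-block digit
    // counts written to prefixData for the later cross-block scatter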
__shared__ unsigned int s_cnt[SORT_RADIX_SIZE * BLK_SORT_SIZE / 32];
__shared__ unsigned int s_key[BLK_SORT_SIZE];
__shared__ VALUE s_value[BLK_SORT_SIZE];
Key += blockIdx.y * size;
Value += blockIdx.y * size;
s_key[threadIdx.x] = 0;
s_value[threadIdx.x] = 0;
if (threadIdx.x < SORT_RADIX_SIZE * BLK_SORT_SIZE / 32)
s_cnt[threadIdx.x] = 0;
__syncthreads();
int lane = threadIdx.x & 31;
int warpId = threadIdx.x >> 5;
int activeWarps = (blockIdx.x == gridDim.x - 1) ?
DivUp((size & (BLK_SORT_SIZE - 1)), 32) :
BLK_SORT_SIZE / 32;
if (activeWarps == 0)
activeWarps = 32;
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
bool inRange = tid < size;
unsigned int active = __ballot_sync(0xffffffff, inRange);
const unsigned int lane_mask_lt = ~(0xffffffff << (lane));
if (tid - lane < size) {
VALUE value = inRange ? Value[tid] : (VALUE)0;
unsigned int intKey = inRange ? Key[tid] : 0;
unsigned int keyRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(keyRadix)
: "r"(intKey), "r"(bitPos), "r"(SORT_RADIX_BITS));
int radixPrefix = 0;
for (int i = dir * (SORT_RADIX_SIZE - 1);
i != (1 - dir) * SORT_RADIX_SIZE + dir * (-1);
i += dir * (-1) + 1 - dir) {
bool flag = inRange && (keyRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
int warpCnt = __popc(ballot);
int lanePrefix = __popc(ballot & lane_mask_lt);
int warpPrefix = 0;
if (inRange && lane == 0) {
s_cnt[i * BLK_SORT_SIZE / 32 + warpId] = warpCnt;
}
__syncthreads();
//prefix sum in warp
if (threadIdx.x < 32) {
warpCnt = s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x];
unsigned int prefix = warpCnt;
for (int j = 1; j < 32; j <<= 1) {
warpCnt = __shfl_up_sync(0xffffffff, prefix, j, 32);
if (threadIdx.x >= j) {
prefix += warpCnt;
}
}
s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x] = prefix;
}
__syncthreads();
if (inRange && warpId > 0) {
warpPrefix = s_cnt[i * BLK_SORT_SIZE / 32 + warpId - 1];
}
if (flag) {
s_key[radixPrefix + warpPrefix + lanePrefix] = intKey;
s_value[radixPrefix + warpPrefix + lanePrefix] = value;
}
radixPrefix += s_cnt[i * BLK_SORT_SIZE / 32 + activeWarps - 1];
__syncthreads(); //WAR
}
if (threadIdx.x == 0) {
for (int i = 0; i < SORT_RADIX_SIZE; i++) {
prefixData[blockIdx.y*gridDim.x*SORT_RADIX_SIZE + i*gridDim.x+blockIdx.x] =
s_cnt[i * BLK_SORT_SIZE / 32 + activeWarps - 1];
}
}
intKey = s_key[threadIdx.x];
value = s_value[threadIdx.x];
if (inRange) {
Key[tid] = intKey;
Value[tid] = value;
}
}
}
template <typename T>
__device__ T scanInWarp(T value, int lane)
{
T lanePrefix = value;
for (int i = 1; i < 32; i <<= 1) {
value = __shfl_up_sync(0xffffffff, lanePrefix, i, 32);
if (lane >= i) {
lanePrefix += value;
}
}
return lanePrefix;
}
//#define SIZE_PER_SCAN 1024
template <typename T>
__global__ void prefixSum(
T *prefixData,
const int size,
const int blkScanSize,
T *blkTotal)
{
__shared__ T warp_cnt[SIZE_PER_SCAN >> 5];
__shared__ T lane_prefix[SIZE_PER_SCAN];
int lane = threadIdx.x & 31;
int warpId = threadIdx.x >> 5;
int64_t off = blockIdx.x * blkScanSize + threadIdx.x;
prefixData += (blockIdx.z * SORT_RADIX_SIZE + blockIdx.y) * size;
blkTotal += blockIdx.z * SORT_RADIX_SIZE * gridDim.x +
blockIdx.y * gridDim.x + blockIdx.x;
T subScanPrefix = (T)0;
for (int iterOff = 0; iterOff < blkScanSize; iterOff += SIZE_PER_SCAN) {
bool inRange = (off + iterOff < size);
T data = inRange ? prefixData[off + iterOff] : 0;
T lanePrefix;
lanePrefix = data;
for (int i = 1; i < 32; i <<= 1) {
data = __shfl_up_sync(0xffffffff, lanePrefix, i, 32);
if (lane >= i) {
lanePrefix += data;
}
}
int stsOff = threadIdx.x + 1;
if (lane == 31) {
warp_cnt[warpId] = lanePrefix;
stsOff -= 32;
lanePrefix = 0;
}
__syncthreads();
lane_prefix[stsOff] = lanePrefix;
T warpPrefix = 0;
if (threadIdx.x < (SIZE_PER_SCAN >> 5)) {
data = warp_cnt[threadIdx.x];
T sum = data;
for (int i = 1; i < 32; i <<= 1) {
data = __shfl_up_sync(0xffffffff, sum, i, 32);
if (threadIdx.x >= i) {
sum += data;
}
}
warp_cnt[threadIdx.x] = sum;
}
__syncthreads();
lanePrefix = lane_prefix[threadIdx.x];
if (warpId > 0) {
warpPrefix = warp_cnt[warpId - 1];
}
T prefix = subScanPrefix + warpPrefix + lanePrefix;
if (inRange) {
prefixData[off + iterOff] = prefix;
}
subScanPrefix += warp_cnt[(SIZE_PER_SCAN >> 5) - 1];
}
//blk scan total
if (threadIdx.x == 0) {
blkTotal[0] = subScanPrefix;
}
}
template <typename T>
__global__ void finalPrefixSum(
T *prefixData,
const int size,
const int blkScanSize,
T *blkTotal)
{
int batchId = blockIdx.z * SORT_RADIX_SIZE + blockIdx.y;
int64_t off = blockIdx.x * blkScanSize + threadIdx.x;
prefixData += batchId * size;
T blkPrefix = 0;
for (int i = 0; i < blockIdx.x; i++) {
blkPrefix += blkTotal[batchId * gridDim.x + i];
}
for (int iterOff = 0; iterOff < blkScanSize; iterOff += SIZE_PER_SCAN) {
bool inRange = (off + iterOff < size);
T data = inRange ? prefixData[off + iterOff] : 0;
data += blkPrefix;
if (inRange) {
prefixData[off + iterOff] = data;
}
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) {
blkTotal[batchId * gridDim.x + blockIdx.x] =
blkPrefix + blkTotal[batchId * gridDim.x + blockIdx.x];
}
}
template <bool dir, typename VALUE>
__global__ void interBlkSort(
unsigned int *outKey,
VALUE *outValue,
unsigned int *Key,
VALUE *Value,
unsigned int size,
unsigned int *prefixData,
unsigned int *radixTotal,
unsigned int totalPos,
unsigned int bitPos)
{
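    // scatter pass: final position = base offset of this digit (accumulated
    // from radixTotal) + this block's exclusive prefix for the digit
    // (prefixData) + the element's rank within the block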
prefixData += blockIdx.y * SORT_RADIX_SIZE * gridDim.x;
radixTotal += blockIdx.y * SORT_RADIX_SIZE * totalPos;
Key += blockIdx.y * size;
Value += blockIdx.y * size;
outKey += blockIdx.y * size;
outValue += blockIdx.y * size;
__shared__ unsigned int s_cnt[SORT_RADIX_SIZE * BLK_SORT_SIZE / 32];
int lane = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x < SORT_RADIX_SIZE * BLK_SORT_SIZE / 32)
s_cnt[threadIdx.x] = 0;
__syncthreads();
int activeWarps = (blockIdx.x == gridDim.x - 1) ?
                          DivUp((size & (BLK_SORT_SIZE - 1)), 32) :
BLK_SORT_SIZE / 32;
if (activeWarps == 0)
activeWarps = 32;
bool inRange = tid < size;
unsigned int active = __ballot_sync(0xffffffff, inRange);
const unsigned int lane_mask_lt = ~(0xffffffff << (lane));
if (tid - lane < size) {
unsigned int intKey = inRange ? Key[tid] : 0;
VALUE value = inRange ? Value[tid] : (VALUE)0;
unsigned int radixPrefix = 0;
unsigned int newOff;
unsigned int keyRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(keyRadix)
: "r"(intKey), "r"(bitPos), "r"(SORT_RADIX_BITS));
for (int i = dir * (SORT_RADIX_SIZE - 1);
i != (1 - dir) * SORT_RADIX_SIZE + dir * (-1);
i += dir * (-1) + 1 - dir)
{
unsigned int blkPrefix = prefixData[i * gridDim.x + blockIdx.x];
bool flag = inRange && (keyRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
unsigned int lanePrefix = __popc(lane_mask_lt & ballot);
unsigned int warpCnt = __popc(ballot);
unsigned int warpPrefix = 0;
if (inRange && lane == 0) {
s_cnt[i * BLK_SORT_SIZE / 32 + warpId] = warpCnt;
}
__syncthreads();
if (threadIdx.x < 32) {
warpCnt = s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x];
unsigned int prefix = warpCnt;
for (int j = 1; j < 32; j <<= 1) {
warpCnt = __shfl_up_sync(0xffffffff, prefix, j, 32);
if (threadIdx.x >= j) {
prefix += warpCnt;
}
}
s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x] = prefix;
}
__syncthreads();
if (inRange && warpId > 0) {
warpPrefix = s_cnt[i * BLK_SORT_SIZE / 32 + warpId - 1];
}
__syncthreads();
if (flag) {
newOff = radixPrefix + blkPrefix + warpPrefix + lanePrefix;
}
__syncthreads();
radixPrefix += radixTotal[i * totalPos + totalPos - 1];
}
if (inRange) {
outKey[newOff] = intKey;
outValue[newOff] = value;
}
}
}
// for fp16 keys, a 16-bit (unsigned short) outKey would be sufficient
template <typename KEY>
__global__ void convert(KEY *Key, unsigned int *outKey, int size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
KEY key = Key[tid];
unsigned int intKey = convert2u<KEY>(key);
outKey[tid] = intKey;
}
template <typename KEY>
__global__ void reverse_convert(unsigned int *Key, KEY *outKey, int size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
unsigned int intKey = Key[tid];
KEY key = convertu2<KEY>(intKey);
outKey[tid] = key;
}
//tempBuf:
//*dims
//convertKey: size*sizeof(unsigned int). zero if inplace
//keyBuf: convertKey size
//valueBuf: value size
//prefixData: SORT_RADIX_SIZE * blocks * sizeof(int)
//tmpPrefix: SORT_RADIX_SIZE * block_x* sizeof(uint)
template <typename KEY, typename VALUE, bool largest>
void radix_sort(
hipStream_t stream,
KEY *key,
VALUE *value,
int size,
int sliceNum,
unsigned int *convertKey,
unsigned int *prefixData,
unsigned int *tmpPrefix,
unsigned int *keyBuf,
VALUE *valueBuf)
{
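    // per 2-bit digit, LSB to MSB: (1) in-block counting sort plus per-block
    // digit counts (radixSort), (2) exclusive scan of those counts across
    // blocks (prefixSum / finalPrefixSum), (3) global scatter for this digit
    // (interBlkSort), ping-ponging between the key/value buffers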
hipLaunchKernelGGL(( convert<KEY>), dim3(DivUp(sliceNum *size, 1024)), dim3(1024), 0, stream,
key, convertKey, sliceNum * size);
int blocks = DivUp(size, BLK_SORT_SIZE);
constexpr int MAX_BLKS = 64;
int prefixSize = blocks;
int blkScanSize = max(DivUp(prefixSize, MAX_BLKS),
SIZE_PER_SCAN);
unsigned int block_x = DivUp(blocks, blkScanSize);
unsigned int *keyIn, *keyOut;
VALUE *valueIn, *valueOut;
keyIn = convertKey;
valueIn = value;
keyOut = keyBuf;
valueOut = valueBuf;
dim3 sort_grid = dim3(blocks, sliceNum, 1);
dim3 prefix_grid = dim3(block_x, SORT_RADIX_SIZE, sliceNum);
dim3 final_grid = dim3(block_x, SORT_RADIX_SIZE, sliceNum);
for (unsigned pos = 0; pos <= 8 * sizeof(KEY) - SORT_RADIX_BITS; pos += SORT_RADIX_BITS) {
hipLaunchKernelGGL(( radixSort<largest, VALUE>) , dim3(sort_grid), dim3(BLK_SORT_SIZE), 0, stream,
keyIn, valueIn, size, pos, prefixData);
hipLaunchKernelGGL(( prefixSum<unsigned int>) , dim3(prefix_grid), dim3(SIZE_PER_SCAN), 0, stream,
prefixData, blocks, blkScanSize, tmpPrefix);
if (block_x > 1) {
hipLaunchKernelGGL(( finalPrefixSum<unsigned int>), dim3(final_grid), dim3(SIZE_PER_SCAN), 0, stream,
prefixData, blocks, blkScanSize, tmpPrefix);
}
hipLaunchKernelGGL(( interBlkSort<largest, VALUE>) , dim3(sort_grid), dim3(BLK_SORT_SIZE), 0, stream,
keyOut, valueOut, keyIn, valueIn, size, prefixData, tmpPrefix, block_x, pos);
unsigned int *tmpk = keyIn;
VALUE *tmpv = valueIn;
keyIn = keyOut;
keyOut = tmpk;
valueIn = valueOut;
valueOut = tmpv;
}
if (keyIn != convertKey) {
hipMemcpyAsync(value, valueOut, size * sizeof(VALUE),
hipMemcpyDeviceToDevice, stream);
}
hipLaunchKernelGGL(( reverse_convert<KEY>), dim3(DivUp(sliceNum*size, 1024)), dim3(1024), 0, stream,
keyIn, key, sliceNum * size);
}
int64_t PPLTopKGetTempBufferSize(
const ppl::nn::TensorShape* indices_shape,
const int K,
int dim_k,
bool sorted)
{
if (sorted == false)
return 0;
if(dim_k == -1){
dim_k = indices_shape->GetDimCount() - 1;
}
int slices_num = 1;
for (unsigned int i = 0; i < indices_shape->GetDimCount(); i++) {
if (i != (unsigned int)dim_k) slices_num *= indices_shape->GetDim(i);
}
int64_t total_size = 0;
//keyBuf
total_size += slices_num * K * sizeof(unsigned int);
//valueBuf unsigned int
total_size += slices_num * K * sizeof(indices_shape->GetDataType());
//max bitonic sort size
if (K <= 512) return total_size;
    // determined by the GPU device (number of SMs)
constexpr int MAX_BLKS = 64;
int new_blocks = (K + BLK_SORT_SIZE - 1) / BLK_SORT_SIZE;
int prefixSize = new_blocks;
int blkScanSize = max( DivUp(prefixSize, MAX_BLKS), SIZE_PER_SCAN);
unsigned int block_x = DivUp(new_blocks, blkScanSize);
//convertKey
total_size += slices_num * K * sizeof(unsigned int);
//prefixData
total_size += slices_num * SORT_RADIX_SIZE * new_blocks * sizeof(unsigned int);
//tmpPrefix
total_size += slices_num * SORT_RADIX_SIZE * block_x * sizeof(unsigned int);
return total_size;
}
template <typename T>
__global__ void transpose(
const T *input,
T *output,
const int batch,
const int input_h,
const int input_w)
{
__shared__ T smem[32][33];
int i_h = blockIdx.y * 32 + threadIdx.y;
int i_w = blockIdx.x * 32 + threadIdx.x;
int o_w = blockIdx.y * 32 + threadIdx.x;
int o_h = blockIdx.x * 32 + threadIdx.y;
bool inBound0 = i_h < input_h && i_w < input_w;
int64_t index = (blockIdx.z * input_h + i_h) * input_w + i_w;
bool inBound1 = o_h < input_w && o_w < input_h;
int64_t o_index = (blockIdx.z * input_w + o_h) * input_h + o_w;
T value = inBound0 ? input[index] : (T)0;
smem[threadIdx.x][threadIdx.y] = value;
__syncthreads();
value = smem[threadIdx.y][threadIdx.x];
if (inBound1) {
output[o_index] = value;
}
}
template <typename T, typename ID>
void topKGpuImpl(
const hipStream_t &stream,
const int K,
int dim,
TensorInfo inputInfo,
TensorInfo topKInfo,
TensorInfo indicesInfo,
void *temp_buffer,
int64_t temp_buffer_bytes,
const bool largest = true,
const bool sorted = true)
{
bool is_trans = false;
int batch = 1;
int trans_h = 1, trans_w = 1;
if (dim == -1) {
dim = inputInfo.dims - 1;
}
if (dim != inputInfo.dims - 1) {
is_trans = true;
trans_w = K;
for (int i = 0; i < dim; i++) {
batch *= topKInfo.shape[i];
}
for (int i = dim + 1; i < topKInfo.dims; i++) {
trans_h *= topKInfo.shape[i];
}
}
int sliceSize = inputInfo.shape[dim];
    // collapse dim_k and any other dimensions of size 1
int inputSliceStride = collapse_dim(&inputInfo, dim);
int topKSliceStride = collapse_dim(&topKInfo, dim);
int indicesSliceStride = collapse_dim(&indicesInfo, dim);
int blocks = 1;
for (int i = 0; i < inputInfo.dims; ++i) {
blocks *= inputInfo.shape[i];
}
#define POSTLOG_TRANSPOSE(){ \
if (is_trans) { \
int trans_size = batch * trans_h * trans_w; \
dim3 block_size = dim3(32, 32, 1); \
dim3 grid = dim3(DivUp(trans_w, 32), \
DivUp(trans_h, 32), \
batch); \
T *trans_topk = reinterpret_cast<T*>(temp_buffer); \
ID *trans_indices = reinterpret_cast<ID*>(trans_topk + trans_size); \
\
hipLaunchKernelGGL(( transpose<T>), dim3(grid), dim3(block_size), 0, stream, \
(T *)topKInfo.data, trans_topk, \
batch, trans_h, trans_w); \
hipLaunchKernelGGL(( transpose<ID>), dim3(grid), dim3(block_size), 0, stream, \
(ID *)indicesInfo.data, trans_indices, \
batch, trans_h, trans_w); \
hipMemcpyAsync((T *)topKInfo.data, trans_topk, \
trans_size * sizeof(T ), hipMemcpyDeviceToDevice, stream); \
hipMemcpyAsync((ID*)indicesInfo.data, trans_indices, \
trans_size * sizeof(ID), hipMemcpyDeviceToDevice, stream); \
} \
}
constexpr int BLK_SIZE = 1024;
if (largest) {
if (sorted) { //index is int32 by default
hipLaunchKernelGGL(( selectTopK<T, 1, BLK_SIZE, 1>), dim3(blocks), dim3(BLK_SIZE), 0, stream,
inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
sortInplace<T, ID, 1>(stream, (T *)topKInfo.data, (ID *)indicesInfo.data,
K, blocks, temp_buffer, temp_buffer_bytes);
//transpose
POSTLOG_TRANSPOSE();
} else {
hipLaunchKernelGGL(( selectTopK<T, 1, BLK_SIZE, 0>), dim3(blocks), dim3(BLK_SIZE), 0, stream,
inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
}
} else {
if (sorted) {
hipLaunchKernelGGL(( selectTopK<T, 0, BLK_SIZE, 1>), dim3(blocks), dim3(BLK_SIZE), 0, stream,
inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
sortInplace<T, ID, 0>(stream, (T *)topKInfo.data, (ID *)indicesInfo.data,
K, blocks, temp_buffer, temp_buffer_bytes);
//transpose
POSTLOG_TRANSPOSE();
} else {
hipLaunchKernelGGL(( selectTopK<T, 0, BLK_SIZE, 0>), dim3(blocks), dim3(BLK_SIZE), 0, stream,
inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
}
}
#undef POSTLOG_TRANSPOSE
}
ppl::common::RetCode PPLCUDATopKForwardImp(
hipStream_t stream,
ppl::nn::TensorShape* input_shape,
const void *input,
ppl::nn::TensorShape* topk_shape,
void *topk,
ppl::nn::TensorShape* indices_shape,
int *indices,
void *temp_buffer,
int64_t temp_buffer_bytes,
int K,
int dim_k,
const bool largest,
const bool sorted)
{
TensorInfo input_info(input_shape, input);
TensorInfo topk_info(topk_shape, topk);
TensorInfo indices_info(indices_shape, indices);
if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
topKGpuImpl<float, int>(stream, K, dim_k,
input_info, topk_info, indices_info,
temp_buffer, temp_buffer_bytes, largest, sorted);
} else if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
topKGpuImpl<__half,int>(stream, K, dim_k,
input_info, topk_info, indices_info,
temp_buffer, temp_buffer_bytes, largest, sorted);
}
return ppl::common::RC_SUCCESS;
}
| 15dafb806f2798fbba8539ee01b3506869548349.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/topk.h"
#include "cudakernel/math/math.h"
#include "cudakernel/common/common.h"
#include "ppl/common/types.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <cuda_fp16.h>
#include <float.h>
#include <memory>
#define MAX_DIM 8 //tensor_shape.kMaxNumDimensions
#define killWARDependency 1
#define RADIX_SIZE 16
#define RADIX_BITS 4
const int RADIX_MASK = RADIX_SIZE - 1;
#define SORT_RADIX_SIZE 4
#define SORT_RADIX_BITS 2
struct TensorInfo {
int shape[MAX_DIM];
int strides[MAX_DIM];
const void *data;
int dims;
TensorInfo(ppl::nn::TensorShape* tensor_shape, const void *data_ptr)
{
for (unsigned int i = 0; i < tensor_shape->GetDimCount() && i < MAX_DIM; i++) {
shape[i] = tensor_shape->GetDim(i);
}
for (unsigned int i = tensor_shape->GetDimCount(); i < MAX_DIM; i++) {
shape[i] = 1;
}
dims = tensor_shape->GetDimCount();
data = data_ptr;
}
};
__device__ __inline__ int convert_f2u(float v)
{
unsigned int x = __float_as_uint(v);
unsigned int mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
unsigned int res = x ^ mask;
return res;
}
__device__ __inline__ float convert_u2f(int v)
{
unsigned int mask = (v & 0x80000000) ? 0x80000000 : 0xffffffff;
unsigned int x = v ^ mask;
return __uint_as_float(x);
}
template <typename T>
__device__ unsigned int convert2u(T value);
template <>
__device__ unsigned int convert2u(__half value)
{
// must use short, for reverse convert
unsigned short int x = __half_as_ushort(value);
unsigned short int mask = (x & 0x8000) ? 0xffff : 0x8000;
unsigned int res = x ^ mask;
return res;
}
template <>
__device__ unsigned int convert2u(float value)
{
unsigned int x = __float_as_uint(value);
unsigned int mask = (x & 0x80000000) ? 0xffffffff : 0x80000000;
unsigned int res = x ^ mask;
return res;
}
template <typename T>
__device__ T convertu2(unsigned int value);
template <>
__device__ __half convertu2(unsigned int value)
{
unsigned short int sht = (unsigned short int)value;
unsigned short int mask = (sht & 0x8000) ? 0x8000 : 0xffff;
unsigned short int x = sht ^ mask;
return __ushort_as_half(x);
}
template <>
__device__ float convertu2(unsigned int value)
{
unsigned int mask = (value & 0x80000000) ? 0x80000000 : 0xffffffff;
unsigned int x = value ^ mask;
return __uint_as_float(x);
}
// shape:[n,c,h,w]
__device__ int get_offset(int linearIdx, int Dims, TensorInfo info)
{
int offset = 0;
for (int i = Dims - 1; i > 0; --i) {
int curDimIdx = linearIdx % info.shape[i];
int curOffset = curDimIdx * info.strides[i];
linearIdx /= info.shape[i];
offset += curOffset;
}
return offset + linearIdx * info.strides[0];
}
template <typename T>
__device__ unsigned int find_desired(
unsigned int *smem,
int lane,
const unsigned int mask,
const unsigned int desired,
const int inputSliceStride,
const T *inputSlice,
const int sliceSize)
{
if (threadIdx.x == 0) {
smem[0] = 0;
}
__syncthreads();
for (int off = threadIdx.x; off - lane < sliceSize; off += blockDim.x) {
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int intValue = convert2u<T>(value);
bool flag = inRange && ((intValue & mask) == desired);
if (flag) {
smem[0] = 1;
smem[1] = intValue;
}
__syncthreads();
unsigned int isFound = smem[0];
intValue = smem[1];
if (isFound) {
return intValue;
}
}
return 0;
}
template <typename T, bool dir>
__device__ T find_kth_value(
int *smem,
int K,
const int sliceSize,
const T *inputSlice,
const int inputSliceStride)
{
int count[RADIX_SIZE];
// use fixed higher bits to filter data
unsigned int mask = 0; //fixed high bit
unsigned int desired = 0; // current radix bits to fix
int *radix_hist = smem;
unsigned int kthValue;
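    // radix select: scan the keys 4 bits at a time from the MSB; each pass
    // histograms the current digit among values whose already-fixed high bits
    // match (mask/desired), then walks the histogram to locate the bucket
    // holding the K-th element and fixes those bits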
for (int pos = 8 * sizeof(int) - RADIX_BITS; pos >= 0; pos -= RADIX_BITS)
{
//reinit radix_hist to 0 every loop
for (int i = 0; i < RADIX_SIZE; i++) {
count[i] = 0;
}
if (threadIdx.x < RADIX_SIZE) {
radix_hist[threadIdx.x] = 0;
}
__syncthreads();
const int lane = threadIdx.x & 31;
for (int off = threadIdx.x; off - lane < sliceSize; off += blockDim.x) {
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
unsigned int intValue = convert2u<T>(value);
// filter with desired
bool inRadix = inRange && ((intValue & mask) == desired);
int valueRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(valueRadix)
: "r"(intValue), "r"(pos), "r"(RADIX_BITS));
#pragma unroll
for (int i = 0; i < RADIX_SIZE; i++) {
bool flag = inRadix && (valueRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
count[i] += __popc(ballot);
}
}
if ((threadIdx.x & 31) == 0) {
for (int i = 0; i < RADIX_SIZE; i++) {
atomicAdd(radix_hist + i, count[i]);
}
}
__syncthreads();
        // after the barrier every thread in the block sees the same histogram counts
for (int i = 0; i < RADIX_SIZE; i++) {
count[i] = radix_hist[i];
}
if (killWARDependency) {
__syncthreads();
}
// search K count
if (dir == 1) { // topK largest
for (int i = RADIX_SIZE - 1; i >= 0; --i) {
if (K == count[i] && K == 1) {
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
kthValue = find_desired<T>((unsigned int *)smem, threadIdx.x,
mask, desired,
inputSliceStride, inputSlice, sliceSize);
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
                } else if (K <= count[i]) { // narrow radix until K == count[i] == 1
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "=r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "=r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
break;
}
K -= count[i];
}
}
else {
for (int i = 0; i < RADIX_SIZE; ++i) {
if (K == count[i] && K == 1) {
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
kthValue = find_desired<T>((unsigned int *)smem, threadIdx.x,
mask, desired,
inputSliceStride, inputSlice, sliceSize);
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
                } else if (K <= count[i]) { // narrow radix until K == count[i] == 1
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(desired)
: "r"(i), "r"(pos), "r"(RADIX_BITS));
asm("bfi.b32 %0, %1, %0, %2, %3;"
: "+r"(mask)
: "r"(RADIX_MASK), "r"(pos), "r"(RADIX_BITS));
break;
}
K -= count[i];
}
}
}
kthValue = desired;
T fp_kthValue = convertu2<T>(kthValue);
return fp_kthValue;
}
template <typename T>
__device__ T scanInWarp(T value, int lane);
__device__ void prefix_scan(
int *smem,
const unsigned int active,
const int activeWarps,
const bool flag,
int &index,
int &blkTotal)
{
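    // block-wide ranking of threads whose flag is set: ballot within each
    // warp, scan the per-warp totals in warp 0, then index = this thread's
    // exclusive rank among flagged threads and blkTotal = the flagged count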
if (threadIdx.x < blockDim.x / 32) {
smem[threadIdx.x] = 0;
}
__syncthreads();
unsigned int ballot = __ballot_sync(active, flag);
int lane = threadIdx.x & 31;
unsigned int laneMask = ~(0xffffffff << lane);
laneMask = active & laneMask;
int warpId = threadIdx.x >> 5;
unsigned int leader = __ffs(active) - 1;
int total = __popc(ballot);
int prefix = __popc(laneMask & ballot);
if (lane == leader) {
smem[warpId] = total;
}
__syncthreads();
int warpOff = 0;
if (threadIdx.x < 32) {
int value = smem[threadIdx.x];
int warpPrefix = scanInWarp<int>(value, lane);
smem[threadIdx.x] = warpPrefix;
}
__syncthreads();
if (warpId >= 1)
warpOff = smem[warpId - 1];
blkTotal = smem[activeWarps - 1];
if (flag) {
index = warpOff + prefix;
}
// write-after-read dependency
if (killWARDependency) {
__syncthreads();
}
}
// dir = 1: decrease order; 0: increase order
template <typename T, bool dir, int blockSize, bool sorted>
__global__ void selectTopK(
TensorInfo input,
TensorInfo topK,
TensorInfo indices,
const int K,
const int collapsedDims,
const int sliceSize,
const int inputSliceStride,
const int topKSliceStride,
const int indicesSliceStride)
{
int inputSliceStart = get_offset(blockIdx.x, collapsedDims, input);
// if sorted, transform the output to coalesced slices
int topKSliceStart = sorted ? blockIdx.x * K :
get_offset(blockIdx.x, collapsedDims, topK);
int indicesSliceStart = sorted ? blockIdx.x * K :
get_offset(blockIdx.x, collapsedDims, indices);
    // walk the histogram bins (top-down for largest, bottom-up for smallest) until K is reached
__shared__ int radix_hist[2 + blockSize / 32];
int *smem = radix_hist;
T *inputSlice = (T *)input.data + inputSliceStart;
T *topKSlice = (T *)topK.data + topKSliceStart;
int *indicesSlice = (int *)indices.data + indicesSliceStart;
T fp_kthValue = find_kth_value<T, dir>(smem, K, sliceSize, inputSlice, inputSliceStride);
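    // two passes over the slice: first write every element strictly better
    // than the k-th value, then fill the remaining slots with elements equal
    // to it (ties), stopping once K outputs have been written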
int writeStart = 0;
int activeWarps = 0;
int tmpSize = sliceSize;
for (int off = threadIdx.x; off < Align(sliceSize, blockSize); off += blockSize) {
int curSize = tmpSize >= blockSize ? blockSize : tmpSize;
activeWarps = (curSize + 31) >> 5;
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
bool flag;
if (dir) {
flag = inRange && Math<T, T, T>::gt(value, fp_kthValue);
} else {
flag = inRange && Math<T, T, T>::lt(value, fp_kthValue);
}
int index, blkTotal;
prefix_scan(smem, active, activeWarps, flag, index, blkTotal);
if (flag) {
int topKOffset = sorted ? (writeStart + index) :
(writeStart + index) * topKSliceStride;
int indexOffset = sorted ? (writeStart + index) :
(writeStart + index) * indicesSliceStride;
topKSlice[topKOffset] = value;
indicesSlice[indexOffset] = off;
}
writeStart += blkTotal;
//if tmpSize < 0, the loop breaks
tmpSize -= blockSize;
}
int topKRemaining = K - writeStart;
tmpSize = sliceSize;
for (int off = threadIdx.x; off < Align(sliceSize, blockSize); off += blockSize) {
int curSize = tmpSize >= blockSize ? blockSize : tmpSize;
activeWarps = (curSize + 31) >> 5;
bool inRange = off < sliceSize;
T value = inRange ? inputSlice[off * inputSliceStride] : (T)0;
unsigned int active = __ballot_sync(0xffffffff, inRange);
bool flag;
flag = inRange && Math<T, T, T>::eq(value, fp_kthValue);
int index, blkTotal;
prefix_scan(smem, active, activeWarps, flag, index, blkTotal);
if (flag) {
int outputIndex = writeStart + index;
if (outputIndex < K) {
int topKOffset = sorted ? outputIndex :
outputIndex * topKSliceStride;
int indexOffset = sorted ? outputIndex :
outputIndex * indicesSliceStride;
topKSlice[topKOffset] = value;
indicesSlice[indexOffset] = off;
}
}
if (topKRemaining < blkTotal) {
break;
}
topKRemaining -= blkTotal;
writeStart += blkTotal;
tmpSize -= blockSize;
}
}
template <typename KEY, typename VALUE, bool largest>
__device__ inline void swap(
const bool isOdd,
bool &valid1,
KEY &value1,
VALUE &index1,
bool &valid2,
KEY &value2,
VALUE &index2)
{
bool isLarge = (largest ^ Math<KEY, KEY, KEY>::lt(value1, value2) && valid1) ||
!valid2;
if (isLarge == isOdd) {
KEY tmpValue = value1;
VALUE tmpIndex = index1;
bool tmpValid = valid1;
value1 = value2;
index1 = index2;
valid1 = valid2;
value2 = tmpValue;
index2 = tmpIndex;
valid2 = tmpValid;
}
}
template <typename KEY, typename VALUE, bool dir, int power2SortSize>
__global__ void bitonicSort(
KEY *Key,
VALUE *Value,
const int sliceSize)
{
__shared__ KEY smemTopk[power2SortSize];
__shared__ VALUE smemIndices[power2SortSize];
__shared__ bool smemValid[power2SortSize];
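    // bitonic sort of up to power2SortSize elements held in shared memory;
    // each thread owns two slots, and out-of-range slots are marked invalid
    // so they always order after valid elements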
KEY *topKSlice = Key + blockIdx.x * sliceSize;
VALUE *indicesSlice = Value + blockIdx.x * sliceSize;
int tid = threadIdx.x;
int off1 = threadIdx.x;
int off2 = threadIdx.x + power2SortSize / 2;
bool inRange1 = off1 < sliceSize;
bool inRange2 = off2 < sliceSize;
KEY value1 = inRange1 ? topKSlice[off1] : (KEY)0;
VALUE index1 = inRange1 ? indicesSlice[off1] : (VALUE)0;
KEY value2 = inRange2 ? topKSlice[off2] : (KEY)0;
VALUE index2 = inRange2 ? indicesSlice[off2] : (VALUE)0;
smemTopk[off1] = value1;
smemIndices[off1] = index1;
smemValid[off1] = inRange1;
smemTopk[off2] = value2;
smemIndices[off2] = index2;
smemValid[off2] = inRange2;
__syncthreads();
#pragma unroll
for (int size = 2; size < power2SortSize; size *= 2) {
int oddSeg = (tid & (size / 2)) != 0;
#pragma unroll
// sort each size
for (int sub_size = size; sub_size > 1; sub_size /= 2) {
int stride = sub_size / 2;
int off = (tid / stride) * sub_size + (tid & (stride - 1));
bool inRange1 = smemValid[off];
KEY value1 = smemTopk[off];
VALUE index1 = smemIndices[off];
bool inRange2 = smemValid[off + stride];
KEY value2 = smemTopk[off + stride];
VALUE index2 = smemIndices[off + stride];
swap<KEY, VALUE, dir>(oddSeg,
inRange1, value1, index1,
inRange2, value2, index2);
smemTopk[off] = value1;
smemIndices[off] = index1;
smemValid[off] = inRange1;
smemTopk[off + stride] = value2;
smemIndices[off + stride] = index2;
smemValid[off + stride] = inRange2;
__syncthreads();
}
}
// sort the whole power2SortSize
for (int sub_size = power2SortSize; sub_size > 1; sub_size /= 2) {
int stride = sub_size / 2;
int off = (tid / stride) * sub_size + (tid & (stride - 1));
bool inRange1 = smemValid[off];
KEY value1 = smemTopk[off];
VALUE index1 = smemIndices[off];
bool inRange2 = smemValid[off + stride];
KEY value2 = smemTopk[off + stride];
VALUE index2 = smemIndices[off + stride];
swap<KEY, VALUE, dir>(false,
inRange1, value1, index1,
inRange2, value2, index2);
smemTopk[off] = value1;
smemIndices[off] = index1;
smemValid[off] = inRange1;
smemTopk[off + stride] = value2;
smemIndices[off + stride] = index2;
smemValid[off + stride] = inRange2;
__syncthreads();
}
inRange1 = smemValid[off1];
value1 = smemTopk[off1];
index1 = smemIndices[off1];
inRange2 = smemValid[off2];
value2 = smemTopk[off2];
index2 = smemIndices[off2];
if (inRange1) {
topKSlice[off1] = value1;
indicesSlice[off1] = index1;
}
if (inRange2) {
topKSlice[off2] = value2;
indicesSlice[off2] = index2;
}
}
#define BLK_SORT_SIZE 1024
#define SIZE_PER_SCAN 1024
template <typename KEY, typename VALUE, bool dir>
void radix_sort(
cudaStream_t stream,
KEY *key,
VALUE *value,
int size,
int sliceNum,
unsigned int *convertKey,
unsigned int *prefixData,
unsigned int *tmpPrefix,
unsigned int *keyBuf,
VALUE *valueBuf);
//tempBuf:
//*dims
//convertKey: size*sizeof(unsigned int). zero if inplace
//keyBuf: convertKey size
//valueBuf: value size
//prefixData: SORT_RADIX_SIZE * blocks * sizeof(int)
//tmpPrefix: SORT_RADIX_SIZE * block_x* sizeof(uint)
template <typename KEY, typename VALUE, bool dir>
void sortInplace(
cudaStream_t stream,
KEY *Key,
VALUE *Value,
const int size,
const int slices_num,
void *temp_buffer,
int64_t temp_buffer_bytes)
{
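    // K <= 512: a single bitonic-sort block per slice, entirely in shared
    // memory; larger K: multi-block radix sort using the caller's temp buffer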
const int blocks = slices_num;
if (size == 1) {
} else if (size <= 64) {
bitonicSort<KEY, VALUE, dir, 64><<<blocks, 32, 0, stream>>>(Key, Value, size);
} else if (size <= 128) {
bitonicSort<KEY, VALUE, dir, 128><<<blocks, 64, 0, stream>>>(Key, Value, size);
} else if (size <= 512) {
bitonicSort<KEY, VALUE, dir, 512><<<blocks, 256, 0, stream>>>(Key, Value, size);
}
else {
int new_blocks = (size + BLK_SORT_SIZE - 1) / BLK_SORT_SIZE;
unsigned int *convert_key = (unsigned int *)temp_buffer;
unsigned int *topk_buf = (unsigned int *)(convert_key + slices_num * size);
VALUE *indices_buf = (VALUE *)(topk_buf + slices_num * size);
unsigned int *prefix_data = (unsigned int *)(indices_buf + slices_num * size);
unsigned int *tmp_prefix = (unsigned int *)(prefix_data + slices_num * SORT_RADIX_SIZE * new_blocks);
radix_sort<KEY, VALUE, dir>(stream, Key, Value, size, slices_num,
convert_key, prefix_data, tmp_prefix,
topk_buf, indices_buf);
}
}
int collapse_dim(TensorInfo *param, int dim)
{
int dimSize = param->shape[dim];
param->shape[dim] = 1;
int cur = -1;
int p = 0;
for (; p < dim; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
p++;
break;
}
for (; p < dim; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
}
// after dim
int markCur = cur;
for (; p < param->dims; p++) {
if (param->shape[p] == 1)
continue;
cur++;
param->shape[cur] = param->shape[p];
}
param->strides[cur] = 1;
for (int i = cur - 1; i > markCur; --i) {
param->strides[i] = param->shape[i + 1] * param->strides[i + 1];
}
int sliceStride = (dim == -1 || dim == param->dims - 1) ?
1 :
param->shape[markCur + 1] * param->strides[markCur + 1];
param->strides[markCur] = dimSize * sliceStride;
for (int i = markCur - 1; i >= 0; --i) {
param->strides[i] = param->shape[i + 1] * param->strides[i + 1];
}
param->dims = cur + 1;
return sliceStride;
}
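//Illustrative example: for shape [2, 1, 3, 4] and dim == 3 (dimSize 4), the
//size-1 axis is dropped and the sort axis folded away, leaving dims == 2 with
//shape [2, 3], strides [12, 4] and a returned sliceStride of 1, so each of the
//2 * 3 slices covers 4 contiguous elements of the original tensor.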
// bitWidth (SORT_RADIX_BITS, bits handled per radix pass): 2 or 4, giving SORT_RADIX_SIZE buckets
// bitPos: starting bit of the digit being sorted, 0-30
template <bool dir, typename VALUE>
__global__ void radixSort(
unsigned int *Key,
VALUE *Value,
int size,
int bitPos,
unsigned int *prefixData)
{
__shared__ unsigned int s_cnt[SORT_RADIX_SIZE * BLK_SORT_SIZE / 32];
__shared__ unsigned int s_key[BLK_SORT_SIZE];
__shared__ VALUE s_value[BLK_SORT_SIZE];
Key += blockIdx.y * size;
Value += blockIdx.y * size;
s_key[threadIdx.x] = 0;
s_value[threadIdx.x] = 0;
if (threadIdx.x < SORT_RADIX_SIZE * BLK_SORT_SIZE / 32)
s_cnt[threadIdx.x] = 0;
__syncthreads();
int lane = threadIdx.x & 31;
int warpId = threadIdx.x >> 5;
int activeWarps = (blockIdx.x == gridDim.x - 1) ?
DivUp((size & (BLK_SORT_SIZE - 1)), 32) :
BLK_SORT_SIZE / 32;
if (activeWarps == 0)
activeWarps = 32;
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
bool inRange = tid < size;
unsigned int active = __ballot_sync(0xffffffff, inRange);
const unsigned int lane_mask_lt = ~(0xffffffff << (lane));
if (tid - lane < size) {
VALUE value = inRange ? Value[tid] : (VALUE)0;
unsigned int intKey = inRange ? Key[tid] : 0;
unsigned int keyRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(keyRadix)
: "r"(intKey), "r"(bitPos), "r"(SORT_RADIX_BITS));
int radixPrefix = 0;
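//bfe.u32 above extracts SORT_RADIX_BITS bits of the key starting at bitPos,
//so keyRadix is the current radix digit (0 .. SORT_RADIX_SIZE - 1).
//The loop below walks the digit values in ascending order when dir == 0 and
//in descending order when dir == 1, which builds the bucket prefixes in the
//requested output order.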
for (int i = dir * (SORT_RADIX_SIZE - 1);
i != (1 - dir) * SORT_RADIX_SIZE + dir * (-1);
i += dir * (-1) + 1 - dir) {
bool flag = inRange && (keyRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
int warpCnt = __popc(ballot);
int lanePrefix = __popc(ballot & lane_mask_lt);
int warpPrefix = 0;
if (inRange && lane == 0) {
s_cnt[i * BLK_SORT_SIZE / 32 + warpId] = warpCnt;
}
__syncthreads();
//prefix sum in warp
if (threadIdx.x < 32) {
warpCnt = s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x];
unsigned int prefix = warpCnt;
for (int j = 1; j < 32; j <<= 1) {
warpCnt = __shfl_up_sync(0xffffffff, prefix, j, 32);
if (threadIdx.x >= j) {
prefix += warpCnt;
}
}
s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x] = prefix;
}
__syncthreads();
if (inRange && warpId > 0) {
warpPrefix = s_cnt[i * BLK_SORT_SIZE / 32 + warpId - 1];
}
if (flag) {
s_key[radixPrefix + warpPrefix + lanePrefix] = intKey;
s_value[radixPrefix + warpPrefix + lanePrefix] = value;
}
radixPrefix += s_cnt[i * BLK_SORT_SIZE / 32 + activeWarps - 1];
__syncthreads(); //WAR
}
if (threadIdx.x == 0) {
for (int i = 0; i < SORT_RADIX_SIZE; i++) {
prefixData[blockIdx.y*gridDim.x*SORT_RADIX_SIZE + i*gridDim.x+blockIdx.x] =
s_cnt[i * BLK_SORT_SIZE / 32 + activeWarps - 1];
}
}
intKey = s_key[threadIdx.x];
value = s_value[threadIdx.x];
if (inRange) {
Key[tid] = intKey;
Value[tid] = value;
}
}
}
template <typename T>
__device__ T scanInWarp(T value, int lane)
{
T lanePrefix = value;
for (int i = 1; i < 32; i <<= 1) {
value = __shfl_up_sync(0xffffffff, lanePrefix, i, 32);
if (lane >= i) {
lanePrefix += value;
}
}
return lanePrefix;
}
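//Illustrative example: if lanes 0..3 hold 1, 2, 3, 4, scanInWarp returns the
//inclusive prefix sums 1, 3, 6, 10, i.e. lane i receives the sum of the
//values held by lanes 0 through i.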
//#define SIZE_PER_SCAN 1024
template <typename T>
__global__ void prefixSum(
T *prefixData,
const int size,
const int blkScanSize,
T *blkTotal)
{
__shared__ T warp_cnt[SIZE_PER_SCAN >> 5];
__shared__ T lane_prefix[SIZE_PER_SCAN];
int lane = threadIdx.x & 31;
int warpId = threadIdx.x >> 5;
int64_t off = blockIdx.x * blkScanSize + threadIdx.x;
prefixData += (blockIdx.z * SORT_RADIX_SIZE + blockIdx.y) * size;
blkTotal += blockIdx.z * SORT_RADIX_SIZE * gridDim.x +
blockIdx.y * gridDim.x + blockIdx.x;
T subScanPrefix = (T)0;
for (int iterOff = 0; iterOff < blkScanSize; iterOff += SIZE_PER_SCAN) {
bool inRange = (off + iterOff < size);
T data = inRange ? prefixData[off + iterOff] : 0;
T lanePrefix;
lanePrefix = data;
for (int i = 1; i < 32; i <<= 1) {
data = __shfl_up_sync(0xffffffff, lanePrefix, i, 32);
if (lane >= i) {
lanePrefix += data;
}
}
int stsOff = threadIdx.x + 1;
if (lane == 31) {
warp_cnt[warpId] = lanePrefix;
stsOff -= 32;
lanePrefix = 0;
}
__syncthreads();
lane_prefix[stsOff] = lanePrefix;
T warpPrefix = 0;
if (threadIdx.x < (SIZE_PER_SCAN >> 5)) {
data = warp_cnt[threadIdx.x];
T sum = data;
for (int i = 1; i < 32; i <<= 1) {
data = __shfl_up_sync(0xffffffff, sum, i, 32);
if (threadIdx.x >= i) {
sum += data;
}
}
warp_cnt[threadIdx.x] = sum;
}
__syncthreads();
lanePrefix = lane_prefix[threadIdx.x];
if (warpId > 0) {
warpPrefix = warp_cnt[warpId - 1];
}
T prefix = subScanPrefix + warpPrefix + lanePrefix;
if (inRange) {
prefixData[off + iterOff] = prefix;
}
subScanPrefix += warp_cnt[(SIZE_PER_SCAN >> 5) - 1];
}
//blk scan total
if (threadIdx.x == 0) {
blkTotal[0] = subScanPrefix;
}
}
template <typename T>
__global__ void finalPrefixSum(
T *prefixData,
const int size,
const int blkScanSize,
T *blkTotal)
{
int batchId = blockIdx.z * SORT_RADIX_SIZE + blockIdx.y;
int64_t off = blockIdx.x * blkScanSize + threadIdx.x;
prefixData += batchId * size;
T blkPrefix = 0;
for (int i = 0; i < blockIdx.x; i++) {
blkPrefix += blkTotal[batchId * gridDim.x + i];
}
for (int iterOff = 0; iterOff < blkScanSize; iterOff += SIZE_PER_SCAN) {
bool inRange = (off + iterOff < size);
T data = inRange ? prefixData[off + iterOff] : 0;
data += blkPrefix;
if (inRange) {
prefixData[off + iterOff] = data;
}
}
if (blockIdx.x == gridDim.x - 1 && threadIdx.x == 0) {
blkTotal[batchId * gridDim.x + blockIdx.x] =
blkPrefix + blkTotal[batchId * gridDim.x + blockIdx.x];
}
}
template <bool dir, typename VALUE>
__global__ void interBlkSort(
unsigned int *outKey,
VALUE *outValue,
unsigned int *Key,
VALUE *Value,
unsigned int size,
unsigned int *prefixData,
unsigned int *radixTotal,
unsigned int totalPos,
unsigned int bitPos)
{
prefixData += blockIdx.y * SORT_RADIX_SIZE * gridDim.x;
radixTotal += blockIdx.y * SORT_RADIX_SIZE * totalPos;
Key += blockIdx.y * size;
Value += blockIdx.y * size;
outKey += blockIdx.y * size;
outValue += blockIdx.y * size;
__shared__ unsigned int s_cnt[SORT_RADIX_SIZE * BLK_SORT_SIZE / 32];
int lane = threadIdx.x & 31;
int warpId = threadIdx.x / 32;
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x < SORT_RADIX_SIZE * BLK_SORT_SIZE / 32)
s_cnt[threadIdx.x] = 0;
__syncthreads();
int activeWarps = (blockIdx.x == gridDim.x - 1) ?
DivUp((size & (BLK_SORT_SIZE - 1)), 32) :
BLK_SORT_SIZE / 32;
if (activeWarps == 0)
activeWarps = 32;
bool inRange = tid < size;
unsigned int active = __ballot_sync(0xffffffff, inRange);
const unsigned int lane_mask_lt = ~(0xffffffff << (lane));
if (tid - lane < size) {
unsigned int intKey = inRange ? Key[tid] : 0;
VALUE value = inRange ? Value[tid] : (VALUE)0;
unsigned int radixPrefix = 0;
unsigned int newOff;
unsigned int keyRadix;
asm("bfe.u32 %0, %1, %2, %3;"
: "=r"(keyRadix)
: "r"(intKey), "r"(bitPos), "r"(SORT_RADIX_BITS));
for (int i = dir * (SORT_RADIX_SIZE - 1);
i != (1 - dir) * SORT_RADIX_SIZE + dir * (-1);
i += dir * (-1) + 1 - dir)
{
unsigned int blkPrefix = prefixData[i * gridDim.x + blockIdx.x];
bool flag = inRange && (keyRadix == i);
unsigned int ballot = __ballot_sync(active, flag);
unsigned int lanePrefix = __popc(lane_mask_lt & ballot);
unsigned int warpCnt = __popc(ballot);
unsigned int warpPrefix = 0;
if (inRange && lane == 0) {
s_cnt[i * BLK_SORT_SIZE / 32 + warpId] = warpCnt;
}
__syncthreads();
if (threadIdx.x < 32) {
warpCnt = s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x];
unsigned int prefix = warpCnt;
for (int j = 1; j < 32; j <<= 1) {
warpCnt = __shfl_up_sync(0xffffffff, prefix, j, 32);
if (threadIdx.x >= j) {
prefix += warpCnt;
}
}
s_cnt[i * BLK_SORT_SIZE / 32 + threadIdx.x] = prefix;
}
__syncthreads();
if (inRange && warpId > 0) {
warpPrefix = s_cnt[i * BLK_SORT_SIZE / 32 + warpId - 1];
}
__syncthreads();
if (flag) {
newOff = radixPrefix + blkPrefix + warpPrefix + lanePrefix;
}
__syncthreads();
radixPrefix += radixTotal[i * totalPos + totalPos - 1];
}
if (inRange) {
outKey[newOff] = intKey;
outValue[newOff] = value;
}
}
}
//for fp16 keys, a 16-bit (unsigned short) outKey could be used instead of unsigned int
template <typename KEY>
__global__ void convert(KEY *Key, unsigned int *outKey, int size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
KEY key = Key[tid];
unsigned int intKey = convert2u<KEY>(key);
outKey[tid] = intKey;
}
template <typename KEY>
__global__ void reverse_convert(unsigned int *Key, KEY *outKey, int size)
{
unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= size)
return;
unsigned int intKey = Key[tid];
KEY key = convertu2<KEY>(intKey);
outKey[tid] = key;
}
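//convert2u / convertu2 (defined elsewhere) are assumed to map keys to
//unsigned integers whose unsigned ordering matches the original key ordering,
//the usual trick for radix-sorting signed or floating point keys (for floats,
//flipping the sign bit of non-negative values and all bits of negative ones).
//The exact transform is not shown in this file.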
//tempBuf:
//*dims
//convertKey: size*sizeof(unsigned int). zero if inplace
//keyBuf: convertKey size
//valueBuf: value size
//prefixData: SORT_RADIX_SIZE * blocks * sizeof(int)
//tmpPrefix: SORT_RADIX_SIZE * block_x* sizeof(uint)
template <typename KEY, typename VALUE, bool largest>
void radix_sort(
cudaStream_t stream,
KEY *key,
VALUE *value,
int size,
int sliceNum,
unsigned int *convertKey,
unsigned int *prefixData,
unsigned int *tmpPrefix,
unsigned int *keyBuf,
VALUE *valueBuf)
{
convert<KEY><<<DivUp(sliceNum *size, 1024), 1024, 0, stream>>>
(key, convertKey, sliceNum * size);
int blocks = DivUp(size, BLK_SORT_SIZE);
constexpr int MAX_BLKS = 64;
int prefixSize = blocks;
int blkScanSize = max(DivUp(prefixSize, MAX_BLKS),
SIZE_PER_SCAN);
unsigned int block_x = DivUp(blocks, blkScanSize);
unsigned int *keyIn, *keyOut;
VALUE *valueIn, *valueOut;
keyIn = convertKey;
valueIn = value;
keyOut = keyBuf;
valueOut = valueBuf;
dim3 sort_grid = dim3(blocks, sliceNum, 1);
dim3 prefix_grid = dim3(block_x, SORT_RADIX_SIZE, sliceNum);
dim3 final_grid = dim3(block_x, SORT_RADIX_SIZE, sliceNum);
for (unsigned pos = 0; pos <= 8 * sizeof(KEY) - SORT_RADIX_BITS; pos += SORT_RADIX_BITS) {
radixSort<largest, VALUE> <<<sort_grid, BLK_SORT_SIZE, 0, stream>>>
(keyIn, valueIn, size, pos, prefixData);
prefixSum<unsigned int> <<<prefix_grid, SIZE_PER_SCAN, 0, stream>>>
(prefixData, blocks, blkScanSize, tmpPrefix);
if (block_x > 1) {
finalPrefixSum<unsigned int><<<final_grid, SIZE_PER_SCAN, 0, stream>>>
(prefixData, blocks, blkScanSize, tmpPrefix);
}
interBlkSort<largest, VALUE> <<<sort_grid, BLK_SORT_SIZE, 0, stream>>>
(keyOut, valueOut, keyIn, valueIn, size, prefixData, tmpPrefix, block_x, pos);
unsigned int *tmpk = keyIn;
VALUE *tmpv = valueIn;
keyIn = keyOut;
keyOut = tmpk;
valueIn = valueOut;
valueOut = tmpv;
}
if (keyIn != convertKey) {
cudaMemcpyAsync(value, valueIn, sliceNum * size * sizeof(VALUE),
cudaMemcpyDeviceToDevice, stream);
}
reverse_convert<KEY><<<DivUp(sliceNum*size, 1024), 1024, 0, stream>>>
(keyIn, key, sliceNum * size);
}
int64_t PPLTopKGetTempBufferSize(
const ppl::nn::TensorShape* indices_shape,
const int K,
int dim_k,
bool sorted)
{
if (sorted == false)
return 0;
if(dim_k == -1){
dim_k = indices_shape->GetDimCount() - 1;
}
int slices_num = 1;
for (unsigned int i = 0; i < indices_shape->GetDimCount(); i++) {
if (i != (unsigned int)dim_k) slices_num *= indices_shape->GetDim(i);
}
int64_t total_size = 0;
//keyBuf
total_size += slices_num * K * sizeof(unsigned int);
//valueBuf unsigned int
total_size += slices_num * K * sizeof(indices_shape->GetDataType());
//max bitonic sort size
if (K <= 512) return total_size;
//determined by GPU devices, SMs number
constexpr int MAX_BLKS = 64;
int new_blocks = (K + BLK_SORT_SIZE - 1) / BLK_SORT_SIZE;
int prefixSize = new_blocks;
int blkScanSize = max( DivUp(prefixSize, MAX_BLKS), SIZE_PER_SCAN);
unsigned int block_x = DivUp(new_blocks, blkScanSize);
//convertKey
total_size += slices_num * K * sizeof(unsigned int);
//prefixData
total_size += slices_num * SORT_RADIX_SIZE * new_blocks * sizeof(unsigned int);
//tmpPrefix
total_size += slices_num * SORT_RADIX_SIZE * block_x * sizeof(unsigned int);
return total_size;
}
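//Worked example (illustrative, assuming a 4-byte index type and
//SORT_RADIX_SIZE == 4): K = 4096, slices_num = 2 gives new_blocks = 4,
//blkScanSize = max(DivUp(4, 64), SIZE_PER_SCAN) = 1024 and block_x = 1, so the
//returned size is 2*4096*4 (keyBuf) + 2*4096*4 (valueBuf) + 2*4096*4
//(convertKey) + 2*4*4*4 (prefixData) + 2*4*1*4 (tmpPrefix) bytes.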
template <typename T>
__global__ void transpose(
const T *input,
T *output,
const int batch,
const int input_h,
const int input_w)
{
__shared__ T smem[32][33];
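// The extra column (32 x 33 rather than 32 x 32) pads each shared memory row
// so that the transposed, column-wise reads below hit different banks and
// avoid bank conflicts.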
int i_h = blockIdx.y * 32 + threadIdx.y;
int i_w = blockIdx.x * 32 + threadIdx.x;
int o_w = blockIdx.y * 32 + threadIdx.x;
int o_h = blockIdx.x * 32 + threadIdx.y;
bool inBound0 = i_h < input_h && i_w < input_w;
int64_t index = (blockIdx.z * input_h + i_h) * input_w + i_w;
bool inBound1 = o_h < input_w && o_w < input_h;
int64_t o_index = (blockIdx.z * input_w + o_h) * input_h + o_w;
T value = inBound0 ? input[index] : (T)0;
smem[threadIdx.x][threadIdx.y] = value;
__syncthreads();
value = smem[threadIdx.y][threadIdx.x];
if (inBound1) {
output[o_index] = value;
}
}
template <typename T, typename ID>
void topKGpuImpl(
const cudaStream_t &stream,
const int K,
int dim,
TensorInfo inputInfo,
TensorInfo topKInfo,
TensorInfo indicesInfo,
void *temp_buffer,
int64_t temp_buffer_bytes,
const bool largest = true,
const bool sorted = true)
{
bool is_trans = false;
int batch = 1;
int trans_h = 1, trans_w = 1;
if (dim == -1) {
dim = inputInfo.dims - 1;
}
if (dim != inputInfo.dims - 1) {
is_trans = true;
trans_w = K;
for (int i = 0; i < dim; i++) {
batch *= topKInfo.shape[i];
}
for (int i = dim + 1; i < topKInfo.dims; i++) {
trans_h *= topKInfo.shape[i];
}
}
int sliceSize = inputInfo.shape[dim];
//collapse dim_k and dim which is size of 1
int inputSliceStride = collapse_dim(&inputInfo, dim);
int topKSliceStride = collapse_dim(&topKInfo, dim);
int indicesSliceStride = collapse_dim(&indicesInfo, dim);
int blocks = 1;
for (int i = 0; i < inputInfo.dims; ++i) {
blocks *= inputInfo.shape[i];
}
#define POSTLOG_TRANSPOSE(){ \
if (is_trans) { \
int trans_size = batch * trans_h * trans_w; \
dim3 block_size = dim3(32, 32, 1); \
dim3 grid = dim3(DivUp(trans_w, 32), \
DivUp(trans_h, 32), \
batch); \
T *trans_topk = reinterpret_cast<T*>(temp_buffer); \
ID *trans_indices = reinterpret_cast<ID*>(trans_topk + trans_size); \
\
transpose<T><<<grid, block_size, 0, stream>>> \
((T *)topKInfo.data, trans_topk, \
batch, trans_h, trans_w); \
transpose<ID><<<grid, block_size, 0, stream>>> \
((ID *)indicesInfo.data, trans_indices, \
batch, trans_h, trans_w); \
cudaMemcpyAsync((T *)topKInfo.data, trans_topk, \
trans_size * sizeof(T ), cudaMemcpyDeviceToDevice, stream); \
cudaMemcpyAsync((ID*)indicesInfo.data, trans_indices, \
trans_size * sizeof(ID), cudaMemcpyDeviceToDevice, stream); \
} \
}
constexpr int BLK_SIZE = 1024;
if (largest) {
if (sorted) { //index is int32 by default
selectTopK<T, 1, BLK_SIZE, 1><<<blocks, BLK_SIZE, 0, stream>>>
(inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
sortInplace<T, ID, 1>(stream, (T *)topKInfo.data, (ID *)indicesInfo.data,
K, blocks, temp_buffer, temp_buffer_bytes);
//transpose
POSTLOG_TRANSPOSE();
} else {
selectTopK<T, 1, BLK_SIZE, 0><<<blocks, BLK_SIZE, 0, stream>>>
(inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
}
} else {
if (sorted) {
selectTopK<T, 0, BLK_SIZE, 1><<<blocks, BLK_SIZE, 0, stream>>>
(inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
sortInplace<T, ID, 0>(stream, (T *)topKInfo.data, (ID *)indicesInfo.data,
K, blocks, temp_buffer, temp_buffer_bytes);
//transpose
POSTLOG_TRANSPOSE();
} else {
selectTopK<T, 0, BLK_SIZE, 0><<<blocks, BLK_SIZE, 0, stream>>>
(inputInfo, topKInfo, indicesInfo,
K, inputInfo.dims, sliceSize,
inputSliceStride, topKSliceStride, indicesSliceStride);
}
}
#undef POSTLOG_TRANSPOSE
}
ppl::common::RetCode PPLCUDATopKForwardImp(
cudaStream_t stream,
ppl::nn::TensorShape* input_shape,
const void *input,
ppl::nn::TensorShape* topk_shape,
void *topk,
ppl::nn::TensorShape* indices_shape,
int *indices,
void *temp_buffer,
int64_t temp_buffer_bytes,
int K,
int dim_k,
const bool largest,
const bool sorted)
{
TensorInfo input_info(input_shape, input);
TensorInfo topk_info(topk_shape, topk);
TensorInfo indices_info(indices_shape, indices);
if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32) {
topKGpuImpl<float, int>(stream, K, dim_k,
input_info, topk_info, indices_info,
temp_buffer, temp_buffer_bytes, largest, sorted);
} else if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
topKGpuImpl<__half,int>(stream, K, dim_k,
input_info, topk_info, indices_info,
temp_buffer, temp_buffer_bytes, largest, sorted);
}
return ppl::common::RC_SUCCESS;
}
|
57166b15d5f6db59849966d29942209a8d6f36df.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
__global__ void upSweep(int n, int *idata, int d) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
if (k % (d * 2) == (d * 2) - 1) {
idata[k] += idata[k - d];
}
}
__global__ void downSweep(int n, int *idata, int d) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
int temp;
if (k % (d * 2) == (d * 2) - 1) {
//printf("kernel: %d", k);
temp = idata[k - d];
idata[k - d] = idata[k]; // Set left child to this node's value
idata[k] += temp;
}
}
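// Worked example (illustrative) of one scan over idata = [1, 2, 3, 4]:
// upSweep d=1 -> [1, 3, 3, 7], upSweep d=2 -> [1, 3, 3, 10];
// zero the last element -> [1, 3, 3, 0];
// downSweep d=2 -> [1, 0, 3, 3], downSweep d=1 -> [0, 1, 3, 6],
// which is the exclusive prefix sum used by compact() below.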
__global__ void makeElementZero(int *data, int index) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index == k) {
data[k] = 0;
}
}
__global__ void scan(int n, int D, int *odata, int *idata) {
extern __shared__ int s_idata[];
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
s_idata[k] = idata[k];
__syncthreads();
for (int d = 0; d < D; d++) {
if (k % ((1 << d) * 2) == ((1 << d) * 2) - 1) {
s_idata[k] += s_idata[k - (1 << d)];
}
}
__syncthreads();
if (n-1 == k) {
s_idata[k] = 0;
}
__syncthreads();
for (int d = D - 1; d >= 0; d--) {
int temp;
if (k % ((1 << d) * 2) == ((1 << d) * 2) - 1) {
//printf("kernel: %d", k);
temp = s_idata[k - (1 << d)];
s_idata[k - (1 << d)] = s_idata[k]; // Set left child to this node's value
s_idata[k] += temp;
}
}
__syncthreads();
odata[k] = s_idata[k];
}
__global__ void copyElements(int n, int *src, int *dest) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
dest[index] = src[index];
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, PathSegment * dev_idata, PathSegment * dev_odata) {
int *dev_boolean;
int *dev_indices;
int count;
int paddedArraySize = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
dim3 fullBlocksPerGridPadded((paddedArraySize + blockSize - 1) / blockSize);
hipMalloc((void**)&dev_boolean, paddedArraySize * sizeof(int));
checkCUDAError("Cannot allocate memory for boolean");
hipMalloc((void**)&dev_indices, paddedArraySize * sizeof(int));
checkCUDAError("Cannot allocate memory for dev_indices");
StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(n, dev_boolean, dev_idata);
copyElements << <fullBlocksPerGrid, blockSize >> >(n, dev_boolean, dev_indices);
for (int d = 0; d < ilog2ceil(paddedArraySize); d++) {
upSweep << <fullBlocksPerGridPadded, blockSize >> >(paddedArraySize, dev_indices, 1 << d);
}
makeElementZero << <fullBlocksPerGridPadded, blockSize >> >(dev_indices, paddedArraySize - 1);
for (int d = ilog2ceil(paddedArraySize) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGridPadded, blockSize >> >(paddedArraySize, dev_indices, 1 << d);
}
StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(n, dev_odata, dev_idata, dev_boolean, dev_indices);
hipMemcpy(dev_idata, dev_odata, n*sizeof(PathSegment), hipMemcpyDeviceToDevice);
hipMemcpy(&count, dev_indices + paddedArraySize - 1, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_boolean);
hipFree(dev_indices);
return count;
}
}
}
| 57166b15d5f6db59849966d29942209a8d6f36df.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#define blockSize 128
namespace StreamCompaction {
namespace Efficient {
__global__ void upSweep(int n, int *idata, int d) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
if (k % (d * 2) == (d * 2) - 1) {
idata[k] += idata[k - d];
}
}
__global__ void downSweep(int n, int *idata, int d) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
int temp;
if (k % (d * 2) == (d * 2) - 1) {
//printf("kernel: %d", k);
temp = idata[k - d];
idata[k - d] = idata[k]; // Set left child to this node’s value
idata[k] += temp;
}
}
__global__ void makeElementZero(int *data, int index) {
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index == k) {
data[k] = 0;
}
}
__global__ void scan(int n, int D, int *odata, int *idata) {
extern __shared__ int s_idata[];
int k = (blockIdx.x * blockDim.x) + threadIdx.x;
if (k >= n)
return;
s_idata[k] = idata[k];
__syncthreads();
for (int d = 0; d < D; d++) {
if (k % ((1 << d) * 2) == ((1 << d) * 2) - 1) {
s_idata[k] += s_idata[k - (1 << d)];
}
}
__syncthreads();
if (n-1 == k) {
s_idata[k] = 0;
}
__syncthreads();
for (int d = D - 1; d >= 0; d--) {
int temp;
if (k % ((1 << d) * 2) == ((1 << d) * 2) - 1) {
//printf("kernel: %d", k);
temp = s_idata[k - (1 << d)];
s_idata[k - (1 << d)] = s_idata[k]; // Set left child to this node's value
s_idata[k] += temp;
}
}
__syncthreads();
odata[k] = s_idata[k];
}
__global__ void copyElements(int n, int *src, int *dest) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
return;
dest[index] = src[index];
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int n, PathSegment * dev_idata, PathSegment * dev_odata) {
int *dev_boolean;
int *dev_indices;
int count;
int paddedArraySize = 1 << ilog2ceil(n);
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
dim3 fullBlocksPerGridPadded((paddedArraySize + blockSize - 1) / blockSize);
cudaMalloc((void**)&dev_boolean, paddedArraySize * sizeof(int));
checkCUDAError("Cannot allocate memory for boolean");
cudaMalloc((void**)&dev_indices, paddedArraySize * sizeof(int));
checkCUDAError("Cannot allocate memory for dev_indices");
StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> >(n, dev_boolean, dev_idata);
copyElements << <fullBlocksPerGrid, blockSize >> >(n, dev_boolean, dev_indices);
for (int d = 0; d < ilog2ceil(paddedArraySize); d++) {
upSweep << <fullBlocksPerGridPadded, blockSize >> >(paddedArraySize, dev_indices, 1 << d);
}
makeElementZero << <fullBlocksPerGridPadded, blockSize >> >(dev_indices, paddedArraySize - 1);
for (int d = ilog2ceil(paddedArraySize) - 1; d >= 0; d--) {
downSweep << <fullBlocksPerGridPadded, blockSize >> >(paddedArraySize, dev_indices, 1 << d);
}
StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> >(n, dev_odata, dev_idata, dev_boolean, dev_indices);
cudaMemcpy(dev_idata, dev_odata, n*sizeof(PathSegment), cudaMemcpyDeviceToDevice);
cudaMemcpy(&count, dev_indices + paddedArraySize - 1, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_boolean);
cudaFree(dev_indices);
return count;
}
}
}
|
3b3cc69485dafda0afa157b791beaf6d2d7e414a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalReplicationPadding.cu"
#else
void THNN_(TemporalReplicationPadding_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimw = 1;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 2 || numInputDims == 3), 2, input,
"2D or 3D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 3) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputW = THCTensor_(size)(state, input, dimw);
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1, 2,
"input (W: %d) is too small."
" Calculated output W: %d",
inputW, outputW);
THCDeviceTensor<real, 3> devInput;
THCDeviceTensor<real, 3> devOutput;
if (numInputDims == 2) {
THCTensor_(resize2d)(state, output, numPlanes, outputW);
devInput = toDeviceTensor<real, 2>(state, input).upcastOuter<3>();
devOutput = toDeviceTensor<real, 2>(state, output).upcastOuter<3>();
} else {
THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW);
devInput = toDeviceTensor<real, 3>(state, input);
devOutput = toDeviceTensor<real, 3>(state, output);
}
int outputPlaneSize = devOutput.getSize(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
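// Launch configuration: one thread per element of an output plane (capped at
// 256 threads per block), with grid.y iterating over planes and grid.z over
// the batch.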
hipLaunchKernelGGL(( TemporalReplicationPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devInput, devOutput, padL, padR);
}
void THNN_(TemporalReplicationPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimw = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 3) {
planeDim++;
dimw++;
}
int iwidth = input->size[dimw];
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 3> devGradInput;
THCDeviceTensor<real, 3> devGradOutput;
if (numInputDims == 2) {
devGradInput = toDeviceTensor<real, 2>(state, gradInput).upcastOuter<3>();
devGradOutput = toDeviceTensor<real, 2>(state, gradOutput).upcastOuter<3>();
} else {
devGradInput = toDeviceTensor<real, 3>(state, gradInput);
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( TemporalReplicationPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, padL, padR);
}
#endif
| 3b3cc69485dafda0afa157b791beaf6d2d7e414a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/TemporalReplicationPadding.cu"
#else
void THNN_(TemporalReplicationPadding_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
int padL, int padR) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimw = 1;
int numBatch = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
THCUNN_argCheck(state, !input->is_empty() && (numInputDims == 2 || numInputDims == 3), 2, input,
"2D or 3D (batch mode) tensor expected for input, but got: %s")
if (numInputDims == 3) {
numBatch = THCTensor_(size)(state, input, 0);
planeDim++;
dimw++;
}
int numPlanes = THCTensor_(size)(state, input, planeDim);
int inputW = THCTensor_(size)(state, input, dimw);
int outputW = inputW + padL + padR;
THArgCheck(outputW >= 1, 2,
"input (W: %d) is too small."
" Calculated output W: %d",
inputW, outputW);
THCDeviceTensor<real, 3> devInput;
THCDeviceTensor<real, 3> devOutput;
if (numInputDims == 2) {
THCTensor_(resize2d)(state, output, numPlanes, outputW);
devInput = toDeviceTensor<real, 2>(state, input).upcastOuter<3>();
devOutput = toDeviceTensor<real, 2>(state, output).upcastOuter<3>();
} else {
THCTensor_(resize3d)(state, output, numBatch, numPlanes, outputW);
devInput = toDeviceTensor<real, 3>(state, input);
devOutput = toDeviceTensor<real, 3>(state, output);
}
int outputPlaneSize = devOutput.getSize(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
TemporalReplicationPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devInput, devOutput, padL, padR);
}
void THNN_(TemporalReplicationPadding_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
int padL, int padR) {
THArgCheck(THCTensor_canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(THCTensor_canUse32BitIndexMath(state, gradOutput), 3,
"output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimw = 1;
int numInputDims = THCTensor_(nDimension)(state, input);
if (numInputDims == 3) {
planeDim++;
dimw++;
}
int iwidth = input->size[dimw];
int owidth = iwidth + padL + padR;
THArgCheck(owidth == THCTensor_(size)(state, gradOutput, dimw), 3,
"gradOutput width unexpected. Expected: %d, Got: %d",
owidth, THCTensor_(size)(state, gradOutput, dimw));
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCDeviceTensor<real, 3> devGradInput;
THCDeviceTensor<real, 3> devGradOutput;
if (numInputDims == 2) {
devGradInput = toDeviceTensor<real, 2>(state, gradInput).upcastOuter<3>();
devGradOutput = toDeviceTensor<real, 2>(state, gradOutput).upcastOuter<3>();
} else {
devGradInput = toDeviceTensor<real, 3>(state, gradInput);
devGradOutput = toDeviceTensor<real, 3>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
TemporalReplicationPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, padL, padR);
}
#endif
|
a441ae392ded3dc5b2c9749c8a9594029efed536.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#define BOARD_SIZE 9
#define SUB_BOARD_SIZE 3
#define MAX_NUM_BOARDS 200000 // upper bound on the number of boards kept during BFS expansion
#define NUM_ELEMENTS_PER_BOARD 81
///<summary>Returns an index used to access a 1D array, board[index3D(boardIdx, row, col)] == board[boardIdx][row][col]</summary>
///<param name="boardIdx">Index of board, index3D(4,0,0) will return an index of the first element of the 5th board</param>
///<param name="row">Row to be accessed at board at boardIdx</param>
///<param name="col">Column to be accessed at board at boardIdx</param>
__device__ __host__ inline int index3D(const int boardIdx, const int row, const int col)
{
//Could replace NUM_ELEMENTS_PER_BOARD with BOARD_SIZE * BOARD_SIZE to have fewer preprocessor directives
return boardIdx * NUM_ELEMENTS_PER_BOARD + (row * BOARD_SIZE) + col;
}
///<summary>Returns an index used to access a 1D array, board[index2D(row, col)] == board[row][col]</summary>
///<param name="row">Row to be accessed</param>
///<param name="col">Column to be accessed</param>
__device__ __host__ inline int index2D(const int row, const int col)
{
return row * BOARD_SIZE + col;
}
///<summary>Prints the sudoku grid of BOARD_SIZE * BOARD_SIZE elements</summary>
///<param name="board">A pointer to the first integer of the sudoku board to be printed</param>
__device__ __host__ void printBoard(const int *board)
{
int i;
printf("Sudoku board: \n");
for (i = 0; i < NUM_ELEMENTS_PER_BOARD; i++)
{
if (i % BOARD_SIZE == 0)
printf("\n");
printf("%d ", board[i]);
}
printf("\n");
}
///<summary>
///Resets an array of bools of length n to all false. <para />
///Used for checking the validity of a sudoku board
///</summary>
///<param name="arr">A pointer to the first element of the bool array</param>
///<param name="n">Number of elements in the bool arr</param>
__device__ __host__ void resetBoolArr(bool *arr, int n)
{
int i;
for (i = 0; i < n; i++)
{
arr[i] = false;
}
}
///<summary>Checks if the board is a valid sudoku board (row constraint, column constraint, sub-board constraint)</summary>
///<param name="board">A pointer to the first integer of the sudoku board</param>
///<returns>True if board is valid, false otherwise</returns>
__device__ __host__ bool isBoardValid(const int *board)
{
int i, j;
const int BITMAP_SIZE = BOARD_SIZE + 1;
bool seen[BITMAP_SIZE];
resetBoolArr(seen, BITMAP_SIZE);
//Rows are valid
for (i = 0; i < BOARD_SIZE; i++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (j = 0; j < BOARD_SIZE; j++)
{
int val = board[index2D(i, j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
//Columns are valid
for (i = 0; i < BOARD_SIZE; i++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (j = 0; j < BOARD_SIZE; j++)
{
int val = board[index2D(i, j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
//Sub-boards are valid
for (int row = 0; row < SUB_BOARD_SIZE; row++)
{
for (int col = 0; col < SUB_BOARD_SIZE; col++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (i = 0; i < SUB_BOARD_SIZE; i++)
{
for (j = 0; j < SUB_BOARD_SIZE; j++)
{
int val = board[index2D(row * SUB_BOARD_SIZE + i, col * SUB_BOARD_SIZE + j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
}
}
return true;
}
///<summary>Checks if the board is a valid sudoku board, but is optimized such that it only checks row/col/sub-board of the changed element</summary>
///<param name="board">A pointer to the first integer of the sudoku board</param>
///<param name="changedRow">Index of the changed row</param>
///<param name="changedCol">Index of the changed column</param>
///<returns>True if board is valid, false otherwise</returns>
__device__ bool isBoardValid(const int *board, int changedRow, int changedCol)
{
const int BITMAP_SIZE = BOARD_SIZE + 1;
bool seen[BITMAP_SIZE];
resetBoolArr(seen, BITMAP_SIZE);
if (changedRow < 0 || changedCol < 0)
{
return isBoardValid(board); // nothing was changed
}
if (board[index2D(changedRow, changedCol)] < 1 || board[index2D(changedRow, changedCol)] > 9)
{
return false;
}
//Changed row is still valid
for (int i = 0; i < BOARD_SIZE; i++)
{
int val = board[index2D(changedRow, i)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
resetBoolArr(seen, BITMAP_SIZE);
//Changed column is still valid
for (int i = 0; i < BOARD_SIZE; i++)
{
int val = board[index2D(i, changedCol)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
resetBoolArr(seen, BITMAP_SIZE);
int r = changedRow / SUB_BOARD_SIZE;
int c = changedCol / SUB_BOARD_SIZE;
//Changed sub-board is still valid
for (int i = 0; i < SUB_BOARD_SIZE; i++)
{
for (int j = 0; j < SUB_BOARD_SIZE; j++)
{
int val = board[index2D(r * SUB_BOARD_SIZE + i, c * SUB_BOARD_SIZE + j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
return true;
}
///<summary>
///Kernel function that finds new valid boards given a set of old boards. <para />
///Each thread works on its own old board and attempts to set some new values in the board's empty fields, <para />
///if the new value is valid it copies the whole newly found valid sudoku board to newBoards.<para />
///Once the board is copied we work on another old board at an index offset by the total number of threads in the program such that no 2 threads will work
///on the same board at the same time. <para />
///This function is essentially performing BFS (breadth-first search) because it searches the sudoku board "from left to right", i.e.
///it searches the first empty elements unlike DFS (depth-first search) which would check the last elements in the sudoku board first. <para />
///This function should be called by alternating the pointers "oldBoards" and "newBoards",
///such that the newly found boards in one iteration will become the boards to be processed in the next iteration.
///</summary>
///<param name = "oldBoards">A pointer to the first element of the array of boards to be processed, size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "newBoards">A pointer to the first element of the array of newly found boards using BFS, size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "emptyFields">A pointer to the first element of the array that stores the 2D indices of empty fields of a given board,<para />
///size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "numOfOldBoards">Number of boards in oldBoards, used so we know when to finish the loop</param>
///<param name = "boardIndex">Pointer to a counter that is atomically incremented for every new board written to newBoards</param>
///<param name = "numOfEmptyFields">Number of empty fields at a given board index, numOfEmptyFields[3] == 10 means there are 10 empty fields in the 4th board</param>
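///<remarks>
///Illustrative example: if the first empty cell of a given old board admits the candidate values 2, 5 and 9,
///the thread owning that board writes three new boards into newBoards (one per candidate), each claiming its
///slot through atomicAdd on boardIndex; the next BFS iteration then expands those boards at their own first
///empty cell.
///</remarks>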
__global__ void createPartialSolutionUsingBFS(
int *oldBoards,
int *newBoards,
int *emptyFields,
const int numOfOldBoards,
int *boardIndex,
int *numOfEmptyFields)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x; // represents the current sudoku board index
//We have this condition so we do not overwrite the new set of valid boards
while (tid < numOfOldBoards)
{
bool foundNewBoard = false;
// This loop starts at the first index of the board at index tid and loops through the whole board until its last element
// For example, if tid == 2 and BOARD_SIZE == 9 then "i" will range from [162, 242] since index3D(2, 8, 8) = 2 * 81 + 8 * 9 + 8 = 242
// Therefore the range [162, 242] represents all the indices of elements belonging to the third sudoku board
// If we wish to pass a pointer to the first element of the board at index tid, we can do ptr + index3D(tid, 0, 0) which is 0 + tid * NUM_ELEMENTS_PER_BOARD + 0 * 0 + 0
for (int i = index3D(tid, 0, 0); i <= index3D(tid, BOARD_SIZE - 1, BOARD_SIZE - 1) && !foundNewBoard; i++)
{
if (oldBoards[i] == 0)
{
foundNewBoard = true;
for (int possibleValue = 1; possibleValue <= BOARD_SIZE; possibleValue++)
{
bool foundNewValidValue = false;
oldBoards[i] = possibleValue;
//We need to decode the row and column given a 3D index
int temp = i - BOARD_SIZE * BOARD_SIZE * tid; //Substract the current index by the total amount of elements in all previous boards
int r = temp / BOARD_SIZE;
int c = temp % BOARD_SIZE;
if (isBoardValid(oldBoards + index3D(tid, 0, 0), r, c))
{
foundNewValidValue = true;
}
else
{
oldBoards[i] = 0;
}
if (foundNewValidValue)
{
//We found a new valid sudoku value, so we copy the board to newBoards and find the indices of empty fields
//emptyFields will help us in sudokuBacktrack() where we will skip to the first empty field in the board
int nextBoardIndex = atomicAdd(boardIndex, 1);
//printf("NBI: %d", nextBoardIndex);
int currentEmptyIndex = 0;
for (int row = 0; row < BOARD_SIZE; row++)
{
for (int col = 0; col < BOARD_SIZE; col++)
{
newBoards[index3D(nextBoardIndex, row, col)] = oldBoards[index3D(tid, row, col)];
if (oldBoards[index3D(tid, row, col)] == 0)
{
emptyFields[index3D(nextBoardIndex, 0, currentEmptyIndex)] = index2D(row, col);
currentEmptyIndex++;
}
}
}
numOfEmptyFields[nextBoardIndex] = currentEmptyIndex;
}
}
}
}
tid += blockDim.x * gridDim.x; // offset by total number of threads in a given block
}
}
///<summary>
///Kernel function that makes each thread in parallel run the sudoku backtracking algorithm, described here: https://en.wikipedia.org/wiki/Sudoku_solving_algorithms#Backtracking <para />
///When one thread finds the solution, it sets the "finished" flag to 1 and all other threads will be notified
///</summary>
///<param name = "boards">Pointer to the first integer of the array of boards to run backtracking on. Size is MAX_NUM_OF_ELEMENTS_PER_BOARD * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "numOfBoards">Number of boards of size NUM_OF_ELEMENTS_PER_BOARD in the "boards" array.</param>
///<param name = "numOfEmptyFields">Number of empty fields at a given board index, numOfEmptyFields[3] == 10 means there are 10 empty fields in the 4th board</param>
///<param name = "finished">Pointer to a single integer, if the value at of that int is 0 then board is not yet solved, 1 if it is solved.</param>
///<param name = "solvedBoard">Pointer to the first integer of the array of size NUM_OF_ELEMENTS_PER_BOARD, stores the solved sudoku board</param>
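///<remarks>
///Illustrative example: with empty fields e0, e1, ... a thread increments the value at e0 until the board is
///valid, then advances to e1; if every candidate 1..9 fails at e1 it resets e1 to 0 and steps back to e0 to
///try its next value, and so on until emptyIndex reaches numOfEmptyFields (board solved) or drops below 0
///(no solution reachable from this board).
///</remarks>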
__global__ void sudokuBacktrack(
int *boards,
const int numOfBoards,
int *emptyFields,
int *numOfEmptyFields,
int *finished,
int *solvedBoard)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int numOfEmptyFieldsInThisBoard;
while ((*finished == 0) && tid < numOfBoards)
{
numOfEmptyFieldsInThisBoard = numOfEmptyFields[tid];
int emptyIndex = 0;
while (emptyIndex >= 0 && (emptyIndex < numOfEmptyFieldsInThisBoard))
{
int row = emptyFields[index3D(tid, 0, emptyIndex)] / BOARD_SIZE;
int col = emptyFields[index3D(tid, 0, emptyIndex)] % BOARD_SIZE;
// Increment the value of the empty field until it is valid
boards[index3D(tid, row, col)]++;
if (!isBoardValid(boards + index3D(tid, 0, 0), row, col))
{
if (boards[index3D(tid, row, col)] >= BOARD_SIZE)
{
//If we have tried all possible values we backtrack to the last empty field we changed and try a different value for it
boards[index3D(tid, row, col)] = 0;
emptyIndex--;
}
}
else
{
// We have found a valid value for this field so we move forward in the backtracking algorithm
emptyIndex++;
}
if (emptyIndex == numOfEmptyFieldsInThisBoard)
{
// We have filled all empty fields in the board with valid values so we have solved the board
*finished = 1;
//printf("Thread at index %d has solved the board \n", tid);
//Copy board to solvedBoard, which will later be copied back the host
for (int r = 0; r < BOARD_SIZE; r++)
{
for (int c = 0; c < BOARD_SIZE; c++)
{
solvedBoard[index2D(r, c)] = boards[index3D(tid, r, c)];
}
}
}
}
tid += gridDim.x * blockDim.x;
}
}
///<summary>
///Initializes all data needed to call createPartialSolutionUsingBFS() and sudokuBacktrack()
///Then it runs the algorithm and prints the solved board
///</summary>
///<param name="numThreadsPerBlk">Number of threads in a block that will work</param>
///<param name="numBlocks">Number of total thread blocks that will work</param>
///<param name="inputBoard">Array of ints that stores the input board that we wish to solve</param>
///<returns>A value of type hipError_t. hipSuccess if no errors occured, hipError_t otherwise</returns>
hipError_t runParallelSudoku(
const int numThreadsPerBlk,
const int numBlocks,
int *inputBoard,
const char* boardName)
{
hipError_t cudaStatus; // The return value of CUDA-library functions
int bfsIterations = 20; // Number of times to run BFS to find some new valid boards
int bfsBoardCount = 0; // The number of new boards we have found after a call to createPartialSolutionUsingBFS()
int *boardIndex; // Must start at 0 every time
// The meaning of the variables below has been described in the comments above createPartialSolutionUsingBFS() and sudokuBacktrack()
int *numOfEmptyFields;
int *finished = nullptr;
int *newBoards;
int *oldBoards;
int *solvedBoard;
int *dev_solvedBoard;
int *emptyFields;
int initialNumOfBoards = 1;
solvedBoard = new int[NUM_ELEMENTS_PER_BOARD];
memset(solvedBoard, 0, NUM_ELEMENTS_PER_BOARD * sizeof(int));
//Allocate memory for our boards
cudaStatus = hipMalloc(&newBoards, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for new boards! ");
goto Error;
}
cudaStatus = hipMalloc(&oldBoards, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for old boards! ");
goto Error;
}
cudaStatus = hipMalloc(&numOfEmptyFields, MAX_NUM_BOARDS * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for numOfEmptyFields! ");
goto Error;
}
cudaStatus = hipMalloc(&emptyFields, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for numOfEmptyFields! ");
goto Error;
}
cudaStatus = hipMalloc(&boardIndex, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for boardIndex! ");
goto Error;
}
//Set memory to all zeros
cudaStatus = hipMemset(newBoards, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for new boards! ");
goto Error;
}
cudaStatus = hipMemset(oldBoards, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for old boards! ");
goto Error;
}
cudaStatus = hipMemset(boardIndex, 0, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for boardIndex! ");
goto Error;
}
cudaStatus = hipMemset(emptyFields, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for emptyFields! ");
goto Error;
}
cudaStatus = hipMemset(numOfEmptyFields, 0, MAX_NUM_BOARDS * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for numOfEmptyFields! ");
goto Error;
}
//Copy input board to oldBoards:
cudaStatus = hipMemcpy(oldBoards, inputBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed for inputBoard -> oldBoards! ");
goto Error;
}
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(oldBoards, newBoards, emptyFields, initialNumOfBoards, boardIndex, numOfEmptyFields);
for (int i = 0; i < bfsIterations; i++)
{
cudaStatus = hipMemcpy(&bfsBoardCount, boardIndex, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed for boardIndex -> bfsBoardCount! on iteration %d", i);
goto Error;
}
printf("Number of new boards found after iteration %d: %d\n", i, bfsBoardCount);
if (bfsBoardCount > MAX_NUM_BOARDS)
{
std::cout << "Too many boards found in BFS, " << bfsBoardCount << " is greater than " << MAX_NUM_BOARDS << std::endl;
bfsBoardCount = MAX_NUM_BOARDS;
break;
}
cudaStatus = hipMemset(boardIndex, 0, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for boardIndex! on iteration %d", i);
goto Error;
}
if (i % 2 == 0)
{
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(newBoards, oldBoards, emptyFields, bfsBoardCount, boardIndex, numOfEmptyFields);
}
else
{
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(oldBoards, newBoards, emptyFields, bfsBoardCount, boardIndex, numOfEmptyFields);
}
}
/////////////////////////////////////////////
/////Done with BFS, now we run backtrack/////
/////////////////////////////////////////////
cudaStatus = hipMemcpy(&bfsBoardCount, boardIndex, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed for boardIndex -> bfsBoardCount! Before sudoku backtrack");
goto Error;
}
cudaStatus = hipMalloc(&finished, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for finished! ");
goto Error;
}
cudaStatus = hipMalloc(&dev_solvedBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed for dev_solvedBoard! ");
goto Error;
}
cudaStatus = hipMemset(finished, 0, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemset failed for finished! ");
goto Error;
}
if (bfsIterations % 2 == 1)
{
newBoards = oldBoards;
}
sudokuBacktrack << <numBlocks, numThreadsPerBlk >> >(newBoards, bfsBoardCount, emptyFields, numOfEmptyFields, finished, dev_solvedBoard);
//Get solved board
cudaStatus = hipMemcpy(solvedBoard, dev_solvedBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed for dev_solvedBoard -> solvedBoard! ");
goto Error;
}
printf("Solved %s! \n", boardName);
printBoard(solvedBoard);
std::cout << "Is solved board valid? " << (isBoardValid(solvedBoard) ? "yes" : "no") << std::endl;
Error:
hipFree(finished);
hipFree(dev_solvedBoard);
hipFree(newBoards);
hipFree(oldBoards);
hipFree(emptyFields);
hipFree(boardIndex);
hipFree(numOfEmptyFields);
delete[] solvedBoard;
return cudaStatus;
}
int main(int argc, char** argv)
{
// examples from http://www.websudoku.com
int easyInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,6,0,3,0,0,8,0,4,
5,3,7,0,9,0,0,0,0,
0,4,0,0,0,6,3,0,7,
0,9,0,0,5,1,2,3,8,
0,0,0,0,0,0,0,0,0,
7,1,3,6,2,0,0,4,0,
0,0,0,0,6,0,5,2,3,
1,0,2,0,0,9,0,8,0,
3,0,6,0,0,2,0,0,0 };
int mediumInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,9,7,0,0,0,0,0,0,
0,0,0,0,7,0,0,0,3,
0,0,2,0,1,6,0,0,9,
0,5,8,0,2,9,3,0,0,
1,0,0,4,0,7,0,0,8,
0,0,4,3,8,0,9,5,0,
8,0,0,2,6,0,1,0,0,
9,0,0,0,4,0,0,0,0,
0,0,0,0,0,0,6,7,0 };
int hardInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,5,0,
0,4,5,0,0,1,0,0,0,
7,0,0,0,2,0,4,0,1,
0,9,0,1,0,7,2,0,0,
3,0,0,0,0,0,0,0,4,
0,0,4,6,0,3,0,8,0,
8,0,6,0,5,0,0,0,3,
0,0,0,3,0,0,5,7,0,
0,3,0,0,0,0,0,0,0 };
int veryHardInputBoard[NUM_ELEMENTS_PER_BOARD] = {
3,0,0,0,0,2,0,0,0,
0,4,6,0,0,0,0,0,0,
0,0,7,3,5,0,0,2,0,
5,0,0,0,6,1,0,0,0,
0,6,0,0,0,0,0,1,0,
0,0,0,4,7,0,0,0,2,
0,9,0,0,3,5,8,0,0,
0,0,0,0,0,0,9,5,0,
0,0,0,8,0,0,0,0,4 };
int allZeros[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0 };
int hardForBruteForce[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,0,8,5,
0,0,1,0,2,0,0,0,0,
0,0,0,5,0,7,0,0,0,
0,0,4,0,0,0,1,0,0,
0,9,0,0,0,0,0,0,0,
5,0,0,0,0,0,0,7,3,
0,0,2,0,1,0,0,0,0,
0,0,0,0,4,0,0,0,9
};
if (argc != 3)
{
printf("Usage: argv[1] is threads per block, argv[2] is num of blocks\n");
return 0;
}
const int threadsPerBlock = atoi(argv[1]);
const int maxBlocks = atoi(argv[2]);
printf("Threads per block: %d, num of blocks: %d \n", threadsPerBlock, maxBlocks);
printf("Easy board: \n");
printBoard(easyInputBoard);
std::cout << "Is board valid? " << (isBoardValid(easyInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Medium board: \n");
printBoard(mediumInputBoard);
std::cout << "Is medium board valid? " << (isBoardValid(mediumInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Hard board: \n");
printBoard(hardInputBoard);
std::cout << "Is hard board valid? " << (isBoardValid(hardInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Very hard board: \n");
printBoard(veryHardInputBoard);
std::cout << "Is very hard board valid? " << (isBoardValid(veryHardInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Hard for brute force board: \n");
printBoard(hardForBruteForce);
std::cout << "Is hard for brute force board valid? " << (isBoardValid(hardForBruteForce) ? "yes" : "no") << std::endl;
/////////////
hipError_t cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
runParallelSudoku(threadsPerBlock, maxBlocks, easyInputBoard, "Easy board");
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for easy board: %s\n", hipGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, mediumInputBoard, "Medium board");
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for medium board: %s\n", hipGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, hardInputBoard, "Hard for human board");
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for hard board: %s\n", hipGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, veryHardInputBoard, "Very hard for human board");
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for very hard: %s\n", hipGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, allZeros, "All zeros board");
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for all zeros: %s\n", hipGetErrorString(cudaStatus));
}
/*
runParallelSudoku(threadsPerBlock, maxBlocks, hardForBruteForce, "Hard for brute force board");
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "kernel launch failed for hard for brute force board: %s\n", hipGetErrorString(cudaStatus));
}
*/
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| a441ae392ded3dc5b2c9749c8a9594029efed536.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#define BOARD_SIZE 9
#define SUB_BOARD_SIZE 3
#define MAX_NUM_BOARDS 200000 // upper bound on the number of boards kept during BFS expansion
#define NUM_ELEMENTS_PER_BOARD 81
///<summary>Returns an index used to access a 1D array, board[index3D(boardIdx, row, col)] == board[boardIdx][row][col]</summary>
///<param name="boardIdx">Index of board, index3D(4,0,0) will return an index of the first element of the 5th board</param>
///<param name="row">Row to be accessed at board at boardIdx</param>
///<param name="col">Column to be accessed at board at boardIdx</param>
__device__ __host__ inline int index3D(const int boardIdx, const int row, const int col)
{
//Could replace NUM_ELEMENTS_PER_BOARD with BOARD_SIZE * BOARD_SIZE to have fewer preprocessor directives
return boardIdx * NUM_ELEMENTS_PER_BOARD + (row * BOARD_SIZE) + col;
}
///<summary>Returns an index used to access a 1D array, board[index2D(row, col)] == board[row][col]</summary>
///<param name="row">Row to be accessed</param>
///<param name="col">Column to be accessed</param>
__device__ __host__ inline int index2D(const int row, const int col)
{
return row * BOARD_SIZE + col;
}
///<summary>Prints the sudoku grid of BOARD_SIZE * BOARD_SIZE elements</summary>
///<param name="board">A pointer to the first integer of the sudoku board to be printed</param>
__device__ __host__ void printBoard(const int *board)
{
int i;
printf("Sudoku board: \n");
for (i = 0; i < NUM_ELEMENTS_PER_BOARD; i++)
{
if (i % BOARD_SIZE == 0)
printf("\n");
printf("%d ", board[i]);
}
printf("\n");
}
///<summary>
///Resets an array of bools of length n to all false. <para />
///Used for checking the validity of a sudoku board
///</summary>
///<param name="arr">A pointer to the first element of the bool array</param>
///<param name="n">Number of elements in the bool arr</param>
__device__ __host__ void resetBoolArr(bool *arr, int n)
{
int i;
for (i = 0; i < n; i++)
{
arr[i] = false;
}
}
///<summary>Checks if the board is a valid sudoku board (row constraint, column constraint, sub-board constraint)</summary>
///<param name="board">A pointer to the first integer of the sudoku board</param>
///<returns>True if board is valid, false otherwise</returns>
__device__ __host__ bool isBoardValid(const int *board)
{
int i, j;
const int BITMAP_SIZE = BOARD_SIZE + 1;
bool seen[BITMAP_SIZE];
resetBoolArr(seen, BITMAP_SIZE);
//Rows are valid
for (i = 0; i < BOARD_SIZE; i++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (j = 0; j < BOARD_SIZE; j++)
{
int val = board[index2D(i, j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
//Columns are valid
for (i = 0; i < BOARD_SIZE; i++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (j = 0; j < BOARD_SIZE; j++)
{
int val = board[index2D(i, j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
//Sub-boards are valid
for (int row = 0; row < SUB_BOARD_SIZE; row++)
{
for (int col = 0; col < SUB_BOARD_SIZE; col++)
{
resetBoolArr(seen, BITMAP_SIZE);
for (i = 0; i < SUB_BOARD_SIZE; i++)
{
for (j = 0; j < SUB_BOARD_SIZE; j++)
{
int val = board[index2D(row * SUB_BOARD_SIZE + i, col * SUB_BOARD_SIZE + j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
}
}
return true;
}
///<summary>Checks if the board is a valid sudoku board, but is optimized such that it only checks row/col/sub-board of the changed element</summary>
///<param name="board">A pointer to the first integer of the sudoku board</param>
///<param name="changedRow">Index of the changed row</param>
///<param name="changedCol">Index of the changed column</param>
///<returns>True if board is valid, false otherwise</returns>
__device__ bool isBoardValid(const int *board, int changedRow, int changedCol)
{
const int BITMAP_SIZE = BOARD_SIZE + 1;
bool seen[BITMAP_SIZE];
resetBoolArr(seen, BITMAP_SIZE);
if (changedRow < 0 || changedCol < 0)
{
return isBoardValid(board); // nothing was changed
}
if (board[index2D(changedRow, changedCol)] < 1 || board[index2D(changedRow, changedCol)] > 9)
{
return false;
}
//Changed row is still valid
for (int i = 0; i < BOARD_SIZE; i++)
{
int val = board[index2D(changedRow, i)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
resetBoolArr(seen, BITMAP_SIZE);
//Changed column is still valid
for (int i = 0; i < BOARD_SIZE; i++)
{
int val = board[index2D(i, changedCol)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
resetBoolArr(seen, BITMAP_SIZE);
int r = changedRow / SUB_BOARD_SIZE;
int c = changedCol / SUB_BOARD_SIZE;
//Changed sub-board is still valid
for (int i = 0; i < SUB_BOARD_SIZE; i++)
{
for (int j = 0; j < SUB_BOARD_SIZE; j++)
{
int val = board[index2D(r * SUB_BOARD_SIZE + i, c * SUB_BOARD_SIZE + j)];
if (val != 0)
{
if (seen[val])
{
return false;
}
else
{
seen[val] = true;
}
}
}
}
return true;
}
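//Note: checking only the changed row, column and sub-board is sufficient here because both callers
//(createPartialSolutionUsingBFS and sudokuBacktrack) modify exactly one cell of a board that was valid
//before the change, so any new conflict must involve that cell.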
///<summary>
///Kernel function that finds new valid boards given a set of old boards. <para />
///Each threads works on its own old board and attempts to set some new values in the board's empty fields, <para />
///if the new value is valid it copies the whole newly found valid sudoku board to newBoards.<para />
///Once the board is copied we work on another old board at an index offset by the total number of threads in the program such that no 2 threads will work
///on the same board at the same time. <para />
///This function is essentially performing BFS (breadth-first search) because it searches the sudoku board "from left to right", i.e.
///it searches the first empty elements unlike DFS (depth-first search) which would check the last elements in the sudoku board first. <para />
///This function should be called by alternating the pointers "oldBoards" and "newBoards",
///such that the newly found boards in one iteration will become the boards to be processed in the next iteration.
///</summary>
///<param name = "oldBoards">A pointer to the first element of the array of boards to be processed, size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "newBoards">A pointer to the first element of the array of newly found boards using BFS, size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "emptyFields">A pointer to the first element of the array that stores the 2D indices of empty fields of a given board,<para />
///size of array is MAX_NUM_OF_BOARDS * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "numOfOldBoards">Number of boards in oldBoards, used so we know when to finish the loop</param>
///<param name = "boardIndex">Number of boards in oldBoards, used so we know when to finish looping</param>
///<param name = "numOfEmptyFields">Number of empty fields at a given board index, numOfEmptyFields[3] == 10 means there is 10 empty fields in the 4th board</param>
__global__ void createPartialSolutionUsingBFS(
int *oldBoards,
int *newBoards,
int *emptyFields,
const int numOfOldBoards,
int *boardIndex,
int *numOfEmptyFields)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x; // represents the current sudoku board index
//Grid-stride loop: each thread handles old boards at indices tid, tid + stride, ... until all numOfOldBoards boards are processed
while (tid < numOfOldBoards)
{
bool foundNewBoard = false;
// This loop starts at the first index of the board at index tid and loops through the whole board until its last element
// For example, if tid == 2 and BOARD_SIZE == 9 then "i" will range from [162, 242] since index3D(2, 8, 8) = 2 * 81 + 8 * 9 + 8 = 242
// Therefore the range [162, 242] represents all the indices of elements belonging to the third sudoku board
// If we wish to pass a pointer to the first element of the board at index tid, we can do ptr + index3D(tid, 0, 0) which is 0 + tid * NUM_ELEMENTS_PER_BOARD + 0 * 0 + 0
for (int i = index3D(tid, 0, 0); i <= index3D(tid, BOARD_SIZE - 1, BOARD_SIZE - 1) && !foundNewBoard; i++)
{
if (oldBoards[i] == 0)
{
foundNewBoard = true;
for (int possibleValue = 1; possibleValue <= BOARD_SIZE; possibleValue++)
{
bool foundNewValidValue = false;
oldBoards[i] = possibleValue;
//We need to decode the row and column given a 3D index
int temp = i - BOARD_SIZE * BOARD_SIZE * tid; //Subtract the total number of elements in all previous boards from the current index
int r = temp / BOARD_SIZE;
int c = temp % BOARD_SIZE;
if (isBoardValid(oldBoards + index3D(tid, 0, 0), r, c))
{
foundNewValidValue = true;
}
else
{
oldBoards[i] = 0;
}
if (foundNewValidValue)
{
//We found a new valid sudoku value, so we copy the board to newBoards and find the indices of empty fields
//emptyFields will help us in sudokuBacktrack() where we will skip to the first empty field in the board
int nextBoardIndex = atomicAdd(boardIndex, 1);
//printf("NBI: %d", nextBoardIndex);
int currentEmptyIndex = 0;
for (int row = 0; row < BOARD_SIZE; row++)
{
for (int col = 0; col < BOARD_SIZE; col++)
{
newBoards[index3D(nextBoardIndex, row, col)] = oldBoards[index3D(tid, row, col)];
if (oldBoards[index3D(tid, row, col)] == 0)
{
emptyFields[index3D(nextBoardIndex, 0, currentEmptyIndex)] = index2D(row, col);
currentEmptyIndex++;
}
}
}
numOfEmptyFields[nextBoardIndex] = currentEmptyIndex;
}
}
}
}
tid += blockDim.x * gridDim.x; // advance by the total number of threads in the grid (grid-stride loop)
}
}
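//Intended call pattern (a sketch of what runParallelSudoku() below does): launch once with
//(oldBoards -> newBoards), then alternate the roles of the two buffers on every BFS iteration,
//resetting *boardIndex to 0 on the host before each launch; "count" below stands for the number
//of boards produced by the previous launch:
// createPartialSolutionUsingBFS<<<numBlocks, numThreadsPerBlk>>>(newBoards, oldBoards, emptyFields, count, boardIndex, numOfEmptyFields);
// createPartialSolutionUsingBFS<<<numBlocks, numThreadsPerBlk>>>(oldBoards, newBoards, emptyFields, count, boardIndex, numOfEmptyFields);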
///<summary>
///Kernel function that makes each thread in parallel run the sudoku backtracking algorithm, described here: https://en.wikipedia.org/wiki/Sudoku_solving_algorithms#Backtracking <para />
///When one thread finds the solution, it sets the "finished" flag to 1 and all other threads will be notified
///</summary>
///<param name = "boards">Pointer to the first integer of the array of boards to run backtracking on. Size is MAX_NUM_OF_ELEMENTS_PER_BOARD * NUM_OF_ELEMENTS_PER_BOARD</param>
///<param name = "numOfBoards">Number of boards of size NUM_OF_ELEMENTS_PER_BOARD in the "boards" array.</param>
///<param name = "numOfEmptyFields">Number of empty fields at a given board index, numOfEmptyFields[3] == 10 means there is 10 empty fields in the 4th board</param>
///<param name = "finished">Pointer to a single integer, if the value at of that int is 0 then board is not yet solved, 1 if it is solved.</param>
///<param name = "solvedBoard">Pointer to the first integer of the array of size NUM_OF_ELEMENTS_PER_BOARD, stores the solved sudoku board</param>
__global__ void sudokuBacktrack(
int *boards,
const int numOfBoards,
int *emptyFields,
int *numOfEmptyFields,
int *finished,
int *solvedBoard)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int numOfEmptyFieldsInThisBoard;
while ((*finished == 0) && tid < numOfBoards)
{
numOfEmptyFieldsInThisBoard = numOfEmptyFields[tid];
int emptyIndex = 0;
while (emptyIndex >= 0 && (emptyIndex < numOfEmptyFieldsInThisBoard))
{
int row = emptyFields[index3D(tid, 0, emptyIndex)] / BOARD_SIZE;
int col = emptyFields[index3D(tid, 0, emptyIndex)] % BOARD_SIZE;
// Increment the value of the empty field until it is valid
boards[index3D(tid, row, col)]++;
if (!isBoardValid(boards + index3D(tid, 0, 0), row, col))
{
if (boards[index3D(tid, row, col)] >= BOARD_SIZE)
{
//If we have tried all possible values we backtrack to the last empty field we changed and try a different value for it
boards[index3D(tid, row, col)] = 0;
emptyIndex--;
}
}
else
{
// We have found a valid value for this field so we move forward in the backtracking algorithm
emptyIndex++;
}
if (emptyIndex == numOfEmptyFieldsInThisBoard)
{
// We have filled all empty fields in the board with valid values so we have solved the board
*finished = 1;
//printf("Thread at index %d has solved the board \n", tid);
//Copy board to solvedBoard, which will later be copied back the host
for (int r = 0; r < BOARD_SIZE; r++)
{
for (int c = 0; c < BOARD_SIZE; c++)
{
solvedBoard[index2D(r, c)] = boards[index3D(tid, r, c)];
}
}
}
}
tid += gridDim.x * blockDim.x;
}
}
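//If emptyIndex drops below 0, every candidate value for the first empty field has been exhausted,
//i.e. this particular partial board has no solution, and the thread simply advances to its next board.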
///<summary>
///Initializes all data needed to call createPartialSolutionUsingBFS() and sudokuBacktrack()
///Then it runs the algorithm and prints the solved board
///</summary>
///<param name="numThreadsPerBlk">Number of threads in a block that will work</param>
///<param name="numBlocks">Number of total thread blocks that will work</param>
///<param name="inputBoard">Array of ints that stores the input board that we wish to solve</param>
///<param name="boardName">Human-readable name of the board, used only for printing</param>
///<returns>A value of type cudaError_t. cudaSuccess if no errors occurred, a cudaError otherwise</returns>
cudaError_t runParallelSudoku(
const int numThreadsPerBlk,
const int numBlocks,
int *inputBoard,
const char* boardName)
{
cudaError_t cudaStatus; // The return value of CUDA-library functions
int bfsIterations = 20; // Number of times to run BFS to find some new valid boards
int bfsBoardCount = 0; // The number of new boards we have found after a call to createPartialSolutionUsingBFS()
int *boardIndex; // Must start at 0 every time
// The meaning of the variables below has been described in the comments above createPartialSolutionUsingBFS() and sudokuBacktrack()
int *numOfEmptyFields;
int *finished = nullptr;
int *newBoards;
int *oldBoards;
int *solvedBoard;
int *dev_solvedBoard;
int *emptyFields;
int initialNumOfBoards = 1;
solvedBoard = new int[NUM_ELEMENTS_PER_BOARD];
memset(solvedBoard, 0, NUM_ELEMENTS_PER_BOARD * sizeof(int));
//Allocate memory for our boards
cudaStatus = cudaMalloc(&newBoards, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for new boards! ");
goto Error;
}
cudaStatus = cudaMalloc(&oldBoards, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for old boards! ");
goto Error;
}
cudaStatus = cudaMalloc(&numOfEmptyFields, MAX_NUM_BOARDS * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for numOfEmptyFields! ");
goto Error;
}
cudaStatus = cudaMalloc(&emptyFields, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for numOfEmptyFields! ");
goto Error;
}
cudaStatus = cudaMalloc(&boardIndex, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for boardIndex! ");
goto Error;
}
//Set memory to all zeros
cudaStatus = cudaMemset(newBoards, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for new boards! ");
goto Error;
}
cudaStatus = cudaMemset(oldBoards, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for old boards! ");
goto Error;
}
cudaStatus = cudaMemset(boardIndex, 0, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for boardIndex! ");
goto Error;
}
cudaStatus = cudaMemset(emptyFields, 0, MAX_NUM_BOARDS * NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for emptyFields! ");
goto Error;
}
cudaStatus = cudaMemset(numOfEmptyFields, 0, MAX_NUM_BOARDS * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for numOfEmptyFields! ");
goto Error;
}
//Copy input board to oldBoards:
cudaStatus = cudaMemcpy(oldBoards, inputBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed for inputBoard -> oldBoards! ");
goto Error;
}
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(oldBoards, newBoards, emptyFields, initialNumOfBoards, boardIndex, numOfEmptyFields);
for (int i = 0; i < bfsIterations; i++)
{
cudaStatus = cudaMemcpy(&bfsBoardCount, boardIndex, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed for boardIndex -> bfsBoardCount! on iteration %d", i);
goto Error;
}
printf("Number of new boards found after iteration %d: %d\n", i, bfsBoardCount);
if (bfsBoardCount > MAX_NUM_BOARDS)
{
std::cout << "Too many boards found in BFS, " << bfsBoardCount << " is greater than " << MAX_NUM_BOARDS << std::endl;
bfsBoardCount = MAX_NUM_BOARDS;
break;
}
cudaStatus = cudaMemset(boardIndex, 0, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for boardIndex! on iteration %d", i);
goto Error;
}
if (i % 2 == 0)
{
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(newBoards, oldBoards, emptyFields, bfsBoardCount, boardIndex, numOfEmptyFields);
}
else
{
createPartialSolutionUsingBFS << <numBlocks, numThreadsPerBlk >> >(oldBoards, newBoards, emptyFields, bfsBoardCount, boardIndex, numOfEmptyFields);
}
}
/////////////////////////////////////////////
/////Done with BFS, now we run backtrack/////
/////////////////////////////////////////////
cudaStatus = cudaMemcpy(&bfsBoardCount, boardIndex, sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed for boardIndex -> bfsBoardCount! Before sudoku backtrack");
goto Error;
}
cudaStatus = cudaMalloc(&finished, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for finished! ");
goto Error;
}
cudaStatus = cudaMalloc(&dev_solvedBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed for dev_solvedBoard! ");
goto Error;
}
cudaStatus = cudaMemset(finished, 0, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemset failed for finished! ");
goto Error;
}
if (bfsIterations % 2 == 1)
{
//The last BFS iteration wrote its results into oldBoards, so swap the two pointers
//(rather than aliasing them) so that the cudaFree calls below remain valid
int *tmpBoards = newBoards;
newBoards = oldBoards;
oldBoards = tmpBoards;
}
sudokuBacktrack << <numBlocks, numThreadsPerBlk >> >(newBoards, bfsBoardCount, emptyFields, numOfEmptyFields, finished, dev_solvedBoard);
//Get solved board
cudaStatus = cudaMemcpy(solvedBoard, dev_solvedBoard, NUM_ELEMENTS_PER_BOARD * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed for dev_solvedBoard -> solvedBoard! ");
goto Error;
}
printf("Solved %s! \n", boardName);
printBoard(solvedBoard);
std::cout << "Is solved board valid? " << (isBoardValid(solvedBoard) ? "yes" : "no") << std::endl;
Error:
cudaFree(finished);
cudaFree(dev_solvedBoard);
cudaFree(newBoards);
cudaFree(oldBoards);
cudaFree(emptyFields);
cudaFree(boardIndex);
cudaFree(numOfEmptyFields);
delete[] solvedBoard; // allocated with new[]
return cudaStatus;
}
int main(int argc, char** argv)
{
// examples from http://www.websudoku.com
int easyInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,6,0,3,0,0,8,0,4,
5,3,7,0,9,0,0,0,0,
0,4,0,0,0,6,3,0,7,
0,9,0,0,5,1,2,3,8,
0,0,0,0,0,0,0,0,0,
7,1,3,6,2,0,0,4,0,
0,0,0,0,6,0,5,2,3,
1,0,2,0,0,9,0,8,0,
3,0,6,0,0,2,0,0,0 };
int mediumInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,9,7,0,0,0,0,0,0,
0,0,0,0,7,0,0,0,3,
0,0,2,0,1,6,0,0,9,
0,5,8,0,2,9,3,0,0,
1,0,0,4,0,7,0,0,8,
0,0,4,3,8,0,9,5,0,
8,0,0,2,6,0,1,0,0,
9,0,0,0,4,0,0,0,0,
0,0,0,0,0,0,6,7,0 };
int hardInputBoard[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,5,0,
0,4,5,0,0,1,0,0,0,
7,0,0,0,2,0,4,0,1,
0,9,0,1,0,7,2,0,0,
3,0,0,0,0,0,0,0,4,
0,0,4,6,0,3,0,8,0,
8,0,6,0,5,0,0,0,3,
0,0,0,3,0,0,5,7,0,
0,3,0,0,0,0,0,0,0 };
int veryHardInputBoard[NUM_ELEMENTS_PER_BOARD] = {
3,0,0,0,0,2,0,0,0,
0,4,6,0,0,0,0,0,0,
0,0,7,3,5,0,0,2,0,
5,0,0,0,6,1,0,0,0,
0,6,0,0,0,0,0,1,0,
0,0,0,4,7,0,0,0,2,
0,9,0,0,3,5,8,0,0,
0,0,0,0,0,0,9,5,0,
0,0,0,8,0,0,0,0,4 };
int allZeros[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0 };
int hardForBruteForce[NUM_ELEMENTS_PER_BOARD] = {
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,0,8,5,
0,0,1,0,2,0,0,0,0,
0,0,0,5,0,7,0,0,0,
0,0,4,0,0,0,1,0,0,
0,9,0,0,0,0,0,0,0,
5,0,0,0,0,0,0,7,3,
0,0,2,0,1,0,0,0,0,
0,0,0,0,4,0,0,0,9
};
if (argc != 3)
{
printf("Usage: argv[1] is threads per block, argv[2] is num of blocks\n");
return 0;
}
const int threadsPerBlock = atoi(argv[1]);
const int maxBlocks = atoi(argv[2]);
printf("Threads per block: %d, num of blocks: %d \n", threadsPerBlock, maxBlocks);
printf("Easy board: \n");
printBoard(easyInputBoard);
std::cout << "Is board valid? " << (isBoardValid(easyInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Medium board: \n");
printBoard(mediumInputBoard);
std::cout << "Is medium board valid? " << (isBoardValid(mediumInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Hard board: \n");
printBoard(hardInputBoard);
std::cout << "Is hard board valid? " << (isBoardValid(hardInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Very hard board: \n");
printBoard(veryHardInputBoard);
std::cout << "Is very hard board valid? " << (isBoardValid(veryHardInputBoard) ? "yes" : "no") << std::endl;
/////////////
printf("Hard for brute force board: \n");
printBoard(hardForBruteForce);
std::cout << "Is hard for brute force board valid? " << (isBoardValid(hardForBruteForce) ? "yes" : "no") << std::endl;
/////////////
cudaError_t cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return 1;
}
runParallelSudoku(threadsPerBlock, maxBlocks, easyInputBoard, "Easy board");
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for easy board: %s\n", cudaGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, mediumInputBoard, "Medium board");
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for medium board: %s\n", cudaGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, hardInputBoard, "Hard for human board");
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for hard board: %s\n", cudaGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, veryHardInputBoard, "Very hard for human board");
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for very hard: %s\n", cudaGetErrorString(cudaStatus));
}
runParallelSudoku(threadsPerBlock, maxBlocks, allZeros, "All zeros board");
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for all zeros: %s\n", cudaGetErrorString(cudaStatus));
}
/*
runParallelSudoku(threadsPerBlock, maxBlocks, hardForBruteForce, "Hard for brute force board");
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "kernel launch failed for hard for brute force board: %s\n", cudaGetErrorString(cudaStatus));
}
*/
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
069b42b1fe3617972bda99e3d2207c26962b6f49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
//#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
__device__ void ixxxxx(const double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
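// Output layout used by these wavefunction helpers (visible from the assignments above):
// slots [0] and [1] pack the (signed) four-momentum into two complex numbers, while the
// remaining slots hold the actual wavefunction components.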
__device__ void txxxxx(const double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(const double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(const double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(const double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void FFV2_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * - cI * TMP0;
}
__device__ void FFV2_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0] * OM3 * TMP1);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] - P3[1] * OM3 *
TMP1);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5]) + cI * (F1[3] * F2[4]) - P3[2]
* OM3 * TMP1);
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - P3[3] * OM3 * TMP1 + F1[3] *
F2[5]);
}
__device__ void FFV4_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * (-1.) * (+cI * (TMP0) + 2. * cI * (TMP2));
}
__device__ void FFV4_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] + F1[5] *
F2[3]));
V3[3] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. * (TMP3)) +
(-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] + F1[5] *
F2[2]));
V3[4] = denom * 2. * cI * (OM3 * 1./2. * P3[2] * (TMP1 + 2. * (TMP3)) +
(+1./2. * cI * (F1[2] * F2[5]) - 1./2. * cI * (F1[3] * F2[4]) - cI *
(F1[4] * F2[3]) + cI * (F1[5] * F2[2])));
V3[5] = denom * 2. * cI * (OM3 * 1./2. * P3[3] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] * F2[2] +
F1[5] * F2[3]));
}
__device__ void FFV1_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP4;
TMP4 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP4;
}
__device__ void FFV1P0_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void FFV2_4_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP1, const thrust::complex<double> COUP2,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = (-1.) * (COUP2 * (+cI * (TMP0) + 2. * cI * (TMP2)) + cI * (TMP0 *
COUP1));
}
__device__ void FFV2_4_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP1, const
thrust::complex<double> COUP2, const double M3, const double W3,
thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = 1./((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] +
F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0]
* OM3 * TMP1)));
V3[3] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. *
(TMP3)) + (-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] +
F1[5] * F2[2])) - 1./2. * (COUP1 * (F1[2] * F2[5] + F1[3] * F2[4] + P3[1]
* OM3 * TMP1)));
V3[4] = denom * cI * (COUP2 * (OM3 * P3[2] * (TMP1 + 2. * (TMP3)) + (+cI *
(F1[2] * F2[5]) - cI * (F1[3] * F2[4]) - 2. * cI * (F1[4] * F2[3]) + 2. *
cI * (F1[5] * F2[2]))) + COUP1 * (+cI * (F1[2] * F2[5]) - cI * (F1[3] *
F2[4]) + P3[2] * OM3 * TMP1));
V3[5] = denom * 2. * cI * (COUP2 * (OM3 * 1./2. * P3[3] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] *
F2[2] + F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + P3[3] * OM3 *
TMP1 - F1[3] * F2[5])));
}
} // end namespace MG5_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
//#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: e+ e- > mu+ mu- WEIGHTED<=4 @1
__constant__ int cHel[16][4];
// __constant__ double cmME[4]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // coupling ?
__constant__ double cIPD[2];
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[4][3],
double &matrix)
{
using namespace MG5_sm;
thrust::complex<double> amp[2];
// Calculate wavefunctions for all processes
thrust::complex<double> w[5][6];
oxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
ixxxxx(local_mom[1], 0., cHel[ihel][1], +1, w[1]);
ixxxxx(local_mom[2], 0., cHel[ihel][2], -1, w[2]);
oxxxxx(local_mom[3], 0., cHel[ihel][3], +1, w[3]);
FFV1P0_3(w[1], w[0], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[4]);
// Amplitude(s) for diagram number 1
FFV1_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[0]);
FFV2_4_3(w[1], w[0], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), cIPD[0], cIPD[1], w[4]);
// Amplitude(s) for diagram number 2
FFV2_4_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), &amp[1]);
// double CPPProcess::matrix_1_epem_mupmum() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 1;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {1};
static const double cf[ncolor][ncolor] = {{1}};
// Calculate color flows
jamp[0] = -amp[0] - amp[1];
// Sum and square the color flows to get the matrix element
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
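// With a single color flow (ncolor == 1, cf == {{1}}, denom == {1}) the color sum above
// reduces to matrix += |jamp[0]|^2 = |amp[0] + amp[1]|^2.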
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), m_verbose(verbose), m_debug(debug),
dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1}, {-1, -1, -1, 1},
{-1, -1, 1, -1}, {-1, -1, 1, 1}, {-1, 1, -1, -1}, {-1, 1, -1, 1}, {-1, 1,
1, -1}, {-1, 1, 1, 1}, {1, -1, -1, -1}, {1, -1, -1, 1}, {1, -1, 1, -1},
{1, -1, 1, 1}, {1, 1, -1, -1}, {1, 1, -1, 1}, {1, 1, 1, -1}, {1, 1, 1,
1}};
gpuErrchk3( hipMemcpyToSymbol( cHel, tHel, ncomb * nexternal * sizeof(int) ) );
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name, m_verbose);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
if (m_verbose) {
pars->printIndependentParameters();
pars->printIndependentCouplings();
}
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_3, pars->GC_50,
pars->GC_59};
static double tIPD[2] = {pars->mdl_MZ, pars->mdl_WZ};
gpuErrchk3( hipMemcpyToSymbol( cIPC, tIPC, 3 * sizeof(thrust::complex<double> ) ) );
gpuErrchk3( hipMemcpyToSymbol( cIPD, tIPD, 2 * sizeof(double) ) );
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 4;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[2];
double local_m[4][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
for (int i = 0; i < 4; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * DIM + j * DIM + tid];
// printf(" %f ", local_m[i][j]);
}
// printf("\n");
}
// Local variables and constants
const int ncomb = 16;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1},{-1,-1,-1,1},{-1,-1,1,-1},{-1,-1,1,1},{-1,1,-1,-1},{-1,1,-1,
// 1},{-1,1,1,-1},{-1,1,1,1},{1,-1,-1,-1},{1,-1,-1,1},{1,-1,1,-1},{1,-1,1,1},{
// 1,1,-1,-1},{1,1,-1,1},{1,1,1,-1},{1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {4};
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
for (int ihel = 0; ihel < ncomb; ihel++ )
{
calculate_wavefunctions(ihel, local_m, matrix_element[0]);
}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid,
// output[i*nprocesses+tid]);
}
}
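// Since nprocesses == 1, the store above is simply output[tid]: one |M|^2 value per thread,
// i.e. one matrix element per event slot in the momenta array.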
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
| 069b42b1fe3617972bda99e3d2207c26962b6f49.cu | //==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
//#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
__device__ void ixxxxx(const double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
__device__ void txxxxx(const double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(const double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(const double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(const double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void FFV2_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * - cI * TMP0;
}
__device__ void FFV2_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0] * OM3 * TMP1);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] - P3[1] * OM3 *
TMP1);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5]) + cI * (F1[3] * F2[4]) - P3[2]
* OM3 * TMP1);
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - P3[3] * OM3 * TMP1 + F1[3] *
F2[5]);
}
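// Note on FFV2_3 (and the other FFV*_3 routines below): the first two complex
// components of each wavefunction carry the particle four-momentum, so P3[]
// is the momentum of the produced vector (up to the sign convention used
// here), and denom = COUP / (P3^2 - M3^2 + i*M3*W3) is the Breit-Wigner
// propagator factor for mass M3 and width W3.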
__device__ void FFV4_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = COUP * (-1.) * (+cI * (TMP0) + 2. * cI * (TMP2));
}
__device__ void FFV4_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] + F1[5] *
F2[3]));
V3[3] = denom * (-2. * cI) * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. * (TMP3)) +
(-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] + F1[5] *
F2[2]));
V3[4] = denom * 2. * cI * (OM3 * 1./2. * P3[2] * (TMP1 + 2. * (TMP3)) +
(+1./2. * cI * (F1[2] * F2[5]) - 1./2. * cI * (F1[3] * F2[4]) - cI *
(F1[4] * F2[3]) + cI * (F1[5] * F2[2])));
V3[5] = denom * 2. * cI * (OM3 * 1./2. * P3[3] * (TMP1 + 2. * (TMP3)) +
(+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] * F2[2] +
F1[5] * F2[3]));
}
__device__ void FFV1_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP4;
TMP4 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP4;
}
__device__ void FFV1P0_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void FFV2_4_0(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP1, const thrust::complex<double> COUP2,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP2;
TMP2 = (F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])));
TMP0 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])));
(*vertex) = (-1.) * (COUP2 * (+cI * (TMP0) + 2. * cI * (TMP2)) + cI * (TMP0 *
COUP1));
}
__device__ void FFV2_4_3(const thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP1, const
thrust::complex<double> COUP2, const double M3, const double W3,
thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double OM3;
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP3;
thrust::complex<double> denom;
OM3 = 0.;
if (M3 != 0.)
OM3 = 1./(M3 * M3);
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
TMP1 = (F1[2] * (F2[4] * (P3[0] + P3[3]) + F2[5] * (P3[1] + cI * (P3[2]))) +
F1[3] * (F2[4] * (P3[1] - cI * (P3[2])) + F2[5] * (P3[0] - P3[3])));
TMP3 = (F1[4] * (F2[2] * (P3[0] - P3[3]) - F2[3] * (P3[1] + cI * (P3[2]))) +
F1[5] * (F2[2] * (-P3[1] + cI * (P3[2])) + F2[3] * (P3[0] + P3[3])));
denom = 1./((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[0] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4] + F1[3] * F2[5]) + F1[4] * F2[2] +
F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + F1[3] * F2[5] - P3[0]
* OM3 * TMP1)));
V3[3] = denom * (-2. * cI) * (COUP2 * (OM3 * - 1./2. * P3[1] * (TMP1 + 2. *
(TMP3)) + (-1./2. * (F1[2] * F2[5] + F1[3] * F2[4]) + F1[4] * F2[3] +
F1[5] * F2[2])) - 1./2. * (COUP1 * (F1[2] * F2[5] + F1[3] * F2[4] + P3[1]
* OM3 * TMP1)));
V3[4] = denom * cI * (COUP2 * (OM3 * P3[2] * (TMP1 + 2. * (TMP3)) + (+cI *
(F1[2] * F2[5]) - cI * (F1[3] * F2[4]) - 2. * cI * (F1[4] * F2[3]) + 2. *
cI * (F1[5] * F2[2]))) + COUP1 * (+cI * (F1[2] * F2[5]) - cI * (F1[3] *
F2[4]) + P3[2] * OM3 * TMP1));
V3[5] = denom * 2. * cI * (COUP2 * (OM3 * 1./2. * P3[3] * (TMP1 + 2. *
(TMP3)) + (+1./2. * (F1[2] * F2[4]) - 1./2. * (F1[3] * F2[5]) - F1[4] *
F2[2] + F1[5] * F2[3])) + 1./2. * (COUP1 * (F1[2] * F2[4] + P3[3] * OM3 *
TMP1 - F1[3] * F2[5])));
}
} // end namespace $(namespace)s_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
//#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: e+ e- > mu+ mu- WEIGHTED<=4 @1
__constant__ int cHel[16][4];
// __constant__ double cmME[4]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // coupling ?
__constant__ double cIPD[2];
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[4][3],
double &matrix)
{
using namespace MG5_sm;
thrust::complex<double> amp[2];
// Calculate wavefunctions for all processes
thrust::complex<double> w[5][6];
oxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
ixxxxx(local_mom[1], 0., cHel[ihel][1], +1, w[1]);
ixxxxx(local_mom[2], 0., cHel[ihel][2], -1, w[2]);
oxxxxx(local_mom[3], 0., cHel[ihel][3], +1, w[3]);
FFV1P0_3(w[1], w[0], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[4]);
// Amplitude(s) for diagram number 1
FFV1_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&[0]);
FFV2_4_3(w[1], w[0], thrust::complex<double> (cIPC[2], cIPC[3]),
thrust::complex<double> (cIPC[4], cIPC[5]), cIPD[0], cIPD[1], w[4]);
// Amplitude(s) for diagram number 2
FFV2_4_0(w[2], w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
      thrust::complex<double> (cIPC[4], cIPC[5]), &amp[1]);
// double CPPProcess::matrix_1_epem_mupmum() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 1;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {1};
static const double cf[ncolor][ncolor] = {{1}};
// Calculate color flows
jamp[0] = -amp[0] - amp[1];
// Sum and square the color flows to get the matrix element
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
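  // With a single colour flow (ncolor = 1, cf = {{1}}, denom = {1}) the loop
  // above reduces to matrix += |jamp[0]|^2 = |amp[0] + amp[1]|^2, i.e. the
  // coherent sum of the two diagrams (photon and Z exchange) squared.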
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), m_verbose(verbose), m_debug(debug),
dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1}, {-1, -1, -1, 1},
{-1, -1, 1, -1}, {-1, -1, 1, 1}, {-1, 1, -1, -1}, {-1, 1, -1, 1}, {-1, 1,
1, -1}, {-1, 1, 1, 1}, {1, -1, -1, -1}, {1, -1, -1, 1}, {1, -1, 1, -1},
{1, -1, 1, 1}, {1, 1, -1, -1}, {1, 1, -1, 1}, {1, 1, 1, -1}, {1, 1, 1,
1}};
gpuErrchk3( cudaMemcpyToSymbol( cHel, tHel, ncomb * nexternal * sizeof(int) ) );
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name, m_verbose);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
if (m_verbose) {
pars->printIndependentParameters();
pars->printIndependentCouplings();
}
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_3, pars->GC_50,
pars->GC_59};
static double tIPD[2] = {pars->mdl_MZ, pars->mdl_WZ};
gpuErrchk3( cudaMemcpyToSymbol( cIPC, tIPC, 3 * sizeof(thrust::complex<double> ) ) );
gpuErrchk3( cudaMemcpyToSymbol( cIPD, tIPD, 2 * sizeof(double) ) );
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 4;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[2];
double local_m[4][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
for (int i = 0; i < 4; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * DIM + j * DIM + tid];
// printf(" %f ", local_m[i][j]);
}
// printf("\n");
}
// Local variables and constants
const int ncomb = 16;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1},{-1,-1,-1,1},{-1,-1,1,-1},{-1,-1,1,1},{-1,1,-1,-1},{-1,1,-1,
// 1},{-1,1,1,-1},{-1,1,1,1},{1,-1,-1,-1},{1,-1,-1,1},{1,-1,1,-1},{1,-1,1,1},{
// 1,1,-1,-1},{1,1,-1,1},{1,1,1,-1},{1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {4};
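  // The factor 4 is the spin average over the two helicity states of each of
  // the two incoming leptons (2 x 2); there is no colour average for leptons.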
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
for (int ihel = 0; ihel < ncomb; ihel++ )
{
calculate_wavefunctions(ihel, local_m, matrix_element[0]);
}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid,
// output[i*nprocesses+tid]);
}
}
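// A minimal, hypothetical launch sketch (d_momenta and d_output are
// illustrative names, not part of this file). With DIM = gridDim.x * blockDim.x
// events per launch, allmomenta is read as 4 * 3 * DIM doubles (component j of
// particle i for event tid at index i * 3 * DIM + j * DIM + tid) and output
// receives one |M|^2 value per event; cHel, cIPC and cIPD must already have
// been copied to constant memory by CPPProcess / initProc:
//   double *d_momenta = nullptr, *d_output = nullptr;
//   cudaMalloc(&d_momenta, 4 * 3 * DIM * sizeof(double));
//   cudaMalloc(&d_output, DIM * sizeof(double));
//   sigmaKin<<<gpu_nblocks, gpu_nthreads>>>(d_momenta, d_output);
//   cudaDeviceSynchronize();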
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
|
63447a6d77104ce73075baea22e04463e192eff3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
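// Example: for N = 2500 elements GET_BLOCKS returns (2500 + 1023) / 1024 = 3
// blocks of CUDA_NUM_THREADS (1024) threads each, capped at kMaxGridNum.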
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
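// The four bilinear weights above satisfy w1 + w2 + w3 + w4 = 1; neighbours
// that fall outside the image are simply treated as zero-valued samples.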
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
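// get_gradient_weight returns the bilinear weight that input pixel (h, w)
// contributes to the fractional sampling point (argmax_h, argmax_w); it is the
// adjoint of deformable_im2col_bilinear and is used to scatter gradients back
// into grad_im in the col2im kernel below.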
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
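// get_coordinate_weight returns the derivative of the bilinear sample with
// respect to the sampling coordinate: bp_dir == 0 gives d(val)/d(argmax_h) and
// bp_dir == 1 gives d(val)/d(argmax_w); it is used when back-propagating into
// the offset field.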
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
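// A sketch of the tensor shapes implied by the kernel indexing above (an
// assumption about the caller, not something enforced in this file):
//   data_im:     (parallel_imgs, channels, height, width)
//   data_offset: (parallel_imgs, deformable_group * 2 * ksize_h * ksize_w,
//                 height_col, width_col)
//   data_col:    (channels * ksize_h * ksize_w,
//                 parallel_imgs * height_col * width_col)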
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
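// Compared with deformable_im2col above, the modulated (DCNv2) variant also
// reads a per-sample mask; the kernel indexing implies (again as an assumption
// about the caller) a data_mask shape of
// (batch_size, deformable_group * kernel_h * kernel_w, height_col, width_col),
// and each sampled value is multiplied by its mask before being written to
// data_col.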
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
        kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
}
| 63447a6d77104ce73075baea22e04463e192eff3.cu | /*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
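// Backward pass for im2col: each column-matrix element scatters its gradient back to the
// input image. The sampled location is fractional, so the gradient is spread over the
// neighbouring integer pixels with bilinear weights and accumulated via atomicAdd.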
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
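// Gradient w.r.t. the offset field: one thread per offset value. bp_dir selects the h- or
// w-component, and the contribution of every input channel in the deformable group is
// accumulated through get_coordinate_weight.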
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
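// Report kernel launch failures, matching the checks in the other launchers.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
}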
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_im_ = grad_im.data<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data<scalar_t>();
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
|
9eec7feab7733abfc73f8250d47ab890bba1e44a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include <iostream>
#include "Timer.h"
using namespace std;
#include <stdio.h>
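// One thread per candidate c = a + i (tested only while c <= b): trial division by 2..6,
// then by odd divisors from 7 up to sqrt(c). arr[i] is set to 0 for composites and to c for primes.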
__global__ void findPrimes(const int a, const int b, int* arr)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
const int c = i + a;
bool go = true;
if (c - 1 < b)
{
for (int k = 2; k < 7; k++)
{
if (c % k == 0)
{
arr[i] = 0;
go = false;
}
}
for (int j = 7; j < sqrtf(c) + 1 && go; j+=2)
{
if (c % j == 0)
{
arr[i] = 0;
go = false;
}
}
}
if ((go && c - 1 < b) || c == 2 || c == 3 || c == 5)
{
arr[i] = c;
}
}
int printPrimes(int* arr, int size)
{
int k = 0;
for (int i = 0; i < size; i++)
{
if (arr[i] != 0)
{
k++;
//printf("%d ", arr[i]);
}
}
return k;
}
int firstPrime(int* arr, int size)
{
int i = 0;
while (arr[i] == 0)
{
i++;
}
return arr[i];
}
int lastPrime(int* arr, int size)
{
int i = size - 1;
while (arr[i] == 0)
{
i--;
}
return arr[i];
}
int main()
{
int a;
int b;
int comps;
cout << "Lower bound: ";
cin >> a;
cout << "Upper bound: ";
cin >> b;
cout << "Threads per block: ";
cin >> comps;
Timer h;
const int size = sizeof(int) * (b - a + 1);
int* h_arr = (int*)malloc(size);
int* d_arr;
hipMalloc(&d_arr, size);
hipMemcpy(d_arr, h_arr, size, hipMemcpyHostToDevice);
findPrimes << <((b - a + 1) / comps) + 1, comps >> > (a, b, d_arr);
//findPrimes << <gridSize, blockSize >> > (a, b, d_arr);
hipMemcpy(h_arr, d_arr, size, hipMemcpyDeviceToHost);
hipFree(d_arr);
int numberOfPrimes = printPrimes(h_arr, b - a + 1);
cout << "\nThere are " << numberOfPrimes << " primes" << "\n" <<
"First prime: " << firstPrime(h_arr, b - a + 1) <<
"\n Last prime: " << lastPrime(h_arr, b - a + 1);
free(h_arr);
cout << "\nTime passed:" << h.elapsed() <<
"seconds. \n Blocks * threads: " <<
(b - a + 1) / comps << " * " << comps;
} | 9eec7feab7733abfc73f8250d47ab890bba1e44a.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include <iostream>
#include "Timer.h"
using namespace std;
#include <stdio.h>
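// One thread per candidate c = a + i (tested only while c <= b): trial division by 2..6,
// then by odd divisors from 7 up to sqrt(c). arr[i] is set to 0 for composites and to c for primes.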
__global__ void findPrimes(const int a, const int b, int* arr)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
const int c = i + a;
bool go = true;
if (c - 1 < b)
{
for (int k = 2; k < 7; k++)
{
if (c % k == 0)
{
arr[i] = 0;
go = false;
}
}
for (int j = 7; j < sqrtf(c) + 1 && go; j+=2)
{
if (c % j == 0)
{
arr[i] = 0;
go = false;
}
}
}
if ((go && c - 1 < b) || c == 2 || c == 3 || c == 5)
{
arr[i] = c;
}
}
int printPrimes(int* arr, int size)
{
int k = 0;
for (int i = 0; i < size; i++)
{
if (arr[i] != 0)
{
k++;
//printf("%d ", arr[i]);
}
}
return k;
}
int firstPrime(int* arr, int size)
{
int i = 0;
while (arr[i] == 0)
{
i++;
}
return arr[i];
}
int lastPrime(int* arr, int size)
{
int i = size - 1;
while (arr[i] == 0)
{
i--;
}
return arr[i];
}
int main()
{
int a;
int b;
int comps;
cout << "Lower bound: ";
cin >> a;
cout << "Upper bound: ";
cin >> b;
cout << "Threads per block: ";
cin >> comps;
Timer h;
const int size = sizeof(int) * (b - a + 1);
int* h_arr = (int*)malloc(size);
int* d_arr;
cudaMalloc(&d_arr, size);
cudaMemcpy(d_arr, h_arr, size, cudaMemcpyHostToDevice);
findPrimes << <((b - a + 1) / comps) + 1, comps >> > (a, b, d_arr);
//findPrimes << <gridSize, blockSize >> > (a, b, d_arr);
cudaMemcpy(h_arr, d_arr, size, cudaMemcpyDeviceToHost);
cudaFree(d_arr);
int numberOfPrimes = printPrimes(h_arr, b - a + 1);
cout << "\nThere are " << numberOfPrimes << " primes" << "\n" <<
"First prime: " << firstPrime(h_arr, b - a + 1) <<
"\n Last prime: " << lastPrime(h_arr, b - a + 1);
free(h_arr);
cout << "\nTime passed:" << h.elapsed() <<
"seconds. \n Blocks * threads: " <<
(b - a + 1) / comps << " * " << comps;
} |
8c5ead10f6b36edf16e34527c5eaa0c3f18f6952.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
#include <cutil_math.h>
__device__ int DIM;
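// Ping-pong height-field update: average the (up to four) neighbouring heights from the other
// buffer, take twice that average minus the value currently stored in this buffer, and damp the
// result by 1/32 per step.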
__global__ void updateWaveMapGPU2( float3* dev_newWave, float3* dev_oldWave){
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
while (tid < (DIM* DIM)){
int y = int(tid / DIM);
int x = tid % DIM;
int up = (x - 1) + (y * DIM);
int down = (x + 1) + (y * DIM);
int leftp = x + ((y - 1)* DIM);
int rightp = x + ((y + 1)* DIM);
float n = 0.0f;
int no=0;
if (x-1 >= 0) {
n += dev_oldWave[up].y;
no++;
}
if (x + 1 < DIM) {
n += dev_oldWave[down].y;
no++;
}
if (y-1 >= 0) {
n += dev_oldWave[leftp].y;
no++;
}
if (y+1 < DIM) {
no++;
n += dev_oldWave[rightp].y;
}
n = n/(float)no;
n = (n*2) - dev_newWave[tid].y;
n = n - ((n/32.0f));
dev_newWave[tid].y = n;
tid += gridDim.x*blockDim.x;
}
}
void updateWaveMapGPU1(float3* dev_newWave, float3* dev_oldWave){
hipLaunchKernelGGL(( updateWaveMapGPU2), dim3(50),dim3(50) , 0, 0, dev_newWave,dev_oldWave);
}
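// Rebuild a smoothed per-vertex normal: sum the face normals (edge cross products) of the
// surrounding triangles in the 6-neighbour grid triangulation sketched below, then normalise.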
__global__ void updateNormalsGPU2( float3* dev_vertices, float3* dev_normals){
int nIndex;
int tid= threadIdx.x + blockIdx.x * blockDim.x;
while (tid < (DIM* DIM)){
/*
1 2
*--*--*
| /| /|
6 *--*--* 3
| /| /|
*--*--*
5 4
*/
nIndex = tid;
int d = (int)(nIndex/ DIM);
int v4Index = (nIndex - 1);
int v3Index = (nIndex - DIM);
int v2Index = (nIndex - DIM + 1);
int v1Index = (nIndex + 1);
int v6Index = (nIndex + DIM);
int v5Index = (nIndex + DIM - 1);
float3 vertex = dev_vertices[nIndex];
float3 newNormal = make_float3(0,0,0);
if ((v1Index/ DIM) == d){
float3 v1 = dev_vertices[v1Index];
float3 s1 = v1 - vertex;
if ((v2Index >= 0)&&(d > 0)&&((v2Index/ DIM) == d - 1)){
float3 v2 = dev_vertices[v2Index];
float3 s2 = v2 - vertex;
newNormal = newNormal + cross(s2,s1);
}
if (v6Index < DIM){
float3 v6 = dev_vertices[v6Index];
float3 s6 = v6 - vertex;
newNormal = newNormal + cross(s1,s6);
}
}
if ((v4Index >= 0)&&((v4Index/ DIM) == d)){
float3 v4 = dev_vertices[v4Index];
float3 s4 = v4 - vertex;
if ((v5Index/ DIM) < DIM){
float3 v5 = dev_vertices[v5Index];
float3 s5 = v5 - vertex;
newNormal = newNormal + cross(s5,s4);
}
if ((v3Index >= 0)&&(d > 0)){
float3 v3 = dev_vertices[v3Index];
float3 s3 = v3 - vertex;
newNormal = newNormal + cross(s4,s3);
}
}
dev_normals[nIndex] = normalize(newNormal);
tid += gridDim.x*blockDim.x;
}
}
void updateNormalsGPU1(float3* dev_vertices, float3* dev_normals){
hipLaunchKernelGGL(( updateNormalsGPU2), dim3(50),dim3(50), 0, 0, dev_vertices,dev_normals);
}
| 8c5ead10f6b36edf16e34527c5eaa0c3f18f6952.cu |
#include <cuda.h>
#include <cutil_inline.h>
#include <cutil_math.h>
__device__ int DIM;
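// Ping-pong height-field update: average the (up to four) neighbouring heights from the other
// buffer, take twice that average minus the value currently stored in this buffer, and damp the
// result by 1/32 per step.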
__global__ void updateWaveMapGPU2( float3* dev_newWave, float3* dev_oldWave){
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
while (tid < (DIM* DIM)){
int y = int(tid / DIM);
int x = tid % DIM;
int up = (x - 1) + (y * DIM);
int down = (x + 1) + (y * DIM);
int leftp = x + ((y - 1)* DIM);
int rightp = x + ((y + 1)* DIM);
float n = 0.0f;
int no=0;
if (x-1 >= 0) {
n += dev_oldWave[up].y;
no++;
}
if (x + 1 < DIM) {
n += dev_oldWave[down].y;
no++;
}
if (y-1 >= 0) {
n += dev_oldWave[leftp].y;
no++;
}
if (y+1 < DIM) {
no++;
n += dev_oldWave[rightp].y;
}
n = n/(float)no;
n = (n*2) - dev_newWave[tid].y;
n = n - ((n/32.0f));
dev_newWave[tid].y = n;
tid += gridDim.x*blockDim.x;
}
}
void updateWaveMapGPU1(float3* dev_newWave, float3* dev_oldWave){
updateWaveMapGPU2<<< 50,50 >>>(dev_newWave,dev_oldWave);
}
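// Rebuild a smoothed per-vertex normal: sum the face normals (edge cross products) of the
// surrounding triangles in the 6-neighbour grid triangulation sketched below, then normalise.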
__global__ void updateNormalsGPU2( float3* dev_vertices, float3* dev_normals){
int nIndex;
int tid= threadIdx.x + blockIdx.x * blockDim.x;
while (tid < (DIM* DIM)){
/*
1 2
*--*--*
| /| /|
6 *--*--* 3
| /| /|
*--*--*
5 4
*/
nIndex = tid;
int d = (int)(nIndex/ DIM);
int v4Index = (nIndex - 1);
int v3Index = (nIndex - DIM);
int v2Index = (nIndex - DIM + 1);
int v1Index = (nIndex + 1);
int v6Index = (nIndex + DIM);
int v5Index = (nIndex + DIM - 1);
float3 vertex = dev_vertices[nIndex];
float3 newNormal = make_float3(0,0,0);
if ((v1Index/ DIM) == d){
float3 v1 = dev_vertices[v1Index];
float3 s1 = v1 - vertex;
if ((v2Index >= 0)&&(d > 0)&&((v2Index/ DIM) == d - 1)){
float3 v2 = dev_vertices[v2Index];
float3 s2 = v2 - vertex;
newNormal = newNormal + cross(s2,s1);
}
if (v6Index < DIM){
float3 v6 = dev_vertices[v6Index];
float3 s6 = v6 - vertex;
newNormal = newNormal + cross(s1,s6);
}
}
if ((v4Index >= 0)&&((v4Index/ DIM) == d)){
float3 v4 = dev_vertices[v4Index];
float3 s4 = v4 - vertex;
if ((v5Index/ DIM) < DIM){
float3 v5 = dev_vertices[v5Index];
float3 s5 = v5 - vertex;
newNormal = newNormal + cross(s5,s4);
}
if ((v3Index >= 0)&&(d > 0)){
float3 v3 = dev_vertices[v3Index];
float3 s3 = v3 - vertex;
newNormal = newNormal + cross(s4,s3);
}
}
dev_normals[nIndex] = normalize(newNormal);
tid += gridDim.x*blockDim.x;
}
}
void updateNormalsGPU1(float3* dev_vertices, float3* dev_normals){
updateNormalsGPU2<<<50,50>>>(dev_vertices,dev_normals);
}
|
d0d526b5e5ee6553a331b67264750aab47237999.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void to_pbo_kernel1(unsigned char* g_in, int stride_in, uchar4* g_out, int stride_out, int width, int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x<width && y<height)
{
unsigned char value = g_in[y*stride_in+x];
g_out[y*stride_out+x] = make_uchar4(value, value, value, 1);
}
} | d0d526b5e5ee6553a331b67264750aab47237999.cu | #include "includes.h"
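// Copy an 8-bit single-channel image into an RGBA pixel buffer: each grey value is replicated
// into the r, g and b channels of a uchar4 (alpha is set to 1).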
__global__ void to_pbo_kernel1(unsigned char* g_in, int stride_in, uchar4* g_out, int stride_out, int width, int height)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x<width && y<height)
{
unsigned char value = g_in[y*stride_in+x];
g_out[y*stride_out+x] = make_uchar4(value, value, value, 1);
}
} |